Commit 40d59faa by David Ormsbee

Merge pull request #1330 from MITx/diana/open-ended-ui-updates

Rubric Integration and other UI improvements
parents 0b9262a4 1d4b674c
...@@ -21,6 +21,8 @@ from .xml_module import XmlDescriptor
from xmodule.modulestore import Location
import self_assessment_module
import open_ended_module
from combined_open_ended_rubric import CombinedOpenEndedRubric, RubricParsingError
from .stringify import stringify_children
log = logging.getLogger("mitx.courseware")
...@@ -138,12 +140,19 @@ class CombinedOpenEndedModule(XModule):
# completion (doesn't matter if you self-assessed correct/incorrect).
self._max_score = int(self.metadata.get('max_score', MAX_SCORE))
rubric_renderer = CombinedOpenEndedRubric(system, True)
try:
rubric_feedback = rubric_renderer.render_rubric(stringify_children(definition['rubric']))
except RubricParsingError:
log.error("Failed to parse rubric in location: {1}".format(location))
raise
#Static data is passed to the child modules to render
self.static_data = {
'max_score': self._max_score,
'max_attempts': self.max_attempts,
'prompt': definition['prompt'],
'rubric': definition['rubric'],
'display_name': self.display_name
}
self.task_xml = definition['task_xml']
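The constructor change above renders the rubric once at load time purely to validate it, logging and re-raising on bad markup. A minimal sketch of that fail-fast pattern, with a hypothetical `render_rubric` callable standing in for the real `CombinedOpenEndedRubric` wiring:

```python
import logging

log = logging.getLogger("mitx.courseware")

class RubricParsingError(Exception):
    pass

def validate_rubric(render_rubric, rubric_xml, location):
    """Render once at module init so malformed rubrics fail at load time."""
    try:
        return render_rubric(rubric_xml)
    except RubricParsingError:
        # Record which module location carried the bad rubric, then re-raise.
        log.error("Failed to parse rubric in location: {0}".format(location))
        raise
```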
...@@ -295,6 +304,7 @@ class CombinedOpenEndedModule(XModule):
'task_count': len(self.task_xml),
'task_number': self.current_task_number + 1,
'status': self.get_status(),
'display_name': self.display_name
}
return context
...
...@@ -3,20 +3,37 @@ from lxml import etree
log = logging.getLogger(__name__)
class RubricParsingError(Exception):
pass
class CombinedOpenEndedRubric(object):
def __init__(self, system, view_only=False):
self.has_score = False
self.view_only = view_only
self.system = system
def render_rubric(self, rubric_xml):
'''
render_rubric: takes in an xml string and outputs the corresponding
html for that xml, given the type of rubric we're generating
Input:
rubric_xml: an unparsed xml string that
represents this particular rubric
Output:
html: the html that corresponds to the xml given
'''
try:
rubric_categories = self.extract_categories(rubric_xml)
html = self.system.render_template('open_ended_rubric.html',
{'categories' : rubric_categories,
'has_score': self.has_score,
'view_only': self.view_only})
except:
raise RubricParsingError("[render_rubric] Could not parse the rubric with xml: {0}".format(rubric_xml))
return html
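For context, a rough usage sketch of the class after this change. The `system` argument is the XModule runtime that provides `render_template`; here it is stubbed out, so treat the snippet as an assumption-laden illustration rather than the real wiring:

```python
class StubSystem(object):
    """Stand-in for the ModuleSystem: just enough for render_rubric()."""
    def render_template(self, template_name, context):
        # The real system renders open_ended_rubric.html via Mako; this stub
        # only shows what the renderer passes in.
        return "<!-- {0}: {1} categories, view_only={2} -->".format(
            template_name, len(context['categories']), context['view_only'])

renderer = CombinedOpenEndedRubric(StubSystem(), view_only=True)
html = renderer.render_rubric(
    "<rubric><category><description>Purpose</description>"
    "<option points='0'>No product</option>"
    "<option points='1'>Clear purpose</option></category></rubric>")
```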
def extract_categories(self, element):
'''
Construct a list of categories such that the structure looks like:
[ { category: "Category 1 Name",
...@@ -28,17 +45,18 @@ class CombinedOpenEndedRubric(object):
{text: "Option 3 Name", points: 2}]}]
'''
if isinstance(element, basestring):
element = etree.fromstring(element)
categories = []
for category in element:
if category.tag != 'category':
raise RubricParsingError("[extract_categories] Expected a <category> tag: got {0} instead".format(category.tag))
else:
categories.append(self.extract_category(category))
return categories
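extract_categories expects the rubric markup to be a tree of `<category>` elements, each holding a `<description>`, an optional `<score>` (present once the submission has been graded), and one or more `<option>` tags. A hypothetical fragment in that shape:

```python
# Illustrative only; real rubrics are authored in course content.
rubric_xml = """
<rubric>
    <category>
        <description>Purpose</description>
        <option points="0">No product</option>
        <option points="1">Unclear purpose or main idea</option>
        <option points="2">Communicates an identifiable purpose</option>
    </category>
</rubric>
"""
```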
def extract_category(self, category):
'''
construct an individual category
{category: "Category 1 Name",
...@@ -47,42 +65,33 @@ class CombinedOpenEndedRubric(object):
all sorting and auto-point generation occurs in this function
'''
descriptionxml = category[0]
optionsxml = category[1:]
scorexml = category[1]
score = None
if scorexml.tag == 'score':
score_text = scorexml.text
optionsxml = category[2:]
score = int(score_text)
self.has_score = True
# if we are missing the score tag and we are expecting one
elif self.has_score:
raise RubricParsingError("[extract_category] Category {0} is missing a score".format(descriptionxml.text))
# parse description
if descriptionxml.tag != 'description':
raise RubricParsingError("[extract_category]: expected description tag, got {0} instead".format(descriptionxml.tag))
description = descriptionxml.text
cur_points = 0
options = []
autonumbering = True
# parse options
for option in optionsxml:
if option.tag != 'option':
raise RubricParsingError("[extract_category]: expected option tag, got {0} instead".format(option.tag))
else:
pointstr = option.get("points")
if pointstr:
...@@ -91,25 +100,24 @@ class CombinedOpenEndedRubric(object):
try:
points = int(pointstr)
except ValueError:
raise RubricParsingError("[extract_category]: expected points to have int, got {0} instead".format(pointstr))
elif autonumbering:
# use the generated one if we're in the right mode
points = cur_points
cur_points = cur_points + 1
else:
raise Exception("[extract_category]: missing points attribute. Cannot continue to auto-create points values after a points value is explicitly defined.")
selected = score == points
optiontext = option.text
options.append({'text': option.text, 'points': points, 'selected': selected})
# sort and check for duplicates
options = sorted(options, key=lambda option: option['points'])
CombinedOpenEndedRubric.validate_options(options)
return {'description': description, 'options': options}
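Roughly, a graded category now comes back in this shape, with `selected` marking the option whose points match the category's `<score>` (values below are illustrative, not taken from the diff):

```python
# Hypothetical return value of extract_category() for a graded category.
category = {
    'description': 'Purpose',
    'options': [
        {'text': 'No product', 'points': 0, 'selected': False},
        {'text': 'Unclear purpose or main idea', 'points': 1, 'selected': True},
    ],
}
```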
@staticmethod
def validate_options(options):
...@@ -117,12 +125,12 @@ class CombinedOpenEndedRubric(object):
Validates a set of options. This can and should be extended to filter out other bad edge cases
'''
if len(options) == 0:
raise RubricParsingError("[extract_category]: no options associated with this category")
if len(options) == 1:
return
prev = options[0]['points']
for option in options[1:]:
if prev == option['points']:
raise RubricParsingError("[extract_category]: found duplicate point values between two different options")
else:
prev = option['points']
...@@ -20,6 +20,7 @@ h2 {
color: darken($error-red, 10%);
}
section.problem {
@media print {
display: block;
...@@ -756,4 +757,49 @@ section.problem {
}
}
}
.rubric {
tr {
margin:10px 0px;
height: 100%;
}
td {
padding: 20px 0px;
margin: 10px 0px;
height: 100%;
}
th {
padding: 5px;
margin: 5px;
}
label,
.view-only {
margin:3px;
position: relative;
padding: 15px;
width: 150px;
height:100%;
display: inline-block;
min-height: 50px;
min-width: 50px;
background-color: #CCC;
font-size: .9em;
}
.grade {
position: absolute;
bottom:0px;
right:0px;
margin:10px;
}
.selected-grade {
background: #666;
color: white;
}
input[type=radio]:checked + label {
background: #666;
color: white; }
input[class='score-selection'] {
display: none;
}
}
}
...@@ -37,9 +37,13 @@ section.combined-open-ended {
.result-container
{
float:left;
width: 100%;
position:relative;
}
h4
{
margin-bottom:10px;
}
}
section.combined-open-ended-status {
...@@ -49,15 +53,19 @@ section.combined-open-ended-status {
color: #2C2C2C;
font-family: monospace;
font-size: 1em;
padding: 10px;
.show-results {
margin-top: .3em;
text-align:right;
}
.show-results-button {
font: 1em monospace;
}
}
.statusitem-current {
background-color: #d4d4d4;
color: #222;
}
span {
...@@ -93,6 +101,7 @@ section.combined-open-ended-status {
div.result-container {
.evaluation {
p {
margin-bottom: 1px;
}
...@@ -104,6 +113,7 @@ div.result-container {
}
.evaluation-response {
margin-bottom: 10px;
header {
text-align: right;
a {
...@@ -134,6 +144,7 @@ div.result-container {
}
.external-grader-message {
margin-bottom: 5px;
section {
padding-left: 20px;
background-color: #FAFAFA;
...@@ -141,6 +152,7 @@ div.result-container {
font-family: monospace;
font-size: 1em;
padding-top: 10px;
padding-bottom:30px;
header {
font-size: 1.4em;
}
...@@ -221,12 +233,13 @@ div.result-container {
div.result-container, section.open-ended-child {
.rubric {
margin-bottom:25px;
tr {
margin:10px 0px;
height: 100%;
}
td {
padding: 20px 0px 25px 0px;
margin: 10px 0px;
height: 100%;
}
...@@ -236,16 +249,16 @@ div.result-container, section.open-ended-child {
}
label,
.view-only {
margin:2px;
position: relative;
padding: 10px 15px 25px 15px;
width: 145px;
height:100%;
display: inline-block;
min-height: 50px;
min-width: 50px;
background-color: #CCC;
font-size: .85em;
}
.grade {
position: absolute;
...@@ -257,12 +270,6 @@ div.result-container, section.open-ended-child {
background: #666;
color: white;
}
}
}
...@@ -461,7 +468,6 @@ section.open-ended-child {
p {
line-height: 20px;
margin-bottom: 0;
float: left;
}
...@@ -598,13 +604,15 @@ section.open-ended-child {
}
}
div.open-ended-alert,
.save_message {
padding: 8px 12px;
border: 1px solid #EBE8BF;
border-radius: 3px;
background: #FFFCDD;
font-size: 0.9em;
margin-top: 10px;
margin-bottom:5px;
}
div.capa_reset {
...@@ -623,4 +631,31 @@ section.open-ended-child {
font-size: 0.9em;
}
.assessment-container {
margin: 40px 0px 30px 0px;
.scoring-container
{
p
{
margin-bottom: 1em;
}
label {
margin: 10px;
padding: 5px;
display: inline-block;
min-width: 50px;
background-color: #CCC;
font-size: 1.5em;
}
input[type=radio]:checked + label {
background: #666;
color: white;
}
input[class='grade-selection'] {
display: none;
}
}
}
}
...@@ -9,20 +9,34 @@ class @Collapsible
###
el: container
###
# standard longform + shortfom pattern
el.find('.longform').hide()
el.find('.shortform').append('<a href="#" class="full">See full output</a>')
# custom longform + shortform text pattern
short_custom = el.find('.shortform-custom')
# set up each one individually
short_custom.each (index, elt) =>
open_text = $(elt).data('open-text')
close_text = $(elt).data('close-text')
$(elt).append("<a href='#' class='full-custom'>"+ open_text + "</a>")
$(elt).find('.full-custom').click (event) => @toggleFull(event, open_text, close_text)
# collapsible pattern
el.find('.collapsible header + section').hide()
# set up triggers
el.find('.full').click (event) => @toggleFull(event, "See full output", "Hide output")
el.find('.collapsible header a').click @toggleHint
@toggleFull: (event, open_text, close_text) =>
event.preventDefault()
$(event.target).parent().siblings().slideToggle()
$(event.target).parent().parent().toggleClass('open')
if $(event.target).text() == open_text
new_text = close_text
else
new_text = open_text
$(event.target).text(new_text)
@toggleHint: (event) =>
...
...@@ -109,7 +109,8 @@ class @CombinedOpenEnded
@reset_button.hide()
@next_problem_button.hide()
@hint_area.attr('disabled', false)
if @child_state == 'done'
@rubric_wrapper.hide()
if @child_type=="openended" if @child_type=="openended"
@skip_button.hide() @skip_button.hide()
if @allow_reset=="True" if @allow_reset=="True"
...@@ -139,6 +140,7 @@ class @CombinedOpenEnded
else
@submit_button.click @message_post
else if @child_state == 'done'
@rubric_wrapper.hide()
@answer_area.attr("disabled", true) @answer_area.attr("disabled", true)
@hint_area.attr('disabled', true) @hint_area.attr('disabled', true)
@submit_button.hide() @submit_button.hide()
...@@ -151,7 +153,7 @@ class @CombinedOpenEnded ...@@ -151,7 +153,7 @@ class @CombinedOpenEnded
find_assessment_elements: -> find_assessment_elements: ->
@assessment = @$('select.assessment') @assessment = @$('input[name="grade-selection"]')
find_hint_elements: ->
@hint_area = @$('textarea.post_assessment')
...@@ -163,6 +165,7 @@ class @CombinedOpenEnded
$.postWithPrefix "#{@ajax_url}/save_answer", data, (response) =>
if response.success
@rubric_wrapper.html(response.rubric_html)
@rubric_wrapper.show()
@child_state = 'assessing'
@find_assessment_elements()
@rebind()
...@@ -174,7 +177,8 @@ class @CombinedOpenEnded
save_assessment: (event) =>
event.preventDefault()
if @child_state == 'assessing'
checked_assessment = @$('input[name="grade-selection"]:checked')
data = {'assessment' : checked_assessment.val()}
$.postWithPrefix "#{@ajax_url}/save_assessment", data, (response) => $.postWithPrefix "#{@ajax_url}/save_assessment", data, (response) =>
if response.success if response.success
@child_state = response.state @child_state = response.state
...@@ -183,6 +187,7 @@ class @CombinedOpenEnded ...@@ -183,6 +187,7 @@ class @CombinedOpenEnded
@hint_wrapper.html(response.hint_html) @hint_wrapper.html(response.hint_html)
@find_hint_elements() @find_hint_elements()
else if @child_state == 'done' else if @child_state == 'done'
@rubric_wrapper.hide()
@message_wrapper.html(response.message_html)
@rebind()
...
...@@ -121,6 +121,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
'rubric': rubric_string,
'initial_display': self.initial_display,
'answer': self.answer,
'problem_id': self.display_name
})
updated_grader_payload = json.dumps(parsed_grader_payload)
...@@ -381,7 +382,8 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
rubric_feedback=""
feedback = self._convert_longform_feedback_to_html(response_items)
if response_items['rubric_scores_complete']==True:
rubric_renderer = CombinedOpenEndedRubric(system, True)
rubric_feedback = rubric_renderer.render_rubric(response_items['rubric_xml'])
if not response_items['success']:
return system.render_template("open_ended_error.html",
...@@ -446,8 +448,8 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
'success': score_result['success'],
'grader_id': score_result['grader_id'][i],
'submission_id': score_result['submission_id'],
'rubric_scores_complete' : score_result['rubric_scores_complete'][i],
'rubric_xml' : score_result['rubric_xml'][i],
}
feedback_items.append(self._format_feedback(new_score_result, system))
if join_feedback:
...
...@@ -93,6 +93,7 @@ class OpenEndedChild(object):
self.prompt = static_data['prompt']
self.rubric = static_data['rubric']
self.display_name = static_data['display_name']
# Used for progress / grading. Currently get credit just for
# completion (doesn't matter if you self-assessed correct/incorrect).
...
...@@ -75,7 +75,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
'previous_answer': previous_answer,
'ajax_url': system.ajax_url,
'initial_rubric': self.get_rubric_html(system),
'initial_hint': "",
'initial_message': self.get_message_html(),
'state': self.state,
'allow_reset': self._allow_reset(),
...@@ -122,7 +122,8 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
if self.state == self.INITIAL:
return ''
rubric_renderer = CombinedOpenEndedRubric(system, True)
rubric_html = rubric_renderer.render_rubric(self.rubric)
# we'll render it
context = {'rubric': rubric_html,
...@@ -235,13 +236,9 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
d = {'success': True, }
self.change_state(self.DONE)
d['message_html'] = self.get_message_html()
d['allow_reset'] = self._allow_reset()
d['state'] = self.state
return d
...
...@@ -42,7 +42,8 @@ class SelfAssessmentTest(unittest.TestCase):
'max_attempts': 10,
'rubric': etree.XML(rubric),
'prompt': prompt,
'max_score': 1,
'display_name': "Name"
}
module = SelfAssessmentModule(test_system, self.location,
...@@ -56,8 +57,6 @@ class SelfAssessmentTest(unittest.TestCase):
self.assertEqual(module.state, module.ASSESSING)
module.save_assessment({'assessment': '0'}, test_system)
self.assertEqual(module.state, module.DONE)
d = module.reset({})
...
...@@ -11,6 +11,10 @@ from django.http import HttpResponse, Http404
from courseware.access import has_access
from util.json_request import expect_json
from xmodule.course_module import CourseDescriptor
from xmodule.combined_open_ended_rubric import CombinedOpenEndedRubric, RubricParsingError
from lxml import etree
from mitxmako.shortcuts import render_to_string
from xmodule.x_module import ModuleSystem
log = logging.getLogger(__name__)
...@@ -27,6 +31,7 @@ class GradingService(object):
self.url = config['url']
self.login_url = self.url + '/login/'
self.session = requests.session()
self.system = ModuleSystem(None, None, None, render_to_string, None)
def _login(self):
"""
...@@ -98,3 +103,33 @@ class GradingService(object):
return response
def _render_rubric(self, response, view_only=False):
"""
Given an HTTP Response with the key 'rubric', render out the html
required to display the rubric and put it back into the response
returns the updated response as a dictionary that can be serialized later
"""
try:
response_json = json.loads(response)
if 'rubric' in response_json:
rubric = response_json['rubric']
rubric_renderer = CombinedOpenEndedRubric(self.system, False)
rubric_html = rubric_renderer.render_rubric(rubric)
response_json['rubric'] = rubric_html
return response_json
# if we can't parse the rubric into HTML,
except (etree.XMLSyntaxError, RubricParsingError):
log.exception("Cannot parse rubric string. Raw string: {0}"
.format(rubric))
return {'success': False,
'error': 'Error displaying submission'}
except ValueError:
log.exception("Error parsing response: {0}".format(response))
return {'success': False,
'error': "Error displaying submission"}
...@@ -20,7 +20,9 @@ from grading_service import GradingServiceError
from courseware.access import has_access
from util.json_request import expect_json
from xmodule.course_module import CourseDescriptor
from xmodule.combined_open_ended_rubric import CombinedOpenEndedRubric
from student.models import unique_id_for_user
from lxml import etree
log = logging.getLogger(__name__)
...@@ -84,15 +86,17 @@ class PeerGradingService(GradingService):
def get_next_submission(self, problem_location, grader_id):
response = self.get(self.get_next_submission_url,
{'location': problem_location, 'grader_id': grader_id})
return json.dumps(self._render_rubric(response))
def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key, rubric_scores):
data = {'grader_id' : grader_id,
'submission_id' : submission_id,
'score' : score,
'feedback' : feedback,
'submission_key': submission_key,
'location': location,
'rubric_scores': rubric_scores,
'rubric_scores_complete': True}
return self.post(self.save_grade_url, data)
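With the widened signature, callers pass one score per rubric category alongside the overall grade. A hypothetical call site (identifiers and values are illustrative, not taken from the diff):

```python
# Illustrative only: grade a peer submission that has two rubric categories.
response = peer_grading_service().save_grade(
    location='i4x://MITx/3.091x/problem/open_ended_demo1',
    grader_id='student_12345',
    submission_id='678',
    score='3',
    feedback='Clear purpose; organization could be tighter.',
    submission_key='abc123',
    rubric_scores=['2', '1'],  # one entry per rubric category, in order
)
```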
def is_student_calibrated(self, problem_location, grader_id):
...@@ -101,15 +105,19 @@ class PeerGradingService(GradingService):
def show_calibration_essay(self, problem_location, grader_id):
params = {'problem_id' : problem_location, 'student_id': grader_id}
response = self.get(self.show_calibration_essay_url, params)
return json.dumps(self._render_rubric(response))
def save_calibration_essay(self, problem_location, grader_id, calibration_essay_id, submission_key,
score, feedback, rubric_scores):
data = {'location': problem_location,
'student_id': grader_id,
'calibration_essay_id': calibration_essay_id,
'submission_key': submission_key,
'score': score,
'feedback': feedback,
'rubric_scores[]': rubric_scores,
'rubric_scores_complete': True}
return self.post(self.save_calibration_essay_url, data)
def get_problem_list(self, course_id, grader_id):
...@@ -196,7 +204,7 @@ def get_next_submission(request, course_id):
mimetype="application/json")
except GradingServiceError:
log.exception("Error getting next submission. server url: {0} location: {1}, grader_id: {2}"
.format(peer_grading_service().url, location, grader_id))
return json.dumps({'success': False,
'error': 'Could not connect to grading service'})
...@@ -216,7 +224,7 @@ def save_grade(request, course_id):
error: if there was an error in the submission, this is the error message
"""
_check_post(request)
required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]'])
success, message = _check_required(request, required)
if not success:
return _err_response(message)
...@@ -227,14 +235,15 @@ def save_grade(request, course_id):
score = p['score']
feedback = p['feedback']
submission_key = p['submission_key']
rubric_scores = p.getlist('rubric_scores[]')
try:
response = peer_grading_service().save_grade(location, grader_id, submission_id,
score, feedback, submission_key, rubric_scores)
return HttpResponse(response, mimetype="application/json")
except GradingServiceError:
log.exception("""Error saving grade. server url: {0}, location: {1}, submission_id:{2},
submission_key: {3}, score: {4}"""
.format(peer_grading_service().url,
location, submission_id, submission_key, score)
)
return json.dumps({'success': False,
...@@ -273,7 +282,7 @@ def is_student_calibrated(request, course_id):
return HttpResponse(response, mimetype="application/json")
except GradingServiceError:
log.exception("Error from grading service. server url: {0}, grader_id: {1}, location: {2}"
.format(peer_grading_service().url, grader_id, location))
return json.dumps({'success': False,
'error': 'Could not connect to grading service'})
...@@ -317,9 +326,15 @@ def show_calibration_essay(request, course_id):
return HttpResponse(response, mimetype="application/json")
except GradingServiceError:
log.exception("Error from grading service. server url: {0}, location: {1}"
.format(peer_grading_service().url, location))
return json.dumps({'success': False,
'error': 'Could not connect to grading service'})
# if we can't parse the rubric into HTML,
except etree.XMLSyntaxError:
log.exception("Cannot parse rubric string. Raw string: {0}"
.format(rubric))
return json.dumps({'success': False,
'error': 'Error displaying submission'})
def save_calibration_essay(request, course_id):
...@@ -341,7 +356,7 @@ def save_calibration_essay(request, course_id):
"""
_check_post(request)
required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]'])
success, message = _check_required(request, required)
if not success:
return _err_response(message)
...@@ -352,9 +367,11 @@ def save_calibration_essay(request, course_id):
submission_key = p['submission_key']
score = p['score']
feedback = p['feedback']
rubric_scores = p.getlist('rubric_scores[]')
try:
response = peer_grading_service().save_calibration_essay(location, grader_id, calibration_essay_id,
submission_key, score, feedback, rubric_scores)
return HttpResponse(response, mimetype="application/json")
except GradingServiceError:
log.exception("Error saving calibration grade, location: {0}, submission_id: {1}, submission_key: {2}, grader_id: {3}".format(location, submission_id, submission_key, grader_id))
...
...@@ -17,6 +17,8 @@ from courseware.access import has_access
from util.json_request import expect_json
from xmodule.course_module import CourseDescriptor
from student.models import unique_id_for_user
from xmodule.x_module import ModuleSystem
from mitxmako.shortcuts import render_to_string
log = logging.getLogger(__name__)
...@@ -46,14 +48,14 @@ class MockStaffGradingService(object):
self.cnt += 1
return json.dumps({'success': True,
'problem_list': [
json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo1',
'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5, 'min_for_ml': 10}),
json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2',
'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5, 'min_for_ml': 10})
]})
def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores):
return self.get_next(course_id, 'fake location', grader_id)
...@@ -107,12 +109,13 @@ class StaffGradingService(GradingService):
Raises:
GradingServiceError: something went wrong with the connection.
"""
response = self.get(self.get_next_url,
params={'location': location,
'grader_id': grader_id})
return json.dumps(self._render_rubric(response))
def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores):
"""
Save a score and feedback for a submission.
...@@ -129,7 +132,9 @@ class StaffGradingService(GradingService):
'score': score,
'feedback': feedback,
'grader_id': grader_id,
'skipped': skipped,
'rubric_scores': rubric_scores,
'rubric_scores_complete': True}
return self.post(self.save_grade_url, data=data)
...@@ -143,6 +148,7 @@ class StaffGradingService(GradingService):
# importing this file doesn't create objects that may not have the right config
_service = None
def staff_grading_service():
"""
Return a staff grading service instance--if settings.MOCK_STAFF_GRADING is True,
...@@ -286,7 +292,7 @@ def save_grade(request, course_id):
if request.method != 'POST':
raise Http404
required = set(['score', 'feedback', 'submission_id', 'location', 'rubric_scores[]'])
actual = set(request.POST.keys())
missing = required - actual
if len(missing) > 0:
...@@ -299,13 +305,15 @@ def save_grade(request, course_id):
location = p['location']
skipped = 'skipped' in p
try:
result_json = staff_grading_service().save_grade(course_id,
grader_id,
p['submission_id'],
p['score'],
p['feedback'],
skipped,
p.getlist('rubric_scores[]'))
except GradingServiceError:
log.exception("Error saving grade")
return _err_response('Could not connect to grading service')
...
...@@ -94,7 +94,8 @@ class TestStaffGradingService(ct.PageLoader):
data = {'score': '12',
'feedback': 'great!',
'submission_id': '123',
'location': self.location,
'rubric_scores[]': ['1', '2']}
r = self.check_for_post_code(200, url, data)
d = json.loads(r.content)
self.assertTrue(d['success'], str(d))
...
...@@ -10,4 +10,18 @@ class PeerGrading
@message_container = $('.message-container')
@message_container.toggle(not @message_container.is(':empty'))
@problem_list = $('.problem-list')
@construct_progress_bar()
construct_progress_bar: () =>
problems = @problem_list.find('tr').next()
problems.each( (index, element) =>
problem = $(element)
progress_bar = problem.find('.progress-bar')
bar_value = parseInt(problem.data('graded'))
bar_max = parseInt(problem.data('required')) + bar_value
progress_bar.progressbar({value: bar_value, max: bar_max})
)
$(document).ready(() -> new PeerGrading())
...@@ -56,13 +56,41 @@ The standard chunk of Lorem Ipsum used since the 1500s is reproduced below for t
<p>This is a self-assessed open response question. Please use as much space as you need in the box below to answer the question.</p>
'''
rubric: '''
<table class="rubric"><tbody><tr><th>Purpose</th>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-0" value="0"><label for="score-0-0">No product</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-1" value="1"><label for="score-0-1">Unclear purpose or main idea</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-2" value="2"><label for="score-0-2">Communicates an identifiable purpose and/or main idea for an audience</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-3" value="3"><label for="score-0-3">Achieves a clear and distinct purpose for a targeted audience and communicates main ideas with effectively used techniques to introduce and represent ideas and insights</label>
</td>
</tr><tr><th>Organization</th>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-0" value="0"><label for="score-1-0">No product</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-1" value="1"><label for="score-1-1">Organization is unclear; introduction, body, and/or conclusion are underdeveloped, missing or confusing.</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-2" value="2"><label for="score-1-2">Organization is occasionally unclear; introduction, body or conclusion may be underdeveloped.</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-3" value="3"><label for="score-1-3">Organization is clear and easy to follow; introduction, body and conclusion are defined and aligned with purpose.</label>
</td>
</tr></tbody></table>
'''
max_score: 4
else if cmd == 'get_next_submission'
...@@ -82,13 +110,41 @@ Curabitur tristique purus ac arcu consequat cursus. Cras diam felis, dignissim q
<p>This is a self-assessed open response question. Please use as much space as you need in the box below to answer the question.</p>
'''
rubric: '''
<table class="rubric"><tbody><tr><th>Purpose</th>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-0" value="0"><label for="score-0-0">No product</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-1" value="1"><label for="score-0-1">Unclear purpose or main idea</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-2" value="2"><label for="score-0-2">Communicates an identifiable purpose and/or main idea for an audience</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-3" value="3"><label for="score-0-3">Achieves a clear and distinct purpose for a targeted audience and communicates main ideas with effectively used techniques to introduce and represent ideas and insights</label>
</td>
</tr><tr><th>Organization</th>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-0" value="0"><label for="score-1-0">No product</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-1" value="1"><label for="score-1-1">Organization is unclear; introduction, body, and/or conclusion are underdeveloped, missing or confusing.</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-2" value="2"><label for="score-1-2">Organization is occasionally unclear; introduction, body or conclusion may be underdeveloped.</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-3" value="3"><label for="score-1-3">Organization is clear and easy to follow; introduction, body and conclusion are defined and aligned with purpose.</label>
</td>
</tr></tbody></table>
'''
max_score: 4
else if cmd == 'save_calibration_essay'
...@@ -137,7 +193,8 @@ class PeerGradingProblem
@feedback_area = $('.feedback-area')
@score_selection_container = $('.score-selection-container')
@rubric_selection_container = $('.rubric-selection-container')
@grade = null
@calibration = null
@submit_button = $('.submit-button')
...@@ -175,9 +232,23 @@ class PeerGradingProblem
fetch_submission_essay: () =>
@backend.post('get_next_submission', {location: @location}, @render_submission)
# finds the scores for each rubric category
get_score_list: () =>
# find the number of categories:
num_categories = $('table.rubric tr').length
score_lst = []
# get the score for each one
for i in [0..(num_categories-1)]
score = $("input[name='score-selection-#{i}']:checked").val()
score_lst.push(score)
return score_lst
construct_data: () ->
data =
rubric_scores: @get_score_list()
score: @grade
location: @location
submission_id: @essay_id_input.val()
submission_key: @submission_key_input.val()
...@@ -244,8 +315,16 @@ class PeerGradingProblem
# called after a grade is selected on the interface
graded_callback: (event) =>
@grade = $("input[name='grade-selection']:checked").val()
if @grade == undefined
return
# check to see whether or not any categories have not been scored
num_categories = $('table.rubric tr').length
for i in [0..(num_categories-1)]
score = $("input[name='score-selection-#{i}']:checked").val()
if score == undefined
return
# show button if we have scores for all categories
@show_submit_button()
...@@ -322,7 +401,7 @@ class PeerGradingProblem
@submission_container.append(@make_paragraphs(response.student_response))
@prompt_container.html(response.prompt)
@rubric_selection_container.html(response.rubric)
@submission_key_input.val(response.submission_key)
@essay_id_input.val(response.submission_id)
@setup_score_selection(response.max_score)
...@@ -336,10 +415,10 @@ class PeerGradingProblem
# display correct grade
@calibration_feedback_panel.slideDown()
calibration_wrapper = $('.calibration-feedback-wrapper')
calibration_wrapper.html("<p>The score you gave was: #{@grade}. The actual score is: #{response.actual_score}</p>")
score = parseInt(@grade)
actual_score = parseInt(response.actual_score)
if score == actual_score
...@@ -366,8 +445,12 @@ class PeerGradingProblem
@submit_button.show()
setup_score_selection: (max_score) =>
# first, get rid of all the old inputs, if any.
@score_selection_container.html("""
<h3>Overall Score</h3>
<p>Choose an overall score for this submission.</p>
""")
# Now create new labels and inputs for each possible score.
for score in [0..max_score]
...@@ -375,12 +458,13 @@ class PeerGradingProblem
label = """<label for="#{id}">#{score}</label>""" label = """<label for="#{id}">#{score}</label>"""
input = """ input = """
<input type="radio" name="score-selection" id="#{id}" value="#{score}"/> <input type="radio" name="grade-selection" id="#{id}" value="#{score}"/>
""" # " fix broken parsing in emacs """ # " fix broken parsing in emacs
@score_selection_container.append(input + label) @score_selection_container.append(input + label)
# And now hook up an event handler again # And now hook up an event handler again
$("input[name='score-selection']").change @graded_callback $("input[name='score-selection']").change @graded_callback
$("input[name='grade-selection']").change @graded_callback
...
...@@ -42,14 +42,41 @@ class StaffGradingBackend
The standard chunk of Lorem Ipsum used since the 1500s is reproduced below for those interested. Sections 1.10.32 and 1.10.33 from "de Finibus Bonorum et Malorum" by Cicero are also reproduced in their exact original form, accompanied by English versions from the 1914 translation by H. Rackham.
'''
rubric: '''
<table class="rubric"><tbody><tr><th>Purpose</th>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-0" value="0"><label for="score-0-0">No product</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-1" value="1"><label for="score-0-1">Unclear purpose or main idea</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-2" value="2"><label for="score-0-2">Communicates an identifiable purpose and/or main idea for an audience</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-3" value="3"><label for="score-0-3">Achieves a clear and distinct purpose for a targeted audience and communicates main ideas with effectively used techniques to introduce and represent ideas and insights</label>
</td>
</tr><tr><th>Organization</th>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-0" value="0"><label for="score-1-0">No product</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-1" value="1"><label for="score-1-1">Organization is unclear; introduction, body, and/or conclusion are underdeveloped, missing or confusing.</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-2" value="2"><label for="score-1-2">Organization is occasionally unclear; introduction, body or conclusion may be underdeveloped.</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-3" value="3"><label for="score-1-3">Organization is clear and easy to follow; introduction, body and conclusion are defined and aligned with purpose.</label>
</td>
</tr></tbody></table>'''
submission_id: @mock_cnt submission_id: @mock_cnt
max_score: 2 + @mock_cnt % 3 max_score: 2 + @mock_cnt % 3
ml_error_info : 'ML accuracy info: ' + @mock_cnt ml_error_info : 'ML accuracy info: ' + @mock_cnt
...@@ -134,12 +161,11 @@ class StaffGrading ...@@ -134,12 +161,11 @@ class StaffGrading
@submission_container = $('.submission-container') @submission_container = $('.submission-container')
@submission_wrapper = $('.submission-wrapper') @submission_wrapper = $('.submission-wrapper')
@rubric_container = $('.rubric-container')
@rubric_wrapper = $('.rubric-wrapper')
@grading_wrapper = $('.grading-wrapper') @grading_wrapper = $('.grading-wrapper')
@feedback_area = $('.feedback-area') @feedback_area = $('.feedback-area')
@score_selection_container = $('.score-selection-container') @score_selection_container = $('.score-selection-container')
@grade_selection_container = $('.grade-selection-container')
@submit_button = $('.submit-button') @submit_button = $('.submit-button')
@action_button = $('.action-button') @action_button = $('.action-button')
...@@ -166,8 +192,9 @@ class StaffGrading ...@@ -166,8 +192,9 @@ class StaffGrading
@min_for_ml = 0 @min_for_ml = 0
@num_graded = 0 @num_graded = 0
@num_pending = 0 @num_pending = 0
@score_lst = []
@grade = null
@score = null
@problems = null @problems = null
# action handlers # action handlers
...@@ -182,30 +209,53 @@ class StaffGrading ...@@ -182,30 +209,53 @@ class StaffGrading
setup_score_selection: => setup_score_selection: =>
# first, get rid of all the old inputs, if any. # first, get rid of all the old inputs, if any.
@score_selection_container.html('Choose score: ') @grade_selection_container.html("""
<h3>Overall Score</h3>
<p>Choose an overall score for this submission.</p>
""")
# Now create new labels and inputs for each possible score. # Now create new labels and inputs for each possible score.
for score in [0..@max_score] for score in [0..@max_score]
id = 'score-' + score id = 'score-' + score
label = """<label for="#{id}">#{score}</label>""" label = """<label for="#{id}">#{score}</label>"""
input = """ input = """
<input type="radio" name="score-selection" id="#{id}" value="#{score}"/> <input type="radio" class="grade-selection" name="grade-selection" id="#{id}" value="#{score}"/>
""" # " fix broken parsing in emacs """ # " fix broken parsing in emacs
@score_selection_container.append(input + label) @grade_selection_container.append(input + label)
$('.grade-selection').click => @graded_callback()
# And now hook up an event handler again
$("input[name='score-selection']").change @graded_callback @score_selection_container.html(@rubric)
$('.score-selection').click => @graded_callback()
graded_callback: () =>
@grade = $("input[name='grade-selection']:checked").val()
if @grade == undefined
return
# check to see whether or not any categories have not been scored
num_categories = $('table.rubric tr').length
for i in [0..(num_categories-1)]
score = $("input[name='score-selection-#{i}']:checked").val()
if score == undefined
return
# show button if we have scores for all categories
@state = state_graded
@submit_button.show()
set_button_text: (text) => set_button_text: (text) =>
@action_button.attr('value', text) @action_button.attr('value', text)
graded_callback: (event) => # finds the scores for each rubric category
@score = event.target.value get_score_list: () =>
@state = state_graded # find the number of categories:
@message = '' num_categories = $('table.rubric tr').length
@render_view()
score_lst = []
# get the score for each one
for i in [0..(num_categories-1)]
score = $("input[name='score-selection-#{i}']:checked").val()
score_lst.push(score)
return score_lst
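Not taken from the commit itself: a small sketch of the per-category completeness check that graded_callback performs above, assuming the rubric renders one score-selection-&lt;n&gt; radio group per table row as in the mock backend markup.

all_categories_scored = ->
  num_categories = $('table.rubric tr').length
  return false if num_categories is 0          # rubric not rendered yet
  for i in [0..(num_categories - 1)]
    # each rubric row renders radios named score-selection-<row index>
    return false unless $("input[name='score-selection-#{i}']:checked").val()?
  true

# graded_callback only moves to state_graded and shows the submit button once
# an overall grade is picked and every rubric category has a selection.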
ajax_callback: (response) => ajax_callback: (response) =>
# always clear out errors and messages on transition. # always clear out errors and messages on transition.
...@@ -231,7 +281,8 @@ class StaffGrading ...@@ -231,7 +281,8 @@ class StaffGrading
skip_and_get_next: () => skip_and_get_next: () =>
data = data =
score: @score score: @grade
rubric_scores: @get_score_list()
feedback: @feedback_area.val() feedback: @feedback_area.val()
submission_id: @submission_id submission_id: @submission_id
location: @location location: @location
...@@ -244,7 +295,8 @@ class StaffGrading ...@@ -244,7 +295,8 @@ class StaffGrading
submit_and_get_next: () -> submit_and_get_next: () ->
data = data =
score: @score score: @grade
rubric_scores: @get_score_list()
feedback: @feedback_area.val() feedback: @feedback_area.val()
submission_id: @submission_id submission_id: @submission_id
location: @location location: @location
...@@ -261,8 +313,8 @@ class StaffGrading ...@@ -261,8 +313,8 @@ class StaffGrading
@rubric = response.rubric @rubric = response.rubric
@submission_id = response.submission_id @submission_id = response.submission_id
@feedback_area.val('') @feedback_area.val('')
@grade = null
@max_score = response.max_score @max_score = response.max_score
@score = null
@ml_error_info=response.ml_error_info @ml_error_info=response.ml_error_info
@prompt_name = response.problem_name @prompt_name = response.problem_name
@num_graded = response.num_graded @num_graded = response.num_graded
...@@ -282,14 +334,21 @@ class StaffGrading ...@@ -282,14 +334,21 @@ class StaffGrading
@ml_error_info = null @ml_error_info = null
@submission_id = null @submission_id = null
@message = message @message = message
@score = null @grade = null
@max_score = 0 @max_score = 0
@state = state_no_data @state = state_no_data
render_view: () -> render_view: () ->
# clear the problem list and breadcrumbs # clear the problem list and breadcrumbs
@problem_list.html('') @problem_list.html('''
<tr>
<th>Problem Name</th>
<th>Graded</th>
<th>Available to Grade</th>
<th>Required</th>
<th>Progress</th>
</tr>
''')
@breadcrumbs.html('') @breadcrumbs.html('')
@problem_list_container.toggle(@list_view) @problem_list_container.toggle(@list_view)
if @backend.mock_backend if @backend.mock_backend
...@@ -306,7 +365,6 @@ class StaffGrading ...@@ -306,7 +365,6 @@ class StaffGrading
@state == state_no_data) @state == state_no_data)
@prompt_wrapper.toggle(show_grading_elements) @prompt_wrapper.toggle(show_grading_elements)
@submission_wrapper.toggle(show_grading_elements) @submission_wrapper.toggle(show_grading_elements)
@rubric_wrapper.toggle(show_grading_elements)
@grading_wrapper.toggle(show_grading_elements) @grading_wrapper.toggle(show_grading_elements)
@meta_info_wrapper.toggle(show_grading_elements) @meta_info_wrapper.toggle(show_grading_elements)
@action_button.hide() @action_button.hide()
...@@ -318,7 +376,7 @@ class StaffGrading ...@@ -318,7 +376,7 @@ class StaffGrading
problem_link:(problem) -> problem_link:(problem) ->
link = $('<a>').attr('href', "javascript:void(0)").append( link = $('<a>').attr('href', "javascript:void(0)").append(
"#{problem.problem_name} (#{problem.num_graded} graded, #{problem.num_pending} pending, required to grade #{problem.num_required} more)") "#{problem.problem_name}")
.click => .click =>
@get_next_submission problem.location @get_next_submission problem.location
...@@ -331,7 +389,17 @@ class StaffGrading ...@@ -331,7 +389,17 @@ class StaffGrading
render_list: () -> render_list: () ->
for problem in @problems for problem in @problems
@problem_list.append($('<li>').append(@problem_link(problem))) problem_row = $('<tr>')
problem_row.append($('<td class="problem-name">').append(@problem_link(problem)))
problem_row.append($('<td>').append("#{problem.num_graded}"))
problem_row.append($('<td>').append("#{problem.num_pending}"))
problem_row.append($('<td>').append("#{problem.num_required}"))
row_progress_bar = $('<div>').addClass('progress-bar')
progress_value = parseInt(problem.num_graded)
progress_max = parseInt(problem.num_required) + progress_value
row_progress_bar.progressbar({value: progress_value, max: progress_max})
problem_row.append($('<td>').append(row_progress_bar))
@problem_list.append(problem_row)
render_problem: () -> render_problem: () ->
# make the view elements match the state. Idempotent. # make the view elements match the state. Idempotent.
...@@ -353,7 +421,7 @@ class StaffGrading ...@@ -353,7 +421,7 @@ class StaffGrading
else if @state == state_grading else if @state == state_grading
@ml_error_info_container.html(@ml_error_info) @ml_error_info_container.html(@ml_error_info)
meta_list = $("<ul>") meta_list = $("<ul>")
meta_list.append("<li><span class='meta-info'>Pending - </span> #{@num_pending}</li>") meta_list.append("<li><span class='meta-info'>Available - </span> #{@num_pending}</li>")
meta_list.append("<li><span class='meta-info'>Graded - </span> #{@num_graded}</li>") meta_list.append("<li><span class='meta-info'>Graded - </span> #{@num_graded}</li>")
meta_list.append("<li><span class='meta-info'>Needed for ML - </span> #{Math.max(@min_for_ml - @num_graded, 0)}</li>") meta_list.append("<li><span class='meta-info'>Needed for ML - </span> #{Math.max(@min_for_ml - @num_graded, 0)}</li>")
@problem_meta_info.html(meta_list) @problem_meta_info.html(meta_list)
...@@ -361,8 +429,6 @@ class StaffGrading ...@@ -361,8 +429,6 @@ class StaffGrading
@prompt_container.html(@prompt) @prompt_container.html(@prompt)
@prompt_name_container.html("#{@prompt_name}") @prompt_name_container.html("#{@prompt_name}")
@submission_container.html(@make_paragraphs(@submission)) @submission_container.html(@make_paragraphs(@submission))
@rubric_container.html(@rubric)
# no submit button until user picks grade. # no submit button until user picks grade.
show_submit_button = false show_submit_button = false
show_action_button = false show_action_button = false
......
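For reference, an illustrative example of the grading payload that skip_and_get_next / submit_and_get_next now assemble, combining the overall grade with the per-category rubric scores. The field names follow the diff above; the concrete values and the location string are hypothetical.

# Hypothetical example of the grading data posted for one submission.
data =
  score: "2"                        # overall grade from the grade-selection radios
  rubric_scores: ["1", "3"]         # one entry per rubric category, from get_score_list()
  feedback: "Clear purpose; tighten the organization of the body paragraphs."
  submission_id: 42
  location: "i4x://MITx/Demo/combinedopenended/SampleQuestion"   # hypothetical location
# The object is handed to the grading backend's AJAX call along with ajax_callback
# (the call itself sits in collapsed context, not shown in this hunk).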
...@@ -24,15 +24,33 @@ div.peer-grading{ ...@@ -24,15 +24,33 @@ div.peer-grading{
color: white; color: white;
} }
input[name='score-selection'] { input[name='score-selection'],
input[name='grade-selection'] {
display: none; display: none;
} }
ul .problem-list
{ {
li text-align: center;
table-layout: auto;
width:100%;
th
{
padding: 10px;
}
td
{
padding:10px;
}
td.problem-name
{
text-align:left;
}
.ui-progressbar
{ {
margin: 16px 0px; height:1em;
margin:0px;
padding:0px;
} }
} }
...@@ -106,6 +124,7 @@ div.peer-grading{ ...@@ -106,6 +124,7 @@ div.peer-grading{
margin: 0px; margin: 0px;
background: #eee; background: #eee;
height: 10em; height: 10em;
width:47.6%;
h3 h3
{ {
text-align:center; text-align:center;
...@@ -120,12 +139,10 @@ div.peer-grading{ ...@@ -120,12 +139,10 @@ div.peer-grading{
.calibration-panel .calibration-panel
{ {
float:left; float:left;
width:48%;
} }
.grading-panel .grading-panel
{ {
float:right; float:right;
width: 48%;
} }
.current-state .current-state
{ {
...@@ -159,5 +176,49 @@ div.peer-grading{ ...@@ -159,5 +176,49 @@ div.peer-grading{
} }
} }
padding: 40px; padding: 40px;
.rubric {
tr {
margin:10px 0px;
height: 100%;
}
td {
padding: 20px 0px 25px 0px;
height: 100%;
}
th {
padding: 5px;
margin: 5px;
}
label,
.view-only {
margin:2px;
position: relative;
padding: 15px 15px 25px 15px;
width: 150px;
height:100%;
display: inline-block;
min-height: 50px;
min-width: 50px;
background-color: #CCC;
font-size: .9em;
}
.grade {
position: absolute;
bottom:0px;
right:0px;
margin:10px;
}
.selected-grade {
background: #666;
color: white;
}
input[type=radio]:checked + label {
background: #666;
color: white; }
input[class='score-selection'] {
display: none;
}
}
} }
<section id="combined-open-ended" class="combined-open-ended" data-ajax-url="${ajax_url}" data-allow_reset="${allow_reset}" data-state="${state}" data-task-count="${task_count}" data-task-number="${task_number}"> <section id="combined-open-ended" class="combined-open-ended" data-ajax-url="${ajax_url}" data-allow_reset="${allow_reset}" data-state="${state}" data-task-count="${task_count}" data-task-number="${task_number}">
<h2>${display_name}</h2>
<div class="status-container"> <div class="status-container">
<h4>Status</h4><br/>
${status | n} ${status | n}
</div> </div>
<div class="item-container"> <div class="item-container">
<h4>Problem</h4><br/> <h4>Problem</h4>
<div class="problem-container">
% for item in items: % for item in items:
<div class="item">${item['content'] | n}</div> <div class="item">${item['content'] | n}</div>
% endfor % endfor
</div>
<input type="button" value="Reset" class="reset-button" name="reset"/> <input type="button" value="Reset" class="reset-button" name="reset"/>
<input type="button" value="Next Step" class="next-step-button" name="reset"/> <input type="button" value="Next Step" class="next-step-button" name="reset"/>
</div> </div>
<a name="results" />
<div class="result-container"> <div class="result-container">
</div> </div>
</section> </section>
......
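Not part of the template change itself: a sketch of how client code can read the state this section exposes through its data attributes, assuming jQuery.

$section    = $('section.combined-open-ended')
ajax_url    = $section.data('ajax-url')                # ${ajax_url}
state       = $section.data('state')
task_count  = parseInt($section.data('task-count'), 10)
task_number = parseInt($section.data('task-number'), 10)
allow_reset = $section.attr('data-allow_reset')        # raw attribute value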
<div class="result-container"> <div class="result-container">
<h4>Results from Step ${task_number}</h4><br/> <h4>Results from Step ${task_number}</h4>
${results | n} ${results | n}
</div> </div>
%if status_list[0]['state'] != 'initial':
<h4>Status</h4>
<div class="status-elements">
<section id="combined-open-ended-status" class="combined-open-ended-status"> <section id="combined-open-ended-status" class="combined-open-ended-status">
%for i in xrange(0,len(status_list)): %for i in xrange(0,len(status_list)):
<%status=status_list[i]%> <%status=status_list[i]%>
%if i==len(status_list)-1: %if i==len(status_list)-1:
<div class="statusitem-current" data-status-number="${i}"> <div class="statusitem statusitem-current" data-status-number="${i}">
%else: %else:
<div class="statusitem" data-status-number="${i}"> <div class="statusitem" data-status-number="${i}">
%endif %endif
...@@ -20,9 +23,12 @@ ...@@ -20,9 +23,12 @@
%if status['type']=="openended" and status['state'] in ['done', 'post_assessment']: %if status['type']=="openended" and status['state'] in ['done', 'post_assessment']:
<div class="show-results"> <div class="show-results">
<a href="#" class="show-results-button">Show results from step ${status['task_number']}</a> <a href="#results" class="show-results-button">Show results from Step ${status['task_number']}</a>
</div> </div>
%endif %endif
</div> </div>
%endfor %endfor
</section> </section>
</div>
%endif
...@@ -33,8 +33,8 @@ ...@@ -33,8 +33,8 @@
</div> </div>
<h2>Problem List</h2> <h2>Problem List</h2>
<ul class="problem-list"> <table class="problem-list">
</ul> </table>
</section> </section>
<!-- Grading View --> <!-- Grading View -->
...@@ -54,11 +54,6 @@ ...@@ -54,11 +54,6 @@
<div class="prompt-container"> <div class="prompt-container">
</div> </div>
</div> </div>
<div class="rubric-wrapper">
<h3>Grading Rubric</h3>
<div class="rubric-container">
</div>
</div>
</section> </section>
...@@ -78,6 +73,8 @@ ...@@ -78,6 +73,8 @@
<div class="evaluation"> <div class="evaluation">
<p class="score-selection-container"> <p class="score-selection-container">
</p> </p>
<p class="grade-selection-container">
</p>
<textarea name="feedback" placeholder="Feedback for student (optional)" <textarea name="feedback" placeholder="Feedback for student (optional)"
class="feedback-area" cols="70" ></textarea> class="feedback-area" cols="70" ></textarea>
</div> </div>
......
...@@ -10,11 +10,11 @@ ...@@ -10,11 +10,11 @@
% if state == 'initial': % if state == 'initial':
<span class="unanswered" style="display:inline-block;" id="status_${id}">Unanswered</span> <span class="unanswered" style="display:inline-block;" id="status_${id}">Unanswered</span>
% elif state in ['done', 'post_assessment'] and correct == 'correct': % elif state in ['done', 'post_assessment'] and correct == 'correct':
<span class="correct" id="status_${id}">Correct</span> <span class="correct" id="status_${id}"></span> <p>Correct</p>
% elif state in ['done', 'post_assessment'] and correct == 'incorrect': % elif state in ['done', 'post_assessment'] and correct == 'incorrect':
<span class="incorrect" id="status_${id}">Incorrect</span> <span class="incorrect" id="status_${id}"></span> <p>Incorrect. </p>
% elif state == 'assessing': % elif state == 'assessing':
<span class="grading" id="status_${id}">Submitted for grading</span> <span class="grading" id="status_${id}">Submitted for grading.</span>
% endif % endif
% if hidden: % if hidden:
......
<section> <section>
<header>Feedback</header> <header>Feedback</header>
<div class="shortform"> <div class="shortform-custom" data-open-text='Show detailed results' data-close-text='Hide detailed results'>
<div class="result-output"> <div class="result-output">
<p>Score: ${score}</p> <p>Score: ${score}</p>
% if grader_type == "ML": % if grader_type == "ML":
......
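The shortform-custom wrapper above carries data-open-text / data-close-text attributes; the handler that consumes them is not part of this diff. Purely as a hypothetical illustration of what those attributes imply, a toggle of this shape would swap the link text as the detailed results are shown and hidden.

# Hypothetical toggle -- the real binding lives elsewhere in the codebase.
$('.shortform-custom').each ->
  $wrapper   = $(this)
  open_text  = $wrapper.data('open-text')      # "Show detailed results"
  close_text = $wrapper.data('close-text')     # "Hide detailed results"
  $link   = $("<a href='#'>#{open_text}</a>").prependTo($wrapper)
  $output = $wrapper.find('.result-output').hide()
  $link.click (event) ->
    event.preventDefault()
    $output.toggle()
    $link.text(if $output.is(':visible') then close_text else open_text)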
<table class="rubric"> <form class="rubric-template" id="inputtype_${id}">
% for i in range(len(rubric_categories)): <h3>Rubric</h3>
<% category = rubric_categories[i] %> % if view_only and has_score:
<tr> <p>This is the rubric that was used to grade your submission. The highlighted selection matches how the grader feels you performed in each category.</p>
<th> % elif view_only:
${category['description']} <p>Use the below rubric to rate this submission.</p>
% if category['has_score'] == True: % else:
(Your score: ${category['score']}) <p>Select the criteria you feel best represents this submission in each category.</p>
% endif % endif
</th> <table class="rubric">
% for i in range(len(categories)):
<% category = categories[i] %>
<tr>
<th>${category['description']}</th>
% for j in range(len(category['options'])): % for j in range(len(category['options'])):
<% option = category['options'][j] %> <% option = category['options'][j] %>
<td> <td>
% if view_only:
## if this is the selected rubric block, show it highlighted
% if option['selected']:
<div class="view-only selected-grade">
% else:
<div class="view-only"> <div class="view-only">
${option['text']}
% if option.has_key('selected'):
% if option['selected'] == True:
<div class="selected-grade">[${option['points']} points]</div>
%else:
<div class="grade">[${option['points']} points]</div>
% endif % endif
% else: ${option['text']}
<div class="grade">[${option['points']} points]</div> <div class="grade">[${option['points']} points]</div>
%endif
</div> </div>
% else:
<input type="radio" class="score-selection" name="score-selection-${i}" id="score-${i}-${j}" value="${option['points']}"/>
<label for="score-${i}-${j}">${option['text']}</label>
% endif
</td> </td>
% endfor % endfor
</tr> </tr>
% endfor % endfor
</table> </table>
\ No newline at end of file </form>
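Server-side this template is rendered with Python data; purely to illustrate the shape the loops above expect, here is an equivalent structure written as a CoffeeScript literal. The category names, texts, and points are hypothetical examples.

# Hypothetical data, shaped the way the rubric template iterates over it.
categories = [
  {
    description: "Purpose"
    options: [
      { points: 0, text: "No product", selected: false }
      { points: 3, text: "Achieves a clear and distinct purpose", selected: true }
    ]
  }
  {
    description: "Organization"
    options: [
      { points: 0, text: "No product", selected: false }
      { points: 3, text: "Organization is clear and easy to follow", selected: false }
    ]
  }
]
# view_only and has_score pick which intro sentence renders, and whether each
# option appears as a highlighted view-only div or as a radio input with a label.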
...@@ -26,13 +26,37 @@ ...@@ -26,13 +26,37 @@
Nothing to grade! Nothing to grade!
</div> </div>
%else: %else:
<ul class="problem-list"> <div class="problem-list-container">
<table class="problem-list">
<tr>
<th>Problem Name</th>
<th>Graded</th>
<th>Available</th>
<th>Required</th>
<th>Progress</th>
</tr>
%for problem in problem_list: %for problem in problem_list:
<li> <tr data-graded="${problem['num_graded']}" data-required="${problem['num_required']}">
<a href="${ajax_url}problem?location=${problem['location']}">${problem['problem_name']} (${problem['num_graded']} graded, ${problem['num_pending']} pending, required to grade ${problem['num_required']} more)</a> <td class="problem-name">
</li> <a href="${ajax_url}problem?location=${problem['location']}">${problem['problem_name']}</a>
</td>
<td>
${problem['num_graded']}
</td>
<td>
${problem['num_pending']}
</td>
<td>
${problem['num_required']}
</td>
<td>
<div class="progress-bar">
</div>
</td>
</tr>
%endfor %endfor
</ul> </table>
</div>
%endif %endif
%endif %endif
</div> </div>
......
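Not shown in this diff: the empty .progress-bar cells above can be filled in on the client from the data-graded / data-required attributes on each row. A sketch using the jQuery UI progressbar widget, on the same "graded out of graded plus still required" scale as render_list:

# Illustrative: one jQuery UI progressbar per server-rendered problem row.
$('table.problem-list tr[data-graded]').each ->
  $row     = $(this)
  graded   = parseInt($row.data('graded'), 10)
  required = parseInt($row.data('required'), 10)
  $row.find('.progress-bar').progressbar({value: graded, max: graded + required})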
...@@ -44,20 +44,13 @@ ...@@ -44,20 +44,13 @@
</div> </div>
<div class="prompt-wrapper"> <div class="prompt-wrapper">
<div class="prompt-information-container collapsible"> <h2>Question</h2>
<header><a href="javascript:void(0)">Question</a></header> <div class="prompt-information-container">
<section> <section>
<div class="prompt-container"> <div class="prompt-container">
</div> </div>
</section> </section>
</div> </div>
<div class="rubric-wrapper collapsible">
<header><a href="javascript:void(0)">Rubric</a></header>
<section>
<div class="rubric-container">
</div>
</section>
</div>
</div> </div>
...@@ -74,6 +67,7 @@ ...@@ -74,6 +67,7 @@
<input type="hidden" name="essay-id" value="" /> <input type="hidden" name="essay-id" value="" />
</div> </div>
<div class="evaluation"> <div class="evaluation">
<p class="rubric-selection-container"></p>
<p class="score-selection-container"> <p class="score-selection-container">
</p> </p>
<textarea name="feedback" placeholder="Feedback for student (optional)" <textarea name="feedback" placeholder="Feedback for student (optional)"
......
<div class="assessment"> <div class="assessment-container">
<div class="rubric"> <div class="rubric">
<h3>Self-assess your answer with this rubric:</h3>
${rubric | n } ${rubric | n }
</div> </div>
% if not read_only: % if not read_only:
<select name="assessment" class="assessment"> <div class="scoring-container">
<h3>Scoring</h3>
<p>Please select a score below:</p>
<div class="grade-selection">
%for i in xrange(0,max_score+1): %for i in xrange(0,max_score+1):
<option value="${i}">${i}</option> <% id = "score-{0}".format(i) %>
<input type="radio" class="grade-selection" name="grade-selection" value="${i}" id="${id}">
<label for="${id}">${i}</label>
%endfor %endfor
</select> </div>
</div>
% endif % endif
</div> </div>
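Illustrative, not in this diff: with the old &lt;select&gt; replaced by radio buttons, the chosen self-assessment score is read from the checked radio in the grade-selection group, for example:

# Hypothetical read of the self-assessment score after the markup change above.
get_selected_grade = ->
  value = $("input[name='grade-selection']:checked").val()
  if value? then parseInt(value, 10) else null   # null until the student picks a score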