Commit 40d59faa by David Ormsbee

Merge pull request #1330 from MITx/diana/open-ended-ui-updates

Rubric Integration and other UI improvements
parents 0b9262a4 1d4b674c
......@@ -21,6 +21,8 @@ from .xml_module import XmlDescriptor
from xmodule.modulestore import Location
import self_assessment_module
import open_ended_module
from combined_open_ended_rubric import CombinedOpenEndedRubric, RubricParsingError
from .stringify import stringify_children
log = logging.getLogger("mitx.courseware")
......@@ -138,12 +140,19 @@ class CombinedOpenEndedModule(XModule):
# completion (doesn't matter if you self-assessed correct/incorrect).
self._max_score = int(self.metadata.get('max_score', MAX_SCORE))
rubric_renderer = CombinedOpenEndedRubric(system, True)
try:
rubric_feedback = rubric_renderer.render_rubric(stringify_children(definition['rubric']))
except RubricParsingError:
log.error("Failed to parse rubric in location: {0}".format(location))
raise
#Static data is passed to the child modules to render
self.static_data = {
'max_score': self._max_score,
'max_attempts': self.max_attempts,
'prompt': definition['prompt'],
'rubric': definition['rubric']
'rubric': definition['rubric'],
'display_name': self.display_name
}
self.task_xml = definition['task_xml']
......@@ -295,6 +304,7 @@ class CombinedOpenEndedModule(XModule):
'task_count': len(self.task_xml),
'task_number': self.current_task_number + 1,
'status': self.get_status(),
'display_name': self.display_name
}
return context
......
......@@ -3,20 +3,37 @@ from lxml import etree
log=logging.getLogger(__name__)
class RubricParsingError(Exception):
pass
class CombinedOpenEndedRubric(object):
@staticmethod
def render_rubric(rubric_xml, system):
def __init__ (self, system, view_only = False):
self.has_score = False
self.view_only = view_only
self.system = system
def render_rubric(self, rubric_xml):
'''
render_rubric: takes in an xml string and outputs the corresponding
html for that xml, given the type of rubric we're generating
Input:
rubric_xml: a string that has not been parsed into xml that
represents this particular rubric
Output:
html: the html that corresponds to the xml given
'''
try:
rubric_categories = CombinedOpenEndedRubric.extract_rubric_categories(rubric_xml)
html = system.render_template('open_ended_rubric.html', {'rubric_categories' : rubric_categories})
rubric_categories = self.extract_categories(rubric_xml)
html = self.system.render_template('open_ended_rubric.html',
{'categories' : rubric_categories,
'has_score': self.has_score,
'view_only': self.view_only})
except:
log.exception("Could not parse the rubric.")
html = rubric_xml
raise RubricParsingError("[render_rubric] Could not parse the rubric with xml: {0}".format(rubric_xml))
return html
@staticmethod
def extract_rubric_categories(element):
def extract_categories(self, element):
'''
Construct a list of categories such that the structure looks like:
[ { category: "Category 1 Name",
......@@ -28,17 +45,18 @@ class CombinedOpenEndedRubric(object):
{text: "Option 3 Name", points: 2}]}]
'''
if isinstance(element, basestring):
element = etree.fromstring(element)
categories = []
for category in element:
if category.tag != 'category':
raise Exception("[capa.inputtypes.extract_categories] Expected a <category> tag: got {0} instead".format(category.tag))
raise RubricParsingError("[extract_categories] Expected a <category> tag: got {0} instead".format(category.tag))
else:
categories.append(CombinedOpenEndedRubric.extract_category(category))
categories.append(self.extract_category(category))
return categories
@staticmethod
def extract_category(category):
def extract_category(self, category):
'''
construct an individual category
{category: "Category 1 Name",
......@@ -47,42 +65,33 @@ class CombinedOpenEndedRubric(object):
all sorting and auto-point generation occurs in this function
'''
has_score=False
descriptionxml = category[0]
scorexml = category[1]
if scorexml.tag == "option":
optionsxml = category[1:]
else:
scorexml = category[1]
score = None
if scorexml.tag == 'score':
score_text = scorexml.text
optionsxml = category[2:]
has_score=True
score = int(score_text)
self.has_score = True
# if we are missing the score tag and we are expecting one
elif self.has_score:
raise RubricParsingError("[extract_category] Category {0} is missing a score".format(descriptionxml.text))
# parse description
if descriptionxml.tag != 'description':
raise Exception("[extract_category]: expected description tag, got {0} instead".format(descriptionxml.tag))
if has_score:
if scorexml.tag != 'score':
raise Exception("[extract_category]: expected score tag, got {0} instead".format(scorexml.tag))
for option in optionsxml:
if option.tag != "option":
raise Exception("[extract_category]: expected option tag, got {0} instead".format(option.tag))
raise RubricParsingError("[extract_category]: expected description tag, got {0} instead".format(descriptionxml.tag))
description = descriptionxml.text
if has_score:
score = int(scorexml.text)
else:
score = 0
cur_points = 0
options = []
autonumbering = True
# parse options
for option in optionsxml:
if option.tag != 'option':
raise Exception("[extract_category]: expected option tag, got {0} instead".format(option.tag))
raise RubricParsingError("[extract_category]: expected option tag, got {0} instead".format(option.tag))
else:
pointstr = option.get("points")
if pointstr:
......@@ -91,25 +100,24 @@ class CombinedOpenEndedRubric(object):
try:
points = int(pointstr)
except ValueError:
raise Exception("[extract_category]: expected points to have int, got {0} instead".format(pointstr))
raise RubricParsingError("[extract_category]: expected points to have int, got {0} instead".format(pointstr))
elif autonumbering:
# use the generated one if we're in the right mode
points = cur_points
cur_points = cur_points + 1
else:
raise Exception("[extract_category]: missing points attribute. Cannot continue to auto-create points values after a points value is explicitly dfined.")
raise Exception("[extract_category]: missing points attribute. Cannot continue to auto-create points values after a points value is explicitly defined.")
selected = score == points
optiontext = option.text
selected = False
if has_score:
if points == score:
selected = True
options.append({'text': option.text, 'points': points, 'selected' : selected})
options.append({'text': option.text, 'points': points, 'selected': selected})
# sort and check for duplicates
options = sorted(options, key=lambda option: option['points'])
CombinedOpenEndedRubric.validate_options(options)
return {'description': description, 'options': options, 'score' : score, 'has_score' : has_score}
return {'description': description, 'options': options}
@staticmethod
def validate_options(options):
......@@ -117,12 +125,12 @@ class CombinedOpenEndedRubric(object):
Validates a set of options. This can and should be extended to filter out other bad edge cases
'''
if len(options) == 0:
raise Exception("[extract_category]: no options associated with this category")
raise RubricParsingError("[extract_category]: no options associated with this category")
if len(options) == 1:
return
prev = options[0]['points']
for option in options[1:]:
if prev == option['points']:
raise Exception("[extract_category]: found duplicate point values between two different options")
raise RubricParsingError("[extract_category]: found duplicate point values between two different options")
else:
prev = option['points']
......@@ -20,6 +20,7 @@ h2 {
color: darken($error-red, 10%);
}
section.problem {
@media print {
display: block;
......@@ -756,4 +757,49 @@ section.problem {
}
}
}
.rubric {
tr {
margin:10px 0px;
height: 100%;
}
td {
padding: 20px 0px;
margin: 10px 0px;
height: 100%;
}
th {
padding: 5px;
margin: 5px;
}
label,
.view-only {
margin:3px;
position: relative;
padding: 15px;
width: 150px;
height:100%;
display: inline-block;
min-height: 50px;
min-width: 50px;
background-color: #CCC;
font-size: .9em;
}
.grade {
position: absolute;
bottom:0px;
right:0px;
margin:10px;
}
.selected-grade {
background: #666;
color: white;
}
input[type=radio]:checked + label {
background: #666;
color: white; }
input[class='score-selection'] {
display: none;
}
}
}
......@@ -37,9 +37,13 @@ section.combined-open-ended {
.result-container
{
float:left;
width: 93%;
width: 100%;
position:relative;
}
h4
{
margin-bottom:10px;
}
}
section.combined-open-ended-status {
......@@ -49,15 +53,19 @@ section.combined-open-ended-status {
color: #2C2C2C;
font-family: monospace;
font-size: 1em;
padding-top: 10px;
padding: 10px;
.show-results {
margin-top: .3em;
text-align:right;
}
.show-results-button {
font: 1em monospace;
}
}
.statusitem-current {
background-color: #BEBEBE;
color: #2C2C2C;
font-family: monospace;
font-size: 1em;
padding-top: 10px;
background-color: #d4d4d4;
color: #222;
}
span {
......@@ -93,6 +101,7 @@ section.combined-open-ended-status {
div.result-container {
.evaluation {
p {
margin-bottom: 1px;
}
......@@ -104,6 +113,7 @@ div.result-container {
}
.evaluation-response {
margin-bottom: 10px;
header {
text-align: right;
a {
......@@ -134,6 +144,7 @@ div.result-container {
}
.external-grader-message {
margin-bottom: 5px;
section {
padding-left: 20px;
background-color: #FAFAFA;
......@@ -141,6 +152,7 @@ div.result-container {
font-family: monospace;
font-size: 1em;
padding-top: 10px;
padding-bottom:30px;
header {
font-size: 1.4em;
}
......@@ -221,12 +233,13 @@ div.result-container {
div.result-container, section.open-ended-child {
.rubric {
margin-bottom:25px;
tr {
margin:10px 0px;
height: 100%;
}
td {
padding: 20px 0px;
padding: 20px 0px 25px 0px;
margin: 10px 0px;
height: 100%;
}
......@@ -236,16 +249,16 @@ div.result-container, section.open-ended-child {
}
label,
.view-only {
margin:10px;
margin:2px;
position: relative;
padding: 15px;
width: 200px;
padding: 10px 15px 25px 15px;
width: 145px;
height:100%;
display: inline-block;
min-height: 50px;
min-width: 50px;
background-color: #CCC;
font-size: 1em;
font-size: .85em;
}
.grade {
position: absolute;
......@@ -257,12 +270,6 @@ div.result-container, section.open-ended-child {
background: #666;
color: white;
}
input[type=radio]:checked + label {
background: #666;
color: white; }
input[class='score-selection'] {
display: none;
}
}
}
......@@ -461,7 +468,6 @@ section.open-ended-child {
p {
line-height: 20px;
text-transform: capitalize;
margin-bottom: 0;
float: left;
}
......@@ -598,13 +604,15 @@ section.open-ended-child {
}
}
div.open-ended-alert {
div.open-ended-alert,
.save_message {
padding: 8px 12px;
border: 1px solid #EBE8BF;
border-radius: 3px;
background: #FFFCDD;
font-size: 0.9em;
margin-top: 10px;
margin-bottom:5px;
}
div.capa_reset {
......@@ -623,4 +631,31 @@ section.open-ended-child {
font-size: 0.9em;
}
.assessment-container {
margin: 40px 0px 30px 0px;
.scoring-container
{
p
{
margin-bottom: 1em;
}
label {
margin: 10px;
padding: 5px;
display: inline-block;
min-width: 50px;
background-color: #CCC;
font-size: 1.5em;
}
input[type=radio]:checked + label {
background: #666;
color: white;
}
input[class='grade-selection'] {
display: none;
}
}
}
}
......@@ -9,20 +9,34 @@ class @Collapsible
###
el: container
###
# standard longform + shortfom pattern
el.find('.longform').hide()
el.find('.shortform').append('<a href="#" class="full">See full output</a>')
# custom longform + shortform text pattern
short_custom = el.find('.shortform-custom')
# set up each one individually
short_custom.each (index, elt) =>
open_text = $(elt).data('open-text')
close_text = $(elt).data('close-text')
$(elt).append("<a href='#' class='full-custom'>"+ open_text + "</a>")
$(elt).find('.full-custom').click (event) => @toggleFull(event, open_text, close_text)
# collapsible pattern
el.find('.collapsible header + section').hide()
el.find('.full').click @toggleFull
# set up triggers
el.find('.full').click (event) => @toggleFull(event, "See full output", "Hide output")
el.find('.collapsible header a').click @toggleHint
@toggleFull: (event) =>
@toggleFull: (event, open_text, close_text) =>
event.preventDefault()
$(event.target).parent().siblings().slideToggle()
$(event.target).parent().parent().toggleClass('open')
if $(event.target).text() == 'See full output'
new_text = 'Hide output'
if $(event.target).text() == open_text
new_text = close_text
else
new_text = 'See full output'
new_text = open_text
$(event.target).text(new_text)
@toggleHint: (event) =>
......
......@@ -109,7 +109,8 @@ class @CombinedOpenEnded
@reset_button.hide()
@next_problem_button.hide()
@hint_area.attr('disabled', false)
if @child_state == 'done'
@rubric_wrapper.hide()
if @child_type=="openended"
@skip_button.hide()
if @allow_reset=="True"
......@@ -139,6 +140,7 @@ class @CombinedOpenEnded
else
@submit_button.click @message_post
else if @child_state == 'done'
@rubric_wrapper.hide()
@answer_area.attr("disabled", true)
@hint_area.attr('disabled', true)
@submit_button.hide()
......@@ -151,7 +153,7 @@ class @CombinedOpenEnded
find_assessment_elements: ->
@assessment = @$('select.assessment')
@assessment = @$('input[name="grade-selection"]')
find_hint_elements: ->
@hint_area = @$('textarea.post_assessment')
......@@ -163,6 +165,7 @@ class @CombinedOpenEnded
$.postWithPrefix "#{@ajax_url}/save_answer", data, (response) =>
if response.success
@rubric_wrapper.html(response.rubric_html)
@rubric_wrapper.show()
@child_state = 'assessing'
@find_assessment_elements()
@rebind()
......@@ -174,7 +177,8 @@ class @CombinedOpenEnded
save_assessment: (event) =>
event.preventDefault()
if @child_state == 'assessing'
data = {'assessment' : @assessment.find(':selected').text()}
checked_assessment = @$('input[name="grade-selection"]:checked')
data = {'assessment' : checked_assessment.val()}
$.postWithPrefix "#{@ajax_url}/save_assessment", data, (response) =>
if response.success
@child_state = response.state
......@@ -183,6 +187,7 @@ class @CombinedOpenEnded
@hint_wrapper.html(response.hint_html)
@find_hint_elements()
else if @child_state == 'done'
@rubric_wrapper.hide()
@message_wrapper.html(response.message_html)
@rebind()
......
......@@ -121,6 +121,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
'rubric': rubric_string,
'initial_display': self.initial_display,
'answer': self.answer,
'problem_id': self.display_name
})
updated_grader_payload = json.dumps(parsed_grader_payload)
......@@ -381,7 +382,8 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
rubric_feedback=""
feedback = self._convert_longform_feedback_to_html(response_items)
if response_items['rubric_scores_complete']==True:
rubric_feedback = CombinedOpenEndedRubric.render_rubric(response_items['rubric_xml'], system)
rubric_renderer = CombinedOpenEndedRubric(system, True)
rubric_feedback = rubric_renderer.render_rubric(response_items['rubric_xml'])
if not response_items['success']:
return system.render_template("open_ended_error.html",
......@@ -446,8 +448,8 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
'success': score_result['success'],
'grader_id': score_result['grader_id'][i],
'submission_id': score_result['submission_id'],
'rubric_scores_complete' : score_result['rubric_scores_complete'],
'rubric_xml' : score_result['rubric_xml'],
'rubric_scores_complete' : score_result['rubric_scores_complete'][i],
'rubric_xml' : score_result['rubric_xml'][i],
}
feedback_items.append(self._format_feedback(new_score_result, system))
if join_feedback:
......
......@@ -93,6 +93,7 @@ class OpenEndedChild(object):
self.prompt = static_data['prompt']
self.rubric = static_data['rubric']
self.display_name = static_data['display_name']
# Used for progress / grading. Currently get credit just for
# completion (doesn't matter if you self-assessed correct/incorrect).
......
......@@ -75,7 +75,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
'previous_answer': previous_answer,
'ajax_url': system.ajax_url,
'initial_rubric': self.get_rubric_html(system),
'initial_hint': self.get_hint_html(system),
'initial_hint': "",
'initial_message': self.get_message_html(),
'state': self.state,
'allow_reset': self._allow_reset(),
......@@ -122,7 +122,8 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
if self.state == self.INITIAL:
return ''
rubric_html = CombinedOpenEndedRubric.render_rubric(self.rubric, system)
rubric_renderer = CombinedOpenEndedRubric(system, True)
rubric_html = rubric_renderer.render_rubric(self.rubric)
# we'll render it
context = {'rubric': rubric_html,
......@@ -235,13 +236,9 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
d = {'success': True, }
if score == self.max_score():
self.change_state(self.DONE)
d['message_html'] = self.get_message_html()
d['allow_reset'] = self._allow_reset()
else:
self.change_state(self.POST_ASSESSMENT)
d['hint_html'] = self.get_hint_html(system)
d['state'] = self.state
return d
......
......@@ -42,7 +42,8 @@ class SelfAssessmentTest(unittest.TestCase):
'max_attempts': 10,
'rubric': etree.XML(rubric),
'prompt': prompt,
'max_score': 1
'max_score': 1,
'display_name': "Name"
}
module = SelfAssessmentModule(test_system, self.location,
......@@ -56,8 +57,6 @@ class SelfAssessmentTest(unittest.TestCase):
self.assertEqual(module.state, module.ASSESSING)
module.save_assessment({'assessment': '0'}, test_system)
self.assertEqual(module.state, module.POST_ASSESSMENT)
module.save_hint({'hint': 'this is a hint'}, test_system)
self.assertEqual(module.state, module.DONE)
d = module.reset({})
......
......@@ -11,6 +11,10 @@ from django.http import HttpResponse, Http404
from courseware.access import has_access
from util.json_request import expect_json
from xmodule.course_module import CourseDescriptor
from xmodule.combined_open_ended_rubric import CombinedOpenEndedRubric, RubricParsingError
from lxml import etree
from mitxmako.shortcuts import render_to_string
from xmodule.x_module import ModuleSystem
log = logging.getLogger(__name__)
......@@ -27,6 +31,7 @@ class GradingService(object):
self.url = config['url']
self.login_url = self.url + '/login/'
self.session = requests.session()
self.system = ModuleSystem(None, None, None, render_to_string, None)
def _login(self):
"""
......@@ -98,3 +103,33 @@ class GradingService(object):
return response
def _render_rubric(self, response, view_only=False):
"""
Given an HTTP Response with the key 'rubric', render out the html
required to display the rubric and put it back into the response
returns the updated response as a dictionary that can be serialized later
"""
try:
response_json = json.loads(response)
if 'rubric' in response_json:
rubric = response_json['rubric']
rubric_renderer = CombinedOpenEndedRubric(self.system, False)
rubric_html = rubric_renderer.render_rubric(rubric)
response_json['rubric'] = rubric_html
return response_json
# if we can't parse the rubric into HTML,
except (etree.XMLSyntaxError, RubricParsingError):
log.exception("Cannot parse rubric string. Raw string: {0}"
.format(rubric))
return {'success': False,
'error': 'Error displaying submission'}
except ValueError:
log.exception("Error parsing response: {0}".format(response))
return {'success': False,
'error': "Error displaying submission"}
......@@ -20,7 +20,9 @@ from grading_service import GradingServiceError
from courseware.access import has_access
from util.json_request import expect_json
from xmodule.course_module import CourseDescriptor
from xmodule.combined_open_ended_rubric import CombinedOpenEndedRubric
from student.models import unique_id_for_user
from lxml import etree
log = logging.getLogger(__name__)
......@@ -84,15 +86,17 @@ class PeerGradingService(GradingService):
def get_next_submission(self, problem_location, grader_id):
response = self.get(self.get_next_submission_url,
{'location': problem_location, 'grader_id': grader_id})
return response
return json.dumps(self._render_rubric(response))
def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key):
def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key, rubric_scores):
data = {'grader_id' : grader_id,
'submission_id' : submission_id,
'score' : score,
'feedback' : feedback,
'submission_key': submission_key,
'location': location}
'location': location,
'rubric_scores': rubric_scores,
'rubric_scores_complete': True}
return self.post(self.save_grade_url, data)
def is_student_calibrated(self, problem_location, grader_id):
......@@ -101,15 +105,19 @@ class PeerGradingService(GradingService):
def show_calibration_essay(self, problem_location, grader_id):
params = {'problem_id' : problem_location, 'student_id': grader_id}
return self.get(self.show_calibration_essay_url, params)
response = self.get(self.show_calibration_essay_url, params)
return json.dumps(self._render_rubric(response))
def save_calibration_essay(self, problem_location, grader_id, calibration_essay_id, submission_key, score, feedback):
def save_calibration_essay(self, problem_location, grader_id, calibration_essay_id, submission_key,
score, feedback, rubric_scores):
data = {'location': problem_location,
'student_id': grader_id,
'calibration_essay_id': calibration_essay_id,
'submission_key': submission_key,
'score': score,
'feedback': feedback}
'feedback': feedback,
'rubric_scores[]': rubric_scores,
'rubric_scores_complete': True}
return self.post(self.save_calibration_essay_url, data)
def get_problem_list(self, course_id, grader_id):
......@@ -196,7 +204,7 @@ def get_next_submission(request, course_id):
mimetype="application/json")
except GradingServiceError:
log.exception("Error getting next submission. server url: {0} location: {1}, grader_id: {2}"
.format(staff_grading_service().url, location, grader_id))
.format(peer_grading_service().url, location, grader_id))
return json.dumps({'success': False,
'error': 'Could not connect to grading service'})
......@@ -216,7 +224,7 @@ def save_grade(request, course_id):
error: if there was an error in the submission, this is the error message
"""
_check_post(request)
required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback'])
required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]'])
success, message = _check_required(request, required)
if not success:
return _err_response(message)
......@@ -227,14 +235,15 @@ def save_grade(request, course_id):
score = p['score']
feedback = p['feedback']
submission_key = p['submission_key']
rubric_scores = p.getlist('rubric_scores[]')
try:
response = peer_grading_service().save_grade(location, grader_id, submission_id,
score, feedback, submission_key)
score, feedback, submission_key, rubric_scores)
return HttpResponse(response, mimetype="application/json")
except GradingServiceError:
log.exception("""Error saving grade. server url: {0}, location: {1}, submission_id:{2},
submission_key: {3}, score: {4}"""
.format(staff_grading_service().url,
.format(peer_grading_service().url,
location, submission_id, submission_key, score)
)
return json.dumps({'success': False,
......@@ -273,7 +282,7 @@ def is_student_calibrated(request, course_id):
return HttpResponse(response, mimetype="application/json")
except GradingServiceError:
log.exception("Error from grading service. server url: {0}, grader_id: {0}, location: {1}"
.format(staff_grading_service().url, grader_id, location))
.format(peer_grading_service().url, grader_id, location))
return json.dumps({'success': False,
'error': 'Could not connect to grading service'})
......@@ -317,9 +326,15 @@ def show_calibration_essay(request, course_id):
return HttpResponse(response, mimetype="application/json")
except GradingServiceError:
log.exception("Error from grading service. server url: {0}, location: {0}"
.format(staff_grading_service().url, location))
.format(peer_grading_service().url, location))
return json.dumps({'success': False,
'error': 'Could not connect to grading service'})
# if we can't parse the rubric into HTML,
except etree.XMLSyntaxError:
log.exception("Cannot parse rubric string for location: {0}"
.format(location))
return json.dumps({'success': False,
'error': 'Error displaying submission'})
def save_calibration_essay(request, course_id):
......@@ -341,7 +356,7 @@ def save_calibration_essay(request, course_id):
"""
_check_post(request)
required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback'])
required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]'])
success, message = _check_required(request, required)
if not success:
return _err_response(message)
......@@ -352,9 +367,11 @@ def save_calibration_essay(request, course_id):
submission_key = p['submission_key']
score = p['score']
feedback = p['feedback']
rubric_scores = p.getlist('rubric_scores[]')
try:
response = peer_grading_service().save_calibration_essay(location, grader_id, calibration_essay_id, submission_key, score, feedback)
response = peer_grading_service().save_calibration_essay(location, grader_id, calibration_essay_id,
submission_key, score, feedback, rubric_scores)
return HttpResponse(response, mimetype="application/json")
except GradingServiceError:
log.exception("Error saving calibration grade, location: {0}, submission_id: {1}, submission_key: {2}, grader_id: {3}".format(location, submission_id, submission_key, grader_id))
......
......@@ -17,6 +17,8 @@ from courseware.access import has_access
from util.json_request import expect_json
from xmodule.course_module import CourseDescriptor
from student.models import unique_id_for_user
from xmodule.x_module import ModuleSystem
from mitxmako.shortcuts import render_to_string
log = logging.getLogger(__name__)
......@@ -46,14 +48,14 @@ class MockStaffGradingService(object):
self.cnt += 1
return json.dumps({'success': True,
'problem_list': [
json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo1', \
json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo1',
'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5, 'min_for_ml': 10}),
json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2', \
json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2',
'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5, 'min_for_ml': 10})
]})
def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped):
def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores):
return self.get_next(course_id, 'fake location', grader_id)
......@@ -107,12 +109,13 @@ class StaffGradingService(GradingService):
Raises:
GradingServiceError: something went wrong with the connection.
"""
return self.get(self.get_next_url,
response = self.get(self.get_next_url,
params={'location': location,
'grader_id': grader_id})
return json.dumps(self._render_rubric(response))
def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped):
def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores):
"""
Save a score and feedback for a submission.
......@@ -129,7 +132,9 @@ class StaffGradingService(GradingService):
'score': score,
'feedback': feedback,
'grader_id': grader_id,
'skipped': skipped}
'skipped': skipped,
'rubric_scores': rubric_scores,
'rubric_scores_complete': True}
return self.post(self.save_grade_url, data=data)
......@@ -143,6 +148,7 @@ class StaffGradingService(GradingService):
# importing this file doesn't create objects that may not have the right config
_service = None
def staff_grading_service():
"""
Return a staff grading service instance--if settings.MOCK_STAFF_GRADING is True,
......@@ -286,7 +292,7 @@ def save_grade(request, course_id):
if request.method != 'POST':
raise Http404
required = set(['score', 'feedback', 'submission_id', 'location'])
required = set(['score', 'feedback', 'submission_id', 'location', 'rubric_scores[]'])
actual = set(request.POST.keys())
missing = required - actual
if len(missing) > 0:
......@@ -299,13 +305,15 @@ def save_grade(request, course_id):
location = p['location']
skipped = 'skipped' in p
try:
result_json = staff_grading_service().save_grade(course_id,
grader_id,
p['submission_id'],
p['score'],
p['feedback'],
skipped)
skipped,
p.getlist('rubric_scores[]'))
except GradingServiceError:
log.exception("Error saving grade")
return _err_response('Could not connect to grading service')
......
......@@ -94,7 +94,8 @@ class TestStaffGradingService(ct.PageLoader):
data = {'score': '12',
'feedback': 'great!',
'submission_id': '123',
'location': self.location}
'location': self.location,
'rubric_scores[]': ['1', '2']}
r = self.check_for_post_code(200, url, data)
d = json.loads(r.content)
self.assertTrue(d['success'], str(d))
......
......@@ -10,4 +10,18 @@ class PeerGrading
@message_container = $('.message-container')
@message_container.toggle(not @message_container.is(':empty'))
@problem_list = $('.problem-list')
@construct_progress_bar()
# Build a jQuery-UI progress bar for each problem row in the list.
construct_progress_bar: () =>
  # .next() presumably skips the header <tr> — TODO confirm against markup
  problems = @problem_list.find('tr').next()
  problems.each( (index, element) =>
    problem = $(element)
    progress_bar = problem.find('.progress-bar')
    # data-graded / data-required are emitted server-side on each row;
    # the bar maxes out once the required number of gradings is reached
    bar_value = parseInt(problem.data('graded'))
    bar_max = parseInt(problem.data('required')) + bar_value
    progress_bar.progressbar({value: bar_value, max: bar_max})
  )

# Instantiate the page controller once the DOM is ready.
$(document).ready(() -> new PeerGrading())
......@@ -24,15 +24,33 @@ div.peer-grading{
color: white;
}
input[name='score-selection'] {
input[name='score-selection'],
input[name='grade-selection'] {
display: none;
}
ul
.problem-list
{
li
text-align: center;
table-layout: auto;
width:100%;
th
{
padding: 10px;
}
td
{
padding:10px;
}
td.problem-name
{
text-align:left;
}
.ui-progressbar
{
margin: 16px 0px;
height:1em;
margin:0px;
padding:0px;
}
}
......@@ -106,6 +124,7 @@ div.peer-grading{
margin: 0px;
background: #eee;
height: 10em;
width:47.6%;
h3
{
text-align:center;
......@@ -120,12 +139,10 @@ div.peer-grading{
.calibration-panel
{
float:left;
width:48%;
}
.grading-panel
{
float:right;
width: 48%;
}
.current-state
{
......@@ -159,5 +176,49 @@ div.peer-grading{
}
}
padding: 40px;
// Styles for the rendered grading rubric table.
.rubric {
  tr {
    margin:10px 0px;
    height: 100%;
  }
  td {
    padding: 20px 0px 25px 0px;
    height: 100%;
  }
  th {
    padding: 5px;
    margin: 5px;
  }
  // Each rubric option renders either as a clickable <label> (grading mode)
  // or a .view-only block (read-only mode); both share the same box styling.
  label,
  .view-only {
    margin:2px;
    position: relative;
    padding: 15px 15px 25px 15px;
    width: 150px;
    height:100%;
    display: inline-block;
    min-height: 50px;
    min-width: 50px;
    background-color: #CCC;
    font-size: .9em;
  }
  // Point value pinned to the bottom-right corner of each option box.
  .grade {
    position: absolute;
    bottom:0px;
    right:0px;
    margin:10px;
  }
  // Highlight applied to the option the grader selected (read-only view).
  .selected-grade {
    background: #666;
    color: white;
  }
  // The radios themselves are hidden; a checked radio highlights its label.
  input[type=radio]:checked + label {
    background: #666;
    color: white; }
  input[class='score-selection'] {
    display: none;
  }
}
}
## Container for a combined open-ended problem: status summary, the
## per-task items, reset/next controls, and a results area that is
## filled in client-side via the ajax url.
<section id="combined-open-ended" class="combined-open-ended" data-ajax-url="${ajax_url}" data-allow_reset="${allow_reset}" data-state="${state}" data-task-count="${task_count}" data-task-number="${task_number}">
  <h2>${display_name}</h2>
  <div class="status-container">
    <h4>Status</h4><br/>
    ${status | n}
  </div>
  <div class="item-container">
    <h4>Problem</h4>
    <div class="problem-container">
      % for item in items:
      <div class="item">${item['content'] | n}</div>
      % endfor
    </div>
    <input type="button" value="Reset" class="reset-button" name="reset"/>
    ## NOTE(review): name="reset" on the next-step button looks like a
    ## copy-paste slip — confirm nothing keys off the name attribute.
    <input type="button" value="Next Step" class="next-step-button" name="reset"/>
  </div>
  ## Anchor target for the "Show results" links in the status list.
  <a name="results" />
  <div class="result-container">
  </div>
</section>
......
## Results for one completed step; task_number is 1-based.
<div class="result-container">
  <h4>Results from Step ${task_number}</h4>
  ${results | n}
</div>
## Render the per-task status strip once the student has started work.
%if status_list[0]['state'] != 'initial':
<h4>Status</h4>
<div class="status-elements">
  <section id="combined-open-ended-status" class="combined-open-ended-status">
    ## Walk statuses in order; the final entry is the task in progress
    ## and gets the extra statusitem-current highlight class.
    %for i in xrange(0,len(status_list)):
    <%status=status_list[i]%>
    %if i==len(status_list)-1:
    <div class="statusitem statusitem-current" data-status-number="${i}">
    %else:
    <div class="statusitem" data-status-number="${i}">
    %endif
......@@ -20,9 +23,12 @@
## Graded open-ended steps get a link back to their results block
## (the #results anchor lives in the main problem section).
%if status['type']=="openended" and status['state'] in ['done', 'post_assessment']:
<div class="show-results">
  <a href="#results" class="show-results-button">Show results from Step ${status['task_number']}</a>
</div>
%endif
</div>
%endfor
  </section>
</div>
%endif
......@@ -33,8 +33,8 @@
</div>
<h2>Problem List</h2>
## Populated client-side; the list was converted from a <ul> to a table.
<table class="problem-list">
</table>
</section>
<!-- Grading View -->
......@@ -54,11 +54,6 @@
<div class="prompt-container">
</div>
</div>
## NOTE(review): the rubric-wrapper block that used to live here was
## deleted in the merge; the rubric now renders inside the evaluation form.
</section>
......@@ -78,6 +73,8 @@
<div class="evaluation">
  ## Both containers are filled client-side: per-category score radios
  ## and the overall grade selection.
  <p class="score-selection-container">
  </p>
  <p class="grade-selection-container">
  </p>
  <textarea name="feedback" placeholder="Feedback for student (optional)"
            class="feedback-area" cols="70" ></textarea>
</div>
......
......@@ -10,11 +10,11 @@
## Status line for a single task. Correct/incorrect states render an
## (icon) span followed by a text paragraph; other states keep the text
## inside the span itself.
% if state == 'initial':
<span class="unanswered" style="display:inline-block;" id="status_${id}">Unanswered</span>
% elif state in ['done', 'post_assessment'] and correct == 'correct':
<span class="correct" id="status_${id}"></span> <p>Correct</p>
% elif state in ['done', 'post_assessment'] and correct == 'incorrect':
<span class="incorrect" id="status_${id}"></span> <p>Incorrect. </p>
% elif state == 'assessing':
<span class="grading" id="status_${id}">Submitted for grading.</span>
% endif
% if hidden:
......
<section>
  <header>Feedback</header>
  ## data-open-text / data-close-text supply the expand/collapse link labels.
  <div class="shortform-custom" data-open-text='Show detailed results' data-close-text='Hide detailed results'>
    <div class="result-output">
      <p>Score: ${score}</p>
      % if grader_type == "ML":
......
<table class="rubric">
% for i in range(len(rubric_categories)):
<% category = rubric_categories[i] %>
<tr>
<th>
${category['description']}
% if category['has_score'] == True:
(Your score: ${category['score']})
<form class="rubric-template" id="inputtype_${id}">
<h3>Rubric</h3>
% if view_only and has_score:
<p>This is the rubric that was used to grade your submission. The highlighted selection matches how the grader feels you performed in each category.</p>
% elif view_only:
<p>Use the below rubric to rate this submission.</p>
% else:
<p>Select the criteria you feel best represents this submission in each category.</p>
% endif
</th>
<table class="rubric">
% for i in range(len(categories)):
<% category = categories[i] %>
<tr>
<th>${category['description']}</th>
% for j in range(len(category['options'])):
<% option = category['options'][j] %>
<td>
% if view_only:
## if this is the selected rubric block, show it highlighted
% if option['selected']:
<div class="view-only selected-grade">
% else:
<div class="view-only">
${option['text']}
% if option.has_key('selected'):
% if option['selected'] == True:
<div class="selected-grade">[${option['points']} points]</div>
%else:
<div class="grade">[${option['points']} points]</div>
% endif
% else:
${option['text']}
<div class="grade">[${option['points']} points]</div>
%endif
</div>
% else:
<input type="radio" class="score-selection" name="score-selection-${i}" id="score-${i}-${j}" value="${option['points']}"/>
<label for="score-${i}-${j}">${option['text']}</label>
% endif
</td>
% endfor
</tr>
% endfor
</table>
\ No newline at end of file
</table>
</form>
......@@ -26,13 +26,37 @@
Nothing to grade!
</div>
%else:
## One table row per gradeable problem. data-graded / data-required on
## each row feed the client-side jQuery-UI progress bar.
<div class="problem-list-container">
  <table class="problem-list">
    <tr>
      <th>Problem Name</th>
      <th>Graded</th>
      <th>Available</th>
      <th>Required</th>
      <th>Progress</th>
    </tr>
    %for problem in problem_list:
    <tr data-graded="${problem['num_graded']}" data-required="${problem['num_required']}">
      <td class="problem-name">
        <a href="${ajax_url}problem?location=${problem['location']}">${problem['problem_name']}</a>
      </td>
      <td>
        ${problem['num_graded']}
      </td>
      <td>
        ${problem['num_pending']}
      </td>
      <td>
        ${problem['num_required']}
      </td>
      <td>
        <div class="progress-bar">
        </div>
      </td>
    </tr>
    %endfor
  </table>
</div>
%endif
%endif
</div>
......
......@@ -44,20 +44,13 @@
</div>
<div class="prompt-wrapper">
<div class="prompt-information-container collapsible">
<header><a href="javascript:void(0)">Question</a></header>
<h2>Question</h2>
<div class="prompt-information-container">
<section>
<div class="prompt-container">
</div>
</section>
</div>
<div class="rubric-wrapper collapsible">
<header><a href="javascript:void(0)">Rubric</a></header>
<section>
<div class="rubric-container">
</div>
</section>
</div>
</div>
......@@ -74,6 +67,7 @@
<input type="hidden" name="essay-id" value="" />
</div>
<div class="evaluation">
  ## Filled client-side with the rendered rubric's radio inputs.
  <p class="rubric-selection-container"></p>
  <p class="score-selection-container">
  </p>
  <textarea name="feedback" placeholder="Feedback for student (optional)"
......
<div class="assessment">
<div class="assessment-container">
  ## Show the rendered rubric so the student can self-assess against it.
  <div class="rubric">
    <h3>Self-assess your answer with this rubric:</h3>
    ${rubric | n }
  </div>
  % if not read_only:
  <div class="scoring-container">
    <h3>Scoring</h3>
    <p>Please select a score below:</p>
    ## One radio button per possible score, 0 through max_score inclusive
    ## (this replaced the old <select>/<option> dropdown).
    <div class="grade-selection">
      %for i in xrange(0,max_score+1):
      <% id = "score-{0}".format(i) %>
      <input type="radio" class="grade-selection" name="grade-selection" value="${i}" id="${id}">
      <label for="${id}">${i}</label>
      %endfor
    </div>
  </div>
  % endif
</div>
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment