Commit 3ec2a83e by Diana Huang

Merge branch 'diana/open-ended-ui-updates' into tests/diana/update-oe-unit-tests

Fix merge conflicts in the self-assessment unit tests

Conflicts:
	common/lib/xmodule/xmodule/tests/test_self_assessment.py
parents bb521a20 cc67017d
......@@ -21,6 +21,8 @@ from .xml_module import XmlDescriptor
from xmodule.modulestore import Location
import self_assessment_module
import open_ended_module
from combined_open_ended_rubric import CombinedOpenEndedRubric
from .stringify import stringify_children
log = logging.getLogger("mitx.courseware")
......@@ -138,12 +140,19 @@ class CombinedOpenEndedModule(XModule):
# completion (doesn't matter if you self-assessed correct/incorrect).
self._max_score = int(self.metadata.get('max_score', MAX_SCORE))
rubric_renderer = CombinedOpenEndedRubric(system, True)
success, rubric_feedback = rubric_renderer.render_rubric(stringify_children(definition['rubric']))
if not success:
error_message="Could not parse rubric : {0}".format(definition['rubric'])
log.exception(error_message)
raise Exception
#Static data is passed to the child modules to render
self.static_data = {
'max_score': self._max_score,
'max_attempts': self.max_attempts,
'prompt': definition['prompt'],
'rubric': definition['rubric']
'rubric': definition['rubric'],
'display_name': self.display_name
}
self.task_xml = definition['task_xml']
......@@ -295,6 +304,7 @@ class CombinedOpenEndedModule(XModule):
'task_count': len(self.task_xml),
'task_number': self.current_task_number + 1,
'status': self.get_status(),
'display_name': self.display_name
}
return context
......
......@@ -5,18 +5,42 @@ log=logging.getLogger(__name__)
class CombinedOpenEndedRubric(object):
@staticmethod
def render_rubric(rubric_xml, system):
def __init__ (self, system, view_only = False):
self.has_score = False
self.view_only = view_only
self.system = system
'''
render_rubric: takes in an xml string and outputs the corresponding
html for that xml, given the type of rubric we're generating
Input:
rubric_xml: a string that has not been parsed into xml that
represents this particular rubric
Output:
html: the html that corresponds to the xml given
'''
def render_rubric(self, rubric_xml):
success = False
try:
rubric_categories = CombinedOpenEndedRubric.extract_rubric_categories(rubric_xml)
html = system.render_template('open_ended_rubric.html', {'rubric_categories' : rubric_categories})
rubric_categories = self.extract_categories(rubric_xml)
html = self.system.render_template('open_ended_rubric.html',
{'categories' : rubric_categories,
'has_score': self.has_score,
'view_only': self.view_only})
success = True
except:
log.exception("Could not parse the rubric.")
try:
html = etree.tostring(rubric_xml, pretty_print=True)
except:
log.exception("Rubric XML is a string, not an XML object : {0}".format(rubric_xml))
if isinstance(rubric_xml, basestring):
html = rubric_xml
return html
else:
html = "Invalid rubric. Please contact course staff."
return success, html
@staticmethod
def extract_rubric_categories(element):
def extract_categories(self, element):
'''
Construct a list of categories such that the structure looks like:
[ { category: "Category 1 Name",
......@@ -28,17 +52,18 @@ class CombinedOpenEndedRubric(object):
{text: "Option 3 Name", points: 2]}]
'''
if isinstance(element, basestring):
element = etree.fromstring(element)
categories = []
for category in element:
if category.tag != 'category':
raise Exception("[capa.inputtypes.extract_categories] Expected a <category> tag: got {0} instead".format(category.tag))
raise Exception("[extract_categories] Expected a <category> tag: got {0} instead".format(category.tag))
else:
categories.append(CombinedOpenEndedRubric.extract_category(category))
categories.append(self.extract_category(category))
return categories
@staticmethod
def extract_category(category):
def extract_category(self, category):
'''
construct an individual category
{category: "Category 1 Name",
......@@ -47,35 +72,26 @@ class CombinedOpenEndedRubric(object):
all sorting and auto-point generation occurs in this function
'''
has_score=False
descriptionxml = category[0]
scorexml = category[1]
if scorexml.tag == "option":
optionsxml = category[1:]
else:
scorexml = category[1]
score = None
if scorexml.tag == 'score':
score_text = scorexml.text
optionsxml = category[2:]
has_score=True
score = int(score_text)
self.has_score = True
# if we are missing the score tag and we are expecting one
elif self.has_score:
raise Exception("[extract_category] Category {0} is missing a score".format(descriptionxml.text))
# parse description
if descriptionxml.tag != 'description':
raise Exception("[extract_category]: expected description tag, got {0} instead".format(descriptionxml.tag))
if has_score:
if scorexml.tag != 'score':
raise Exception("[extract_category]: expected score tag, got {0} instead".format(scorexml.tag))
for option in optionsxml:
if option.tag != "option":
raise Exception("[extract_category]: expected option tag, got {0} instead".format(option.tag))
description = descriptionxml.text
if has_score:
score = int(scorexml.text)
else:
score = 0
cur_points = 0
options = []
autonumbering = True
......@@ -97,19 +113,18 @@ class CombinedOpenEndedRubric(object):
points = cur_points
cur_points = cur_points + 1
else:
raise Exception("[extract_category]: missing points attribute. Cannot continue to auto-create points values after a points value is explicitly dfined.")
raise Exception("[extract_category]: missing points attribute. Cannot continue to auto-create points values after a points value is explicitly defined.")
selected = score == points
optiontext = option.text
selected = False
if has_score:
if points == score:
selected = True
options.append({'text': option.text, 'points': points, 'selected' : selected})
options.append({'text': option.text, 'points': points, 'selected': selected})
# sort and check for duplicates
options = sorted(options, key=lambda option: option['points'])
CombinedOpenEndedRubric.validate_options(options)
return {'description': description, 'options': options, 'score' : score, 'has_score' : has_score}
return {'description': description, 'options': options}
@staticmethod
def validate_options(options):
......
......@@ -20,6 +20,7 @@ h2 {
color: darken($error-red, 10%);
}
section.problem {
@media print {
display: block;
......@@ -756,4 +757,49 @@ section.problem {
}
}
}
.rubric {
tr {
margin:10px 0px;
height: 100%;
}
td {
padding: 20px 0px;
margin: 10px 0px;
height: 100%;
}
th {
padding: 5px;
margin: 5px;
}
label,
.view-only {
margin:3px;
position: relative;
padding: 15px;
width: 150px;
height:100%;
display: inline-block;
min-height: 50px;
min-width: 50px;
background-color: #CCC;
font-size: .9em;
}
.grade {
position: absolute;
bottom:0px;
right:0px;
margin:10px;
}
.selected-grade {
background: #666;
color: white;
}
input[type=radio]:checked + label {
background: #666;
color: white; }
input[class='score-selection'] {
display: none;
}
}
}
......@@ -37,9 +37,13 @@ section.combined-open-ended {
.result-container
{
float:left;
width: 93%;
width: 100%;
position:relative;
}
h4
{
margin-bottom:10px;
}
}
section.combined-open-ended-status {
......@@ -49,15 +53,19 @@ section.combined-open-ended-status {
color: #2C2C2C;
font-family: monospace;
font-size: 1em;
padding-top: 10px;
padding: 10px;
.show-results {
margin-top: .3em;
text-align:right;
}
.show-results-button {
font: 1em monospace;
}
}
.statusitem-current {
background-color: #BEBEBE;
color: #2C2C2C;
font-family: monospace;
font-size: 1em;
padding-top: 10px;
background-color: #d4d4d4;
color: #222;
}
span {
......@@ -93,6 +101,7 @@ section.combined-open-ended-status {
div.result-container {
.evaluation {
p {
margin-bottom: 1px;
}
......@@ -104,6 +113,7 @@ div.result-container {
}
.evaluation-response {
margin-bottom: 10px;
header {
text-align: right;
a {
......@@ -134,6 +144,7 @@ div.result-container {
}
.external-grader-message {
margin-bottom: 5px;
section {
padding-left: 20px;
background-color: #FAFAFA;
......@@ -141,6 +152,7 @@ div.result-container {
font-family: monospace;
font-size: 1em;
padding-top: 10px;
padding-bottom:30px;
header {
font-size: 1.4em;
}
......@@ -221,12 +233,13 @@ div.result-container {
div.result-container, section.open-ended-child {
.rubric {
margin-bottom:25px;
tr {
margin:10px 0px;
height: 100%;
}
td {
padding: 20px 0px;
padding: 20px 0px 25px 0px;
margin: 10px 0px;
height: 100%;
}
......@@ -236,16 +249,16 @@ div.result-container, section.open-ended-child {
}
label,
.view-only {
margin:10px;
margin:2px;
position: relative;
padding: 15px;
width: 200px;
padding: 10px 15px 25px 15px;
width: 145px;
height:100%;
display: inline-block;
min-height: 50px;
min-width: 50px;
background-color: #CCC;
font-size: 1em;
font-size: .85em;
}
.grade {
position: absolute;
......@@ -257,12 +270,6 @@ div.result-container, section.open-ended-child {
background: #666;
color: white;
}
input[type=radio]:checked + label {
background: #666;
color: white; }
input[class='score-selection'] {
display: none;
}
}
}
......@@ -461,7 +468,6 @@ section.open-ended-child {
p {
line-height: 20px;
text-transform: capitalize;
margin-bottom: 0;
float: left;
}
......@@ -598,13 +604,15 @@ section.open-ended-child {
}
}
div.open-ended-alert {
div.open-ended-alert,
.save_message {
padding: 8px 12px;
border: 1px solid #EBE8BF;
border-radius: 3px;
background: #FFFCDD;
font-size: 0.9em;
margin-top: 10px;
margin-bottom:5px;
}
div.capa_reset {
......@@ -623,4 +631,31 @@ section.open-ended-child {
font-size: 0.9em;
}
.assessment-container {
margin: 40px 0px 30px 0px;
.scoring-container
{
p
{
margin-bottom: 1em;
}
label {
margin: 10px;
padding: 5px;
display: inline-block;
min-width: 50px;
background-color: #CCC;
text-size: 1.5em;
}
input[type=radio]:checked + label {
background: #666;
color: white;
}
input[class='grade-selection'] {
display: none;
}
}
}
}
......@@ -9,20 +9,34 @@ class @Collapsible
###
el: container
###
# standard longform + shortfom pattern
el.find('.longform').hide()
el.find('.shortform').append('<a href="#" class="full">See full output</a>')
# custom longform + shortform text pattern
short_custom = el.find('.shortform-custom')
# set up each one individually
short_custom.each (index, elt) =>
open_text = $(elt).data('open-text')
close_text = $(elt).data('close-text')
$(elt).append("<a href='#' class='full-custom'>"+ open_text + "</a>")
$(elt).find('.full-custom').click (event) => @toggleFull(event, open_text, close_text)
# collapsible pattern
el.find('.collapsible header + section').hide()
el.find('.full').click @toggleFull
# set up triggers
el.find('.full').click (event) => @toggleFull(event, "See full output", "Hide output")
el.find('.collapsible header a').click @toggleHint
@toggleFull: (event) =>
@toggleFull: (event, open_text, close_text) =>
event.preventDefault()
$(event.target).parent().siblings().slideToggle()
$(event.target).parent().parent().toggleClass('open')
if $(event.target).text() == 'See full output'
new_text = 'Hide output'
if $(event.target).text() == open_text
new_text = close_text
else
new_text = 'See full output'
new_text = open_text
$(event.target).text(new_text)
@toggleHint: (event) =>
......
......@@ -109,7 +109,8 @@ class @CombinedOpenEnded
@reset_button.hide()
@next_problem_button.hide()
@hint_area.attr('disabled', false)
if @child_state == 'done'
@rubric_wrapper.hide()
if @child_type=="openended"
@skip_button.hide()
if @allow_reset=="True"
......@@ -139,6 +140,7 @@ class @CombinedOpenEnded
else
@submit_button.click @message_post
else if @child_state == 'done'
@rubric_wrapper.hide()
@answer_area.attr("disabled", true)
@hint_area.attr('disabled', true)
@submit_button.hide()
......@@ -151,7 +153,7 @@ class @CombinedOpenEnded
find_assessment_elements: ->
@assessment = @$('select.assessment')
@assessment = @$('input[name="grade-selection"]')
find_hint_elements: ->
@hint_area = @$('textarea.post_assessment')
......@@ -163,6 +165,7 @@ class @CombinedOpenEnded
$.postWithPrefix "#{@ajax_url}/save_answer", data, (response) =>
if response.success
@rubric_wrapper.html(response.rubric_html)
@rubric_wrapper.show()
@child_state = 'assessing'
@find_assessment_elements()
@rebind()
......@@ -174,7 +177,8 @@ class @CombinedOpenEnded
save_assessment: (event) =>
event.preventDefault()
if @child_state == 'assessing'
data = {'assessment' : @assessment.find(':selected').text()}
checked_assessment = @$('input[name="grade-selection"]:checked')
data = {'assessment' : checked_assessment.val()}
$.postWithPrefix "#{@ajax_url}/save_assessment", data, (response) =>
if response.success
@child_state = response.state
......@@ -183,6 +187,7 @@ class @CombinedOpenEnded
@hint_wrapper.html(response.hint_html)
@find_hint_elements()
else if @child_state == 'done'
@rubric_wrapper.hide()
@message_wrapper.html(response.message_html)
@rebind()
......
......@@ -121,6 +121,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
'rubric': rubric_string,
'initial_display': self.initial_display,
'answer': self.answer,
'problem_id': self.display_name
})
updated_grader_payload = json.dumps(parsed_grader_payload)
......@@ -381,7 +382,8 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
rubric_feedback=""
feedback = self._convert_longform_feedback_to_html(response_items)
if response_items['rubric_scores_complete']==True:
rubric_feedback = CombinedOpenEndedRubric.render_rubric(response_items['rubric_xml'], system)
rubric_renderer = CombinedOpenEndedRubric(system, True)
success, rubric_feedback = rubric_renderer.render_rubric(response_items['rubric_xml'])
if not response_items['success']:
return system.render_template("open_ended_error.html",
......@@ -450,8 +452,8 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
'success': score_result['success'],
'grader_id': score_result['grader_id'][i],
'submission_id': score_result['submission_id'],
'rubric_scores_complete' : score_result['rubric_scores_complete'],
'rubric_xml' : score_result['rubric_xml'],
'rubric_scores_complete' : score_result['rubric_scores_complete'][i],
'rubric_xml' : score_result['rubric_xml'][i],
}
feedback_items.append(self._format_feedback(new_score_result, system))
if join_feedback:
......
......@@ -93,6 +93,7 @@ class OpenEndedChild(object):
self.prompt = static_data['prompt']
self.rubric = static_data['rubric']
self.display_name = static_data['display_name']
# Used for progress / grading. Currently get credit just for
# completion (doesn't matter if you self-assessed correct/incorrect).
......
......@@ -75,7 +75,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
'previous_answer': previous_answer,
'ajax_url': system.ajax_url,
'initial_rubric': self.get_rubric_html(system),
'initial_hint': self.get_hint_html(system),
'initial_hint': "",
'initial_message': self.get_message_html(),
'state': self.state,
'allow_reset': self._allow_reset(),
......@@ -122,7 +122,8 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
if self.state == self.INITIAL:
return ''
rubric_html = CombinedOpenEndedRubric.render_rubric(self.rubric, system)
rubric_renderer = CombinedOpenEndedRubric(system, True)
success, rubric_html = rubric_renderer.render_rubric(self.rubric)
# we'll render it
context = {'rubric': rubric_html,
......@@ -235,13 +236,9 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
d = {'success': True, }
if score == self.max_score():
self.change_state(self.DONE)
d['message_html'] = self.get_message_html()
d['allow_reset'] = self._allow_reset()
else:
self.change_state(self.POST_ASSESSMENT)
d['hint_html'] = self.get_hint_html(system)
d['state'] = self.state
return d
......
......@@ -30,6 +30,7 @@ class OpenEndedChildTest(unittest.TestCase):
'prompt': prompt,
'rubric': rubric,
'max_score': max_score,
'display_name': 'Name'
}
definition = Mock()
descriptor = Mock()
......@@ -142,6 +143,7 @@ class OpenEndedModuleTest(unittest.TestCase):
'prompt': prompt,
'rubric': rubric,
'max_score': max_score,
'display_name': 'Name'
}
oeparam = etree.XML('''
......
......@@ -42,7 +42,8 @@ class SelfAssessmentTest(unittest.TestCase):
'max_attempts': 10,
'rubric': etree.XML(self.rubric),
'prompt': self.prompt,
'max_score': 1
'max_score': 1,
'display_name': "Name"
}
self.module = SelfAssessmentModule(test_system, self.location,
......@@ -57,15 +58,13 @@ class SelfAssessmentTest(unittest.TestCase):
self.assertEqual(self.module.get_score()['score'], 0)
self.module.save_answer({'student_answer': "I am an answer"}, test_system)
self.assertEqual(self.module.state, self.module.ASSESSING)
self.module.save_assessment({'assessment': '0'}, test_system)
self.assertEqual(self.module.state, self.module.POST_ASSESSMENT)
self.module.save_hint({'hint': 'this is a hint'}, test_system)
self.assertEqual(self.module.state, self.module.DONE)
d = self.module.reset({})
self.assertTrue(d['success'])
self.assertEqual(self.module.state, self.module.INITIAL)
......
......@@ -11,6 +11,10 @@ from django.http import HttpResponse, Http404
from courseware.access import has_access
from util.json_request import expect_json
from xmodule.course_module import CourseDescriptor
from xmodule.combined_open_ended_rubric import CombinedOpenEndedRubric
from lxml import etree
from mitxmako.shortcuts import render_to_string
from xmodule.x_module import ModuleSystem
log = logging.getLogger(__name__)
......@@ -27,6 +31,7 @@ class GradingService(object):
self.url = config['url']
self.login_url = self.url + '/login/'
self.session = requests.session()
self.system = ModuleSystem(None, None, None, render_to_string, None)
def _login(self):
"""
......@@ -98,3 +103,29 @@ class GradingService(object):
return response
def _render_rubric(self, response, view_only=False):
"""
Given an HTTP Response with the key 'rubric', render out the html
required to display the rubric
"""
try:
response_json = json.loads(response)
if response_json.has_key('rubric'):
rubric = response_json['rubric']
rubric_renderer = CombinedOpenEndedRubric(self.system, False)
success, rubric_html = rubric_renderer.render_rubric(rubric)
if not success:
error_message = "Could not render rubric: {0}".format(rubric)
log.exception(error_message)
return json.dumps({'success': False,
'error': error_message})
response_json['rubric'] = rubric_html
return json.dumps(response_json)
# if we can't parse the rubric into HTML,
except etree.XMLSyntaxError:
log.exception("Cannot parse rubric string. Raw string: {0}"
.format(rubric))
return json.dumps({'success': False,
'error': 'Error displaying submission'})
......@@ -20,7 +20,9 @@ from grading_service import GradingServiceError
from courseware.access import has_access
from util.json_request import expect_json
from xmodule.course_module import CourseDescriptor
from xmodule.combined_open_ended_rubric import CombinedOpenEndedRubric
from student.models import unique_id_for_user
from lxml import etree
log = logging.getLogger(__name__)
......@@ -93,15 +95,17 @@ class PeerGradingService(GradingService):
def get_next_submission(self, problem_location, grader_id):
response = self.get(self.get_next_submission_url,
{'location': problem_location, 'grader_id': grader_id})
return response
return self._render_rubric(response)
def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key):
def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key, rubric_scores):
data = {'grader_id' : grader_id,
'submission_id' : submission_id,
'score' : score,
'feedback' : feedback,
'submission_key': submission_key,
'location': location}
'location': location,
'rubric_scores': rubric_scores,
'rubric_scores_complete': True}
return self.post(self.save_grade_url, data)
def is_student_calibrated(self, problem_location, grader_id):
......@@ -110,15 +114,19 @@ class PeerGradingService(GradingService):
def show_calibration_essay(self, problem_location, grader_id):
params = {'problem_id' : problem_location, 'student_id': grader_id}
return self.get(self.show_calibration_essay_url, params)
response = self.get(self.show_calibration_essay_url, params)
return self._render_rubric(response)
def save_calibration_essay(self, problem_location, grader_id, calibration_essay_id, submission_key, score, feedback):
def save_calibration_essay(self, problem_location, grader_id, calibration_essay_id, submission_key,
score, feedback, rubric_scores):
data = {'location': problem_location,
'student_id': grader_id,
'calibration_essay_id': calibration_essay_id,
'submission_key': submission_key,
'score': score,
'feedback': feedback}
'feedback': feedback,
'rubric_scores[]': rubric_scores,
'rubric_scores_complete': True}
return self.post(self.save_calibration_essay_url, data)
def get_problem_list(self, course_id, grader_id):
......@@ -205,7 +213,7 @@ def get_next_submission(request, course_id):
mimetype="application/json")
except GradingServiceError:
log.exception("Error getting next submission. server url: {0} location: {1}, grader_id: {2}"
.format(staff_grading_service().url, location, grader_id))
.format(peer_grading_service().url, location, grader_id))
return json.dumps({'success': False,
'error': 'Could not connect to grading service'})
......@@ -225,7 +233,7 @@ def save_grade(request, course_id):
error: if there was an error in the submission, this is the error message
"""
_check_post(request)
required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback'])
required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]'])
success, message = _check_required(request, required)
if not success:
return _err_response(message)
......@@ -236,14 +244,15 @@ def save_grade(request, course_id):
score = p['score']
feedback = p['feedback']
submission_key = p['submission_key']
rubric_scores = p.getlist('rubric_scores[]')
try:
response = peer_grading_service().save_grade(location, grader_id, submission_id,
score, feedback, submission_key)
score, feedback, submission_key, rubric_scores)
return HttpResponse(response, mimetype="application/json")
except GradingServiceError:
log.exception("""Error saving grade. server url: {0}, location: {1}, submission_id:{2},
submission_key: {3}, score: {4}"""
.format(staff_grading_service().url,
.format(peer_grading_service().url,
location, submission_id, submission_key, score)
)
return json.dumps({'success': False,
......@@ -282,7 +291,7 @@ def is_student_calibrated(request, course_id):
return HttpResponse(response, mimetype="application/json")
except GradingServiceError:
log.exception("Error from grading service. server url: {0}, grader_id: {0}, location: {1}"
.format(staff_grading_service().url, grader_id, location))
.format(peer_grading_service().url, grader_id, location))
return json.dumps({'success': False,
'error': 'Could not connect to grading service'})
......@@ -326,9 +335,15 @@ def show_calibration_essay(request, course_id):
return HttpResponse(response, mimetype="application/json")
except GradingServiceError:
log.exception("Error from grading service. server url: {0}, location: {0}"
.format(staff_grading_service().url, location))
.format(peer_grading_service().url, location))
return json.dumps({'success': False,
'error': 'Could not connect to grading service'})
# if we can't parse the rubric into HTML,
except etree.XMLSyntaxError:
log.exception("Cannot parse rubric string. Raw string: {0}"
.format(rubric))
return json.dumps({'success': False,
'error': 'Error displaying submission'})
def save_calibration_essay(request, course_id):
......@@ -350,7 +365,7 @@ def save_calibration_essay(request, course_id):
"""
_check_post(request)
required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback'])
required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]'])
success, message = _check_required(request, required)
if not success:
return _err_response(message)
......@@ -361,9 +376,11 @@ def save_calibration_essay(request, course_id):
submission_key = p['submission_key']
score = p['score']
feedback = p['feedback']
rubric_scores = p.getlist('rubric_scores[]')
try:
response = peer_grading_service().save_calibration_essay(location, grader_id, calibration_essay_id, submission_key, score, feedback)
response = peer_grading_service().save_calibration_essay(location, grader_id, calibration_essay_id,
submission_key, score, feedback, rubric_scores)
return HttpResponse(response, mimetype="application/json")
except GradingServiceError:
log.exception("Error saving calibration grade, location: {0}, submission_id: {1}, submission_key: {2}, grader_id: {3}".format(location, submission_id, submission_key, grader_id))
......
......@@ -17,6 +17,8 @@ from courseware.access import has_access
from util.json_request import expect_json
from xmodule.course_module import CourseDescriptor
from student.models import unique_id_for_user
from xmodule.x_module import ModuleSystem
from mitxmako.shortcuts import render_to_string
log = logging.getLogger(__name__)
......@@ -46,14 +48,14 @@ class MockStaffGradingService(object):
self.cnt += 1
return json.dumps({'success': True,
'problem_list': [
json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo1', \
json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo1',
'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5, 'min_for_ml': 10}),
json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2', \
json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2',
'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5, 'min_for_ml': 10})
]})
def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped):
def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores):
return self.get_next(course_id, 'fake location', grader_id)
......@@ -107,12 +109,13 @@ class StaffGradingService(GradingService):
Raises:
GradingServiceError: something went wrong with the connection.
"""
return self.get(self.get_next_url,
response = self.get(self.get_next_url,
params={'location': location,
'grader_id': grader_id})
return self._render_rubric(response)
def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped):
def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores):
"""
Save a score and feedback for a submission.
......@@ -129,7 +132,9 @@ class StaffGradingService(GradingService):
'score': score,
'feedback': feedback,
'grader_id': grader_id,
'skipped': skipped}
'skipped': skipped,
'rubric_scores': rubric_scores,
'rubric_scores_complete': True}
return self.post(self.save_grade_url, data=data)
......@@ -143,6 +148,8 @@ class StaffGradingService(GradingService):
# importing this file doesn't create objects that may not have the right config
_service = None
module_system = ModuleSystem("", None, None, render_to_string, None)
def staff_grading_service():
"""
Return a staff grading service instance--if settings.MOCK_STAFF_GRADING is True,
......@@ -286,7 +293,7 @@ def save_grade(request, course_id):
if request.method != 'POST':
raise Http404
required = set(['score', 'feedback', 'submission_id', 'location'])
required = set(['score', 'feedback', 'submission_id', 'location', 'rubric_scores[]'])
actual = set(request.POST.keys())
missing = required - actual
if len(missing) > 0:
......@@ -299,13 +306,15 @@ def save_grade(request, course_id):
location = p['location']
skipped = 'skipped' in p
try:
result_json = staff_grading_service().save_grade(course_id,
grader_id,
p['submission_id'],
p['score'],
p['feedback'],
skipped)
skipped,
p.getlist('rubric_scores[]'))
except GradingServiceError:
log.exception("Error saving grade")
return _err_response('Could not connect to grading service')
......
......@@ -96,7 +96,8 @@ class TestStaffGradingService(ct.PageLoader):
data = {'score': '12',
'feedback': 'great!',
'submission_id': '123',
'location': self.location}
'location': self.location,
'rubric_scores[]': ['1', '2']}
r = self.check_for_post_code(200, url, data)
d = json.loads(r.content)
self.assertTrue(d['success'], str(d))
......
......@@ -10,4 +10,18 @@ class PeerGrading
@message_container = $('.message-container')
@message_container.toggle(not @message_container.is(':empty'))
@problem_list = $('.problem-list')
@construct_progress_bar()
construct_progress_bar: () =>
problems = @problem_list.find('tr').next()
problems.each( (index, element) =>
problem = $(element)
progress_bar = problem.find('.progress-bar')
bar_value = parseInt(problem.data('graded'))
bar_max = parseInt(problem.data('required')) + bar_value
progress_bar.progressbar({value: bar_value, max: bar_max})
)
$(document).ready(() -> new PeerGrading())
......@@ -56,13 +56,41 @@ The standard chunk of Lorem Ipsum used since the 1500s is reproduced below for t
<p>This is a self-assessed open response question. Please use as much space as you need in the box below to answer the question.</p>
'''
rubric: '''
<ul>
<li>Metals tend to be good electronic conductors, meaning that they have a large number of electrons which are able to access empty (mobile) energy states within the material.</li>
<li>Sodium has a half-filled s-band, so there are a number of empty states immediately above the highest occupied energy levels within the band.</li>
<li>Magnesium has a full s-band, but the the s-band and p-band overlap in magnesium. Thus are still a large number of available energy states immediately above the s-band highest occupied energy level.</li>
</ul>
<table class="rubric"><tbody><tr><th>Purpose</th>
<p>Please score your response according to how many of the above components you identified:</p>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-0" value="0"><label for="score-0-0">No product</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-1" value="1"><label for="score-0-1">Unclear purpose or main idea</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-2" value="2"><label for="score-0-2">Communicates an identifiable purpose and/or main idea for an audience</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-3" value="3"><label for="score-0-3">Achieves a clear and distinct purpose for a targeted audience and communicates main ideas with effectively used techniques to introduce and represent ideas and insights</label>
</td>
</tr><tr><th>Organization</th>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-0" value="0"><label for="score-1-0">No product</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-1" value="1"><label for="score-1-1">Organization is unclear; introduction, body, and/or conclusion are underdeveloped, missing or confusing.</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-2" value="2"><label for="score-1-2">Organization is occasionally unclear; introduction, body or conclusion may be underdeveloped.</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-3" value="3"><label for="score-1-3">Organization is clear and easy to follow; introduction, body and conclusion are defined and aligned with purpose.</label>
</td>
</tr></tbody></table>
'''
max_score: 4
else if cmd == 'get_next_submission'
......@@ -82,13 +110,41 @@ Curabitur tristique purus ac arcu consequat cursus. Cras diam felis, dignissim q
<p>This is a self-assessed open response question. Please use as much space as you need in the box below to answer the question.</p>
'''
rubric: '''
<ul>
<li>Metals tend to be good electronic conductors, meaning that they have a large number of electrons which are able to access empty (mobile) energy states within the material.</li>
<li>Sodium has a half-filled s-band, so there are a number of empty states immediately above the highest occupied energy levels within the band.</li>
<li>Magnesium has a full s-band, but the the s-band and p-band overlap in magnesium. Thus are still a large number of available energy states immediately above the s-band highest occupied energy level.</li>
</ul>
<table class="rubric"><tbody><tr><th>Purpose</th>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-0" value="0"><label for="score-0-0">No product</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-1" value="1"><label for="score-0-1">Unclear purpose or main idea</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-2" value="2"><label for="score-0-2">Communicates an identifiable purpose and/or main idea for an audience</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-3" value="3"><label for="score-0-3">Achieves a clear and distinct purpose for a targeted audience and communicates main ideas with effectively used techniques to introduce and represent ideas and insights</label>
</td>
</tr><tr><th>Organization</th>
<p>Please score your response according to how many of the above components you identified:</p>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-0" value="0"><label for="score-1-0">No product</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-1" value="1"><label for="score-1-1">Organization is unclear; introduction, body, and/or conclusion are underdeveloped, missing or confusing.</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-2" value="2"><label for="score-1-2">Organization is occasionally unclear; introduction, body or conclusion may be underdeveloped.</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-3" value="3"><label for="score-1-3">Organization is clear and easy to follow; introduction, body and conclusion are defined and aligned with purpose.</label>
</td>
</tr></tbody></table>
'''
max_score: 4
else if cmd == 'save_calibration_essay'
......@@ -137,7 +193,8 @@ class PeerGradingProblem
@feedback_area = $('.feedback-area')
@score_selection_container = $('.score-selection-container')
@score = null
@rubric_selection_container = $('.rubric-selection-container')
@grade = null
@calibration = null
@submit_button = $('.submit-button')
......@@ -175,9 +232,23 @@ class PeerGradingProblem
fetch_submission_essay: () =>
@backend.post('get_next_submission', {location: @location}, @render_submission)
# finds the scores for each rubric category
get_score_list: () =>
# find the number of categories:
num_categories = $('table.rubric tr').length
score_lst = []
# get the score for each one
for i in [0..(num_categories-1)]
score = $("input[name='score-selection-#{i}']:checked").val()
score_lst.push(score)
return score_lst
construct_data: () ->
data =
score: @score
rubric_scores: @get_score_list()
score: @grade
location: @location
submission_id: @essay_id_input.val()
submission_key: @submission_key_input.val()
......@@ -244,8 +315,16 @@ class PeerGradingProblem
# called after a grade is selected on the interface
graded_callback: (event) =>
@grading_message.hide()
@score = event.target.value
@grade = $("input[name='grade-selection']:checked").val()
if @grade == undefined
return
# check to see whether or not any categories have not been scored
num_categories = $('table.rubric tr').length
for i in [0..(num_categories-1)]
score = $("input[name='score-selection-#{i}']:checked").val()
if score == undefined
return
# show button if we have scores for all categories
@show_submit_button()
......@@ -322,7 +401,7 @@ class PeerGradingProblem
@submission_container.append(@make_paragraphs(response.student_response))
@prompt_container.html(response.prompt)
@rubric_container.html(response.rubric)
@rubric_selection_container.html(response.rubric)
@submission_key_input.val(response.submission_key)
@essay_id_input.val(response.submission_id)
@setup_score_selection(response.max_score)
......@@ -336,10 +415,10 @@ class PeerGradingProblem
# display correct grade
@calibration_feedback_panel.slideDown()
calibration_wrapper = $('.calibration-feedback-wrapper')
calibration_wrapper.html("<p>The score you gave was: #{@score}. The actual score is: #{response.actual_score}</p>")
calibration_wrapper.html("<p>The score you gave was: #{@grade}. The actual score is: #{response.actual_score}</p>")
score = parseInt(@score)
score = parseInt(@grade)
actual_score = parseInt(response.actual_score)
if score == actual_score
......@@ -366,8 +445,12 @@ class PeerGradingProblem
@submit_button.show()
setup_score_selection: (max_score) =>
# first, get rid of all the old inputs, if any.
@score_selection_container.html('Choose score: ')
@score_selection_container.html("""
<h3>Overall Score</h3>
<p>Choose an overall score for this submission.</p>
""")
# Now create new labels and inputs for each possible score.
for score in [0..max_score]
......@@ -375,12 +458,13 @@ class PeerGradingProblem
label = """<label for="#{id}">#{score}</label>"""
input = """
<input type="radio" name="score-selection" id="#{id}" value="#{score}"/>
<input type="radio" name="grade-selection" id="#{id}" value="#{score}"/>
""" # " fix broken parsing in emacs
@score_selection_container.append(input + label)
# And now hook up an event handler again
$("input[name='score-selection']").change @graded_callback
$("input[name='grade-selection']").change @graded_callback
......
......@@ -42,14 +42,41 @@ class StaffGradingBackend
The standard chunk of Lorem Ipsum used since the 1500s is reproduced below for those interested. Sections 1.10.32 and 1.10.33 from "de Finibus Bonorum et Malorum" by Cicero are also reproduced in their exact original form, accompanied by English versions from the 1914 translation by H. Rackham.
'''
rubric: '''
<ul>
<li>Metals tend to be good electronic conductors, meaning that they have a large number of electrons which are able to access empty (mobile) energy states within the material.</li>
<li>Sodium has a half-filled s-band, so there are a number of empty states immediately above the highest occupied energy levels within the band.</li>
<li>Magnesium has a full s-band, but the the s-band and p-band overlap in magnesium. Thus are still a large number of available energy states immediately above the s-band highest occupied energy level.</li>
</ul>
<table class="rubric"><tbody><tr><th>Purpose</th>
<p>Please score your response according to how many of the above components you identified:</p>
'''
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-0" value="0"><label for="score-0-0">No product</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-1" value="1"><label for="score-0-1">Unclear purpose or main idea</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-2" value="2"><label for="score-0-2">Communicates an identifiable purpose and/or main idea for an audience</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-3" value="3"><label for="score-0-3">Achieves a clear and distinct purpose for a targeted audience and communicates main ideas with effectively used techniques to introduce and represent ideas and insights</label>
</td>
</tr><tr><th>Organization</th>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-0" value="0"><label for="score-1-0">No product</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-1" value="1"><label for="score-1-1">Organization is unclear; introduction, body, and/or conclusion are underdeveloped, missing or confusing.</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-2" value="2"><label for="score-1-2">Organization is occasionally unclear; introduction, body or conclusion may be underdeveloped.</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-3" value="3"><label for="score-1-3">Organization is clear and easy to follow; introduction, body and conclusion are defined and aligned with purpose.</label>
</td>
</tr></tbody></table>'''
submission_id: @mock_cnt
max_score: 2 + @mock_cnt % 3
ml_error_info : 'ML accuracy info: ' + @mock_cnt
......@@ -134,12 +161,11 @@ class StaffGrading
@submission_container = $('.submission-container')
@submission_wrapper = $('.submission-wrapper')
@rubric_container = $('.rubric-container')
@rubric_wrapper = $('.rubric-wrapper')
@grading_wrapper = $('.grading-wrapper')
@feedback_area = $('.feedback-area')
@score_selection_container = $('.score-selection-container')
@grade_selection_container = $('.grade-selection-container')
@submit_button = $('.submit-button')
@action_button = $('.action-button')
......@@ -166,8 +192,9 @@ class StaffGrading
@min_for_ml = 0
@num_graded = 0
@num_pending = 0
@score_lst = []
@grade = null
@score = null
@problems = null
# action handlers
......@@ -182,30 +209,53 @@ class StaffGrading
setup_score_selection: =>
# first, get rid of all the old inputs, if any.
@score_selection_container.html('Choose score: ')
@grade_selection_container.html("""
<h3>Overall Score</h3>
<p>Choose an overall score for this submission.</p>
""")
# Now create new labels and inputs for each possible score.
for score in [0..@max_score]
id = 'score-' + score
label = """<label for="#{id}">#{score}</label>"""
input = """
<input type="radio" name="score-selection" id="#{id}" value="#{score}"/>
<input type="radio" class="grade-selection" name="grade-selection" id="#{id}" value="#{score}"/>
""" # " fix broken parsing in emacs
@score_selection_container.append(input + label)
# And now hook up an event handler again
$("input[name='score-selection']").change @graded_callback
@grade_selection_container.append(input + label)
$('.grade-selection').click => @graded_callback()
@score_selection_container.html(@rubric)
$('.score-selection').click => @graded_callback()
graded_callback: () =>
@grade = $("input[name='grade-selection']:checked").val()
if @grade == undefined
return
# check to see whether or not any categories have not been scored
num_categories = $('table.rubric tr').length
for i in [0..(num_categories-1)]
score = $("input[name='score-selection-#{i}']:checked").val()
if score == undefined
return
# show button if we have scores for all categories
@state = state_graded
@submit_button.show()
set_button_text: (text) =>
@action_button.attr('value', text)
graded_callback: (event) =>
@score = event.target.value
@state = state_graded
@message = ''
@render_view()
# finds the scores for each rubric category
get_score_list: () =>
# find the number of categories:
num_categories = $('table.rubric tr').length
score_lst = []
# get the score for each one
for i in [0..(num_categories-1)]
score = $("input[name='score-selection-#{i}']:checked").val()
score_lst.push(score)
return score_lst
ajax_callback: (response) =>
# always clear out errors and messages on transition.
......@@ -231,7 +281,8 @@ class StaffGrading
skip_and_get_next: () =>
data =
score: @score
score: @grade
rubric_scores: @get_score_list()
feedback: @feedback_area.val()
submission_id: @submission_id
location: @location
......@@ -244,7 +295,8 @@ class StaffGrading
submit_and_get_next: () ->
data =
score: @score
score: @grade
rubric_scores: @get_score_list()
feedback: @feedback_area.val()
submission_id: @submission_id
location: @location
......@@ -261,8 +313,8 @@ class StaffGrading
@rubric = response.rubric
@submission_id = response.submission_id
@feedback_area.val('')
@grade = null
@max_score = response.max_score
@score = null
@ml_error_info=response.ml_error_info
@prompt_name = response.problem_name
@num_graded = response.num_graded
......@@ -282,14 +334,21 @@ class StaffGrading
@ml_error_info = null
@submission_id = null
@message = message
@score = null
@grade = null
@max_score = 0
@state = state_no_data
render_view: () ->
# clear the problem list and breadcrumbs
@problem_list.html('')
@problem_list.html('''
<tr>
<th>Problem Name</th>
<th>Graded</th>
<th>Available to Grade</th>
<th>Required</th>
<th>Progress</th>
</tr>
''')
@breadcrumbs.html('')
@problem_list_container.toggle(@list_view)
if @backend.mock_backend
......@@ -306,7 +365,6 @@ class StaffGrading
@state == state_no_data)
@prompt_wrapper.toggle(show_grading_elements)
@submission_wrapper.toggle(show_grading_elements)
@rubric_wrapper.toggle(show_grading_elements)
@grading_wrapper.toggle(show_grading_elements)
@meta_info_wrapper.toggle(show_grading_elements)
@action_button.hide()
......@@ -318,7 +376,7 @@ class StaffGrading
problem_link:(problem) ->
link = $('<a>').attr('href', "javascript:void(0)").append(
"#{problem.problem_name} (#{problem.num_graded} graded, #{problem.num_pending} pending, required to grade #{problem.num_required} more)")
"#{problem.problem_name}")
.click =>
@get_next_submission problem.location
......@@ -331,7 +389,17 @@ class StaffGrading
render_list: () ->
for problem in @problems
@problem_list.append($('<li>').append(@problem_link(problem)))
problem_row = $('<tr>')
problem_row.append($('<td class="problem-name">').append(@problem_link(problem)))
problem_row.append($('<td>').append("#{problem.num_graded}"))
problem_row.append($('<td>').append("#{problem.num_pending}"))
problem_row.append($('<td>').append("#{problem.num_required}"))
row_progress_bar = $('<div>').addClass('progress-bar')
progress_value = parseInt(problem.num_graded)
progress_max = parseInt(problem.num_required) + progress_value
row_progress_bar.progressbar({value: progress_value, max: progress_max})
problem_row.append($('<td>').append(row_progress_bar))
@problem_list.append(problem_row)
render_problem: () ->
# make the view elements match the state. Idempotent.
......@@ -353,7 +421,7 @@ class StaffGrading
else if @state == state_grading
@ml_error_info_container.html(@ml_error_info)
meta_list = $("<ul>")
meta_list.append("<li><span class='meta-info'>Pending - </span> #{@num_pending}</li>")
meta_list.append("<li><span class='meta-info'>Available - </span> #{@num_pending}</li>")
meta_list.append("<li><span class='meta-info'>Graded - </span> #{@num_graded}</li>")
meta_list.append("<li><span class='meta-info'>Needed for ML - </span> #{Math.max(@min_for_ml - @num_graded, 0)}</li>")
@problem_meta_info.html(meta_list)
......@@ -361,8 +429,6 @@ class StaffGrading
@prompt_container.html(@prompt)
@prompt_name_container.html("#{@prompt_name}")
@submission_container.html(@make_paragraphs(@submission))
@rubric_container.html(@rubric)
# no submit button until user picks grade.
show_submit_button = false
show_action_button = false
......
......@@ -24,15 +24,33 @@ div.peer-grading{
color: white;
}
input[name='score-selection'] {
input[name='score-selection'],
input[name='grade-selection'] {
display: none;
}
ul
.problem-list
{
li
text-align: center;
table-layout: auto;
width:100%;
th
{
padding: 10px;
}
td
{
padding:10px;
}
td.problem-name
{
text-align:left;
}
.ui-progressbar
{
margin: 16px 0px;
height:1em;
margin:0px;
padding:0px;
}
}
......@@ -106,6 +124,7 @@ div.peer-grading{
margin: 0px;
background: #eee;
height: 10em;
width:47.6%;
h3
{
text-align:center;
......@@ -120,12 +139,10 @@ div.peer-grading{
.calibration-panel
{
float:left;
width:48%;
}
.grading-panel
{
float:right;
width: 48%;
}
.current-state
{
......@@ -159,5 +176,49 @@ div.peer-grading{
}
}
padding: 40px;
.rubric {
tr {
margin:10px 0px;
height: 100%;
}
td {
padding: 20px 0px 25px 0px;
height: 100%;
}
th {
padding: 5px;
margin: 5px;
}
label,
.view-only {
margin:2px;
position: relative;
padding: 15px 15px 25px 15px;
width: 150px;
height:100%;
display: inline-block;
min-height: 50px;
min-width: 50px;
background-color: #CCC;
font-size: .9em;
}
.grade {
position: absolute;
bottom:0px;
right:0px;
margin:10px;
}
.selected-grade {
background: #666;
color: white;
}
input[type=radio]:checked + label {
background: #666;
color: white; }
input[class='score-selection'] {
display: none;
}
}
}
<section id="combined-open-ended" class="combined-open-ended" data-ajax-url="${ajax_url}" data-allow_reset="${allow_reset}" data-state="${state}" data-task-count="${task_count}" data-task-number="${task_number}">
<h2>${display_name}</h2>
<div class="status-container">
<h4>Status</h4><br/>
${status | n}
</div>
<div class="item-container">
<h4>Problem</h4><br/>
<h4>Problem</h4>
<div class="problem-container">
% for item in items:
<div class="item">${item['content'] | n}</div>
% endfor
</div>
<input type="button" value="Reset" class="reset-button" name="reset"/>
<input type="button" value="Next Step" class="next-step-button" name="reset"/>
</div>
<a name="results" />
<div class="result-container">
</div>
</section>
......
<div class="result-container">
<h4>Results from Step ${task_number}</h4><br/>
<h4>Results from Step ${task_number}</h4>
${results | n}
</div>
%if status_list[0]['state'] != 'initial':
<h4>Status</h4>
<div class="status-elements">
<section id="combined-open-ended-status" class="combined-open-ended-status">
%for i in xrange(0,len(status_list)):
<%status=status_list[i]%>
%if i==len(status_list)-1:
<div class="statusitem-current" data-status-number="${i}">
<div class="statusitem statusitem-current" data-status-number="${i}">
%else:
<div class="statusitem" data-status-number="${i}">
%endif
......@@ -20,9 +23,12 @@
%if status['type']=="openended" and status['state'] in ['done', 'post_assessment']:
<div class="show-results">
<a href="#" class="show-results-button">Show results from step ${status['task_number']}</a>
<a href="#results" class="show-results-button">Show results from Step ${status['task_number']}</a>
</div>
%endif
</div>
%endfor
</section>
</div>
%endif
......@@ -33,8 +33,8 @@
</div>
<h2>Problem List</h2>
<ul class="problem-list">
</ul>
<table class="problem-list">
</table>
</section>
<!-- Grading View -->
......@@ -54,11 +54,6 @@
<div class="prompt-container">
</div>
</div>
<div class="rubric-wrapper">
<h3>Grading Rubric</h3>
<div class="rubric-container">
</div>
</div>
</section>
......@@ -78,6 +73,8 @@
<div class="evaluation">
<p class="score-selection-container">
</p>
<p class="grade-selection-container">
</p>
<textarea name="feedback" placeholder="Feedback for student (optional)"
class="feedback-area" cols="70" ></textarea>
</div>
......
......@@ -10,11 +10,11 @@
% if state == 'initial':
<span class="unanswered" style="display:inline-block;" id="status_${id}">Unanswered</span>
% elif state in ['done', 'post_assessment'] and correct == 'correct':
<span class="correct" id="status_${id}">Correct</span>
<span class="correct" id="status_${id}"></span> <p>Correct</p>
% elif state in ['done', 'post_assessment'] and correct == 'incorrect':
<span class="incorrect" id="status_${id}">Incorrect</span>
<span class="incorrect" id="status_${id}"></span> <p>Incorrect. </p>
% elif state == 'assessing':
<span class="grading" id="status_${id}">Submitted for grading</span>
<span class="grading" id="status_${id}">Submitted for grading.</span>
% endif
% if hidden:
......
<section>
<header>Feedback</header>
<div class="shortform">
<div class="shortform-custom" data-open-text='Show detailed results' data-close-text='Hide detailed results'>
<div class="result-output">
<p>Score: ${score}</p>
% if grader_type == "ML":
......
<table class="rubric">
% for i in range(len(rubric_categories)):
<% category = rubric_categories[i] %>
<tr>
<th>
${category['description']}
% if category['has_score'] == True:
(Your score: ${category['score']})
<form class="rubric-template" id="inputtype_${id}">
<h3>Rubric</h3>
% if view_only and has_score:
<p>This is the rubric that was used to grade your submission. The highlighted selection matches how the grader feels you performed in each category.</p>
% elif view_only:
<p>Use the below rubric to rate this submission.</p>
% else:
<p>Select the criteria you feel best represents this submission in each category.</p>
% endif
</th>
<table class="rubric">
% for i in range(len(categories)):
<% category = categories[i] %>
<tr>
<th>${category['description']}</th>
% for j in range(len(category['options'])):
<% option = category['options'][j] %>
<td>
% if view_only:
## if this is the selected rubric block, show it highlighted
% if option['selected']:
<div class="view-only selected-grade">
% else:
<div class="view-only">
${option['text']}
% if option.has_key('selected'):
% if option['selected'] == True:
<div class="selected-grade">[${option['points']} points]</div>
%else:
<div class="grade">[${option['points']} points]</div>
% endif
% else:
${option['text']}
<div class="grade">[${option['points']} points]</div>
%endif
</div>
% else:
<input type="radio" class="score-selection" name="score-selection-${i}" id="score-${i}-${j}" value="${option['points']}"/>
<label for="score-${i}-${j}">${option['text']}</label>
% endif
</td>
% endfor
</tr>
% endfor
</table>
\ No newline at end of file
</table>
</form>
......@@ -26,13 +26,37 @@
Nothing to grade!
</div>
%else:
<ul class="problem-list">
<div class="problem-list-container">
<table class="problem-list">
<tr>
<th>Problem Name</th>
<th>Graded</th>
<th>Available</th>
<th>Required</th>
<th>Progress</th>
</tr>
%for problem in problem_list:
<li>
<a href="${ajax_url}problem?location=${problem['location']}">${problem['problem_name']} (${problem['num_graded']} graded, ${problem['num_pending']} pending, required to grade ${problem['num_required']} more)</a>
</li>
<tr data-graded="${problem['num_graded']}" data-required="${problem['num_required']}">
<td class="problem-name">
<a href="${ajax_url}problem?location=${problem['location']}">${problem['problem_name']}</a>
</td>
<td>
${problem['num_graded']}
</td>
<td>
${problem['num_pending']}
</td>
<td>
${problem['num_required']}
</td>
<td>
<div class="progress-bar">
</div>
</td>
</tr>
%endfor
</ul>
</table>
</div>
%endif
%endif
</div>
......
......@@ -44,20 +44,13 @@
</div>
<div class="prompt-wrapper">
<div class="prompt-information-container collapsible">
<header><a href="javascript:void(0)">Question</a></header>
<h2>Question</h2>
<div class="prompt-information-container">
<section>
<div class="prompt-container">
</div>
</section>
</div>
<div class="rubric-wrapper collapsible">
<header><a href="javascript:void(0)">Rubric</a></header>
<section>
<div class="rubric-container">
</div>
</section>
</div>
</div>
......@@ -74,6 +67,7 @@
<input type="hidden" name="essay-id" value="" />
</div>
<div class="evaluation">
<p class="rubric-selection-container"></p>
<p class="score-selection-container">
</p>
<textarea name="feedback" placeholder="Feedback for student (optional)"
......
<div class="assessment">
<div class="assessment-container">
<div class="rubric">
<h3>Self-assess your answer with this rubric:</h3>
${rubric | n }
</div>
% if not read_only:
<select name="assessment" class="assessment">
<div class="scoring-container">
<h3>Scoring</h3>
<p>Please select a score below:</p>
<div class="grade-selection">
%for i in xrange(0,max_score+1):
<option value="${i}">${i}</option>
<% id = "score-{0}".format(i) %>
<input type="radio" class="grade-selection" name="grade-selection" value="${i}" id="${id}">
<label for="${id}">${i}</label>
%endfor
</select>
</div>
</div>
% endif
</div>
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment