Commit 9db810a4 by Will Daly

Merge pull request #469 from edx/will/authoring/Rubric-GUI

Will/authoring/rubric gui
parents 061a4d93 29946f2e
@@ -13,7 +13,6 @@ describe("OpenAssessment.StudioView", function() {
this.loadError = false;
this.updateError = false;
this.promptBox = "";
this.rubricXmlBox = "";
this.titleField = "";
this.submissionStartField = "";
this.submissionDueField = "";
@@ -36,13 +35,33 @@ describe("OpenAssessment.StudioView", function() {
this.isReleased = false;
this.rubric = {
prompt: 'This is the feedback prompt',
criteria: [
{
order_num: 0,
name: 'This is the criterion name',
prompt: 'this is the criterion prompt',
feedback: 'disabled',
options: [
{
order_num: 0,
name: 'Did real bad',
points: 0,
explanation: 'Showed as little effort as I did making this test case interesting.'
}
]
}
]
};
this.errorPromise = $.Deferred(function(defer) {
defer.rejectWith(this, ['Test error']);
}).promise();
this.loadEditorContext = function() {
var prompt = this.promptBox;
var rubric = this.rubricXmlBox;
var rubric = this.rubric;
var title = this.titleField;
var submission_start = this.submissionStartField;
var submission_due = this.submissionDueField;
@@ -86,10 +105,10 @@ describe("OpenAssessment.StudioView", function() {
}
};
this.updateEditorContext = function(prompt, rubricXml, title, sub_start, sub_due, assessments) {
this.updateEditorContext = function(prompt, rubric, title, sub_start, sub_due, assessments) {
if (!this.updateError) {
this.promptBox = prompt;
this.rubricXmlBox = rubricXml;
this.rubric = rubric;
this.titleField = title;
this.submissionStartField = sub_start;
this.submissionDueField = sub_due;
@@ -140,15 +159,32 @@ describe("OpenAssessment.StudioView", function() {
var view = null;
var prompt = "How much do you like waffles?";
var rubric =
"<rubric>" +
"<criterion>"+
"<name>Proper Appreciation of Gravity</name>"+
"<prompt>How much respect did the person give waffles?</prompt>"+
"<option points=\"0\"><name>No</name><explanation>Not enough</explanation></option>"+
"<option points=\"2\"><name>Yes</name><explanation>An appropriate Amount</explanation></option>"+
"</criterion>"+
"</rubric>";
var rubric = {
criteria: [
{
order_num: 0,
name: "Proper appreciation of Gravity",
prompt: "How much respect did the person give waffles?",
feedback: "disabled",
options: [
{
order_num: 0,
points: 0,
name: "No",
explanation: "Not enough"
},
{
order_num: 1,
points: 2,
name: "Yes",
explanation: "An appropriate Amount"
}
]
}
]
};
var title = "The most important of all questions.";
var subStart = "";
var subDue = "2014-10-01T10:00:00";
@@ -207,16 +243,14 @@ describe("OpenAssessment.StudioView", function() {
view.load();
// Expect that the editor context was loaded
var rubric = view.rubricXmlBox.getValue();
var prompt = view.promptBox.value;
var prompt = view.settingsFieldSelectors.promptBox.prop('value');
expect(prompt).toEqual('');
expect(rubric).toEqual('');
});
it("saves the Editor Context definition", function() {
// Update the Context
view.titleField.value = 'THIS IS THE NEW TITLE';
view.settingsFieldSelectors.titleField.prop('value', 'THIS IS THE NEW TITLE');
// Save the updated editor definition
view.save();
@@ -249,25 +283,24 @@ describe("OpenAssessment.StudioView", function() {
server.updateEditorContext(prompt, rubric, title, subStart, subDue, assessments);
view.load();
expect(view.promptBox.value).toEqual(prompt);
expect(view.rubricXmlBox.getValue()).toEqual(rubric);
expect(view.titleField.value).toEqual(title);
expect(view.submissionStartField.value).toEqual(subStart);
expect(view.submissionDueField.value).toEqual(subDue);
expect(view.hasPeer.prop('checked')).toEqual(true);
expect(view.hasSelf.prop('checked')).toEqual(true);
expect(view.hasAI.prop('checked')).toEqual(false);
expect(view.hasTraining.prop('checked')).toEqual(true);
expect(view.peerMustGrade.prop('value')).toEqual('5');
expect(view.peerGradedBy.prop('value')).toEqual('3');
expect(view.peerDue.prop('value')).toEqual("");
expect(view.selfStart.prop('value')).toEqual("");
expect(view.selfDue.prop('value')).toEqual("");
expect(view.settingsFieldSelectors.promptBox.prop('value')).toEqual(prompt);
expect(view.settingsFieldSelectors.titleField.prop('value')).toEqual(title);
expect(view.settingsFieldSelectors.submissionStartField.prop('value')).toEqual(subStart);
expect(view.settingsFieldSelectors.submissionDueField.prop('value')).toEqual(subDue);
expect(view.settingsFieldSelectors.hasPeer.prop('checked')).toEqual(true);
expect(view.settingsFieldSelectors.hasSelf.prop('checked')).toEqual(true);
expect(view.settingsFieldSelectors.hasAI.prop('checked')).toEqual(false);
expect(view.settingsFieldSelectors.hasTraining.prop('checked')).toEqual(true);
expect(view.settingsFieldSelectors.peerMustGrade.prop('value')).toEqual('5');
expect(view.settingsFieldSelectors.peerGradedBy.prop('value')).toEqual('3');
expect(view.settingsFieldSelectors.peerDue.prop('value')).toEqual("");
expect(view.settingsFieldSelectors.selfStart.prop('value')).toEqual("");
expect(view.settingsFieldSelectors.selfDue.prop('value')).toEqual("");
expect(view.aiTrainingExamplesCodeBox.getValue()).toEqual("");
expect(view.studentTrainingExamplesCodeBox.getValue()).toEqual(assessments[0].examples);
expect(view.peerStart.prop('value')).toEqual("2014-10-04T00:00:00");
expect(view.settingsFieldSelectors.peerStart.prop('value')).toEqual("2014-10-04T00:00:00");
view.titleField.value = "This is the new title.";
view.settingsFieldSelectors.titleField.prop('value', "This is the new title.");
view.updateEditorContext();
expect(server.titleField).toEqual("This is the new title.");
......
@@ -438,11 +438,11 @@ OpenAssessment.Server.prototype = {
function(err) { console.log(err); }
);
**/
updateEditorContext: function(prompt, rubricXml, title, sub_start, sub_due, assessments) {
updateEditorContext: function(prompt, rubric, title, sub_start, sub_due, assessments) {
var url = this.url('update_editor_context');
var payload = JSON.stringify({
'prompt': prompt,
'rubric': rubricXml,
'rubric': rubric,
'title': title,
'submission_start': sub_start,
'submission_due': sub_due,
......
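For reference, the Python handler now receives the rubric as nested JSON rather than an XML string. A sketch of the decoded update_editor_context payload, with illustrative values (the key names match the serialization above):

# Illustrative 'update_editor_context' payload, mirroring the JSON built by
# OpenAssessment.Server.updateEditorContext above. Values are examples only.
payload = {
    'prompt': 'How much do you like waffles?',
    'rubric': {
        'criteria': [
            {
                'order_num': 0,
                'name': 'Proper appreciation of Gravity',
                'prompt': 'How much respect did the person give waffles?',
                'feedback': 'disabled',
                'options': [
                    {'order_num': 0, 'points': 0, 'name': 'No',
                     'explanation': 'Not enough'},
                ],
            },
        ],
    },
    'title': 'The most important of all questions.',
    'submission_start': '',
    'submission_due': '2014-10-01T10:00:00',
    'assessments': [
        {'name': 'peer-assessment', 'must_grade': 5, 'must_be_graded_by': 3,
         'start': '', 'due': ''},
    ],
}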
@@ -69,7 +69,7 @@ class StudioMixin(object):
return {'success': False, 'msg': _('Error updating XBlock configuration')}
try:
rubric = xml.parse_rubric_xml_str(data["rubric"])
rubric = verify_rubric_format(data['rubric'])
submission_due = xml.parse_date(data["submission_due"], name="submission due date")
submission_start = xml.parse_date(data["submission_start"], name="submission start date")
assessments = parse_assessment_dictionaries(data["assessments"])
@@ -82,8 +82,8 @@ class StudioMixin(object):
return {'success': False, 'msg': _('Validation error: {error}').format(error=msg)}
self.update(
rubric['criteria'],
rubric['feedbackprompt'],
rubric.get('criteria', []),
rubric.get('feedbackprompt', None),
assessments,
submission_due,
submission_start,
@@ -112,12 +112,9 @@ class StudioMixin(object):
"""
try:
rubric = xml.serialize_rubric_to_xml_str(self)
# Copy the rubric assessments so that we can convert the student training
# examples from dict to str without mutating the OpenAssessmentBlock definition.
assessment_list = copy.deepcopy(self.rubric_assessments)
# Finds the student training dictionary, if it exists, and replaces the examples with their XML definition
student_training_dictionary = [d for d in assessment_list if d["name"] == "student-training"]
if student_training_dictionary:
@@ -127,9 +124,9 @@ class StudioMixin(object):
student_training_dictionary["examples"] = examples
# We do not expect serialization to raise an exception, but if it does, handle it gracefully.
except Exception as ex:
msg = _('An unexpected error occurred while loading the problem: {error}').format(error=ex)
logger.error(msg)
except Exception:
logger.exception("An error occurred while serializing the XBlock")
msg = _('An unexpected error occurred while loading the problem')
return {'success': False, 'msg': msg, 'xml': u''}
# Populates the context for the assessments section of the editing
@@ -137,13 +134,17 @@ class StudioMixin(object):
# section.
submission_due = self.submission_due if self.submission_due else ''
submission_start = self.submission_start if self.submission_start else ''
rubric_dict = {
'criteria': self.rubric_criteria,
'feedbackprompt': unicode(self.rubric_feedback_prompt)
}
return {
'success': True,
'msg': '',
'rubric': rubric,
'rubric': rubric_dict,
'prompt': self.prompt,
'submission_due': submission_due,
'submission_start': submission_start,
@@ -241,3 +242,133 @@ def parse_assessment_dictionaries(input_assessments):
assessments_list.append(assessment_dict)
return assessments_list
def verify_rubric_format(rubric):
"""
Verifies that the rubric that was passed in follows the conventions that we expect, including
types and structure.
Args:
rubric (dict): Unsanitized version of our rubric. Usually taken from the GUI.
Returns:
rubric (dict): Sanitized version of the same form.
Raises:
UpdateFromXmlError
"""
if not isinstance(rubric, dict):
raise UpdateFromXmlError(_("The given rubric was not a dictionary of the form {criteria: [criteria1, criteria2...]}"))
if "criteria" not in rubric.keys():
raise UpdateFromXmlError(_("The given rubric did not contain a key for a list of criteria, and is invalid"))
if rubric.get('prompt', False):
if not isinstance(rubric['prompt'], basestring):
raise UpdateFromXmlError(_("The given rubric's feedback prompt was invalid, it must be a string."))
criteria = rubric["criteria"]
if not isinstance(criteria, list):
raise UpdateFromXmlError(_("The criteria term in the rubric dictionary corresponds to a non-list object."))
sanitized_criteria = []
for criterion in criteria:
if not isinstance(criterion, dict):
raise UpdateFromXmlError(_("A criterion given was not a dictionary."))
criterion = dict(criterion)
expected_keys = {'order_num', 'name', 'prompt', 'options', 'feedback'}
missing_keys = expected_keys - set(criterion.keys())
if missing_keys:
raise UpdateFromXmlError(_("The following keys were missing from the definition of one or more criteria: {}".format(", ".join(missing_keys))))
try:
name = unicode(criterion['name'])
except (TypeError, ValueError):
raise UpdateFromXmlError(_("The name value must be a string."))
try:
prompt = unicode(criterion['prompt'])
except (TypeError, ValueError):
raise UpdateFromXmlError(_("The prompt value must be a string."))
try:
feedback = unicode(criterion['feedback'])
except (TypeError, ValueError):
raise UpdateFromXmlError(_("The prompt value must be a string."))
try:
order_num = int(criterion['order_num'])
except (TypeError, ValueError):
raise UpdateFromXmlError(_("The order_num value must be an integer."))
if not isinstance(criterion['options'], list):
raise UpdateFromXmlError(_("The dictionary entry for 'options' in a criteria's dictionary definition must be a list."))
options = criterion['options']
sanitized_options = []
for option in options:
if not isinstance(option, dict):
raise UpdateFromXmlError(_("An option given was not a dictionary."))
expected_keys = {'order_num', 'name', 'points', 'explanation'}
missing_keys = list(expected_keys - set(option.keys()))
if missing_keys:
raise UpdateFromXmlError(_("The following keys were missing from the definition of one or more options: {}".format(", ".join(missing_keys))))
try:
option_name = unicode(option['name'])
except (TypeError, ValueError):
raise UpdateFromXmlError(_("All option names values must be strings."))
try:
option_explanation = unicode(option['explanation'])
except (TypeError, ValueError):
raise UpdateFromXmlError(_("All option explanation values must be strings."))
try:
    option_points = int(option['points'])
except (TypeError, ValueError):
    raise UpdateFromXmlError(_("All option point values must be integers."))
try:
    option_order_num = int(option['order_num'])
except (TypeError, ValueError):
    raise UpdateFromXmlError(_("All option order_num values must be integers."))
option_dict = {
"order_num": option['order_num'],
"name": option_name,
"explanation": option_explanation,
"points": option_points
}
sanitized_options.append(option_dict)
criterion_dict = {
"order_num": order_num,
"name": name,
"prompt": prompt,
"options": sanitized_options,
"feedback": feedback
}
sanitized_criteria.append(criterion_dict)
sanitized_rubric = {
'criteria': sanitized_criteria
}
if rubric.get('prompt'):
try:
sanitized_rubric['prompt'] = unicode(rubric.get('prompt'))
except (TypeError, ValueError):
raise UpdateFromXmlError(_("All prompt values must be strings."))
return sanitized_rubric
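A quick usage sketch for verify_rubric_format. The UpdateFromXmlError import path matches the test imports later in this diff; the verify_rubric_format import path is hypothetical (adjust to wherever the function actually lives):

# Usage sketch; the studio_mixin import path is hypothetical.
from openassessment.xblock.studio_mixin import verify_rubric_format
from openassessment.xblock.xml import UpdateFromXmlError

valid_rubric = {
    'criteria': [{
        'order_num': 0,
        'name': 'Clarity',
        'prompt': 'How clear is the response?',
        'feedback': 'disabled',
        'options': [
            {'order_num': 0, 'points': 0, 'name': 'Unclear',
             'explanation': 'Hard to follow.'},
        ],
    }],
}

# Valid input comes back with every field coerced to the expected type.
sanitized = verify_rubric_format(valid_rubric)
assert sanitized['criteria'][0]['options'][0]['points'] == 0

# Invalid input raises UpdateFromXmlError with a translated message.
try:
    verify_rubric_format({'criteria': 'not a list'})
except UpdateFromXmlError:
    pass  # expected: 'criteria' must be a list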
{
"simple": {
"rubric": [
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<!-- no options -->",
"</criterion>",
"</rubric>"
],
"missing_feedback": {
"rubric": {
"prompt": "Test Prompt",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
}
]
}
]
},
"prompt": "My new prompt.",
"submission_due": "4014-02-27T09:46:28",
"submission_start": "4014-02-10T09:46:28",
......
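Why the new "missing_feedback" fixture is invalid: its criterion omits the 'feedback' key, so verify_rubric_format's missing-keys check raises, which is what the "the following keys were missing" assertion in the view tests below relies on. A minimal sketch of that check:

# The criterion from the 'missing_feedback' fixture above: it never defines
# 'feedback', so the expected-keys check in verify_rubric_format fails.
expected_keys = {'order_num', 'name', 'prompt', 'options', 'feedback'}
criterion = {
    'order_num': 0,
    'name': 'Test criterion',
    'prompt': 'Test criterion prompt',
    'options': [{'order_num': 0, 'points': 0, 'name': 'No',
                 'explanation': 'No explanation'}],
}
missing_keys = expected_keys - set(criterion.keys())
assert missing_keys == {'feedback'}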
{
"simple": {
"rubric": [
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>"
"rubric": {
"prompt": "Test Prompt",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
],
"feedback": "required"
}
]
},
"prompt": "My new prompt.",
"submission_due": "4014-02-27T09:46:28",
"submission_start": "4014-02-10T09:46:28",
@@ -32,5 +46,54 @@
"expected-assessment": "peer-assessment",
"expected-criterion-prompt": "Test criterion prompt"
},
"unicode": {
"rubric": {
"prompt": "Ṫëṡẗ ṗṛöṁṗẗ",
"criteria": [
{
"order_num": 0,
"name": "Ṫëṡẗ ċṛïẗëïṛöṅ",
"prompt": "Téśt ćŕítéíŕőń ṕŕőḿṕt",
"options": [
{
"order_num": 0,
"points": 0,
"name": "Ṅö",
"explanation": "Ńő éxṕĺáńátíőń"
},
{
"order_num": 1,
"points": 2,
"name": "sǝʎ",
"explanation": "Чэѕ эхрlаиатіои"
}
],
"feedback": "required"
}
]
},
"prompt": "Ṁÿ ṅëẅ ṗṛöṁṗẗ.",
"submission_due": "4014-02-27T09:46:28",
"submission_start": "4014-02-10T09:46:28",
"title": "ɯʎ uǝʍ ʇıʇןǝ",
"assessments": [
{
"name": "peer-assessment",
"must_grade": 5,
"must_be_graded_by": 3,
"start": "",
"due": "4014-03-10T00:00:00"
},
{
"name": "self-assessment",
"start": "",
"due": ""
}
],
"expected-assessment": "peer-assessment",
"expected-criterion-prompt": "Ṫëṡẗ ċṛïẗëṛïöṅ ṗṛöṁṗẗ"
}
}
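For context on how these fixtures drive the tests below: in the ddt version used here, @file_data loads the JSON file and generates one test per top-level key, passing that entry's value in as the data argument. A minimal sketch (class and method names illustrative):

# Sketch of ddt's @file_data consumption: one test is generated per
# top-level fixture key, and its value arrives as `data`.
import unittest
from ddt import ddt, file_data

@ddt
class FixtureShapeTest(unittest.TestCase):
    @file_data('data/update_xblock.json')
    def test_fixture_shape(self, data):
        # Every entry supplies a dict-based rubric with a criteria list.
        self.assertIsInstance(data['rubric']['criteria'], list)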
@@ -5,10 +5,8 @@ View-level tests for Studio view of OpenAssessment XBlock.
import json
import datetime as dt
import lxml.etree as etree
import mock
import pytz
from ddt import ddt, data, file_data
from openassessment.xblock.xml import UpdateFromXmlError
from ddt import ddt, file_data
from .base import scenario, XBlockHandlerTestCase
@@ -29,9 +27,9 @@ class StudioViewTest(XBlockHandlerTestCase):
self.assertTrue(resp['success'])
self.assertEqual(resp['msg'], u'')
# Verify that the Rubric XML is parse-able and the root is <rubric>
rubric = etree.fromstring(resp['rubric'])
self.assertEqual(rubric.tag, 'rubric')
# Verify that the Rubric has criteria, and that they are a list of dictionaries
self.assertTrue(isinstance(resp['rubric']['criteria'], list))
self.assertTrue(isinstance(resp['rubric']['criteria'][0], dict))
# Verify that every assessment in the list of assessments has a name.
for assessment_dict in resp['assessments']:
@@ -40,63 +38,17 @@ class StudioViewTest(XBlockHandlerTestCase):
examples = etree.fromstring(assessment_dict['examples'])
self.assertEqual(examples.tag, 'examples')
@mock.patch('openassessment.xblock.xml.serialize_rubric_to_xml_str')
@scenario('data/basic_scenario.xml')
def test_get_editor_context_error(self, xblock, mock_rubric_serializer):
# Simulate an unexpected error while serializing the XBlock
mock_rubric_serializer.side_effect = UpdateFromXmlError('Test error!')
# Check that we get a failure message
resp = self.request(xblock, 'editor_context', '""', response_format='json')
self.assertFalse(resp['success'])
self.assertIn(u'unexpected error', resp['msg'].lower())
@file_data('data/update_xblock.json')
@scenario('data/basic_scenario.xml')
def test_update_xblock(self, xblock, data):
# First, parse XML data into a single string.
data['rubric'] = "".join(data['rubric'])
def test_update_context(self, xblock, data):
xblock.published_date = None
# Test that we can update the xblock with the expected configuration.
request = json.dumps(data)
# Verify the response is successful
resp = self.request(xblock, 'update_editor_context', request, response_format='json')
print "ERROR IS {}".format(resp['msg'])
self.assertTrue(resp['success'])
self.assertIn('success', resp['msg'].lower())
# Check that the XBlock fields were updated
# We don't need to be exhaustive here, because we have other unit tests
# that verify this extensively.
self.assertEqual(xblock.title, data['title'])
self.assertEqual(xblock.prompt, data['prompt'])
self.assertEqual(xblock.rubric_assessments[0]['name'], data['expected-assessment'])
self.assertEqual(xblock.rubric_criteria[0]['prompt'], data['expected-criterion-prompt'])
@file_data('data/update_xblock.json')
@scenario('data/basic_scenario.xml')
def test_update_context_post_release(self, xblock, data):
# First, parse XML data into a single string.
data['rubric'] = "".join(data['rubric'])
# XBlock start date defaults to already open; updates that do not
# change the number of points should still succeed after release.
request = json.dumps(data)
# Verify the response is successful
resp = self.request(xblock, 'update_editor_context', request, response_format='json')
self.assertFalse(resp['success'])
resp = self.request(xblock, 'update_editor_context', json.dumps(data), response_format='json')
self.assertTrue(resp['success'], msg=resp.get('msg'))
@file_data('data/invalid_update_xblock.json')
@scenario('data/basic_scenario.xml')
def test_update_context_invalid_request_data(self, xblock, data):
# First, parse XML data into a single string.
if 'rubric' in data:
data['rubric'] = "".join(data['rubric'])
xblock.published_date = None
resp = self.request(xblock, 'update_editor_context', json.dumps(data), response_format='json')
self.assertFalse(resp['success'])
self.assertIn(data['expected_error'], resp['msg'].lower())
@@ -104,9 +56,6 @@ class StudioViewTest(XBlockHandlerTestCase):
@file_data('data/invalid_rubric.json')
@scenario('data/basic_scenario.xml')
def test_update_rubric_invalid(self, xblock, data):
# First, parse XML data into a single string.
data['rubric'] = "".join(data['rubric'])
request = json.dumps(data)
# Store old XBlock fields for later verification
@@ -118,7 +67,7 @@ class StudioViewTest(XBlockHandlerTestCase):
# Verify the response fails
resp = self.request(xblock, 'update_editor_context', request, response_format='json')
self.assertFalse(resp['success'])
self.assertIn("not valid", resp['msg'].lower())
self.assertIn("the following keys were missing", resp['msg'].lower())
# Check that the XBlock fields were NOT updated
# We don't need to be exhaustive here, because we have other unit tests
@@ -128,7 +77,6 @@ class StudioViewTest(XBlockHandlerTestCase):
self.assertItemsEqual(xblock.rubric_assessments, old_assessments)
self.assertItemsEqual(xblock.rubric_criteria, old_criteria)
@scenario('data/basic_scenario.xml')
def test_check_released(self, xblock):
# By default, the problem should be released
......