Commit 9db810a4 by Will Daly

Merge pull request #469 from edx/will/authoring/Rubric-GUI

Will/authoring/rubric gui
parents 061a4d93 29946f2e
@@ -13,7 +13,6 @@ describe("OpenAssessment.StudioView", function() {
         this.loadError = false;
         this.updateError = false;
         this.promptBox = "";
-        this.rubricXmlBox = "";
         this.titleField = "";
         this.submissionStartField = "";
         this.submissionDueField = "";
@@ -36,13 +35,33 @@ describe("OpenAssessment.StudioView", function() {
         this.isReleased = false;
+        this.rubric = {
+            prompt: 'This is the feedback prompt',
+            criteria: [
+                {
+                    order_num: 0,
+                    name: 'This is the criterion name',
+                    prompt: 'this is the criterion prompt',
+                    feedback: 'disabled',
+                    options: [
+                        {
+                            order_num: 0,
+                            name: 'Did real bad',
+                            points: 0,
+                            explanation: 'Showed as little effort as I did making this test case interesting.'
+                        }
+                    ]
+                }
+            ]
+        };
         this.errorPromise = $.Deferred(function(defer) {
             defer.rejectWith(this, ['Test error']);
         }).promise();
         this.loadEditorContext = function() {
             var prompt = this.promptBox;
-            var rubric = this.rubricXmlBox;
+            var rubric = this.rubric;
             var title = this.titleField;
             var submission_start = this.submissionStartField;
             var submission_due = this.submissionDueField;
@@ -86,10 +105,10 @@ describe("OpenAssessment.StudioView", function() {
             }
         };
-        this.updateEditorContext = function(prompt, rubricXml, title, sub_start, sub_due, assessments) {
+        this.updateEditorContext = function(prompt, rubric, title, sub_start, sub_due, assessments) {
             if (!this.updateError) {
                 this.promptBox = prompt;
-                this.rubricXmlBox = rubricXml;
+                this.rubric = rubric;
                 this.titleField = title;
                 this.submissionStartField = sub_start;
                 this.submissionDueField = sub_due;
@@ -140,15 +159,32 @@ describe("OpenAssessment.StudioView", function() {
     var view = null;
     var prompt = "How much do you like waffles?";
-    var rubric =
-        "<rubric>" +
-            "<criterion>" +
-                "<name>Proper Appreciation of Gravity</name>" +
-                "<prompt>How much respect did the person give waffles?</prompt>" +
-                "<option points=\"0\"><name>No</name><explanation>Not enough</explanation></option>" +
-                "<option points=\"2\"><name>Yes</name><explanation>An appropriate Amount</explanation></option>" +
-            "</criterion>" +
-        "</rubric>";
+    var rubric = {
+        criteria: [
+            {
+                order_num: 0,
+                name: "Proper appreciation of Gravity",
+                prompt: "How much respect did the person give waffles?",
+                feedback: "disabled",
+                options: [
+                    {
+                        order_num: 0,
+                        points: 0,
+                        name: "No",
+                        explanation: "Not enough"
+                    },
+                    {
+                        order_num: 1,
+                        points: 2,
+                        name: "Yes",
+                        explanation: "An appropriate Amount"
+                    }
+                ]
+            }
+        ]
+    };
     var title = "The most important of all questions.";
     var subStart = "";
     var subDue = "2014-10-1T10:00:00";
@@ -207,16 +243,14 @@ describe("OpenAssessment.StudioView", function() {
         view.load();
         // Expect that the XML definition(s) were loaded
-        var rubric = view.rubricXmlBox.getValue();
-        var prompt = view.promptBox.value;
+        var prompt = view.settingsFieldSelectors.promptBox.prop('value');
         expect(prompt).toEqual('');
-        expect(rubric).toEqual('');
     });
     it("saves the Editor Context definition", function() {
         // Update the Context
-        view.titleField.value = 'THIS IS THE NEW TITLE';
+        view.settingsFieldSelectors.titleField.prop('value', 'THIS IS THE NEW TITLE');
         // Save the updated editor definition
         view.save();
@@ -249,25 +283,24 @@ describe("OpenAssessment.StudioView", function() {
         server.updateEditorContext(prompt, rubric, title, subStart, subDue, assessments);
         view.load();
-        expect(view.promptBox.value).toEqual(prompt);
-        expect(view.rubricXmlBox.getValue()).toEqual(rubric);
-        expect(view.titleField.value).toEqual(title);
-        expect(view.submissionStartField.value).toEqual(subStart);
-        expect(view.submissionDueField.value).toEqual(subDue);
-        expect(view.hasPeer.prop('checked')).toEqual(true);
-        expect(view.hasSelf.prop('checked')).toEqual(true);
-        expect(view.hasAI.prop('checked')).toEqual(false);
-        expect(view.hasTraining.prop('checked')).toEqual(true);
-        expect(view.peerMustGrade.prop('value')).toEqual('5');
-        expect(view.peerGradedBy.prop('value')).toEqual('3');
-        expect(view.peerDue.prop('value')).toEqual("");
-        expect(view.selfStart.prop('value')).toEqual("");
-        expect(view.selfDue.prop('value')).toEqual("");
+        expect(view.settingsFieldSelectors.promptBox.prop('value')).toEqual(prompt);
+        expect(view.settingsFieldSelectors.titleField.prop('value')).toEqual(title);
+        expect(view.settingsFieldSelectors.submissionStartField.prop('value')).toEqual(subStart);
+        expect(view.settingsFieldSelectors.submissionDueField.prop('value')).toEqual(subDue);
+        expect(view.settingsFieldSelectors.hasPeer.prop('checked')).toEqual(true);
+        expect(view.settingsFieldSelectors.hasSelf.prop('checked')).toEqual(true);
+        expect(view.settingsFieldSelectors.hasAI.prop('checked')).toEqual(false);
+        expect(view.settingsFieldSelectors.hasTraining.prop('checked')).toEqual(true);
+        expect(view.settingsFieldSelectors.peerMustGrade.prop('value')).toEqual('5');
+        expect(view.settingsFieldSelectors.peerGradedBy.prop('value')).toEqual('3');
+        expect(view.settingsFieldSelectors.peerDue.prop('value')).toEqual("");
+        expect(view.settingsFieldSelectors.selfStart.prop('value')).toEqual("");
+        expect(view.settingsFieldSelectors.selfDue.prop('value')).toEqual("");
         expect(view.aiTrainingExamplesCodeBox.getValue()).toEqual("");
         expect(view.studentTrainingExamplesCodeBox.getValue()).toEqual(assessments[0].examples);
-        expect(view.peerStart.prop('value')).toEqual("2014-10-04T00:00:00");
-        view.titleField.value = "This is the new title.";
+        expect(view.settingsFieldSelectors.peerStart.prop('value')).toEqual("2014-10-04T00:00:00");
+        view.settingsFieldSelectors.titleField.prop('value', "This is the new title.");
         view.updateEditorContext();
         expect(server.titleField).toEqual("This is the new title.");
...
@@ -438,11 +438,11 @@ OpenAssessment.Server.prototype = {
             function(err) { console.log(err); }
         );
     **/
-    updateEditorContext: function(prompt, rubricXml, title, sub_start, sub_due, assessments) {
+    updateEditorContext: function(prompt, rubric, title, sub_start, sub_due, assessments) {
        var url = this.url('update_editor_context');
        var payload = JSON.stringify({
            'prompt': prompt,
-           'rubric': rubricXml,
+           'rubric': rubric,
            'title': title,
            'submission_start': sub_start,
            'submission_due': sub_due,
...
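For orientation: after this change, the client posts the rubric to the `update_editor_context` handler as plain JSON rather than an XML string. A minimal sketch of the payload built by `updateEditorContext` above, using hypothetical values pulled from the test fixtures (the key names come from the diff itself):

```python
import json

# Hypothetical example of the payload serialized by updateEditorContext above.
# Values are illustrative; the keys mirror the 'payload' object in the diff.
payload = json.dumps({
    'prompt': 'How much do you like waffles?',
    'rubric': {
        'prompt': 'This is the feedback prompt',
        'criteria': [{
            'order_num': 0,
            'name': 'Proper appreciation of Gravity',
            'prompt': 'How much respect did the person give waffles?',
            'feedback': 'disabled',
            'options': [
                {'order_num': 0, 'points': 0, 'name': 'No', 'explanation': 'Not enough'},
                {'order_num': 1, 'points': 2, 'name': 'Yes', 'explanation': 'An appropriate Amount'},
            ],
        }],
    },
    'title': 'The most important of all questions.',
    'submission_start': '',
    'submission_due': '2014-10-1T10:00:00',
    'assessments': [{'name': 'self-assessment', 'start': '', 'due': ''}],
})
```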
@@ -69,7 +69,7 @@ class StudioMixin(object):
             return {'success': False, 'msg': _('Error updating XBlock configuration')}
         try:
-            rubric = xml.parse_rubric_xml_str(data["rubric"])
+            rubric = verify_rubric_format(data['rubric'])
             submission_due = xml.parse_date(data["submission_due"], name="submission due date")
             submission_start = xml.parse_date(data["submission_start"], name="submission start date")
             assessments = parse_assessment_dictionaries(data["assessments"])
@@ -82,8 +82,8 @@ class StudioMixin(object):
             return {'success': False, 'msg': _('Validation error: {error}').format(error=msg)}
         self.update(
-            rubric['criteria'],
-            rubric['feedbackprompt'],
+            rubric.get('criteria', []),
+            rubric.get('feedbackprompt', None),
             assessments,
             submission_due,
             submission_start,
@@ -112,12 +112,9 @@ class StudioMixin(object):
         """
         try:
-            rubric = xml.serialize_rubric_to_xml_str(self)
             # Copies the rubric assessments so that we can change student training examples from dict -> str without
             # negatively modifying the openassessmentblock definition.
             assessment_list = copy.deepcopy(self.rubric_assessments)
             # Finds the student training dictionary, if it exists, and replaces the examples with their XML definition
             student_training_dictionary = [d for d in assessment_list if d["name"] == "student-training"]
             if student_training_dictionary:
@@ -127,9 +124,9 @@ class StudioMixin(object):
                 student_training_dictionary["examples"] = examples
         # We do not expect serialization to raise an exception, but if it does, handle it gracefully.
-        except Exception as ex:
-            msg = _('An unexpected error occurred while loading the problem: {error}').format(error=ex)
-            logger.error(msg)
+        except:
+            logger.exception("An error occurred while serializing the XBlock")
+            msg = _('An unexpected error occurred while loading the problem')
             return {'success': False, 'msg': msg, 'xml': u''}
         # Populates the context for the assessments section of the editing
@@ -137,13 +134,17 @@ class StudioMixin(object):
         # section.
         submission_due = self.submission_due if self.submission_due else ''
         submission_start = self.submission_start if self.submission_start else ''
+        rubric_dict = {
+            'criteria': self.rubric_criteria,
+            'feedbackprompt': unicode(self.rubric_feedback_prompt)
+        }
         return {
             'success': True,
             'msg': '',
-            'rubric': rubric,
+            'rubric': rubric_dict,
             'prompt': self.prompt,
             'submission_due': submission_due,
             'submission_start': submission_start,
@@ -240,4 +241,134 @@ def parse_assessment_dictionaries(input_assessments):
             # Update the list of assessments
             assessments_list.append(assessment_dict)
-    return assessments_list
\ No newline at end of file
+    return assessments_list
+
+def verify_rubric_format(rubric):
+    """
+    Verify that the rubric passed in follows the conventions we expect,
+    including types and structure.
+
+    Args:
+        rubric (dict): Unsanitized version of the rubric, usually taken from the GUI.
+
+    Returns:
+        rubric (dict): Sanitized version of the same form.
+
+    Raises:
+        UpdateFromXmlError
+    """
+    if not isinstance(rubric, dict):
+        raise UpdateFromXmlError(_("The given rubric was not a dictionary of the form {criteria: [criterion1, criterion2...]}"))
+    if "criteria" not in rubric:
+        raise UpdateFromXmlError(_("The given rubric did not contain a key for a list of criteria, and is invalid"))
+    if rubric.get('prompt', False):
+        if not isinstance(rubric['prompt'], basestring):
+            raise UpdateFromXmlError(_("The given rubric's feedback prompt was invalid; it must be a string."))
+    criteria = rubric["criteria"]
+    if not isinstance(criteria, list):
+        raise UpdateFromXmlError(_("The 'criteria' entry in the rubric dictionary must be a list."))
+    sanitized_criteria = []
+    for criterion in criteria:
+        if not isinstance(criterion, dict):
+            raise UpdateFromXmlError(_("A criterion given was not a dictionary."))
+        criterion = dict(criterion)
+        expected_keys = {'order_num', 'name', 'prompt', 'options', 'feedback'}
+        missing_keys = expected_keys - set(criterion.keys())
+        if missing_keys:
+            raise UpdateFromXmlError(_("The following keys were missing from the definition of one or more criteria: {}").format(", ".join(missing_keys)))
+        try:
+            name = unicode(criterion['name'])
+        except (TypeError, ValueError):
+            raise UpdateFromXmlError(_("The name value must be a string."))
+        try:
+            prompt = unicode(criterion['prompt'])
+        except (TypeError, ValueError):
+            raise UpdateFromXmlError(_("The prompt value must be a string."))
+        try:
+            feedback = unicode(criterion['feedback'])
+        except (TypeError, ValueError):
+            raise UpdateFromXmlError(_("The feedback value must be a string."))
+        try:
+            order_num = int(criterion['order_num'])
+        except (TypeError, ValueError):
+            raise UpdateFromXmlError(_("The order_num value must be an integer."))
+        if not isinstance(criterion['options'], list):
+            raise UpdateFromXmlError(_("The 'options' entry in a criterion's dictionary definition must be a list."))
+        options = criterion['options']
+        sanitized_options = []
+        for option in options:
+            if not isinstance(option, dict):
+                raise UpdateFromXmlError(_("An option given was not a dictionary."))
+            expected_keys = {'order_num', 'name', 'points', 'explanation'}
+            missing_keys = expected_keys - set(option.keys())
+            if missing_keys:
+                raise UpdateFromXmlError(_("The following keys were missing from the definition of one or more options: {}").format(", ".join(missing_keys)))
+            try:
+                option_name = unicode(option['name'])
+            except (TypeError, ValueError):
+                raise UpdateFromXmlError(_("All option name values must be strings."))
+            try:
+                option_explanation = unicode(option['explanation'])
+            except (TypeError, ValueError):
+                raise UpdateFromXmlError(_("All option explanation values must be strings."))
+            try:
+                option_points = int(option['points'])
+            except (TypeError, ValueError):
+                raise UpdateFromXmlError(_("All option point values must be integers."))
+            try:
+                option_order_num = int(option['order_num'])
+            except (TypeError, ValueError):
+                raise UpdateFromXmlError(_("All option order_num values must be integers."))
+            option_dict = {
+                "order_num": option_order_num,
+                "name": option_name,
+                "explanation": option_explanation,
+                "points": option_points
+            }
+            sanitized_options.append(option_dict)
+        criterion_dict = {
+            "order_num": order_num,
+            "name": name,
+            "prompt": prompt,
+            "options": sanitized_options,
+            "feedback": feedback
+        }
+        sanitized_criteria.append(criterion_dict)
+    sanitized_rubric = {
+        'criteria': sanitized_criteria
+    }
+    if rubric.get('prompt'):
+        try:
+            sanitized_rubric['prompt'] = unicode(rubric.get('prompt'))
+        except (TypeError, ValueError):
+            raise UpdateFromXmlError(_("All prompt values must be strings."))
+    return sanitized_rubric
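A short, illustrative exercise of `verify_rubric_format` as defined above (Python 2, since the module relies on `unicode`/`basestring`; `UpdateFromXmlError` is assumed imported from `openassessment.xblock.xml`, matching the import the test file previously used). The rubric values are hypothetical:

```python
# Illustrative only: exercises verify_rubric_format() as defined above.
valid_rubric = {
    'prompt': 'Overall feedback prompt',
    'criteria': [{
        'order_num': 0,
        'name': 'Clarity',
        'prompt': 'How clear is the response?',
        'feedback': 'disabled',
        'options': [
            {'order_num': 0, 'name': 'Unclear', 'points': 0, 'explanation': 'Hard to follow.'},
            {'order_num': 1, 'name': 'Clear', 'points': '2', 'explanation': 'Easy to follow.'},
        ],
    }],
}

sanitized = verify_rubric_format(valid_rubric)
# Values are coerced: the string '2' above comes back as the integer 2.
assert sanitized['criteria'][0]['options'][1]['points'] == 2

# A criterion missing its 'feedback' key is rejected.
try:
    verify_rubric_format({'criteria': [{'order_num': 0, 'name': 'Clarity',
                                        'prompt': 'How clear?', 'options': []}]})
except UpdateFromXmlError:
    pass
```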
 {
-    "simple": {
-        "rubric": [
-            "<rubric>",
-            "<prompt>Test prompt</prompt>",
-            "<criterion>",
-            "<name>Test criterion</name>",
-            "<prompt>Test criterion prompt</prompt>",
-            "<!-- no options -->",
-            "</criterion>",
-            "</rubric>"
-        ],
+    "missing_feedback": {
+        "rubric": {
+            "prompt": "Test Prompt",
+            "criteria": [
+                {
+                    "order_num": 0,
+                    "name": "Test criterion",
+                    "prompt": "Test criterion prompt",
+                    "options": [
+                        {
+                            "order_num": 0,
+                            "points": 0,
+                            "name": "No",
+                            "explanation": "No explanation"
+                        }
+                    ]
+                }
+            ]
+        },
         "prompt": "My new prompt.",
         "submission_due": "4014-02-27T09:46:28",
         "submission_start": "4014-02-10T09:46:28",
@@ -32,4 +40,4 @@
         "expected-assessment": "peer-assessment",
         "expected-criterion-prompt": "Test criterion prompt"
     }
 }
\ No newline at end of file
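The "missing_feedback" fixture above omits the criterion's `feedback` key on purpose. A tiny sketch of the set arithmetic `verify_rubric_format` uses to reject it (values copied from the fixture):

```python
expected_keys = {'order_num', 'name', 'prompt', 'options', 'feedback'}
criterion = {"order_num": 0, "name": "Test criterion",
             "prompt": "Test criterion prompt", "options": []}
missing_keys = expected_keys - set(criterion)
# -> {'feedback'}, producing the message asserted by test_update_rubric_invalid:
# "The following keys were missing from the definition of one or more criteria: feedback"
assert missing_keys == {'feedback'}
```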
 {
     "simple": {
-        "rubric": [
-            "<rubric>",
-            "<prompt>Test prompt</prompt>",
-            "<criterion>",
-            "<name>Test criterion</name>",
-            "<prompt>Test criterion prompt</prompt>",
-            "<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
-            "<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
-            "</criterion>",
-            "</rubric>"
-        ],
+        "rubric": {
+            "prompt": "Test Prompt",
+            "criteria": [
+                {
+                    "order_num": 0,
+                    "name": "Test criterion",
+                    "prompt": "Test criterion prompt",
+                    "options": [
+                        {
+                            "order_num": 0,
+                            "points": 0,
+                            "name": "No",
+                            "explanation": "No explanation"
+                        },
+                        {
+                            "order_num": 1,
+                            "points": 2,
+                            "name": "Yes",
+                            "explanation": "Yes explanation"
+                        }
+                    ],
+                    "feedback": "required"
+                }
+            ]
+        },
         "prompt": "My new prompt.",
         "submission_due": "4014-02-27T09:46:28",
         "submission_start": "4014-02-10T09:46:28",
@@ -32,5 +46,54 @@
         "expected-assessment": "peer-assessment",
         "expected-criterion-prompt": "Test criterion prompt"
+    },
+    "unicode": {
+        "rubric": {
+            "prompt": "Ṫëṡẗ ṗṛöṁṗẗ",
+            "criteria": [
+                {
+                    "order_num": 0,
+                    "name": "Ṫëṡẗ ċṛïẗëïṛöṅ",
+                    "prompt": "Téśt ćŕítéíŕőń ṕŕőḿṕt",
+                    "options": [
+                        {
+                            "order_num": 0,
+                            "points": 0,
+                            "name": "Ṅö",
+                            "explanation": "Ńő éxṕĺáńátíőń"
+                        },
+                        {
+                            "order_num": 1,
+                            "points": 2,
+                            "name": "sǝʎ",
+                            "explanation": "Чэѕ эхрlаиатіои"
+                        }
+                    ],
+                    "feedback": "required"
+                }
+            ]
+        },
+        "prompt": "Ṁÿ ṅëẅ ṗṛöṁṗẗ.",
+        "submission_due": "4014-02-27T09:46:28",
+        "submission_start": "4014-02-10T09:46:28",
+        "title": "ɯʎ uǝʍ ʇıʇןǝ",
+        "assessments": [
+            {
+                "name": "peer-assessment",
+                "must_grade": 5,
+                "must_be_graded_by": 3,
+                "start": "",
+                "due": "4014-03-10T00:00:00"
+            },
+            {
+                "name": "self-assessment",
+                "start": "",
+                "due": ""
+            }
+        ],
+        "expected-assessment": "peer-assessment",
+        "expected-criterion-prompt": "Ṫëṡẗ ċṛïẗëṛïöṅ ṗṛöṁṗẗ"
     }
 }
\ No newline at end of file
@@ -5,10 +5,8 @@ View-level tests for Studio view of OpenAssessment XBlock.
 import json
 import datetime as dt
 import lxml.etree as etree
-import mock
 import pytz
-from ddt import ddt, data, file_data
-from openassessment.xblock.xml import UpdateFromXmlError
+from ddt import ddt, file_data
 from .base import scenario, XBlockHandlerTestCase
@@ -29,9 +27,9 @@ class StudioViewTest(XBlockHandlerTestCase):
         self.assertTrue(resp['success'])
         self.assertEqual(resp['msg'], u'')
-        # Verify that the Rubric XML is parse-able and the root is <rubric>
-        rubric = etree.fromstring(resp['rubric'])
-        self.assertEqual(rubric.tag, 'rubric')
+        # Verify that the Rubric has criteria, and that they are a list of dictionaries
+        self.assertTrue(isinstance(resp['rubric']['criteria'], list))
+        self.assertTrue(isinstance(resp['rubric']['criteria'][0], dict))
         # Verify that every assessment in the list of assessments has a name.
         for assessment_dict in resp['assessments']:
@@ -40,63 +38,17 @@ class StudioViewTest(XBlockHandlerTestCase):
             examples = etree.fromstring(assessment_dict['examples'])
             self.assertEqual(examples.tag, 'examples')
-    @mock.patch('openassessment.xblock.xml.serialize_rubric_to_xml_str')
-    @scenario('data/basic_scenario.xml')
-    def test_get_editor_context_error(self, xblock, mock_rubric_serializer):
-        # Simulate an unexpected error while serializing the XBlock
-        mock_rubric_serializer.side_effect = UpdateFromXmlError('Test error!')
-        # Check that we get a failure message
-        resp = self.request(xblock, 'editor_context', '""', response_format='json')
-        self.assertFalse(resp['success'])
-        self.assertIn(u'unexpected error', resp['msg'].lower())
     @file_data('data/update_xblock.json')
     @scenario('data/basic_scenario.xml')
-    def test_update_xblock(self, xblock, data):
-        # First, parse XML data into a single string.
-        data['rubric'] = "".join(data['rubric'])
+    def test_update_context(self, xblock, data):
         xblock.published_date = None
-        # Test that we can update the xblock with the expected configuration.
-        request = json.dumps(data)
-        # Verify the response is successfully
-        resp = self.request(xblock, 'update_editor_context', request, response_format='json')
-        print "ERROR IS {}".format(resp['msg'])
-        self.assertTrue(resp['success'])
-        self.assertIn('success', resp['msg'].lower())
-        # Check that the XBlock fields were updated
-        # We don't need to be exhaustive here, because we have other unit tests
-        # that verify this extensively.
-        self.assertEqual(xblock.title, data['title'])
-        self.assertEqual(xblock.prompt, data['prompt'])
-        self.assertEqual(xblock.rubric_assessments[0]['name'], data['expected-assessment'])
-        self.assertEqual(xblock.rubric_criteria[0]['prompt'], data['expected-criterion-prompt'])
-    @file_data('data/update_xblock.json')
-    @scenario('data/basic_scenario.xml')
-    def test_update_context_post_release(self, xblock, data):
-        # First, parse XML data into a single string.
-        data['rubric'] = "".join(data['rubric'])
-        # XBlock start date defaults to already open,
-        # so we should get an error when trying to update anything that change the number of points
-        request = json.dumps(data)
-        # Verify the response is successfully
-        resp = self.request(xblock, 'update_editor_context', request, response_format='json')
-        self.assertFalse(resp['success'])
+        resp = self.request(xblock, 'update_editor_context', json.dumps(data), response_format='json')
+        self.assertTrue(resp['success'], msg=resp.get('msg'))
     @file_data('data/invalid_update_xblock.json')
     @scenario('data/basic_scenario.xml')
     def test_update_context_invalid_request_data(self, xblock, data):
-        # First, parse XML data into a single string.
-        if 'rubric' in data:
-            data['rubric'] = "".join(data['rubric'])
         xblock.published_date = None
         resp = self.request(xblock, 'update_editor_context', json.dumps(data), response_format='json')
         self.assertFalse(resp['success'])
         self.assertIn(data['expected_error'], resp['msg'].lower())
@@ -104,9 +56,6 @@ class StudioViewTest(XBlockHandlerTestCase):
     @file_data('data/invalid_rubric.json')
     @scenario('data/basic_scenario.xml')
     def test_update_rubric_invalid(self, xblock, data):
-        # First, parse XML data into a single string.
-        data['rubric'] = "".join(data['rubric'])
         request = json.dumps(data)
         # Store old XBlock fields for later verification
@@ -118,7 +67,7 @@ class StudioViewTest(XBlockHandlerTestCase):
         # Verify the response fails
         resp = self.request(xblock, 'update_editor_context', request, response_format='json')
         self.assertFalse(resp['success'])
-        self.assertIn("not valid", resp['msg'].lower())
+        self.assertIn("the following keys were missing", resp['msg'].lower())
         # Check that the XBlock fields were NOT updated
         # We don't need to be exhaustive here, because we have other unit tests
@@ -128,7 +77,6 @@ class StudioViewTest(XBlockHandlerTestCase):
         self.assertItemsEqual(xblock.rubric_assessments, old_assessments)
         self.assertItemsEqual(xblock.rubric_criteria, old_criteria)
     @scenario('data/basic_scenario.xml')
     def test_check_released(self, xblock):
         # By default, the problem should be released
...
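Taken together, the handlers changed above now speak plain dictionaries end to end: `editor_context` returns the rubric as a dict, and `update_editor_context` validates one. A hypothetical round-trip sketch, with fixture-style values and `verify_rubric_format` assumed imported from the mixin module above:

```python
# Illustrative only: a rubric dict shaped like the 'editor_context' response,
# fed back through verify_rubric_format (defined in studio_mixin above).
context = {
    'success': True,
    'msg': '',
    'rubric': {
        'criteria': [{
            'order_num': 0,
            'name': 'Test criterion',
            'prompt': 'Test criterion prompt',
            'feedback': 'disabled',
            'options': [
                {'order_num': 0, 'points': 0, 'name': 'No', 'explanation': 'No explanation'},
            ],
        }],
        'feedbackprompt': u'Test prompt',
    },
    'prompt': 'My new prompt.',
    'submission_due': '',
    'submission_start': '',
}

sanitized = verify_rubric_format(context['rubric'])
assert sanitized['criteria'][0]['options'][0]['points'] == 0
```

Note the asymmetry visible in the diff: the load side emits the key `feedbackprompt`, while `verify_rubric_format` only preserves `prompt`, so a rubric round-tripped through the validator reaches `self.update(..., rubric.get('feedbackprompt', None), ...)` without its feedback prompt.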