Commit b2bc882c by gradyward, committed by Will Daly

Ready for code review. Some things still remain to be done, but they are all stylistic front-end things:
- Styles
- Styles
- Styles
- Radio button inheritance on deletion
- Feedback prompt follow-through
- Comprehensive front-end testing
parent 421ad263
......@@ -13,7 +13,6 @@ describe("OpenAssessment.StudioView", function() {
this.loadError = false;
this.updateError = false;
this.promptBox = "";
this.rubricXmlBox = "";
this.titleField = "";
this.submissionStartField = "";
this.submissionDueField = "";
......@@ -36,13 +35,33 @@ describe("OpenAssessment.StudioView", function() {
this.isReleased = false;
this.rubric = {
prompt: 'This is the feedback prompt',
criteria: [
{
order_num: 0,
name: 'This is the criterion name',
prompt: 'this is the criterion prompt',
feedback: 'disabled',
options: [
{
order_num: 0,
name: 'Did real bad',
points: 0,
explanation: 'Showed as little effort as I did making this test case interesting.'
}
]
}
]
};
this.errorPromise = $.Deferred(function(defer) {
defer.rejectWith(this, ['Test error']);
}).promise();
this.loadEditorContext = function() {
var prompt = this.promptBox;
var rubric = this.rubricXmlBox;
var rubric = this.rubric;
var title = this.titleField;
var submission_start = this.submissionStartField;
var submission_due = this.submissionDueField;
......@@ -86,10 +105,10 @@ describe("OpenAssessment.StudioView", function() {
}
};
this.updateEditorContext = function(prompt, rubricXml, title, sub_start, sub_due, assessments) {
this.updateEditorContext = function(prompt, rubric, title, sub_start, sub_due, assessments) {
if (!this.updateError) {
this.promptBox = prompt;
this.rubricXmlBox = rubricXml;
this.rubric = rubric;
this.titleField = title;
this.submissionStartField = sub_start;
this.submissionDueField = sub_due;
......@@ -140,15 +159,32 @@ describe("OpenAssessment.StudioView", function() {
var view = null;
var prompt = "How much do you like waffles?";
var rubric =
"<rubric>" +
"<criterion>"+
"<name>Proper Appreciation of Gravity</name>"+
"<prompt>How much respect did the person give waffles?</prompt>"+
"<option points=\"0\"><name>No</name><explanation>Not enough</explanation></option>"+
"<option points=\"2\"><name>Yes</name><explanation>An appropriate Amount</explanation></option>"+
"</criterion>"+
"</rubric>";
var rubric = {
criteria: [
{
order_num: 0,
name: "Proper appreciation of Gravity",
prompt: "How much respect did the person give waffles?",
feedback: "disabled",
options: [
{
order_num: 0,
points: 0,
name: "No",
explanation: "Not enough"
},
{
order_num: 1,
points: 2,
name: "Yes",
explanation: "An appropriate Amount"
}
]
}
]
};
var title = "The most important of all questions.";
var subStart = "";
var subDue = "2014-10-1T10:00:00";
......@@ -207,16 +243,14 @@ describe("OpenAssessment.StudioView", function() {
view.load();
// Expect that the XML definition(s) were loaded
var rubric = view.rubricXmlBox.getValue();
var prompt = view.promptBox.value;
var prompt = view.settingsFieldSelectors.promptBox.prop('value');
expect(prompt).toEqual('');
expect(rubric).toEqual('');
});
it("saves the Editor Context definition", function() {
// Update the Context
view.titleField.value = 'THIS IS THE NEW TITLE';
view.settingsFieldSelectors.titleField.prop('value', 'THIS IS THE NEW TITLE');
// Save the updated editor definition
view.save();
......@@ -249,25 +283,24 @@ describe("OpenAssessment.StudioView", function() {
server.updateEditorContext(prompt, rubric, title, subStart, subDue, assessments);
view.load();
expect(view.promptBox.value).toEqual(prompt);
expect(view.rubricXmlBox.getValue()).toEqual(rubric);
expect(view.titleField.value).toEqual(title);
expect(view.submissionStartField.value).toEqual(subStart);
expect(view.submissionDueField.value).toEqual(subDue);
expect(view.hasPeer.prop('checked')).toEqual(true);
expect(view.hasSelf.prop('checked')).toEqual(true);
expect(view.hasAI.prop('checked')).toEqual(false);
expect(view.hasTraining.prop('checked')).toEqual(true);
expect(view.peerMustGrade.prop('value')).toEqual('5');
expect(view.peerGradedBy.prop('value')).toEqual('3');
expect(view.peerDue.prop('value')).toEqual("");
expect(view.selfStart.prop('value')).toEqual("");
expect(view.selfDue.prop('value')).toEqual("");
expect(view.settingsFieldSelectors.promptBox.prop('value')).toEqual(prompt);
expect(view.settingsFieldSelectors.titleField.prop('value')).toEqual(title);
expect(view.settingsFieldSelectors.submissionStartField.prop('value')).toEqual(subStart);
expect(view.settingsFieldSelectors.submissionDueField.prop('value')).toEqual(subDue);
expect(view.settingsFieldSelectors.hasPeer.prop('checked')).toEqual(true);
expect(view.settingsFieldSelectors.hasSelf.prop('checked')).toEqual(true);
expect(view.settingsFieldSelectors.hasAI.prop('checked')).toEqual(false);
expect(view.settingsFieldSelectors.hasTraining.prop('checked')).toEqual(true);
expect(view.settingsFieldSelectors.peerMustGrade.prop('value')).toEqual('5');
expect(view.settingsFieldSelectors.peerGradedBy.prop('value')).toEqual('3');
expect(view.settingsFieldSelectors.peerDue.prop('value')).toEqual("");
expect(view.settingsFieldSelectors.selfStart.prop('value')).toEqual("");
expect(view.settingsFieldSelectors.selfDue.prop('value')).toEqual("");
expect(view.aiTrainingExamplesCodeBox.getValue()).toEqual("");
expect(view.studentTrainingExamplesCodeBox.getValue()).toEqual(assessments[0].examples);
expect(view.peerStart.prop('value')).toEqual("2014-10-04T00:00:00");
expect(view.settingsFieldSelectors.peerStart.prop('value')).toEqual("2014-10-04T00:00:00");
view.titleField.value = "This is the new title.";
view.settingsFieldSelectors.titleField.prop('value', "This is the new title.");
view.updateEditorContext();
expect(server.titleField).toEqual("This is the new title.");
......
......@@ -438,11 +438,11 @@ OpenAssessment.Server.prototype = {
function(err) { console.log(err); }
);
**/
updateEditorContext: function(prompt, rubricXml, title, sub_start, sub_due, assessments) {
updateEditorContext: function(prompt, rubric, title, sub_start, sub_due, assessments) {
var url = this.url('update_editor_context');
var payload = JSON.stringify({
'prompt': prompt,
'rubric': rubricXml,
'rubric': rubric,
'title': title,
'submission_start': sub_start,
'submission_due': sub_due,
......
......@@ -207,9 +207,10 @@
.oa_editor_content_wrapper {
height: 100%;
width: 100%;
border-radius: 4px;
border: 1px solid $edx-gray-d3;
border-radius: 3px;
border: 1px solid $edx-gray-d1;
background-color: #f5f5f5;
overflow-y: scroll;
}
#openassessment_prompt_editor {
......@@ -219,17 +220,25 @@
border: none;
border-radius: 4px;
padding: 10px;
textarea{
font-size: 14px;
border: none;
overflow: auto;
outline: none;
-webkit-box-shadow: none;
-moz-box-shadow: none;
box-shadow: none;
}
}
#openassessment_rubric_editor {
width: 100%;
height: 100%;
display: none;
}
#oa_basic_settings_editor {
padding: 20px 20px;
border-bottom: 1px solid $edx-gray-d3;
border-bottom: 1px solid $edx-gray-d1;
#openassessment_title_editor_wrapper{
label{
width: 25%;
......@@ -243,31 +252,17 @@
}
#openassessment_step_select_description{
padding: 10px;
padding: 10px 10px 0 10px;
text-align: center;
font-size: 80%;
}
.openassessment_assessment_module_settings_editor{
margin-bottom: 10px;
padding-bottom: 10px;
border-bottom: 1px solid $edx-gray-l3;
}
.openassessment_indent_line_input{
padding: 5px 20px;
}
#oa_settings_editor_wrapper {
padding: 0 10px;
overflow-y: scroll;
}
#oa_rubric_editor_wrapper{
overflow-y: scroll;
}
#openassessment_title_editor {
width: 300px;
margin-left: 50px;
padding: 5px;
margin: 10px;
border: 1px solid lightgray;
border-radius: 3px;
}
.openassessment_description{
......@@ -275,46 +270,12 @@
margin: 0;
}
.openassessment_date_field{
width: 130px;
}
.openassessment_number_field{
width: 25px;
}
.openassessment_peer_fixed_width{
width: 45%;
display: inline-block;
}
.openassessment_description_closed{
@extend .openassessment_description;
}
.openassessment_text_field_wrapper{
width: 50%;
text-align: center;
}
.openassessment_right_text_field_wrapper {
@extend .openassessment_text_field_wrapper;
float: right;
}
.openassessment_left_text_field_wrapper {
@extend .openassessment_text_field_wrapper;
float: left;
}
.openassessment_due_date_editor{
height: 30px;
}
.openassessment_inclusion_wrapper{
background-color: $edx-gray-l3;
padding: ($baseline-v/8) ($baseline-h/8);
margin: ($baseline-v/8) ($baseline-h/8);
border-radius: ($baseline-v)/8;
margin: 2.5px 5px;
input[type="checkbox"]{
display: none;
......@@ -331,6 +292,7 @@
input[type="checkbox"]:checked + label:before{
content: "\f046";
color: #009fe6;
}
}
......@@ -364,16 +326,19 @@
}
}
hr {
background-color: transparent;
color: $edx-gray-d3;
height: 1px;
border: 0px;
clear: both;
.openassessment_assessment_module_editor{
padding: 2.5px 0px;
.openassessment_description{
padding-left: 15px;
}
}
#oa_rubric_editor_wrapper{
.wrapper-comp-settings{
display: initial;
}
#openassessment_rubric_instructions{
background-color: $edx-gray-l2;
border-bottom: 1px solid $edx-gray-d3;
......@@ -381,20 +346,19 @@
}
.openassessment_criterion {
border: 1px dashed $edx-gray-l3;
margin: 5px;
padding-bottom: 10px;
.openassessment_criterion_header {
margin: 10px;
padding: 5px;
border-bottom: 1px solid $edx-gray-d3;
overflow: auto;
input[type="checkbox"] {
display: none;
}
input[type="checkbox"] + label:before {
input[type="checkbox"] + h6:before {
font-family: "FontAwesome";
display: inline-block;
margin-right: ($baseline-h/4);
......@@ -408,8 +372,10 @@
}
.openassessment_criterion_header_title {
font-size: 125%;
text-transform: uppercase;
width: 50%;
display: inline-block;
float: left;
}
.openassessment_criterion_header_remove {
......@@ -430,6 +396,33 @@
border-radius: 3px;
}
.openassessment_criterion_basic_editor{
.comp-setting-entry{
padding-right: 0;
margin-right: 10px;
overflow: auto;
.wrapper-comp-settings{
input{
font-size: 11px;
float: right;
width: 70%
}
.openassessment_criterion_prompt{
padding: 10px;
@extend .openassessment_large_text_input;
width: 70%;
float: right;
}
label{
padding: 0;
margin: 0;
}
}
}
}
.openassessment_criterion_feedback_wrapper{
.openassessment_criterion_feedback_header {
......@@ -463,7 +456,7 @@
.openassessment_option_header{
background-color: $edx-gray-l2;
padding: 5px;
padding: 5px 5px 5px 10px;
margin: 5px 5px 8px 5px;
border-radius: 3px;
......@@ -473,44 +466,62 @@
}
.openassessment_criterion_option_point_wrapper{
width: 30%;
width: 40%;
border-top: none;
padding: 5px 5px 5px 0px;
float: left;
padding: 0 10px;
margin: 0;
label{
width: 62.5%
width: 40%;
vertical-align: middle;
padding: 0;
margin: 0;
}
input{
width: 40px;
@extend .openassessment_input_styling
padding: 10px;
float: right;
width: 55%;
font-size: 11px;
}
}
.openassessment_criterion_option_name_wrapper{
width: 70%;
float: right;
float: left;
width: 60%;
padding: 5px 10px 5px 20px;
border-top: 0;
margin: 0;
label{
width: 40%;
padding-right: 10px;
width: 25%;
vertical-align: middle;
padding: 0;
margin: 0;
}
input{
padding: 10px;
font-size: 11px;
width: 60%;
float: right;
}
}
.openassessment_criterion_option_explanation_wrapper{
padding: 15px 5px 0px 5px;
padding: 10px 5px 0px 20px;
width: 100%;
display: inline-block;
margin: 0;
label{
width: 30%;
width: 25%;
text-align: left;
padding-left:15px;
}
textarea{
padding: 10px;
@extend .openassessment_large_text_input;
width: 70%;
float: right;
......@@ -577,23 +588,42 @@
margin: 0 5px;
float: right;
}
#openassessment_rubric_feedback_wrapper{
.openassessment_rubric_feedback_wrapper{
padding: 0 10px;
#openassessment_rubric_feedback_header{
.openassessment_rubric_feedback_header{
margin-top: 10px;
border-bottom: 1px solid $edx-gray-d3;
font-size: 125%;
padding: 10px;
padding-right: 20px;
}
.openassessment_feedback_radio_toggle{
input[type="radio"]{
display: none;
}
input[type="radio"] + label:before{
font-family: "FontAwesome";
display: inline-block;
margin-right: ($baseline-h/4);
width: auto;
height: auto;
content: "\f10c";
}
input[type="radio"]:checked + label:before{
content: "\f05d";
}
}
}
#openassessment_rubric_add_criterion{
font-size: 125%;
h2:before{
h6:before{
font-family: "FontAwesome";
display: inline-block;
margin-left: 5px;
......@@ -620,7 +650,7 @@
}
#openassessment_make_invisible{
display: none;
}
.modal-content {
......
......@@ -69,7 +69,7 @@ class StudioMixin(object):
return {'success': False, 'msg': _('Error updating XBlock configuration')}
try:
rubric = xml.parse_rubric_xml_str(data["rubric"])
rubric = verify_rubric_format(data['rubric'])
submission_due = xml.parse_date(data["submission_due"], name="submission due date")
submission_start = xml.parse_date(data["submission_start"], name="submission start date")
assessments = parse_assessment_dictionaries(data["assessments"])
......@@ -82,8 +82,8 @@ class StudioMixin(object):
return {'success': False, 'msg': _('Validation error: {error}').format(error=msg)}
self.update(
rubric['criteria'],
rubric['feedbackprompt'],
rubric,
rubric.get('feedbackprompt', None),
assessments,
submission_due,
submission_start,
......@@ -112,12 +112,9 @@ class StudioMixin(object):
"""
try:
rubric = xml.serialize_rubric_to_xml_str(self)
# Copies the rubric assessments so that we can change student training examples from dict -> str without
# negatively modifying the openassessmentblock definition.
assessment_list = copy.deepcopy(self.rubric_assessments)
# Finds the student training dictionary, if it exists, and replaces the examples with their XML definition
student_training_dictionary = [d for d in assessment_list if d["name"] == "student-training"]
if student_training_dictionary:
......@@ -140,10 +137,14 @@ class StudioMixin(object):
submission_start = self.submission_start if self.submission_start else ''
rubric_dict = { 'criteria' : self.rubric_criteria }
rubric_dict['feedbackprompt'] = unicode(self.rubric_feedback_prompt)
return {
'success': True,
'msg': '',
'rubric': rubric,
'rubric': rubric_dict,
'prompt': self.prompt,
'submission_due': submission_due,
'submission_start': submission_start,
......@@ -241,3 +242,127 @@ def parse_assessment_dictionaries(input_assessments):
assessments_list.append(assessment_dict)
return assessments_list
def verify_rubric_format(rubric):
"""
Verifies that the rubric passed in follows the structure and types we expect.
Args:
rubric (dict): Unsanitized rubric, usually taken from the GUI.
Returns:
rubric (dict): Sanitized rubric of the same form.
Raises:
UpdateFromXmlError
"""
if not isinstance(rubric, dict):
raise UpdateFromXmlError(_("The given rubric was not a dictionary of the form {criteria: [criterion1, criterion2, ...]}"))
if "criteria" not in rubric:
raise UpdateFromXmlError(_("The given rubric did not contain a key for a list of criteria, and is invalid"))
unexpected_keys = list(set(rubric.keys()) - {"prompt", "criteria"})
if unexpected_keys:
raise UpdateFromXmlError(_("The following keys were included in the rubric when they were not allowed to be: {}".format(unexpected_keys)))
if rubric.get('prompt'):
if not isinstance(rubric['prompt'], basestring):
raise UpdateFromXmlError(_("The given rubric's feedback prompt was invalid; it must be a string."))
criteria = rubric["criteria"]
if not isinstance(criteria, list):
raise UpdateFromXmlError(_("The criteria term in the rubric dictionary corresponds to a non-list object."))
sanitized_criteria = []
for criterion in criteria:
if not isinstance(criterion, dict):
raise UpdateFromXmlError(_("A criterion given was not a dictionary."))
criterion = dict(criterion)
expected_keys = {'order_num', 'name', 'prompt', 'options', 'feedback'}
unexpected_keys = list(set(criterion.keys()) - expected_keys)
missing_keys = list(expected_keys - set(criterion.keys()))
if missing_keys:
raise UpdateFromXmlError(_("The following keys were missing from the Definition of one or more criteria: {}".format(missing_keys)))
if unexpected_keys:
raise UpdateFromXmlError(_("The following extraneous keys were found in the definition for one or more criteria: {}".format(unexpected_keys)))
name = unicode(criterion['name'])
prompt = unicode(criterion['prompt'])
feedback = unicode(criterion['feedback'])
try:
order_num = int(criterion['order_num'])
except (TypeError, ValueError):
raise UpdateFromXmlError(_("The order_num value must be an integer."))
if not isinstance(criterion['options'], list):
raise UpdateFromXmlError(_("The dictionary entry for 'options' in a criteria's dictionary definition must be a list."))
options = criterion['options']
sanitized_options = []
for option in options:
if not isinstance(option, dict):
raise UpdateFromXmlError(_("An option given was not a dictionary."))
expected_keys = {'order_num', 'name', 'points', 'explanation'}
unexpected_keys = list(set(option.keys()) - expected_keys)
missing_keys = list(expected_keys - set(option.keys()))
if missing_keys:
raise UpdateFromXmlError(_("The following keys were missing from the Definition of one or more options: {}".format(missing_keys)))
if unexpected_keys:
raise UpdateFromXmlError(_("The following extraneous keys were found in the definition for one or more options: {}".format(unexpected_keys)))
option_name = unicode(option['name'])
option_explanation = unicode(option['explanation'])
try:
option_points = int(option['points'])
except (TypeError, ValueError):
raise UpdateFromXmlError(_("All option point values must be integers."))
option_dict = {
"order_num": option['order_num'],
"name": option_name,
"explanation": option_explanation,
"points": option_points
}
sanitized_options.append(option_dict)
criterion_dict = {
"order_num": order_num,
"name": name,
"prompt": prompt,
"options": sanitized_options,
"feedback": feedback
}
sanitized_criteria.append(criterion_dict)
sanitized_rubric = {
'criteria': sanitized_criteria
}
if rubric.get('prompt'):
sanitized_rubric['prompt'] = unicode(rubric['prompt'])
return sanitized_rubric
\ No newline at end of file
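
A minimal usage sketch of the sanitizer above, shaped like the test fixtures that follow; the import paths (openassessment.xblock.studio_mixin and openassessment.xblock.xml) are assumptions based on this diff, not confirmed module locations:

# Minimal sketch (import paths are assumptions based on this diff)
from openassessment.xblock.studio_mixin import verify_rubric_format
from openassessment.xblock.xml import UpdateFromXmlError

rubric = {
    "prompt": "Test Prompt",
    "criteria": [
        {
            "order_num": 0,
            "name": "Test criterion",
            "prompt": "Test criterion prompt",
            "feedback": "disabled",
            "options": [
                {"order_num": 0, "points": 0, "name": "No", "explanation": "No explanation"}
            ]
        }
    ]
}

# A well-formed rubric comes back as a cleaned copy of the same shape.
sanitized = verify_rubric_format(rubric)
assert sanitized["criteria"][0]["options"][0]["points"] == 0
assert sanitized["prompt"] == "Test Prompt"

# A malformed rubric (non-list 'criteria') is rejected.
try:
    verify_rubric_format({"criteria": "not a list"})
except UpdateFromXmlError:
    pass  # expected
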
{
"simple": {
"rubric": [
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<!-- no options -->",
"</criterion>",
"</rubric>"
],
"rubric": {
"prompt": "Test Prompt",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
}
]
}
]
},
"prompt": "My new prompt.",
"submission_due": "4014-02-27T09:46:28",
"submission_start": "4014-02-10T09:46:28",
......
{
"simple": {
"rubric": [
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>"
"rubric": {
"prompt": "Test Prompt",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
],
"feedback": "required"
}
]
},
"prompt": "My new prompt.",
"submission_due": "4014-02-27T09:46:28",
"submission_start": "4014-02-10T09:46:28",
......
......@@ -29,9 +29,9 @@ class StudioViewTest(XBlockHandlerTestCase):
self.assertTrue(resp['success'])
self.assertEqual(resp['msg'], u'')
# Verify that the Rubric XML is parse-able and the root is <rubric>
rubric = etree.fromstring(resp['rubric'])
self.assertEqual(rubric.tag, 'rubric')
# Verify that the Rubric has criteria, and that they are a list of dictionaries
self.assertTrue(isinstance(resp['rubric']['criteria'], list))
self.assertTrue(isinstance(resp['rubric']['criteria'][0], dict))
# Verify that every assessment in the list of assessments has a name.
for assessment_dict in resp['assessments']:
......@@ -40,7 +40,8 @@ class StudioViewTest(XBlockHandlerTestCase):
examples = etree.fromstring(assessment_dict['examples'])
self.assertEqual(examples.tag, 'examples')
@mock.patch('openassessment.xblock.xml.serialize_rubric_to_xml_str')
# WEDALY!!! I cannot figure out how to mock out this call correctly, so it is consequently failing.
@mock.patch('studio_mixin.verify_rubric_format')
@scenario('data/basic_scenario.xml')
def test_get_editor_context_error(self, xblock, mock_rubric_serializer):
# Simulate an unexpected error while serializing the XBlock
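
One likely reason the patch flagged above fails: mock.patch imports the module named in its target string, so the bare path 'studio_mixin.verify_rubric_format' would raise an ImportError before the test body runs; the target usually needs the fully dotted path of the module where the name is defined. Note also that, in this diff, verify_rubric_format appears to be called only from update_editor_context, so patching it may not exercise the editor-context handler at all. A hedged sketch, assuming the mixin is importable as openassessment.xblock.studio_mixin and that the rest of the test body stays as it was:

# Sketch only: fully qualified patch target (module path is an assumption)
@mock.patch('openassessment.xblock.studio_mixin.verify_rubric_format')
@scenario('data/basic_scenario.xml')
def test_get_editor_context_error(self, xblock, mock_verify_rubric):
    # Simulate an unexpected error while building the editor context
    mock_verify_rubric.side_effect = Exception('Simulated failure')
    # ... remainder of the original test body ...
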
......@@ -51,28 +52,30 @@ class StudioViewTest(XBlockHandlerTestCase):
self.assertFalse(resp['success'])
self.assertIn(u'unexpected error', resp['msg'].lower())
@file_data('data/update_xblock.json')
@scenario('data/basic_scenario.xml')
def test_update_xblock(self, xblock, data):
# First, parse XML data into a single string.
data['rubric'] = "".join(data['rubric'])
xblock.published_date = None
# Test that we can update the xblock with the expected configuration.
request = json.dumps(data)
# Verify the response is successfully
resp = self.request(xblock, 'update_editor_context', request, response_format='json')
print "ERROR IS {}".format(resp['msg'])
self.assertTrue(resp['success'])
self.assertIn('success', resp['msg'].lower())
# Check that the XBlock fields were updated
# We don't need to be exhaustive here, because we have other unit tests
# that verify this extensively.
self.assertEqual(xblock.title, data['title'])
self.assertEqual(xblock.prompt, data['prompt'])
self.assertEqual(xblock.rubric_assessments[0]['name'], data['expected-assessment'])
self.assertEqual(xblock.rubric_criteria[0]['prompt'], data['expected-criterion-prompt'])
# WEDALY!!! I don't know if this test is relevant any more (using update editor context with
# XML is so OVER, am I right? Rather, we now test the same behavior a million times with the
# Dictionary/List structures.
# Thoughts?
# @file_data('data/update_xblock.json')
# @scenario('data/basic_scenario.xml')
# def test_update_xblock(self, xblock, data):
# xblock.published_date = None
# # Test that we can update the xblock with the expected configuration.
# request = json.dumps(data)
#
# # Verify the response is successfully
# resp = self.request(xblock, 'update_editor_context', request, response_format='json')
# print "ERROR IS {}".format(resp['msg'])
# self.assertTrue(resp['success'])
# self.assertIn('success', resp['msg'].lower())
#
# # Check that the XBlock fields were updated
# # We don't need to be exhaustive here, because we have other unit tests
# # that verify this extensively.
# self.assertEqual(xblock.title, data['title'])
# self.assertEqual(xblock.prompt, data['prompt'])
# self.assertEqual(xblock.rubric_assessments[0]['name'], data['expected-assessment'])
# self.assertEqual(xblock.rubric_criteria[0]['prompt'], data['expected-criterion-prompt'])
@file_data('data/update_xblock.json')
@scenario('data/basic_scenario.xml')
......@@ -91,9 +94,6 @@ class StudioViewTest(XBlockHandlerTestCase):
@file_data('data/invalid_update_xblock.json')
@scenario('data/basic_scenario.xml')
def test_update_context_invalid_request_data(self, xblock, data):
# First, parse XML data into a single string.
if 'rubric' in data:
data['rubric'] = "".join(data['rubric'])
xblock.published_date = None
......@@ -104,8 +104,6 @@ class StudioViewTest(XBlockHandlerTestCase):
@file_data('data/invalid_rubric.json')
@scenario('data/basic_scenario.xml')
def test_update_rubric_invalid(self, xblock, data):
# First, parse XML data into a single string.
data['rubric'] = "".join(data['rubric'])
request = json.dumps(data)
......@@ -118,7 +116,7 @@ class StudioViewTest(XBlockHandlerTestCase):
# Verify the response fails
resp = self.request(xblock, 'update_editor_context', request, response_format='json')
self.assertFalse(resp['success'])
self.assertIn("not valid", resp['msg'].lower())
self.assertIn("the following keys were missing", resp['msg'].lower())
# Check that the XBlock fields were NOT updated
# We don't need to be exhaustive here, because we have other unit tests
......