Commit b2bc882c by gradyward, committed by Will Daly

Ready for code review. Some things still remain to be done, but they are all stylistic front-end things:
- Styles
- Styles
- Styles
- Radio Button Inheritance on deletion
- Feedback prompt follow-through
- Comprehensive front end testing
parent 421ad263
@@ -13,7 +13,6 @@ describe("OpenAssessment.StudioView", function() {
         this.loadError = false;
         this.updateError = false;
         this.promptBox = "";
-        this.rubricXmlBox = "";
         this.titleField = "";
         this.submissionStartField = "";
         this.submissionDueField = "";
@@ -36,13 +35,33 @@ describe("OpenAssessment.StudioView", function() {
         this.isReleased = false;
+        this.rubric = {
+            prompt: 'This is the feedback prompt',
+            criteria: [
+                {
+                    order_num: 0,
+                    name: 'This is the criterion name',
+                    prompt: 'this is the criterion prompt',
+                    feedback: 'disabled',
+                    options: [
+                        {
+                            order_num: 0,
+                            name: 'Did real bad',
+                            points: 0,
+                            explanation: 'Showed as little effort as I did making this test case interesting.'
+                        }
+                    ]
+                }
+            ]
+        };
         this.errorPromise = $.Deferred(function(defer) {
             defer.rejectWith(this, ['Test error']);
         }).promise();
         this.loadEditorContext = function() {
             var prompt = this.promptBox;
-            var rubric = this.rubricXmlBox;
+            var rubric = this.rubric;
             var title = this.titleField;
             var submission_start = this.submissionStartField;
             var submission_due = this.submissionDueField;
@@ -86,10 +105,10 @@ describe("OpenAssessment.StudioView", function() {
             }
         };
-        this.updateEditorContext = function(prompt, rubricXml, title, sub_start, sub_due, assessments) {
+        this.updateEditorContext = function(prompt, rubric, title, sub_start, sub_due, assessments) {
             if (!this.updateError) {
                 this.promptBox = prompt;
-                this.rubricXmlBox = rubricXml;
+                this.rubric = rubric;
                 this.titleField = title;
                 this.submissionStartField = sub_start;
                 this.submissionDueField = sub_due;
@@ -140,15 +159,32 @@ describe("OpenAssessment.StudioView", function() {
     var view = null;
     var prompt = "How much do you like waffles?";
-    var rubric =
-        "<rubric>" +
-            "<criterion>"+
-                "<name>Proper Appreciation of Gravity</name>"+
-                "<prompt>How much respect did the person give waffles?</prompt>"+
-                "<option points=\"0\"><name>No</name><explanation>Not enough</explanation></option>"+
-                "<option points=\"2\"><name>Yes</name><explanation>An appropriate Amount</explanation></option>"+
-            "</criterion>"+
-        "</rubric>";
+    var rubric = {
+        criteria: [
+            {
+                order_num: 0,
+                name: "Proper appreciation of Gravity",
+                prompt: "How much respect did the person give waffles?",
+                feedback: "disabled",
+                options: [
+                    {
+                        order_num: 0,
+                        points: 0,
+                        name: "No",
+                        explanation: "Not enough"
+                    },
+                    {
+                        order_num: 1,
+                        points: 2,
+                        name: "Yes",
+                        explanation: "An appropriate Amount"
+                    }
+                ]
+            }
+        ]
+    };
     var title = "The most important of all questions.";
     var subStart = "";
     var subDue = "2014-10-1T10:00:00";
@@ -207,16 +243,14 @@ describe("OpenAssessment.StudioView", function() {
         view.load();
         // Expect that the XML definition(s) were loaded
-        var rubric = view.rubricXmlBox.getValue();
-        var prompt = view.promptBox.value;
+        var prompt = view.settingsFieldSelectors.promptBox.prop('value');
         expect(prompt).toEqual('');
-        expect(rubric).toEqual('');
     });
     it("saves the Editor Context definition", function() {
         // Update the Context
-        view.titleField.value = 'THIS IS THE NEW TITLE';
+        view.settingsFieldSelectors.titleField.prop('value', 'THIS IS THE NEW TITLE');
         // Save the updated editor definition
         view.save();
@@ -249,25 +283,24 @@ describe("OpenAssessment.StudioView", function() {
         server.updateEditorContext(prompt, rubric, title, subStart, subDue, assessments);
         view.load();
-        expect(view.promptBox.value).toEqual(prompt);
-        expect(view.rubricXmlBox.getValue()).toEqual(rubric);
-        expect(view.titleField.value).toEqual(title);
-        expect(view.submissionStartField.value).toEqual(subStart);
-        expect(view.submissionDueField.value).toEqual(subDue);
-        expect(view.hasPeer.prop('checked')).toEqual(true);
-        expect(view.hasSelf.prop('checked')).toEqual(true);
-        expect(view.hasAI.prop('checked')).toEqual(false);
-        expect(view.hasTraining.prop('checked')).toEqual(true);
-        expect(view.peerMustGrade.prop('value')).toEqual('5');
-        expect(view.peerGradedBy.prop('value')).toEqual('3');
-        expect(view.peerDue.prop('value')).toEqual("");
-        expect(view.selfStart.prop('value')).toEqual("");
-        expect(view.selfDue.prop('value')).toEqual("");
+        expect(view.settingsFieldSelectors.promptBox.prop('value')).toEqual(prompt);
+        expect(view.settingsFieldSelectors.titleField.prop('value')).toEqual(title);
+        expect(view.settingsFieldSelectors.submissionStartField.prop('value')).toEqual(subStart);
+        expect(view.settingsFieldSelectors.submissionDueField.prop('value')).toEqual(subDue);
+        expect(view.settingsFieldSelectors.hasPeer.prop('checked')).toEqual(true);
+        expect(view.settingsFieldSelectors.hasSelf.prop('checked')).toEqual(true);
+        expect(view.settingsFieldSelectors.hasAI.prop('checked')).toEqual(false);
+        expect(view.settingsFieldSelectors.hasTraining.prop('checked')).toEqual(true);
+        expect(view.settingsFieldSelectors.peerMustGrade.prop('value')).toEqual('5');
+        expect(view.settingsFieldSelectors.peerGradedBy.prop('value')).toEqual('3');
+        expect(view.settingsFieldSelectors.peerDue.prop('value')).toEqual("");
+        expect(view.settingsFieldSelectors.selfStart.prop('value')).toEqual("");
+        expect(view.settingsFieldSelectors.selfDue.prop('value')).toEqual("");
         expect(view.aiTrainingExamplesCodeBox.getValue()).toEqual("");
         expect(view.studentTrainingExamplesCodeBox.getValue()).toEqual(assessments[0].examples);
-        expect(view.peerStart.prop('value')).toEqual("2014-10-04T00:00:00");
-        view.titleField.value = "This is the new title.";
+        expect(view.settingsFieldSelectors.peerStart.prop('value')).toEqual("2014-10-04T00:00:00");
+        view.settingsFieldSelectors.titleField.prop('value', "This is the new title.");
         view.updateEditorContext();
         expect(server.titleField).toEqual("This is the new title.");
......
@@ -438,11 +438,11 @@ OpenAssessment.Server.prototype = {
             function(err) { console.log(err); }
         );
     **/
-    updateEditorContext: function(prompt, rubricXml, title, sub_start, sub_due, assessments) {
+    updateEditorContext: function(prompt, rubric, title, sub_start, sub_due, assessments) {
        var url = this.url('update_editor_context');
        var payload = JSON.stringify({
            'prompt': prompt,
-            'rubric': rubricXml,
+            'rubric': rubric,
            'title': title,
            'submission_start': sub_start,
            'submission_due': sub_due,
......
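Note for reviewers: with this change the editor round-trips the rubric as a plain JSON structure instead of an XML string. Below is a minimal sketch of the body that updateEditorContext now posts to the update_editor_context handler; the sample values are hypothetical, and the 'assessments' key is assumed from the handler code further down (the diff is truncated before it).

    import json

    # Hypothetical sample payload; field names come from the diff above.
    payload = json.dumps({
        'prompt': 'How much do you like waffles?',
        'rubric': {  # plain dict now, not an XML string
            'prompt': 'This is the feedback prompt',
            'criteria': [{
                'order_num': 0,
                'name': 'Proper appreciation of Gravity',
                'prompt': 'How much respect did the person give waffles?',
                'feedback': 'disabled',
                'options': [
                    {'order_num': 0, 'points': 0, 'name': 'No', 'explanation': 'Not enough'}
                ]
            }]
        },
        'title': 'The most important of all questions.',
        'submission_start': '',
        'submission_due': '2014-10-1T10:00:00',
        'assessments': []  # assumed; not shown in the truncated diff
    })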
@@ -207,9 +207,10 @@
 .oa_editor_content_wrapper {
     height: 100%;
     width: 100%;
-    border-radius: 4px;
-    border: 1px solid $edx-gray-d3;
+    border-radius: 3px;
+    border: 1px solid $edx-gray-d1;
     background-color: #f5f5f5;
+    overflow-y: scroll;
 }
 #openassessment_prompt_editor {
@@ -219,17 +220,25 @@
     border: none;
     border-radius: 4px;
     padding: 10px;
+    textarea{
+        font-size: 14px;
+        border: none;
+        overflow: auto;
+        outline: none;
+        -webkit-box-shadow: none;
+        -moz-box-shadow: none;
+        box-shadow: none;
+    }
 }
 #openassessment_rubric_editor {
     width: 100%;
     height: 100%;
+    display: none;
 }
 #oa_basic_settings_editor {
-    padding: 20px 20px;
-    border-bottom: 1px solid $edx-gray-d3;
+    border-bottom: 1px solid $edx-gray-d1;
     #openassessment_title_editor_wrapper{
         label{
             width: 25%;
@@ -243,31 +252,17 @@
     }
     #openassessment_step_select_description{
-        padding: 10px;
-        text-align: center;
-        font-size: 80%;
+        padding: 10px 10px 0 10px;
     }
     .openassessment_assessment_module_settings_editor{
-        margin-bottom: 10px;
-        padding-bottom: 10px;
-        border-bottom: 1px solid $edx-gray-l3;
-    }
-    .openassessment_indent_line_input{
-        padding: 5px 20px;
-    }
-    #oa_settings_editor_wrapper {
-        padding: 0 10px;
-        overflow-y: scroll;
-    }
-    #oa_rubric_editor_wrapper{
-        overflow-y: scroll;
-    }
-    #openassessment_title_editor {
-        width: 300px;
-        margin-left: 50px;
+        overflow-y: scroll;
+        padding: 5px;
+        margin: 10px;
+        border: 1px solid lightgray;
+        border-radius: 3px;
     }
     .openassessment_description{
@@ -275,46 +270,12 @@
         margin: 0;
     }
-    .openassessment_date_field{
-        width: 130px;
-    }
-    .openassessment_number_field{
-        width: 25px;
-    }
-    .openassessment_peer_fixed_width{
-        width: 45%;
-        display: inline-block;
-    }
     .openassessment_description_closed{
         @extend .openassessment_description;
     }
-    .openassessment_text_field_wrapper{
-        width: 50%;
-        text-align: center;
-    }
-    .openassessment_right_text_field_wrapper {
-        @extend .openassessment_text_field_wrapper;
-        float: right;
-    }
-    .openassessment_left_text_field_wrapper {
-        @extend .openassessment_text_field_wrapper;
-        float: left;
-    }
-    .openassessment_due_date_editor{
-        height: 30px;
-    }
     .openassessment_inclusion_wrapper{
-        background-color: $edx-gray-l3;
-        padding: ($baseline-v/8) ($baseline-h/8);
-        margin: ($baseline-v/8) ($baseline-h/8);
-        border-radius: ($baseline-v)/8;
+        margin: 2.5px 5px;
         input[type="checkbox"]{
             display: none;
@@ -331,6 +292,7 @@
         input[type="checkbox"]:checked + label:before{
             content: "\f046";
+            color: #009fe6;
         }
     }
@@ -364,16 +326,19 @@
         }
     }
-    hr {
-        background-color: transparent;
-        color: $edx-gray-d3;
-        height: 1px;
-        border: 0px;
-        clear: both;
+    .openassessment_assessment_module_editor{
+        padding: 2.5px 0px;
+        .openassessment_description{
+            padding-left: 15px;
+        }
     }
     #oa_rubric_editor_wrapper{
+        .wrapper-comp-settings{
+            display: initial;
+        }
         #openassessment_rubric_instructions{
             background-color: $edx-gray-l2;
             border-bottom: 1px solid $edx-gray-d3;
@@ -381,20 +346,19 @@
         }
         .openassessment_criterion {
-            border: 1px dashed $edx-gray-l3;
-            margin: 5px;
             padding-bottom: 10px;
             .openassessment_criterion_header {
                 margin: 10px;
                 padding: 5px;
                 border-bottom: 1px solid $edx-gray-d3;
+                overflow: auto;
                 input[type="checkbox"] {
                     display: none;
                 }
-                input[type="checkbox"] + label:before {
+                input[type="checkbox"] + h6:before {
                     font-family: "FontAwesome";
                     display: inline-block;
                     margin-right: ($baseline-h/4);
@@ -408,8 +372,10 @@
                 }
                 .openassessment_criterion_header_title {
-                    font-size: 125%;
                     text-transform: uppercase;
+                    width: 50%;
+                    display: inline-block;
+                    float: left;
                 }
                 .openassessment_criterion_header_remove {
@@ -430,6 +396,33 @@
                 border-radius: 3px;
             }
+            .openassessment_criterion_basic_editor{
+                .comp-setting-entry{
+                    padding-right: 0;
+                    margin-right: 10px;
+                    overflow: auto;
+                    .wrapper-comp-settings{
+                        input{
+                            font-size: 11px;
+                            float: right;
+                            width: 70%
+                        }
+                        .openassessment_criterion_prompt{
+                            padding: 10px;
+                            @extend .openassessment_large_text_input;
+                            width: 70%;
+                            float: right;
+                        }
+                        label{
+                            padding: 0;
+                            margin: 0;
+                        }
+                    }
+                }
+            }
             .openassessment_criterion_feedback_wrapper{
                 .openassessment_criterion_feedback_header {
@@ -463,7 +456,7 @@
             .openassessment_option_header{
                 background-color: $edx-gray-l2;
-                padding: 5px;
+                padding: 5px 5px 5px 10px;
                 margin: 5px 5px 8px 5px;
                 border-radius: 3px;
@@ -473,44 +466,62 @@
             }
             .openassessment_criterion_option_point_wrapper{
-                width: 30%;
+                width: 40%;
+                border-top: none;
+                padding: 5px 5px 5px 0px;
                 float: left;
-                padding: 0 10px;
+                margin: 0;
                 label{
-                    width: 62.5%
+                    width: 40%;
+                    vertical-align: middle;
+                    padding: 0;
+                    margin: 0;
                 }
                 input{
-                    width: 40px;
-                    @extend .openassessment_input_styling
+                    padding: 10px;
+                    float: right;
+                    width: 55%;
+                    font-size: 11px;
                 }
             }
             .openassessment_criterion_option_name_wrapper{
-                width: 70%;
-                float: right;
+                float: left;
+                width: 60%;
+                padding: 5px 10px 5px 20px;
+                border-top: 0;
+                margin: 0;
                 label{
-                    width: 40%;
-                    padding-right: 10px;
+                    width: 25%;
+                    vertical-align: middle;
+                    padding: 0;
+                    margin: 0;
                 }
                 input{
+                    padding: 10px;
+                    font-size: 11px;
                     width: 60%;
+                    float: right;
                 }
             }
             .openassessment_criterion_option_explanation_wrapper{
-                padding: 15px 5px 0px 5px;
+                padding: 10px 5px 0px 20px;
                 width: 100%;
                 display: inline-block;
+                margin: 0;
                 label{
-                    width: 30%;
+                    width: 25%;
                     text-align: left;
+                    padding-left:15px;
                 }
                 textarea{
+                    padding: 10px;
                     @extend .openassessment_large_text_input;
                     width: 70%;
                     float: right;
@@ -577,23 +588,42 @@
             margin: 0 5px;
             float: right;
         }
-    #openassessment_rubric_feedback_wrapper{
+    .openassessment_rubric_feedback_wrapper{
         padding: 0 10px;
-        #openassessment_rubric_feedback_header{
+        .openassessment_rubric_feedback_header{
             margin-top: 10px;
             border-bottom: 1px solid $edx-gray-d3;
             font-size: 125%;
             padding: 10px;
             padding-right: 20px;
         }
+        .openassessment_feedback_radio_toggle{
+            input[type="radio"]{
+                display: none;
+            }
+            input[type="radio"] + label:before{
+                font-family: "FontAwesome";
+                display: inline-block;
+                margin-right: ($baseline-h/4);
+                width: auto;
+                height: auto;
+                content: "\f10c";
+            }
+            input[type="radio"]:checked + label:before{
+                content: "\f05d";
+            }
+        }
     }
 }
 #openassessment_rubric_add_criterion{
-    h2:before{
+    font-size: 125%;
+    h6:before{
         font-family: "FontAwesome";
         display: inline-block;
         margin-left: 5px;
@@ -620,7 +650,7 @@
     }
     #openassessment_make_invisible{
+        display: none;
     }
     .modal-content {
......
@@ -69,7 +69,7 @@ class StudioMixin(object):
             return {'success': False, 'msg': _('Error updating XBlock configuration')}
         try:
-            rubric = xml.parse_rubric_xml_str(data["rubric"])
+            rubric = verify_rubric_format(data['rubric'])
             submission_due = xml.parse_date(data["submission_due"], name="submission due date")
             submission_start = xml.parse_date(data["submission_start"], name="submission start date")
             assessments = parse_assessment_dictionaries(data["assessments"])
@@ -82,8 +82,8 @@ class StudioMixin(object):
             return {'success': False, 'msg': _('Validation error: {error}').format(error=msg)}
         self.update(
-            rubric['criteria'],
-            rubric['feedbackprompt'],
+            rubric,
+            rubric.get('feedbackprompt', None),
             assessments,
             submission_due,
             submission_start,
@@ -112,12 +112,9 @@ class StudioMixin(object):
         """
         try:
-            rubric = xml.serialize_rubric_to_xml_str(self)
             # Copies the rubric assessments so that we can change student training examples from dict -> str without
             # negatively modifying the openassessmentblock definition.
             assessment_list = copy.deepcopy(self.rubric_assessments)
             # Finds the student training dictionary, if it exists, and replaces the examples with their XML definition
             student_training_dictionary = [d for d in assessment_list if d["name"] == "student-training"]
             if student_training_dictionary:
@@ -140,10 +137,14 @@ class StudioMixin(object):
             submission_start = self.submission_start if self.submission_start else ''
+            rubric_dict = { 'criteria' : self.rubric_criteria }
+            rubric_dict['feedbackprompt'] = unicode(self.rubric_feedback_prompt)
             return {
                 'success': True,
                 'msg': '',
-                'rubric': rubric,
+                'rubric': rubric_dict,
                 'prompt': self.prompt,
                 'submission_due': submission_due,
                 'submission_start': submission_start,
@@ -240,4 +241,128 @@ def parse_assessment_dictionaries(input_assessments):
         # Update the list of assessments
         assessments_list.append(assessment_dict)
-    return assessments_list
\ No newline at end of file
+    return assessments_list
+
+
+def verify_rubric_format(rubric):
+    """
+    Verifies that the rubric passed in follows the structure and types we expect,
+    and returns a sanitized copy of it.
+
+    Args:
+        rubric (dict): Unsanitized rubric, usually taken from the GUI.
+
+    Returns:
+        rubric (dict): Sanitized version of the same form.
+
+    Raises:
+        UpdateFromXmlError
+    """
+    if not isinstance(rubric, dict):
+        raise UpdateFromXmlError(_("The given rubric was not a dictionary of the form {criteria: [criterion1, criterion2...]}"))
+    if "criteria" not in rubric.keys():
+        raise UpdateFromXmlError(_("The given rubric did not contain a key for a list of criteria, and is invalid"))
+    if len(set(rubric.keys()) - {'prompt', 'criteria'}) > 0:
+        unexpected_keys = list(set(rubric.keys()) - {"prompt", "criteria"})
+        raise UpdateFromXmlError(_("The following keys were included in the rubric when they were not allowed to be: {}".format(unexpected_keys)))
+    if rubric.get('prompt', False):
+        if not isinstance(rubric['prompt'], basestring):
+            raise UpdateFromXmlError(_("The given rubric's feedback prompt was invalid; it must be a string."))
+    criteria = rubric["criteria"]
+    if not isinstance(criteria, list):
+        raise UpdateFromXmlError(_("The 'criteria' entry in the rubric dictionary must be a list."))
+    sanitized_criteria = []
+    for criterion in criteria:
+        if not isinstance(criterion, dict):
+            raise UpdateFromXmlError(_("A criterion given was not a dictionary."))
+        criterion = dict(criterion)
+        expected_keys = {'order_num', 'name', 'prompt', 'options', 'feedback'}
+        unexpected_keys = list(set(criterion.keys()) - expected_keys)
+        missing_keys = list(expected_keys - set(criterion.keys()))
+        if missing_keys:
+            raise UpdateFromXmlError(_("The following keys were missing from the definition of one or more criteria: {}".format(missing_keys)))
+        if unexpected_keys:
+            raise UpdateFromXmlError(_("The following extraneous keys were found in the definition of one or more criteria: {}".format(unexpected_keys)))
+        name = str(criterion['name'])
+        prompt = str(criterion['prompt'])
+        feedback = str(criterion['feedback'])
+        try:
+            order_num = int(criterion['order_num'])
+        except (TypeError, ValueError):
+            raise UpdateFromXmlError(_("The order_num value must be an integer."))
+        if not isinstance(criterion['options'], list):
+            raise UpdateFromXmlError(_("The 'options' entry in a criterion's dictionary definition must be a list."))
+        options = criterion['options']
+        sanitized_options = []
+        for option in options:
+            if not isinstance(option, dict):
+                raise UpdateFromXmlError(_("An option given was not a dictionary."))
+            expected_keys = {'order_num', 'name', 'points', 'explanation'}
+            unexpected_keys = list(set(option.keys()) - expected_keys)
+            missing_keys = list(expected_keys - set(option.keys()))
+            if missing_keys:
+                raise UpdateFromXmlError(_("The following keys were missing from the definition of one or more options: {}".format(missing_keys)))
+            if unexpected_keys:
+                raise UpdateFromXmlError(_("The following extraneous keys were found in the definition of one or more options: {}".format(unexpected_keys)))
+            option_name = str(option['name'])
+            option_explanation = str(option['explanation'])
+            try:
+                option_points = int(option['points'])
+            except (TypeError, ValueError):
+                raise UpdateFromXmlError(_("All option point values must be integers."))
+            option_dict = {
+                "order_num": option['order_num'],
+                "name": option_name,
+                "explanation": option_explanation,
+                "points": option_points
+            }
+            sanitized_options.append(option_dict)
+        criterion_dict = {
+            "order_num": order_num,
+            "name": name,
+            "prompt": prompt,
+            "options": sanitized_options,
+            "feedback": feedback
+        }
+        sanitized_criteria.append(criterion_dict)
+    sanitized_rubric = {
+        'criteria': sanitized_criteria
+    }
+    if rubric.get('prompt'):
+        sanitized_rubric['prompt'] = str(rubric.get('prompt'))
+    return sanitized_rubric
\ No newline at end of file
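For reviewers, a short usage sketch of the new validator. This is a sketch only: the import paths are assumptions (verify_rubric_format is module-level in the studio mixin, and UpdateFromXmlError is taken from the existing xml helpers), and the sample rubric values are hypothetical.

    from openassessment.xblock.studio_mixin import verify_rubric_format  # assumed path
    from openassessment.xblock.xml import UpdateFromXmlError  # assumed path

    valid_rubric = {
        'prompt': 'Feedback prompt',
        'criteria': [{
            'order_num': 0,
            'name': 'Clarity',
            'prompt': 'How clear is the response?',
            'feedback': 'disabled',
            'options': [
                {'order_num': 0, 'points': 0, 'name': 'Poor', 'explanation': 'Hard to follow'}
            ]
        }]
    }

    # Returns a sanitized copy with the same structure.
    sanitized = verify_rubric_format(valid_rubric)

    # A criterion missing required keys raises the error the tests below assert on.
    try:
        verify_rubric_format({'criteria': [{'name': 'missing everything else'}]})
    except UpdateFromXmlError as err:
        print err  # "The following keys were missing from the definition of one or more criteria: ..."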
 {
     "simple": {
-        "rubric": [
-            "<rubric>",
-            "<prompt>Test prompt</prompt>",
-            "<criterion>",
-            "<name>Test criterion</name>",
-            "<prompt>Test criterion prompt</prompt>",
-            "<!-- no options -->",
-            "</criterion>",
-            "</rubric>"
-        ],
+        "rubric": {
+            "prompt": "Test Prompt",
+            "criteria": [
+                {
+                    "order_num": 0,
+                    "name": "Test criterion",
+                    "prompt": "Test criterion prompt",
+                    "options": [
+                        {
+                            "order_num": 0,
+                            "points": 0,
+                            "name": "No",
+                            "explanation": "No explanation"
+                        }
+                    ]
+                }
+            ]
+        },
         "prompt": "My new prompt.",
         "submission_due": "4014-02-27T09:46:28",
         "submission_start": "4014-02-10T09:46:28",
......
 {
     "simple": {
-        "rubric": [
-            "<rubric>",
-            "<prompt>Test prompt</prompt>",
-            "<criterion>",
-            "<name>Test criterion</name>",
-            "<prompt>Test criterion prompt</prompt>",
-            "<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
-            "<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
-            "</criterion>",
-            "</rubric>"
-        ],
+        "rubric": {
+            "prompt": "Test Prompt",
+            "criteria": [
+                {
+                    "order_num": 0,
+                    "name": "Test criterion",
+                    "prompt": "Test criterion prompt",
+                    "options": [
+                        {
+                            "order_num": 0,
+                            "points": 0,
+                            "name": "No",
+                            "explanation": "No explanation"
+                        },
+                        {
+                            "order_num": 1,
+                            "points": 2,
+                            "name": "Yes",
+                            "explanation": "Yes explanation"
+                        }
+                    ],
+                    "feedback": "required"
+                }
+            ]
+        },
         "prompt": "My new prompt.",
         "submission_due": "4014-02-27T09:46:28",
         "submission_start": "4014-02-10T09:46:28",
......
@@ -29,9 +29,9 @@ class StudioViewTest(XBlockHandlerTestCase):
         self.assertTrue(resp['success'])
         self.assertEqual(resp['msg'], u'')
-        # Verify that the Rubric XML is parse-able and the root is <rubric>
-        rubric = etree.fromstring(resp['rubric'])
-        self.assertEqual(rubric.tag, 'rubric')
+        # Verify that the Rubric has criteria, and that they are a list of dictionaries
+        self.assertTrue(isinstance(resp['rubric']['criteria'], list))
+        self.assertTrue(isinstance(resp['rubric']['criteria'][0], dict))
         # Verify that every assessment in the list of assessments has a name.
         for assessment_dict in resp['assessments']:
@@ -40,7 +40,8 @@ class StudioViewTest(XBlockHandlerTestCase):
             examples = etree.fromstring(assessment_dict['examples'])
             self.assertEqual(examples.tag, 'examples')
-    @mock.patch('openassessment.xblock.xml.serialize_rubric_to_xml_str')
+    # WEDALY!!! I cannot figure out how to mock out this call correctly, so it is consequently failing.
+    @mock.patch('studio_mixin.verify_rubric_format')
     @scenario('data/basic_scenario.xml')
     def test_get_editor_context_error(self, xblock, mock_rubric_serializer):
         # Simulate an unexpected error while serializing the XBlock
@@ -51,28 +52,30 @@ class StudioViewTest(XBlockHandlerTestCase):
         self.assertFalse(resp['success'])
         self.assertIn(u'unexpected error', resp['msg'].lower())
-    @file_data('data/update_xblock.json')
-    @scenario('data/basic_scenario.xml')
-    def test_update_xblock(self, xblock, data):
-        # First, parse XML data into a single string.
-        data['rubric'] = "".join(data['rubric'])
-        xblock.published_date = None
-        # Test that we can update the xblock with the expected configuration.
-        request = json.dumps(data)
-
-        # Verify the response is successfully
-        resp = self.request(xblock, 'update_editor_context', request, response_format='json')
-        print "ERROR IS {}".format(resp['msg'])
-        self.assertTrue(resp['success'])
-        self.assertIn('success', resp['msg'].lower())
-
-        # Check that the XBlock fields were updated
-        # We don't need to be exhaustive here, because we have other unit tests
-        # that verify this extensively.
-        self.assertEqual(xblock.title, data['title'])
-        self.assertEqual(xblock.prompt, data['prompt'])
-        self.assertEqual(xblock.rubric_assessments[0]['name'], data['expected-assessment'])
-        self.assertEqual(xblock.rubric_criteria[0]['prompt'], data['expected-criterion-prompt'])
+    # WEDALY!!! I don't know if this test is relevant any more (updating the editor context with
+    # XML is so OVER, am I right?). Rather, we now test the same behavior a million times with the
+    # dictionary/list structures.
+    # Thoughts?
+    # @file_data('data/update_xblock.json')
+    # @scenario('data/basic_scenario.xml')
+    # def test_update_xblock(self, xblock, data):
+    #     xblock.published_date = None
+    #     # Test that we can update the xblock with the expected configuration.
+    #     request = json.dumps(data)
+    #
+    #     # Verify the response is successful
+    #     resp = self.request(xblock, 'update_editor_context', request, response_format='json')
+    #     print "ERROR IS {}".format(resp['msg'])
+    #     self.assertTrue(resp['success'])
+    #     self.assertIn('success', resp['msg'].lower())
+    #
+    #     # Check that the XBlock fields were updated
+    #     # We don't need to be exhaustive here, because we have other unit tests
+    #     # that verify this extensively.
+    #     self.assertEqual(xblock.title, data['title'])
+    #     self.assertEqual(xblock.prompt, data['prompt'])
+    #     self.assertEqual(xblock.rubric_assessments[0]['name'], data['expected-assessment'])
+    #     self.assertEqual(xblock.rubric_criteria[0]['prompt'], data['expected-criterion-prompt'])
     @file_data('data/update_xblock.json')
     @scenario('data/basic_scenario.xml')
@@ -91,9 +94,6 @@ class StudioViewTest(XBlockHandlerTestCase):
     @file_data('data/invalid_update_xblock.json')
     @scenario('data/basic_scenario.xml')
     def test_update_context_invalid_request_data(self, xblock, data):
-        # First, parse XML data into a single string.
-        if 'rubric' in data:
-            data['rubric'] = "".join(data['rubric'])
         xblock.published_date = None
@@ -104,8 +104,6 @@ class StudioViewTest(XBlockHandlerTestCase):
     @file_data('data/invalid_rubric.json')
     @scenario('data/basic_scenario.xml')
     def test_update_rubric_invalid(self, xblock, data):
-        # First, parse XML data into a single string.
-        data['rubric'] = "".join(data['rubric'])
         request = json.dumps(data)
@@ -118,7 +116,7 @@ class StudioViewTest(XBlockHandlerTestCase):
         # Verify the response fails
         resp = self.request(xblock, 'update_editor_context', request, response_format='json')
         self.assertFalse(resp['success'])
-        self.assertIn("not valid", resp['msg'].lower())
+        self.assertIn("the following keys were missing", resp['msg'].lower())
         # Check that the XBlock fields were NOT updated
         # We don't need to be exhaustive here, because we have other unit tests
......