Commit a53004f9 by gradyward

Merge pull request #445 from edx/grady/authoring/assessments-gui

GUI to allow editing of ORA problems
parents dfb745c4 e55fbbae
...@@ -2133,47 +2133,119 @@ hr.divider, ...@@ -2133,47 +2133,119 @@ hr.divider,
.step--student-training .message--incorrect.is--hidden .step__header { .step--student-training .message--incorrect.is--hidden .step__header {
border-bottom: none; } border-bottom: none; }
#openassessment-editor .openassessment-editor-content-and-tabs { #openassessment-editor {
width: 100%; margin-bottom: 0; }
height: 370px; } #openassessment-editor .openassessment_editor_content_and_tabs {
#openassessment-editor .openassessment-editor-header { width: 100%;
background-color: #e5e5e5; height: 370px; }
width: 100%; #openassessment-editor #openassessment_editor_header {
top: 0; } background-color: #e5e5e5;
#openassessment-editor #oa-editor-window-title { width: 100%;
float: left; } top: 0; }
#openassessment-editor .oa-editor-tab { #openassessment-editor #oa_editor_window_title {
float: right; float: left; }
padding: 2.5px 5px; #openassessment-editor .oa_editor_tab {
margin: 2.5px 5px; } float: right;
#openassessment-editor .oa-editor-content-wrapper { padding: 2.5px 5px;
height: 100%; margin: 2.5px 5px;
width: 100%; border-radius: 5px;
padding: 5px 10px; } box-shadow: none;
#openassessment-editor .openassessment-prompt-editor { border: 0; }
width: 100%; #openassessment-editor .oa_editor_content_wrapper {
height: 100%; height: 100%;
resize: none; } width: 100%;
#openassessment-editor .openassessment-rubric-editor { padding: 5px 10px; }
width: 100%; #openassessment-editor #openassessment_prompt_editor {
height: 100%; } width: 100%;
#openassessment-editor .openassessment-assessments-editor { height: 100%;
width: 100%; } resize: none;
#openassessment-editor #oa-settings-editor-text-fields { border: none; }
float: left; #openassessment-editor #openassessment_rubric_editor {
width: 30%; } width: 100%;
#openassessment-editor #oa-settings-assessments { height: 100%; }
float: right; #openassessment-editor #oa_basic_settings_editor {
width: 70%; padding: 20px 20px;
height: 100%; } border-bottom: 1px solid #414243; }
#openassessment-editor .xblock-actions { #openassessment-editor #oa_basic_settings_editor #openassessment_title_editor_wrapper label {
background-color: #e5e5e5; width: 25%;
position: absolute; text-align: left; }
width: 100%; #openassessment-editor #oa_basic_settings_editor #openassessment_title_editor_wrapper input {
bottom: 0; } width: 45%;
min-width: 100px; }
#openassessment-editor #openassessment_step_select_description {
margin: 10px 0; }
#openassessment-editor .openassessment_assessment_module_settings_editor {
margin-bottom: 10px;
padding-bottom: 10px;
border-bottom: 1px solid #dadbdc; }
#openassessment-editor .openassessment_indent_line_input {
padding: 5px 20px; }
#openassessment-editor #oa_settings_editor_wrapper {
overflow-y: scroll; }
#openassessment-editor #openassessment_title_editor {
width: 300px;
margin-left: 50px; }
#openassessment-editor .openassessment_description, #openassessment-editor .openassessment_description_closed {
font-size: 75%;
margin: 0; }
#openassessment-editor .openassessment_date_field {
width: 130px; }
#openassessment-editor .openassessment_number_field {
width: 25px; }
#openassessment-editor .openassessment_text_field_wrapper, #openassessment-editor .openassessment_right_text_field_wrapper, #openassessment-editor .openassessment_left_text_field_wrapper {
width: 50%;
text-align: center; }
#openassessment-editor .openassessment_right_text_field_wrapper {
float: right; }
#openassessment-editor .openassessment_left_text_field_wrapper {
float: left; }
#openassessment-editor .openassessment_due_date_editor {
height: 30px; }
#openassessment-editor .openassessment_inclusion_wrapper {
background-color: #dadbdc;
padding: 2.5px 5px;
margin: 2.5px 5px;
border-radius: 2.5px; }
#openassessment-editor .openassessment_inclusion_wrapper input[type="checkbox"] {
display: none; }
#openassessment-editor .openassessment_inclusion_wrapper input[type="checkbox"] + label:before {
font-family: "FontAwesome";
display: inline-block;
margin-right: 10px;
width: auto;
height: auto;
content: "\f096"; }
#openassessment-editor .openassessment_inclusion_wrapper input[type="checkbox"]:checked + label:before {
content: "\f046"; }
#openassessment-editor label {
padding-right: 10px; }
#openassessment-editor .xblock_actions {
background-color: #c8c9ca;
position: absolute;
width: 100%;
bottom: 0; }
#openassessment-editor .ui-widget-header .ui-state-default {
background: #e5e5e5; }
#openassessment-editor .ui-widget-header .ui-state-default a {
color: #414243;
text-transform: uppercase;
outline-color: transparent; }
#openassessment-editor .ui-widget-header .ui-state-active {
background: #414243;
color: whitesmoke; }
#openassessment-editor .ui-widget-header .ui-state-active a {
color: whitesmoke;
text-transform: uppercase;
outline-color: transparent; }
#openassessment-editor hr {
background-color: transparent;
color: #414243;
height: 1px;
border: 0px;
clear: both; }
.modal-content { .modal-content {
height: 500px !important; } height: 470px !important; }
.openassessment .self-assessment__display__header, .openassessment .peer-assessment__display__header, .openassessment .step__header { .openassessment .self-assessment__display__header, .openassessment .peer-assessment__display__header, .openassessment .step__header {
margin-bottom: 0 !important; margin-bottom: 0 !important;
......
...@@ -17,7 +17,22 @@ describe("OpenAssessment.StudioView", function() { ...@@ -17,7 +17,22 @@ describe("OpenAssessment.StudioView", function() {
this.titleField = ""; this.titleField = "";
this.submissionStartField = ""; this.submissionStartField = "";
this.submissionDueField = ""; this.submissionDueField = "";
this.assessmentsXmlBox = "";
this.hasPeer = true;
this.hasSelf = true;
this.hasTraining = false;
this.hasAI = false;
this.peerMustGrade = 2;
this.peerGradedBy = 3;
this.peerStart = '';
this.peerDue = '';
this.selfStart = '';
this.selfDue = '';
this.aiTrainingExamplesCodeBox = "";
this.studentTrainingExamplesCodeBox = "";
this.isReleased = false; this.isReleased = false;
...@@ -28,16 +43,42 @@ describe("OpenAssessment.StudioView", function() { ...@@ -28,16 +43,42 @@ describe("OpenAssessment.StudioView", function() {
this.loadEditorContext = function() { this.loadEditorContext = function() {
var prompt = this.promptBox; var prompt = this.promptBox;
var rubric = this.rubricXmlBox; var rubric = this.rubricXmlBox;
var settings = { var title = this.titleField;
title: this.titleField, var submission_start = this.submissionStartField;
submission_start: this.submissionStartField, var submission_due = this.submissionDueField;
submission_due: this.submissionDueField, var assessments = [];
assessments: this.assessmentsXmlBox if (this.hasTraining){
}; assessments = assessments.concat({
"name": "student-training",
"examples": this.studentTrainingExamplesCodeBox
});
}
if (this.hasPeer){
assessments = assessments.concat({
"name": "peer-assessment",
"start": this.peerStart,
"due": this.peerDue,
"must_grade": this.peerMustGrade,
"must_be_graded_by": this.peerGradedBy
});
}
if (this.hasSelf){
assessments = assessments.concat({
"name": "self-assessment",
"start": this.selfStart,
"due": this.selfDue
});
}
if (this.hasAI){
assessments = assessments.concat({
"name": "example-based-assessment",
"examples": this.aiTrainingExamplesCodeBox
});
}
if (!this.loadError) { if (!this.loadError) {
return $.Deferred(function(defer) { return $.Deferred(function(defer) {
defer.resolveWith(this, [prompt, rubric, settings]); defer.resolveWith(this, [prompt, rubric, title, submission_start, submission_due, assessments]);
}).promise(); }).promise();
} }
else { else {
...@@ -45,14 +86,39 @@ describe("OpenAssessment.StudioView", function() { ...@@ -45,14 +86,39 @@ describe("OpenAssessment.StudioView", function() {
} }
}; };
this.updateEditorContext = function(prompt, rubricXml, title, sub_start, sub_due, assessmentsXml) { this.updateEditorContext = function(prompt, rubricXml, title, sub_start, sub_due, assessments) {
if (!this.updateError) { if (!this.updateError) {
this.promptBox = prompt; this.promptBox = prompt;
this.rubricXmlBox = rubricXml; this.rubricXmlBox = rubricXml;
this.titleField = title; this.titleField = title;
this.submissionStartField = sub_start; this.submissionStartField = sub_start;
this.submissionDueField = sub_due; this.submissionDueField = sub_due;
this.assessmentsXmlBox = assessmentsXml;
this.hasPeer = false;
this.hasSelf = false;
this.hasAI = false;
this.hasTraining = false;
for (var i = 0; i < assessments.length; i++) {
var assessment = assessments[i];
if (assessment.name == 'peer-assessment') {
this.hasPeer = true;
this.peerMustGrade = assessment.must_grade;
this.peerGradedBy = assessment.must_be_graded_by;
this.peerStart = assessment.start;
this.peerDue = assessment.due;
} else if (assessment.name == 'self-assessment') {
this.hasSelf = true;
this.selfStart = assessment.start;
this.selfDue = assessment.due;
} else if (assessment.name == 'example-based-assessment') {
this.hasAI = true;
this.aiTrainingExamplesCodeBox = assessment.examples;
} else if (assessment.name == 'student-training') {
this.hasTraining = true;
this.studentTrainingExamplesCodeBox = assessment.examples;
}
}
return $.Deferred(function(defer) { return $.Deferred(function(defer) {
defer.resolve(); defer.resolve();
}).promise(); }).promise();
...@@ -73,6 +139,52 @@ describe("OpenAssessment.StudioView", function() { ...@@ -73,6 +139,52 @@ describe("OpenAssessment.StudioView", function() {
var server = null; var server = null;
var view = null; var view = null;
var prompt = "How much do you like waffles?";
var rubric =
"<rubric>" +
"<criterion>"+
"<name>Proper Appreciation of Gravity</name>"+
"<prompt>How much respect did the person give waffles?</prompt>"+
"<option points=\"0\"><name>No</name><explanation>Not enough</explanation></option>"+
"<option points=\"2\"><name>Yes</name><explanation>An appropriate Amount</explanation></option>"+
"</criterion>"+
"</rubric>";
var title = "The most important of all questions.";
var subStart = "";
var subDue = "2014-10-1T10:00:00";
var assessments = [
{
"name": "student-training",
"examples":
"<examples>"+
"<example>" +
"<answer>ẗëṡẗ äṅṡẅëṛ</answer>" +
"<select criterion=\"Test criterion\" option=\"Yes\" />" +
"<select criterion=\"Another test criterion\" option=\"No\" />" +
"</example>" +
"<example>" +
"<answer>äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ</answer>" +
"<select criterion=\"Another test criterion\" option=\"Yes\" />" +
"<select criterion=\"Test criterion\" option=\"No\" />" +
"</example>"+
"</examples>",
"start": "",
"due": ""
},
{
"name": "peer-assessment",
"must_grade": 5,
"must_be_graded_by": 3,
"start": "2014-10-04T00:00:00",
"due": ""
},
{
"name": "self-assessment",
"start": "",
"due": ""
}
];
beforeEach(function() { beforeEach(function() {
// Load the DOM fixture // Load the DOM fixture
...@@ -97,11 +209,9 @@ describe("OpenAssessment.StudioView", function() { ...@@ -97,11 +209,9 @@ describe("OpenAssessment.StudioView", function() {
// Expect that the XML definition(s) were loaded // Expect that the XML definition(s) were loaded
var rubric = view.rubricXmlBox.getValue(); var rubric = view.rubricXmlBox.getValue();
var prompt = view.promptBox.value; var prompt = view.promptBox.value;
var assessments = view.assessmentsXmlBox.getValue()
expect(prompt).toEqual(''); expect(prompt).toEqual('');
expect(rubric).toEqual(''); expect(rubric).toEqual('');
expect(assessments).toEqual('');
}); });
it("saves the Editor Context definition", function() { it("saves the Editor Context definition", function() {
...@@ -135,6 +245,34 @@ describe("OpenAssessment.StudioView", function() { ...@@ -135,6 +245,34 @@ describe("OpenAssessment.StudioView", function() {
expect(view.confirmPostReleaseUpdate).toHaveBeenCalled(); expect(view.confirmPostReleaseUpdate).toHaveBeenCalled();
}); });
it("full integration test for load and update_editor_context", function() {
server.updateEditorContext(prompt, rubric, title, subStart, subDue, assessments);
view.load();
expect(view.promptBox.value).toEqual(prompt);
expect(view.rubricXmlBox.getValue()).toEqual(rubric);
expect(view.titleField.value).toEqual(title);
expect(view.submissionStartField.value).toEqual(subStart);
expect(view.submissionDueField.value).toEqual(subDue);
expect(view.hasPeer.prop('checked')).toEqual(true);
expect(view.hasSelf.prop('checked')).toEqual(true);
expect(view.hasAI.prop('checked')).toEqual(false);
expect(view.hasTraining.prop('checked')).toEqual(true);
expect(view.peerMustGrade.prop('value')).toEqual('5');
expect(view.peerGradedBy.prop('value')).toEqual('3');
expect(view.peerDue.prop('value')).toEqual("");
expect(view.selfStart.prop('value')).toEqual("");
expect(view.selfDue.prop('value')).toEqual("");
expect(view.aiTrainingExamplesCodeBox.getValue()).toEqual("");
expect(view.studentTrainingExamplesCodeBox.getValue()).toEqual(assessments[0].examples);
expect(view.peerStart.prop('value')).toEqual("2014-10-04T00:00:00");
view.titleField.value = "This is the new title.";
view.updateEditorContext();
expect(server.titleField).toEqual("This is the new title.");
});
it("cancels editing", function() { it("cancels editing", function() {
view.cancel(); view.cancel();
expect(runtime.notify).toHaveBeenCalledWith('cancel', {}); expect(runtime.notify).toHaveBeenCalledWith('cancel', {});
......
...@@ -51,17 +51,24 @@ describe("OpenAssessment.Server", function() { ...@@ -51,17 +51,24 @@ describe("OpenAssessment.Server", function() {
'</criterion>'+ '</criterion>'+
'</rubric>'; '</rubric>';
var assessments = '<assessments>' + var ASSESSMENTS = [
'<assessment name="peer-assessment" must_grade="1" must_be_graded_by="1" due="2000-01-02"/>' + {
'<assessment name="self-assessment" due="2000-01-8"/>' + "name": "peer-assessment",
'</assessments>'; "must_grade": 5,
"must_be_graded_by": 3,
var SETTINGS = { "start": "",
title: 'This is the title.', "due": "4014-03-10T00:00:00"
submission_start: '2012-10-09T00:00:00', },
submission_due: '2015-10-10T00:00:00', {
assessments: assessments "name": "self-assessment",
}; "start": "",
"due": ""
}
];
var TITLE = 'This is the title.';
var SUBMISSION_START = '2012-10-09T00:00:00';
var SUBMISSION_DUE = '2015-10-10T00:00:00';
beforeEach(function() { beforeEach(function() {
// Create the server // Create the server
...@@ -184,20 +191,33 @@ describe("OpenAssessment.Server", function() { ...@@ -184,20 +191,33 @@ describe("OpenAssessment.Server", function() {
}); });
it("loads the XBlock's Context definition", function() { it("loads the XBlock's Context definition", function() {
stubAjax(true, { success: true, prompt: PROMPT, rubric: RUBRIC, settings: SETTINGS}); stubAjax(true, {
success: true, prompt: PROMPT, rubric: RUBRIC, title: TITLE,
submission_start: SUBMISSION_START, submission_due: SUBMISSION_DUE, assessments: ASSESSMENTS
});
var loadedPrompt = ""; var loadedPrompt = "";
var loadedRubric = ""; var loadedRubric = "";
var loadedSettings = ""; var loadedAssessments = [];
server.loadEditorContext().done(function(prompt, rubric, settings) { var loadedTitle = "";
var loadedStart = "";
var loadedDue = "";
server.loadEditorContext().done(function(prompt, rubric, title, sub_start, sub_due, assessments) {
loadedPrompt = prompt; loadedPrompt = prompt;
loadedRubric = rubric; loadedRubric = rubric;
loadedSettings = settings; loadedTitle = title;
loadedStart = sub_start;
loadedDue = sub_due;
loadedAssessments = assessments;
}); });
expect(loadedPrompt).toEqual(PROMPT); expect(loadedPrompt).toEqual(PROMPT);
expect(loadedRubric).toEqual(RUBRIC); expect(loadedRubric).toEqual(RUBRIC);
expect(loadedSettings).toEqual(SETTINGS); expect(loadedTitle).toEqual(TITLE);
expect(loadedStart).toEqual(SUBMISSION_START);
expect(loadedDue).toEqual(SUBMISSION_DUE);
expect(loadedAssessments).toEqual(ASSESSMENTS);
expect($.ajax).toHaveBeenCalledWith({ expect($.ajax).toHaveBeenCalledWith({
url: '/editor_context', type: "POST", data: '""' url: '/editor_context', type: "POST", data: '""'
}); });
...@@ -207,11 +227,14 @@ describe("OpenAssessment.Server", function() { ...@@ -207,11 +227,14 @@ describe("OpenAssessment.Server", function() {
stubAjax(true, { success: true }); stubAjax(true, { success: true });
server.updateEditorContext( server.updateEditorContext(
PROMPT, RUBRIC, SETTINGS.title, SETTINGS.submission_start, SETTINGS.submission_due, SETTINGS.assessments PROMPT, RUBRIC, TITLE, SUBMISSION_START, SUBMISSION_DUE, ASSESSMENTS
); );
expect($.ajax).toHaveBeenCalledWith({ expect($.ajax).toHaveBeenCalledWith({
type: "POST", url: '/update_editor_context', type: "POST", url: '/update_editor_context',
data: JSON.stringify({prompt: PROMPT, rubric: RUBRIC, settings: SETTINGS}) data: JSON.stringify({
prompt: PROMPT, rubric: RUBRIC, title: TITLE, submission_start: SUBMISSION_START,
submission_due: SUBMISSION_DUE, assessments: ASSESSMENTS
})
}); });
}); });
......
...@@ -157,9 +157,7 @@ function OpenAssessmentBlock(runtime, element) { ...@@ -157,9 +157,7 @@ function OpenAssessmentBlock(runtime, element) {
/** /**
Render views within the base view on page load. Render views within the base view on page load.
**/ **/
$(function($) { var server = new OpenAssessment.Server(runtime, element);
var server = new OpenAssessment.Server(runtime, element); var view = new OpenAssessment.BaseView(runtime, element, server);
var view = new OpenAssessment.BaseView(runtime, element, server); view.load();
view.load();
});
} }
...@@ -356,7 +356,9 @@ OpenAssessment.Server.prototype = { ...@@ -356,7 +356,9 @@ OpenAssessment.Server.prototype = {
$.ajax({ $.ajax({
type: "POST", url: url, data: "\"\"" type: "POST", url: url, data: "\"\""
}).done(function(data) { }).done(function(data) {
if (data.success) { defer.resolveWith(this, [data.prompt, data.rubric, data.settings]); } if (data.success) { defer.resolveWith(this, [
data.prompt, data.rubric, data.title, data.submission_start, data.submission_due, data.assessments
]); }
else { defer.rejectWith(this, [data.msg]); } else { defer.rejectWith(this, [data.msg]); }
}).fail(function(data) { }).fail(function(data) {
defer.rejectWith(this, [gettext('This problem could not be loaded.')]); defer.rejectWith(this, [gettext('This problem could not be loaded.')]);
...@@ -367,7 +369,7 @@ OpenAssessment.Server.prototype = { ...@@ -367,7 +369,7 @@ OpenAssessment.Server.prototype = {
/** /**
Update the XBlock's XML definition on the server. Update the XBlock's XML definition on the server.
Returns: Return
A JQuery promise, which resolves with no arguments A JQuery promise, which resolves with no arguments
and fails with an error message. and fails with an error message.
...@@ -378,15 +380,16 @@ OpenAssessment.Server.prototype = { ...@@ -378,15 +380,16 @@ OpenAssessment.Server.prototype = {
function(err) { console.log(err); } function(err) { console.log(err); }
); );
**/ **/
updateEditorContext: function(prompt, rubricXml, title, sub_start, sub_due, assessmentsXml) { updateEditorContext: function(prompt, rubricXml, title, sub_start, sub_due, assessments) {
var url = this.url('update_editor_context'); var url = this.url('update_editor_context');
var settings = { var payload = JSON.stringify({
'prompt': prompt,
'rubric': rubricXml,
'title': title, 'title': title,
'submission_start': sub_start, 'submission_start': sub_start,
'submission_due': sub_due, 'submission_due': sub_due,
'assessments': assessmentsXml 'assessments': assessments
}; });
var payload = JSON.stringify({'prompt': prompt, 'rubric': rubricXml, 'settings': settings});
return $.Deferred(function(defer) { return $.Deferred(function(defer) {
$.ajax({ $.ajax({
type: "POST", url: url, data: payload type: "POST", url: url, data: payload
......
...@@ -171,68 +171,188 @@ ...@@ -171,68 +171,188 @@
// -------------------- // --------------------
#openassessment-editor { #openassessment-editor {
margin-bottom: 0;
.openassessment-editor-content-and-tabs { .openassessment_editor_content_and_tabs {
width: 100%; width: 100%;
height: 370px; height: 370px;
} }
.openassessment-editor-header{ #openassessment_editor_header{
background-color: #e5e5e5; background-color: #e5e5e5;
width: 100%; width: 100%;
top: 0; top: 0;
} }
#oa-editor-window-title{ #oa_editor_window_title{
float: left; float: left;
} }
.oa-editor-tab{ .oa_editor_tab{
float: right; float: right;
padding: ($baseline-v/8) ($baseline-h/8); padding: ($baseline-v/8) ($baseline-h/8);
margin: ($baseline-v/8) ($baseline-h/8); margin: ($baseline-v/8) ($baseline-h/8);
border-radius: ($baseline-v/4);
box-shadow: none;
border: 0;
} }
.oa-editor-content-wrapper { .oa_editor_content_wrapper {
height: 100%; height: 100%;
width: 100%; width: 100%;
padding: ($baseline-v/4) ($baseline-h/4); padding: ($baseline-v/4) ($baseline-h/4);
} }
.openassessment-prompt-editor { #openassessment_prompt_editor {
width: 100%; width: 100%;
height: 100%; height: 100%;
resize: none; resize: none;
border: none;
} }
.openassessment-rubric-editor { #openassessment_rubric_editor {
width: 100%; width: 100%;
height: 100%; height: 100%;
} }
.openassessment-assessments-editor { #oa_basic_settings_editor {
width: 100%; padding: 20px 20px;
border-bottom: 1px solid $edx-gray-d3;
#openassessment_title_editor_wrapper{
label{
width: 25%;
text-align: left;
}
input{
width: 45%;
min-width: 100px;
}
}
} }
#oa-settings-editor-text-fields { #openassessment_step_select_description{
float: left; margin: 10px 0;
width: 30%; }
.openassessment_assessment_module_settings_editor{
margin-bottom: 10px;
padding-bottom: 10px;
border-bottom: 1px solid $edx-gray-l3;
}
.openassessment_indent_line_input{
padding: 5px 20px;
}
#oa_settings_editor_wrapper {
overflow-y: scroll;
}
#openassessment_title_editor {
width: 300px;
margin-left: 50px;
} }
#oa-settings-assessments{ .openassessment_description{
font-size: 75%;
margin: 0;
}
.openassessment_date_field{
width: 130px;
}
.openassessment_number_field{
width: 25px;
}
.openassessment_description_closed{
@extend .openassessment_description;
}
.openassessment_text_field_wrapper{
width: 50%;
text-align: center;
}
.openassessment_right_text_field_wrapper {
@extend .openassessment_text_field_wrapper;
float: right; float: right;
width: 70%;
height: 100%;
} }
.xblock-actions { .openassessment_left_text_field_wrapper {
background-color: #e5e5e5; @extend .openassessment_text_field_wrapper;
float: left;
}
.openassessment_due_date_editor{
height: 30px;
}
.openassessment_inclusion_wrapper{
background-color: $edx-gray-l3;
padding: ($baseline-v/8) ($baseline-h/8);
margin: ($baseline-v/8) ($baseline-h/8);
border-radius: ($baseline-v)/8;
input[type="checkbox"]{
display: none;
}
input[type="checkbox"] + label:before {
font-family: "FontAwesome";
display: inline-block;
margin-right: ($baseline-h/4);
width: auto;
height: auto;
content: "\f096";
}
input[type="checkbox"]:checked + label:before{
content: "\f046";
}
}
label{
padding-right: 10px;
}
.xblock_actions {
background-color: $edx-gray-l2;
position: absolute; position: absolute;
width: 100%; width: 100%;
bottom: 0; bottom: 0;
} }
.ui-widget-header .ui-state-default{
background: #e5e5e5;
a{
color: $edx-gray-d3;
text-transform: uppercase;
outline-color: transparent;
}
}
.ui-widget-header .ui-state-active{
background: $edx-gray-d3;
color: $white;
a{
color: $white;
text-transform: uppercase;
outline-color: transparent;
}
}
hr {
background-color: transparent;
color: $edx-gray-d3;
height: 1px;
border: 0px;
clear: both;
}
} }
.modal-content { .modal-content {
height: 500px !important; height: 470px !important;
} }
...@@ -2,14 +2,16 @@ ...@@ -2,14 +2,16 @@
Studio editing view for OpenAssessment XBlock. Studio editing view for OpenAssessment XBlock.
""" """
import pkg_resources import pkg_resources
import copy
import logging import logging
from django.template.context import Context from django.template.context import Context
from django.template.loader import get_template from django.template.loader import get_template
from django.utils.translation import ugettext as _ from django.utils.translation import ugettext as _, ugettext
from xblock.core import XBlock from xblock.core import XBlock
from xblock.fragment import Fragment from xblock.fragment import Fragment
from openassessment.xblock import xml from openassessment.xblock import xml
from openassessment.xblock.validation import validator from openassessment.xblock.validation import validator
from openassessment.xblock.xml import UpdateFromXmlError, parse_date, parse_examples_xml_str
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
...@@ -42,12 +44,14 @@ class StudioMixin(object): ...@@ -42,12 +44,14 @@ class StudioMixin(object):
Update the XBlock's configuration. Update the XBlock's configuration.
Args: Args:
data (dict): Data from the request; should have a value for the keys data (dict): Data from the request; should have a value for the keys: 'rubric', 'prompt',
'rubric', 'settings' and 'prompt'. The 'rubric' should be an XML 'title', 'submission_start', 'submission_due', and 'assessments'.
representation of the new rubric. The 'prompt' should be a plain -- The 'rubric' should be an XML representation of the new rubric.
text prompt. The 'settings' should be a dict of 'title', -- The 'prompt' and 'title' should be plain text.
'submission_due', 'submission_start' and the XML configuration for -- The dates 'submission_start' and 'submission_due' are both ISO strings
all 'assessments'. -- The 'assessments' is a list of assessment dictionaries (much like self.rubric_assessments)
with the notable exception that all examples (for Student Training and eventually AI)
are in XML string format and need to be parsed into dictionaries.
Kwargs: Kwargs:
suffix (str): Not used suffix (str): Not used
...@@ -55,18 +59,20 @@ class StudioMixin(object): ...@@ -55,18 +59,20 @@ class StudioMixin(object):
Returns: Returns:
dict with keys 'success' (bool) and 'msg' (str) dict with keys 'success' (bool) and 'msg' (str)
""" """
missing_keys = list({'rubric', 'settings', 'prompt'} - set(data.keys())) missing_keys = list(
{'rubric', 'prompt', 'title', 'assessments', 'submission_start', 'submission_due'} - set(data.keys())
)
if missing_keys: if missing_keys:
logger.warn( logger.warn(
'Must specify the following keys in request JSON dict: {}'.format(missing_keys) 'Must specify the following missing keys in request JSON dict: {}'.format(missing_keys)
) )
return {'success': False, 'msg': _('Error updating XBlock configuration')} return {'success': False, 'msg': _('Error updating XBlock configuration')}
settings = data['settings']
try: try:
rubric = xml.parse_rubric_xml_str(data['rubric']) rubric = xml.parse_rubric_xml_str(data["rubric"])
assessments = xml.parse_assessments_xml_str(settings['assessments']) submission_due = xml.parse_date(data["submission_due"], name="submission due date")
submission_due = xml.parse_date(settings["submission_due"]) submission_start = xml.parse_date(data["submission_start"], name="submission start date")
submission_start = xml.parse_date(settings["submission_start"]) assessments = parse_assessment_dictionaries(data["assessments"])
except xml.UpdateFromXmlError as ex: except xml.UpdateFromXmlError as ex:
return {'success': False, 'msg': _('An error occurred while saving: {error}').format(error=ex)} return {'success': False, 'msg': _('An error occurred while saving: {error}').format(error=ex)}
...@@ -81,7 +87,7 @@ class StudioMixin(object): ...@@ -81,7 +87,7 @@ class StudioMixin(object):
assessments, assessments,
submission_due, submission_due,
submission_start, submission_start,
settings["title"], data["title"],
data["prompt"] data["prompt"]
) )
return {'success': True, 'msg': 'Successfully updated OpenAssessment XBlock'} return {'success': True, 'msg': 'Successfully updated OpenAssessment XBlock'}
...@@ -100,15 +106,27 @@ class StudioMixin(object): ...@@ -100,15 +106,27 @@ class StudioMixin(object):
suffix (str): Not used suffix (str): Not used
Returns: Returns:
dict with keys 'success' (bool), 'message' (unicode), dict with keys
'rubric' (unicode), 'prompt' (unicode), and 'settings' (dict) 'success' (bool), 'message' (unicode), 'rubric' (unicode), 'prompt' (unicode),
'title' (unicode), 'submission_start' (unicode), 'submission_due' (unicode), 'assessments (dict)
""" """
try: try:
assessments = xml.serialize_assessments_to_xml_str(self)
rubric = xml.serialize_rubric_to_xml_str(self) rubric = xml.serialize_rubric_to_xml_str(self)
# We do not expect serialization to raise an exception,
# but if it does, handle it gracefully. # Copies the rubric assessments so that we can change student training examples from dict -> str without
# negatively modifying the openassessmentblock definition.
assessment_list = copy.deepcopy(self.rubric_assessments)
# Finds the student training dictionary, if it exists, and replaces the examples with their XML definition
student_training_dictionary = [d for d in assessment_list if d["name"] == "student-training"]
if student_training_dictionary:
# Our for loop will return a list. Select the first element of that list if it exists.
student_training_dictionary = student_training_dictionary[0]
examples = xml.serialize_examples_to_xml_str(student_training_dictionary)
student_training_dictionary["examples"] = examples
# We do not expect serialization to raise an exception, but if it does, handle it gracefully.
except Exception as ex: except Exception as ex:
msg = _('An unexpected error occurred while loading the problem: {error}').format(error=ex) msg = _('An unexpected error occurred while loading the problem: {error}').format(error=ex)
logger.error(msg) logger.error(msg)
...@@ -122,19 +140,15 @@ class StudioMixin(object): ...@@ -122,19 +140,15 @@ class StudioMixin(object):
submission_start = self.submission_start if self.submission_start else '' submission_start = self.submission_start if self.submission_start else ''
settings = {
'submission_due': submission_due,
'submission_start': submission_start,
'title': self.title,
'assessments': assessments
}
return { return {
'success': True, 'success': True,
'msg': '', 'msg': '',
'rubric': rubric, 'rubric': rubric,
'prompt': self.prompt, 'prompt': self.prompt,
'settings': settings 'submission_due': submission_due,
'submission_start': submission_start,
'title': self.title,
'assessments': assessment_list
} }
@XBlock.json_handler @XBlock.json_handler
...@@ -157,3 +171,73 @@ class StudioMixin(object): ...@@ -157,3 +171,73 @@ class StudioMixin(object):
'success': True, 'msg': u'', 'success': True, 'msg': u'',
'is_released': self.is_released() 'is_released': self.is_released()
} }
def parse_assessment_dictionaries(input_assessments):
    """
    Parses the elements of assessment dictionaries returned by the Studio UI
    into storable rubric_assessments.

    Args:
        input_assessments (list of dict): A list of the dictionaries that are
            assembled in Javascript to represent their modules. Some changes
            need to be made between this and the result:
            -- Parse the XML examples from the Student Training and or AI
            -- Parse all dates (including the assessment dates) correctly

    Returns:
        (list of dict): Can be directly assigned/stored in an
            openassessmentblock.rubric_assessments

    Raises:
        UpdateFromXmlError: An assessment was missing its "name", a date could
            not be parsed, a grading count was not a positive integer, or the
            training examples XML was malformed.
    """
    assessments_list = []
    for assessment in input_assessments:
        assessment_dict = {}

        # Assessment name (required)
        if 'name' not in assessment:
            raise UpdateFromXmlError(_('All "assessment" elements must contain a "name" element.'))
        assessment_dict['name'] = assessment.get('name')

        # Assessment start date (optional; None when absent).
        # parse_date raises UpdateFromXmlError for unparseable values.
        if 'start' in assessment:
            assessment_dict['start'] = parse_date(
                assessment.get('start'),
                name="{} start date".format(assessment.get('name'))
            )
        else:
            assessment_dict['start'] = None

        # Assessment due date (optional; None when absent)
        if 'due' in assessment:
            assessment_dict['due'] = parse_date(
                assessment.get('due'),
                name="{} due date".format(assessment.get('name'))
            )
        else:
            assessment_dict['due'] = None

        # Assessment must_grade (optional). The error message promises a
        # *positive* integer, so reject zero and negative values as well as
        # non-numeric input.
        if 'must_grade' in assessment:
            try:
                must_grade = int(assessment.get('must_grade'))
            except (ValueError, TypeError):
                raise UpdateFromXmlError(_('The "must_grade" value must be a positive integer.'))
            if must_grade <= 0:
                raise UpdateFromXmlError(_('The "must_grade" value must be a positive integer.'))
            assessment_dict['must_grade'] = must_grade

        # Assessment must_be_graded_by (optional); same positivity contract.
        if 'must_be_graded_by' in assessment:
            try:
                must_be_graded_by = int(assessment.get('must_be_graded_by'))
            except (ValueError, TypeError):
                raise UpdateFromXmlError(_('The "must_be_graded_by" value must be a positive integer.'))
            if must_be_graded_by <= 0:
                raise UpdateFromXmlError(_('The "must_be_graded_by" value must be a positive integer.'))
            assessment_dict['must_be_graded_by'] = must_be_graded_by

        # Training examples (can be for AI OR for Student Training).
        # Re-raise parsing failures with the assessment name for context.
        if 'examples' in assessment:
            try:
                assessment_dict['examples'] = parse_examples_xml_str(assessment.get('examples'))
            except UpdateFromXmlError as ex:
                raise UpdateFromXmlError(_("There was an error in parsing the {name} examples: {ex}").format(
                    name=assessment_dict['name'], ex=ex
                ))

        # Update the list of assessments
        assessments_list.append(assessment_dict)

    return assessments_list
\ No newline at end of file
...@@ -11,17 +11,24 @@ ...@@ -11,17 +11,24 @@
"</rubric>" "</rubric>"
], ],
"prompt": "My new prompt.", "prompt": "My new prompt.",
"settings": { "submission_due": "4014-02-27T09:46:28",
"title": "My new title.", "submission_start": "4014-02-10T09:46:28",
"assessments": [ "title": "My new title.",
"<assessments>", "assessments": [
"<assessment name=\"peer-assessment\" must_grade=\"5\" must_be_graded_by=\"3\" />", {
"<assessment name=\"self-assessment\" />", "name": "peer-assessment",
"</assessments>" "must_grade": 5,
], "must_be_graded_by": 3,
"submission_due": "2014-02-27T09:46:28", "start": "",
"submission_start": "2014-02-10T09:46:28" "due": "4014-03-10T00:00:00"
}, },
{
"name": "self-assessment",
"start": "",
"due": ""
}
],
"expected-assessment": "peer-assessment", "expected-assessment": "peer-assessment",
"expected-criterion-prompt": "Test criterion prompt" "expected-criterion-prompt": "Test criterion prompt"
} }
......
{ {
"no_rubric": { "no_rubric": {
"prompt": "My new prompt.", "prompt": "My new prompt.",
"settings": { "title": "My new title.",
"title": "My new title.", "assessments": [
"assessments": [ {
"<assessments>", "name": "peer-assessment",
"<assessment name=\"peer-assessment\" must_grade=\"5\" must_be_graded_by=\"3\" />", "must_grade": 5,
"<assessment name=\"self-assessment\" />", "must_be_graded_by": 3,
"</assessments>" "start": "",
], "due": ""
"submission_due": "2014-02-27T09:46:28", },
"submission_start": "2014-02-10T09:46:28" {
}, "name": "self-assessment",
"start": "",
"due": ""
}
],
"submission_due": "2014-02-27T09:46:28",
"submission_start": "2014-02-10T09:46:28",
"expected_error": "error" "expected_error": "error"
}, },
"no_prompt": { "no_prompt": {
...@@ -26,20 +32,26 @@ ...@@ -26,20 +32,26 @@
"</criterion>", "</criterion>",
"</rubric>" "</rubric>"
], ],
"settings": { "title": "My new title.",
"title": "My new title.", "assessments": [
"assessments": [ {
"<assessments>", "name": "peer-assessment",
"<assessment name=\"peer-assessment\" must_grade=\"5\" must_be_graded_by=\"3\" />", "must_grade": 5,
"<assessment name=\"self-assessment\" />", "must_be_graded_by": 3,
"</assessments>" "start": "",
], "due": ""
"submission_due": "2014-02-27T09:46:28", },
"submission_start": "2014-02-10T09:46:28" {
}, "name": "self-assessment",
"start": "",
"due": ""
}
],
"submission_due": "2014-02-27T09:46:28",
"submission_start": "2014-02-10T09:46:28",
"expected_error": "error" "expected_error": "error"
}, },
"no_settings": { "no_submission_due": {
"rubric": [ "rubric": [
"<rubric>", "<rubric>",
"<prompt>Test prompt</prompt>", "<prompt>Test prompt</prompt>",
...@@ -52,9 +64,59 @@ ...@@ -52,9 +64,59 @@
"</rubric>" "</rubric>"
], ],
"prompt": "My new prompt.", "prompt": "My new prompt.",
"title": "My new title.",
"assessments": [
{
"name": "peer-assessment",
"must_grade": 5,
"must_be_graded_by": 3,
"start": "",
"due": ""
},
{
"name": "self-assessment",
"start": "",
"due": ""
}
],
"submission_start": "2014-02-10T09:46:28",
"expected_error": "error" "expected_error": "error"
}, },
"invalid_dates": { "invalid_dates_one": {
"rubric": [
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>"
],
"prompt": "My new prompt.",
"title": "My new title.",
"assessments": [
{
"name": "peer-assessment",
"must_grade": 5,
"must_be_graded_by": 3,
"start": "",
"due": ""
},
{
"name": "self-assessment",
"start": "",
"due": ""
}
],
"submission_due": "2012-02-27T09:46:28",
"submission_start": "2015-02-10T09:46:28",
"expected_error": "cannot be later"
},
"invalid_dates_two": {
"rubric": [ "rubric": [
"<rubric>", "<rubric>",
"<prompt>Test prompt</prompt>", "<prompt>Test prompt</prompt>",
...@@ -67,17 +129,25 @@ ...@@ -67,17 +129,25 @@
"</rubric>" "</rubric>"
], ],
"prompt": "My new prompt.", "prompt": "My new prompt.",
"settings": {
"title": "My new title.", "title": "My new title.",
"assessments": [ "assessments": [
"<assessments>", {
"<assessment name=\"peer-assessment\" must_grade=\"5\" must_be_graded_by=\"3\" />", "name": "peer-assessment",
"<assessment name=\"self-assessment\" start=\"2010-01-01\" due=\"2003-01-01\"/>", "must_grade": 5,
"</assessments>" "must_be_graded_by": 3,
"start": "",
"due": ""
},
{
"name": "self-assessment",
"start": "",
"due": "2003-01-02T00:00:00"
}
], ],
"submission_due": "2012-02-27T09:46:28", "submission_due": "2012-02-27T09:46:28",
"submission_start": "2015-02-10T09:46:28" "submission_start": "",
}, "expected_error": "cannot be later"
"expected_error": "cannot be earlier"
} }
} }
\ No newline at end of file
{
"no-dates": {
"assessments_list": [
{
"name": "peer-assessment",
"start": "",
"due": "",
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment",
"due": "",
"start": ""
}
],
"results": [
{
"name": "peer-assessment",
"start": null,
"due": null,
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment",
"due": null,
"start": null
}
]
},
"student-training": {
"assessments_list": [
{
"name": "student-training",
"start": "",
"due": "",
"examples": "<example><answer>ẗëṡẗ äṅṡẅëṛ</answer><select criterion=\"Test criterion\" option=\"Yes\" /><select criterion=\"Another test criterion\" option=\"No\" /></example><example><answer>äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ</answer><select criterion=\"Another test criterion\" option=\"Yes\" /><select criterion=\"Test criterion\" option=\"No\" /></example>"
},
{
"name": "peer-assessment",
"start": "",
"due": "",
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment",
"due": "",
"start": ""
}
],
"results": [
{
"name": "student-training",
"due": null,
"start": null,
"examples": [
{
"answer": "ẗëṡẗ äṅṡẅëṛ",
"options_selected": [
{
"criterion": "Test criterion",
"option": "Yes"
},
{
"criterion": "Another test criterion",
"option": "No"
}
]
},
{
"answer": "äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ",
"options_selected": [
{
"criterion": "Another test criterion",
"option": "Yes"
},
{
"criterion": "Test criterion",
"option": "No"
}
]
}
]
},
{
"name": "peer-assessment",
"start": null,
"due": null,
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment",
"due": null,
"start": null
}
]
},
"date-parsing": {
"assessments_list": [
{
"name": "student-training",
"start": "2014-10-10T01:00:01",
"due": "",
"examples": "<example><answer>ẗëṡẗ äṅṡẅëṛ</answer><select criterion=\"Test criterion\" option=\"Yes\" /><select criterion=\"Another test criterion\" option=\"No\" /></example><example><answer>äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ</answer><select criterion=\"Another test criterion\" option=\"Yes\" /><select criterion=\"Test criterion\" option=\"No\" /></example>"
},
{
"name": "peer-assessment",
"start": "",
"due": "2015-01-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment",
"due": "2015-01-01T00:00:00",
"start": ""
}
],
"results": [
{
"name": "student-training",
"due": null,
"start": "2014-10-10T01:00:01",
"examples": [
{
"answer": "ẗëṡẗ äṅṡẅëṛ",
"options_selected": [
{
"criterion": "Test criterion",
"option": "Yes"
},
{
"criterion": "Another test criterion",
"option": "No"
}
]
},
{
"answer": "äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ",
"options_selected": [
{
"criterion": "Another test criterion",
"option": "Yes"
},
{
"criterion": "Test criterion",
"option": "No"
}
]
}
]
},
{
"name": "peer-assessment",
"start": null,
"due": "2015-01-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment",
"due": "2015-01-01T00:00:00",
"start": null
}
]
}
}
\ No newline at end of file
{
"date-parsing-due": {
"assessments_list": [
{
"name": "student-training",
"start": "2014-10-10T01:00:01",
"due": "",
"examples": "<examples><example><answer>ẗëṡẗ äṅṡẅëṛ</answer><select criterion=\"Test criterion\" option=\"Yes\" /><select criterion=\"Another test criterion\" option=\"No\" /></example><example><answer>äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ</answer><select criterion=\"Another test criterion\" option=\"Yes\" /><select criterion=\"Test criterion\" option=\"No\" /></example></examples>"
},
{
"name": "peer-assessment",
"start": "",
"due": "2015-01-01T00:00:HI",
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment",
"due": "2015-014-01",
"start": ""
}
]
},
"date-parsing-start": {
"assessments_list": [
{
"name": "peer-assessment",
"start": "2014-13-13T00:00:00",
"due": "",
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment",
"due": "",
"start": ""
}
]
},
"no-answers-in-examples": {
"assessments_list": [
{
"name": "student-training",
"start": "",
"due": "",
"examples": "<example><select criterion=\"Test criterion\" option=\"Yes\" /><select criterion=\"Another test criterion\" option=\"No\" /></example><example><answer>äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ</answer><select criterion=\"Another test criterion\" option=\"Yes\" /><select criterion=\"Test criterion\" option=\"No\" /></example>"
},
{
"name": "peer-assessment",
"start": "",
"due": "",
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment",
"due": "",
"start": ""
}
]
},
"must_grade": {
"assessments_list": [
{
"name": "peer-assessment",
"start": "",
"due": "",
"must_grade": "Not a number fool!",
"must_be_graded_by": 3
},
{
"name": "self-assessment",
"due": "",
"start": ""
}
]
},
"must_be_graded_by": {
"assessments_list": [
{
"name": "peer-assessment",
"start": "",
"due": "",
"must_grade": 3,
"must_be_graded_by": "Not a number fool!"
},
{
"name": "self-assessment",
"due": "",
"start": ""
}
]
}
}
\ No newline at end of file
...@@ -12,17 +12,24 @@ ...@@ -12,17 +12,24 @@
"</rubric>" "</rubric>"
], ],
"prompt": "My new prompt.", "prompt": "My new prompt.",
"settings": { "submission_due": "4014-02-27T09:46:28",
"title": "My new title.", "submission_start": "4014-02-10T09:46:28",
"assessments": [ "title": "My new title.",
"<assessments>", "assessments": [
"<assessment name=\"peer-assessment\" must_grade=\"5\" must_be_graded_by=\"3\" />", {
"<assessment name=\"self-assessment\" />", "name": "peer-assessment",
"</assessments>" "must_grade": 5,
], "must_be_graded_by": 3,
"submission_due": "4014-02-27T09:46:28", "start": "",
"submission_start": "4014-02-10T09:46:28" "due": "4014-03-10T00:00:00"
}, },
{
"name": "self-assessment",
"start": "",
"due": ""
}
],
"expected-assessment": "peer-assessment", "expected-assessment": "peer-assessment",
"expected-criterion-prompt": "Test criterion prompt" "expected-criterion-prompt": "Test criterion prompt"
} }
......
...@@ -33,8 +33,12 @@ class StudioViewTest(XBlockHandlerTestCase): ...@@ -33,8 +33,12 @@ class StudioViewTest(XBlockHandlerTestCase):
rubric = etree.fromstring(resp['rubric']) rubric = etree.fromstring(resp['rubric'])
self.assertEqual(rubric.tag, 'rubric') self.assertEqual(rubric.tag, 'rubric')
assessments = etree.fromstring(resp['settings']['assessments']) # Verify that every assessment in the list of assessments has a name.
self.assertEqual(assessments.tag, 'assessments') for assessment_dict in resp['assessments']:
self.assertTrue(assessment_dict.get('name', False))
if assessment_dict.get('name') == 'student-training':
examples = etree.fromstring(assessment_dict['examples'])
self.assertEqual(examples.tag, 'examples')
@mock.patch('openassessment.xblock.xml.serialize_rubric_to_xml_str') @mock.patch('openassessment.xblock.xml.serialize_rubric_to_xml_str')
@scenario('data/basic_scenario.xml') @scenario('data/basic_scenario.xml')
...@@ -52,7 +56,6 @@ class StudioViewTest(XBlockHandlerTestCase): ...@@ -52,7 +56,6 @@ class StudioViewTest(XBlockHandlerTestCase):
def test_update_xblock(self, xblock, data): def test_update_xblock(self, xblock, data):
# First, parse XML data into a single string. # First, parse XML data into a single string.
data['rubric'] = "".join(data['rubric']) data['rubric'] = "".join(data['rubric'])
data['settings']['assessments'] = "".join(data['settings']['assessments'])
xblock.published_date = None xblock.published_date = None
# Test that we can update the xblock with the expected configuration. # Test that we can update the xblock with the expected configuration.
request = json.dumps(data) request = json.dumps(data)
...@@ -66,7 +69,7 @@ class StudioViewTest(XBlockHandlerTestCase): ...@@ -66,7 +69,7 @@ class StudioViewTest(XBlockHandlerTestCase):
# Check that the XBlock fields were updated # Check that the XBlock fields were updated
# We don't need to be exhaustive here, because we have other unit tests # We don't need to be exhaustive here, because we have other unit tests
# that verify this extensively. # that verify this extensively.
self.assertEqual(xblock.title, data['settings']['title']) self.assertEqual(xblock.title, data['title'])
self.assertEqual(xblock.prompt, data['prompt']) self.assertEqual(xblock.prompt, data['prompt'])
self.assertEqual(xblock.rubric_assessments[0]['name'], data['expected-assessment']) self.assertEqual(xblock.rubric_assessments[0]['name'], data['expected-assessment'])
self.assertEqual(xblock.rubric_criteria[0]['prompt'], data['expected-criterion-prompt']) self.assertEqual(xblock.rubric_criteria[0]['prompt'], data['expected-criterion-prompt'])
...@@ -76,7 +79,6 @@ class StudioViewTest(XBlockHandlerTestCase): ...@@ -76,7 +79,6 @@ class StudioViewTest(XBlockHandlerTestCase):
def test_update_context_post_release(self, xblock, data): def test_update_context_post_release(self, xblock, data):
# First, parse XML data into a single string. # First, parse XML data into a single string.
data['rubric'] = "".join(data['rubric']) data['rubric'] = "".join(data['rubric'])
data['settings']['assessments'] = "".join(data['settings']['assessments'])
# XBlock start date defaults to already open, # XBlock start date defaults to already open,
# so we should get an error when trying to update anything that change the number of points # so we should get an error when trying to update anything that change the number of points
...@@ -93,9 +95,6 @@ class StudioViewTest(XBlockHandlerTestCase): ...@@ -93,9 +95,6 @@ class StudioViewTest(XBlockHandlerTestCase):
if 'rubric' in data: if 'rubric' in data:
data['rubric'] = "".join(data['rubric']) data['rubric'] = "".join(data['rubric'])
if 'settings' in data and 'assessments' in data['settings']:
data['settings']['assessments'] = "".join(data['settings']['assessments'])
xblock.published_date = None xblock.published_date = None
resp = self.request(xblock, 'update_editor_context', json.dumps(data), response_format='json') resp = self.request(xblock, 'update_editor_context', json.dumps(data), response_format='json')
...@@ -107,7 +106,6 @@ class StudioViewTest(XBlockHandlerTestCase): ...@@ -107,7 +106,6 @@ class StudioViewTest(XBlockHandlerTestCase):
def test_update_rubric_invalid(self, xblock, data): def test_update_rubric_invalid(self, xblock, data):
# First, parse XML data into a single string. # First, parse XML data into a single string.
data['rubric'] = "".join(data['rubric']) data['rubric'] = "".join(data['rubric'])
data['settings']['assessments'] = "".join(data['settings']['assessments'])
request = json.dumps(data) request = json.dumps(data)
......
...@@ -11,6 +11,7 @@ import dateutil.parser ...@@ -11,6 +11,7 @@ import dateutil.parser
from django.test import TestCase from django.test import TestCase
import ddt import ddt
from openassessment.xblock.openassessmentblock import OpenAssessmentBlock from openassessment.xblock.openassessmentblock import OpenAssessmentBlock
from openassessment.xblock.studio_mixin import parse_assessment_dictionaries
from openassessment.xblock.xml import ( from openassessment.xblock.xml import (
serialize_content, parse_from_xml_str, parse_rubric_xml_str, serialize_content, parse_from_xml_str, parse_rubric_xml_str,
parse_examples_xml_str, parse_assessments_xml_str, parse_examples_xml_str, parse_assessments_xml_str,
...@@ -358,6 +359,25 @@ class TestParseAssessmentsFromXml(TestCase): ...@@ -358,6 +359,25 @@ class TestParseAssessmentsFromXml(TestCase):
self.assertEqual(assessments, data['assessments']) self.assertEqual(assessments, data['assessments'])
@ddt.ddt
class TestParseAssessmentsFromDictionaries(TestCase):
@ddt.file_data('data/parse_assessment_dicts.json')
def test_parse_assessments_dictionary(self, data):
config = parse_assessment_dictionaries(data['assessments_list'])
if len(config) == 0:
# Prevents this test from passing benignly if parse_assessment_dictionaries returns []
self.assertTrue(False)
for config_assessment, correct_assessment in zip(config, data['results']):
self.assertEqual(config_assessment, correct_assessment)
@ddt.file_data('data/parse_assessment_dicts_error.json')
def test_parse_assessments_dictionary_error(self, data):
with self.assertRaises(UpdateFromXmlError):
parse_assessment_dictionaries(data['assessments_list'])
@ddt.ddt @ddt.ddt
class TestUpdateFromXml(TestCase): class TestUpdateFromXml(TestCase):
...@@ -399,3 +419,4 @@ class TestUpdateFromXml(TestCase): ...@@ -399,3 +419,4 @@ class TestUpdateFromXml(TestCase):
def test_parse_from_xml_error(self, data): def test_parse_from_xml_error(self, data):
with self.assertRaises(UpdateFromXmlError): with self.assertRaises(UpdateFromXmlError):
parse_from_xml_str("".join(data['xml'])) parse_from_xml_str("".join(data['xml']))
...@@ -295,7 +295,7 @@ def validator(oa_block, strict_post_release=True): ...@@ -295,7 +295,7 @@ def validator(oa_block, strict_post_release=True):
# Dates # Dates
submission_dates = [(submission_dict['start'], submission_dict['due'])] submission_dates = [(submission_dict['start'], submission_dict['due'])]
assessment_dates = [(asmnt['start'], asmnt['due']) for asmnt in assessments] assessment_dates = [(asmnt.get('start'), asmnt.get('due')) for asmnt in assessments]
success, msg = validate_dates(oa_block.start, oa_block.due, submission_dates + assessment_dates) success, msg = validate_dates(oa_block.start, oa_block.due, submission_dates + assessment_dates)
if not success: if not success:
return (False, msg) return (False, msg)
......
...@@ -160,7 +160,7 @@ def serialize_rubric(rubric_root, oa_block, include_prompt=True): ...@@ -160,7 +160,7 @@ def serialize_rubric(rubric_root, oa_block, include_prompt=True):
feedback_prompt.text = unicode(oa_block.rubric_feedback_prompt) feedback_prompt.text = unicode(oa_block.rubric_feedback_prompt)
def parse_date(date_str): def parse_date(date_str, name=""):
""" """
Attempt to parse a date string into ISO format (without milliseconds) Attempt to parse a date string into ISO format (without milliseconds)
Returns `None` if this cannot be done. Returns `None` if this cannot be done.
...@@ -168,6 +168,9 @@ def parse_date(date_str): ...@@ -168,6 +168,9 @@ def parse_date(date_str):
Args: Args:
date_str (str): The date string to parse. date_str (str): The date string to parse.
Kwargs:
name (str): the name to return in an error to the origin of the call if an error occurs.
Returns: Returns:
unicode in ISO format (without milliseconds) if the date string is unicode in ISO format (without milliseconds) if the date string is
parse-able. None if parsing fails. parse-able. None if parsing fails.
...@@ -184,8 +187,9 @@ def parse_date(date_str): ...@@ -184,8 +187,9 @@ def parse_date(date_str):
return unicode(formatted_date) return unicode(formatted_date)
except (ValueError, TypeError): except (ValueError, TypeError):
msg = ( msg = (
'The format for the given date ({}) is invalid. Make sure the date is formatted as YYYY-MM-DDTHH:MM:SS.' 'The format of the given date ({date}) for the {name} is invalid. '
).format(date_str) 'Make sure the date is formatted as YYYY-MM-DDTHH:MM:SS.'
).format(date=date_str, name=name)
raise UpdateFromXmlError(_(msg)) raise UpdateFromXmlError(_(msg))
...@@ -402,21 +406,17 @@ def parse_assessments_xml(assessments_root): ...@@ -402,21 +406,17 @@ def parse_assessments_xml(assessments_root):
# Assessment start # Assessment start
if 'start' in assessment.attrib: if 'start' in assessment.attrib:
parsed_start = parse_date(assessment.get('start')) parsed_start = parse_date(assessment.get('start'), name="{} start date".format(assessment_dict['name']))
if parsed_start is not None: if parsed_start is not None:
assessment_dict['start'] = parsed_start assessment_dict['start'] = parsed_start
else:
raise UpdateFromXmlError(_('The date format in the "start" attribute is invalid. Make sure the date is formatted as YYYY-MM-DDTHH:MM:SS.'))
else: else:
assessment_dict['start'] = None assessment_dict['start'] = None
# Assessment due # Assessment due
if 'due' in assessment.attrib: if 'due' in assessment.attrib:
parsed_start = parse_date(assessment.get('due')) parsed_start = parse_date(assessment.get('due'), name="{} due date".format(assessment_dict['name']))
if parsed_start is not None: if parsed_start is not None:
assessment_dict['due'] = parsed_start assessment_dict['due'] = parsed_start
else:
raise UpdateFromXmlError(_('The date format in the "due" attribute is invalid. Make sure the date is formatted as YYYY-MM-DDTHH:MM:SS.'))
else: else:
assessment_dict['due'] = None assessment_dict['due'] = None
...@@ -649,13 +649,13 @@ def parse_from_xml(root): ...@@ -649,13 +649,13 @@ def parse_from_xml(root):
# Set it to None by default; we will update it to the latest start date later on # Set it to None by default; we will update it to the latest start date later on
submission_start = None submission_start = None
if 'submission_start' in root.attrib: if 'submission_start' in root.attrib:
submission_start = parse_date(unicode(root.attrib['submission_start'])) submission_start = parse_date(unicode(root.attrib['submission_start']), name="submission start date")
# Retrieve the due date for the submission # Retrieve the due date for the submission
# Set it to None by default; we will update it to the earliest deadline later on # Set it to None by default; we will update it to the earliest deadline later on
submission_due = None submission_due = None
if 'submission_due' in root.attrib: if 'submission_due' in root.attrib:
submission_due = parse_date(unicode(root.attrib['submission_due'])) submission_due = parse_date(unicode(root.attrib['submission_due']), name="submission due date")
# Retrieve the title # Retrieve the title
title_el = root.find('title') title_el = root.find('title')
...@@ -765,8 +765,12 @@ def parse_examples_xml_str(xml): ...@@ -765,8 +765,12 @@ def parse_examples_xml_str(xml):
""" """
xml = u"<data>" + xml + u"</data>" # This should work for both wrapped and unwrapped examples. Based on our final configuration (and tests)
return parse_examples_xml(list(_unicode_to_xml(xml))) # we should handle both cases gracefully.
if "<examples>" not in xml:
xml = u"<examples>" + xml + u"</examples>"
return parse_examples_xml(list(_unicode_to_xml(xml).findall('example')))
def _unicode_to_xml(xml): def _unicode_to_xml(xml):
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment