Commit 2e4c9ef4 by Usman Khalid

Merge pull request #655 from edx/usman/tnl708-multiple-prompts-studio

Multiple prompts 2: Studio changes.
parents 3f816d0a ac69321b
......@@ -25,9 +25,7 @@
</a>
</div>
<div id="oa_prompt_editor_wrapper" class="oa_editor_content_wrapper">
<textarea id="openassessment_prompt_editor" maxlength="10000">{{ prompt }}</textarea>
</div>
{% include "openassessmentblock/edit/oa_edit_prompts.html" %}
{% include "openassessmentblock/edit/oa_edit_rubric.html" %}
......
{% load i18n %}
{% spaceless %}
<li class="openassessment_criterion is-collapsible" data-criterion="{{ criterion_name }}">
<div class="openassessment_criterion_header view-outline">
<div class="openassessment_container_header openassessment_criterion_header view-outline">
<a class="action expand-collapse collapse"><i class="icon-caret-down ui-toggle-expansion"></i></a>
<div class="openassessment_criterion_header_title_box">
<h6 class="openassessment_criterion_header_title">{% trans "Criterion" %}</h6>
<p class="openassessment_criterion_guide">{% trans "You cannot delete a criterion after the assignment has been released." %}</p>
<div class="openassessment_container_header_title_box openassessment_criterion_header_title_box">
<h6 class="openassessment_container_header_title openassessment_criterion_header_title">{% trans "Criterion" %}</h6>
<p class="openassessment_container_guide openassessment_criterion_guide">{% trans "You cannot delete a criterion after the assignment has been released." %}</p>
</div>
<div class="openassessment_criterion_remove_button"><h2>{% trans "Remove" %}</h2></div>
<div class="openassessment_container_remove_button openassessment_criterion_remove_button"><h2>{% trans "Remove" %}</h2></div>
</div>
<div class="openassessment_criterion_body wrapper-comp-settings">
<input type="hidden" class="openassessment_criterion_name" value="{{ criterion_name }}" />
......
{% load i18n %}
{% spaceless %}
<li class="openassessment_prompt is-collapsible" data-prompt="{{ prompt_uuid }}">
<div class="openassessment_container_header openassessment_prompt_header view-outline">
<a class="action expand-collapse collapse"><i class="icon-caret-down ui-toggle-expansion"></i></a>
<div class="openassessment_container_header_title_box openassessment_prompt_header_title_box">
<h6 class="openassessment_container_header_title openassessment_prompt_header_title">{% trans "Prompt" %}</h6>
<p class="openassessment_container_guide openassessment_prompt_guide">{% trans "You cannot delete a prompt after the assignment has been released." %}</p>
</div>
<div class="openassessment_container_remove_button openassessment_prompt_remove_button"><h2>{% trans "Remove" %}</h2></div>
</div>
<div class="openassessment_prompt_body wrapper-comp-settings">
<input type="hidden" class="openassessment_prompt_uuid" value="{{ prompt_uuid }}" />
<ul class="list-input settings-list">
<li class="field comp-setting-entry openassessment_prompt_description_wrapper">
<div class="wrapper-comp-settings">
<textarea class="openassessment_prompt_description setting-input" maxlength="10000">{{ prompt_description }}</textarea>
</div>
</li>
</ul>
</div>
</li>
{% endspaceless %}
{% load i18n %}
{% spaceless %}
<div id="oa_prompts_editor_wrapper" class="oa_editor_content_wrapper">
<div id="openassessment_prompt_template" class="is--hidden">
{% include "openassessmentblock/edit/oa_edit_prompt.html" with prompt_uuid="" prompt_description="" %}
</div>
<div id="openassessment_prompts_instructions" class="openassessment_tab_instructions">
<p class="openassessment_description">
{% trans "Prompts. Replace the sample text with your own text. For more information, see the ORA documentation." %}
</p>
</div>
<ul id="openassessment_prompts_list">
{% for prompt in prompts %}
{% include "openassessmentblock/edit/oa_edit_prompt.html" with prompt_uuid=prompt.uuid prompt_description=prompt.description %}
{% endfor %}
</ul>
<div id="openassessment_prompts_add_prompt" class="openassessment_container_add_button">
<h6>
{% trans "Add Prompt" %}
</h6>
</div>
</div>
{% endspaceless %}
......@@ -9,7 +9,7 @@
{% include "openassessmentblock/edit/oa_edit_option.html" with option_name="" option_label="" option_points=1 option_explanation="" %}
</div>
<div id="openassessment_rubric_instructions">
<div id="openassessment_rubric_instructions" class="openassessment_tab_instructions">
<p class="openassessment_description">
{% trans "Rubrics are made up of criteria, which usually contain one or more options. Each option has a point value. This template contains two sample criteria and their options. Replace the sample text with your own text. For more information, see the ORA documentation." %}
</p>
......@@ -21,7 +21,7 @@
{% endfor %}
</ul>
<div id="openassessment_rubric_add_criterion">
<div id="openassessment_rubric_add_criterion" class="openassessment_container_add_button">
<h6>
{% trans "Add Criterion" %}
</h6>
......
......@@ -33,6 +33,11 @@
<ol id="openassessment_training_example_criterion_template" class="is--hidden">
{% include "openassessmentblock/edit/oa_training_example_criterion.html" %}
</ol>
<ol id="openassessment_training_example_part_template" class="is--hidden">
<li class="openassessment_training_example_essay_part">
<textarea maxlength="100000"></textarea>
</li>
</ol>
</div>
</li>
......
......@@ -28,7 +28,13 @@
<div class="openassessment_training_example_essay_wrapper">
<h2>{% trans "Response" %}</h2>
<textarea class="openassessment_training_example_essay" maxlength="100000">{{ example.answer }}</textarea>
<ol class="openassessment_training_example_essay">
{% for part in example.answer.parts %}
<li class="openassessment_training_example_essay_part">
<textarea maxlength="100000">{{ part.text }}</textarea>
</li>
{% endfor %}
</ol>
</div>
</div>
</li>
......
......@@ -66,7 +66,11 @@ VALID_ASSESSMENT_TYPES = [
# Schema definition for an update from the Studio JavaScript editor.
EDITOR_UPDATE_SCHEMA = Schema({
Required('prompt'): utf8_validator,
Required('prompts'): [
Schema({
Required('description'): utf8_validator,
})
],
Required('title'): utf8_validator,
Required('feedback_prompt'): utf8_validator,
Required('feedback_default_text'): utf8_validator,
......@@ -84,7 +88,7 @@ EDITOR_UPDATE_SCHEMA = Schema({
'must_be_graded_by': All(int, Range(min=0)),
'examples': [
Schema({
Required('answer'): utf8_validator,
Required('answer'): [utf8_validator],
Required('options_selected'): [
Schema({
Required('criterion'): utf8_validator,
......
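For anyone tracing the schema change above: a minimal sketch of an editor payload that the updated EDITOR_UPDATE_SCHEMA now expects, with prompts as a list of description objects and each training-example answer as a list of strings (one per prompt). Field values here are illustrative, and the full schema still requires title, feedback_prompt, and the other fields omitted below.

// Sketch of the changed fields in an update_editor_context payload.
var editorUpdate = {
    prompts: [
        { description: "First prompt text" },
        { description: "Second prompt text" }
    ],
    assessments: [{
        name: "student-training",
        examples: [{
            // One answer string per prompt:
            answer: ["Answer for prompt 1", "Answer for prompt 2"],
            options_selected: [
                { criterion: "Test criterion", option: "Yes" }
            ]
        }]
    }]
};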
This source diff could not be displayed because it is too large.
This source diff could not be displayed because it is too large.
......@@ -101,7 +101,18 @@
{
"template": "openassessmentblock/student_training/student_training.html",
"context": {
"training_essay": "My special essay.",
"training_essay": {
"answer": {
"parts": [
{
"prompt": {
"description": "Given the state of the world today, what do you think should be done to combat poverty?"
},
"text": "My special essay."
}
]
}
},
"training_rubric": {
"criteria": [
{
......@@ -403,7 +414,7 @@
{
"template": "openassessmentblock/edit/oa_edit.html",
"context": {
"prompt": "How much do you like waffles?",
"prompts": [{ "description": "How much do you like waffles?" }, { "description": "How much do you like waffles 2?" }],
"title": "The most important of all questions.",
"submission_start": "2014-01-02T12:15",
"submission_due": "2014-10-01T04:53",
......@@ -484,7 +495,7 @@
{
"template": "openassessmentblock/edit/oa_edit.html",
"context": {
"prompt": "Test prompt",
"prompts": [{ "description": "How much do you like waffles?" }, { "description": "How much do you like waffles 2?" }],
"title": "Test title",
"submission_start": "2014-01-1T10:00:00",
"submission_due": "2014-10-1T10:00:00",
......@@ -528,7 +539,12 @@
"training": {
"examples": [
{
"answer": "Test answer",
"answer": {
"parts": [
{ "text": "Test answer 1"},
{ "text": "Test answer 2"}
]
},
"criteria": [
{
"name": "criterion_with_two_options",
......@@ -569,7 +585,12 @@
}
],
"template": {
"answer": "",
"answer": {
"parts": [
{ "text": ""},
{ "text": ""}
]
},
"criteria": [
{
"name": "criterion_with_two_options",
......
......@@ -30,7 +30,7 @@ describe("OpenAssessment.Server", function() {
);
};
var PROMPT = "Hello this is the prompt yes.";
var PROMPTS = [{"description": "Hello this is the prompt yes."}];
var FEEDBACK_PROMPT = "Prompt for feedback";
var FEEDBACK_DEFAULT_TEXT = "Default feedback response text";
......@@ -253,7 +253,7 @@ describe("OpenAssessment.Server", function() {
it("updates the XBlock's editor context definition", function() {
stubAjax(true, { success: true });
server.updateEditorContext({
prompt: PROMPT,
prompts: PROMPTS,
feedbackPrompt: FEEDBACK_PROMPT,
feedback_default_text: FEEDBACK_DEFAULT_TEXT,
title: TITLE,
......@@ -268,7 +268,7 @@ describe("OpenAssessment.Server", function() {
expect($.ajax).toHaveBeenCalledWith({
type: "POST", url: '/update_editor_context',
data: JSON.stringify({
prompt: PROMPT,
prompts: PROMPTS,
feedback_prompt: FEEDBACK_PROMPT,
feedback_default_text: FEEDBACK_DEFAULT_TEXT,
title: TITLE,
......
......@@ -43,7 +43,7 @@ describe("OpenAssessment.StudioView", function() {
var EXPECTED_SERVER_DATA = {
title: "The most important of all questions.",
prompt: "How much do you like waffles?",
prompts: [{"description": "How much do you like waffles?"}, {"description": "How much do you like waffles 2?"}],
feedbackPrompt: "",
submissionStart: "2014-01-02T12:15",
submissionDue: "2014-10-01T04:53",
......@@ -145,7 +145,7 @@ describe("OpenAssessment.StudioView", function() {
// Top-level attributes
expect(server.receivedData.title).toEqual(EXPECTED_SERVER_DATA.title);
expect(server.receivedData.prompt).toEqual(EXPECTED_SERVER_DATA.prompt);
expect(server.receivedData.prompts).toEqual(EXPECTED_SERVER_DATA.prompts);
expect(server.receivedData.feedbackPrompt).toEqual(EXPECTED_SERVER_DATA.feedbackPrompt);
expect(server.receivedData.submissionStart).toEqual(EXPECTED_SERVER_DATA.submissionStart);
expect(server.receivedData.submissionDue).toEqual(EXPECTED_SERVER_DATA.submissionDue);
......
......@@ -180,7 +180,7 @@ describe("OpenAssessment edit assessment views", function() {
expect(view.description()).toEqual({
examples: [
{
answer: 'Test answer',
answer: ['Test answer 1', 'Test answer 2'],
options_selected: [
{
criterion: 'criterion_with_two_options',
......@@ -197,7 +197,7 @@ describe("OpenAssessment edit assessment views", function() {
expect(view.description()).toEqual({
examples: [
{
answer: 'Test answer',
answer: ['Test answer 1', 'Test answer 2'],
options_selected: [
{
criterion: 'criterion_with_two_options',
......@@ -206,7 +206,7 @@ describe("OpenAssessment edit assessment views", function() {
]
},
{
answer: '',
answer: ['', ''],
options_selected: [
{
criterion: 'criterion_with_two_options',
......
......@@ -2,7 +2,20 @@
Tests for OpenAssessment prompt editing view.
**/
describe("OpenAssessment.EditPromptView", function() {
describe("OpenAssessment.EditPromptViews", function() {
// Use a stub notifier implementation that simply stores
// the notifications it receives.
var notifier = null;
var StubNotifier = function() {
this.notifications = [];
this.notificationFired = function(name, data) {
this.notifications.push({
name: name,
data: data
});
};
};
var view = null;
......@@ -12,13 +25,39 @@ describe("OpenAssessment.EditPromptView", function() {
// Create the view
var element = $("#oa_prompt_editor_wrapper").get(0);
view = new OpenAssessment.EditPromptView(element);
notifier = new StubNotifier();
view = new OpenAssessment.EditPromptsView(element, notifier);
});
it("reads prompts from the editor", function() {
// This assumes a particular structure of the DOM,
// which is set by the HTML fixture.
var prompts = view.promptsDefinition();
expect(prompts.length).toEqual(2);
expect(prompts[0]).toEqual({
"description": "How much do you like waffles?"
});
});
it("sets and loads prompt text", function() {
view.promptText("");
expect(view.promptText()).toEqual("");
view.promptText("This is a test prompt!");
expect(view.promptText()).toEqual("This is a test prompt!");
it("creates new prompts", function() {
// Delete all existing prompts
// Then add new prompts (created from a client-side template)
$.each(view.getAllPrompts(), function() { view.removePrompt(this); });
view.addPrompt();
view.addPrompt();
view.addPrompt();
var prompts = view.promptsDefinition();
expect(prompts.length).toEqual(3);
expect(prompts[0]).toEqual({
description: ""
});
expect(prompts[1]).toEqual({
description: ""
});
});
});
......@@ -445,7 +445,7 @@ if (typeof OpenAssessment.Server == "undefined" || !OpenAssessment.Server) {
updateEditorContext: function(kwargs) {
var url = this.url('update_editor_context');
var payload = JSON.stringify({
prompt: kwargs.prompt,
prompts: kwargs.prompts,
feedback_prompt: kwargs.feedbackPrompt,
feedback_default_text: kwargs.feedback_default_text,
title: kwargs.title,
......
......@@ -65,6 +65,113 @@ OpenAssessment.ItemUtilities = {
};
/**
The Prompt Class is used to construct and maintain references to prompts from within a prompts
container object. Constructs a new Prompt element.
Args:
element (OpenAssessment.Container): The container that the prompt is a member of.
notifier (OpenAssessment.Notifier): Used to send notifications of updates to prompts.
Returns:
OpenAssessment.Prompt
**/
OpenAssessment.Prompt = function(element, notifier) {
this.element = element;
this.notifier = notifier;
};
OpenAssessment.Prompt.prototype = {
/**
Finds the values currently entered in the prompt's fields, and returns them.
Returns:
object literal of the form:
{
'description': 'Write a nice long essay about anything.'
}
**/
getFieldValues: function () {
var fields = {
description: this.description()
};
return fields;
},
/**
Get or set the description of the prompt.
Args:
text (string, optional): If provided, set the description of the prompt.
Returns:
string
**/
description: function(text) {
var sel = $('.openassessment_prompt_description', this.element);
return OpenAssessment.Fields.stringField(sel, text);
},
addEventListeners: function() {},
/**
Hook into the event handler for addition of a prompt.
*/
addHandler: function (){
this.notifier.notificationFired(
"promptAdd",
{
"index": this.element.index()
}
);
},
/**
Hook into the event handler for removal of a prompt.
*/
removeHandler: function (){
this.notifier.notificationFired(
"promptRemove",
{
"index": this.element.index()
}
);
},
updateHandler: function() {},
/**
Mark validation errors.
Returns:
Boolean indicating whether the prompt is valid.
**/
validate: function() {
return true;
},
/**
Return a list of validation errors visible in the UI.
Mainly useful for testing.
Returns:
list of strings
**/
validationErrors: function() {
return [];
},
/**
Clear all validation errors from the UI.
**/
clearValidationErrors: function() {}
};
/**
The RubricOption Class is used to construct and maintain references to rubric options from within an options
container object. Constructs a new RubricOption element.
......@@ -506,7 +613,7 @@ OpenAssessment.RubricCriterion.prototype = {
OpenAssessment.TrainingExample = function(element){
this.element = element;
this.criteria = $(".openassessment_training_example_criterion_option", this.element);
this.answer = $('.openassessment_training_example_essay', this.element).first();
this.answer = $('.openassessment_training_example_essay_part textarea', this.element);
};
OpenAssessment.TrainingExample.prototype = {
......@@ -527,7 +634,9 @@ OpenAssessment.TrainingExample.prototype = {
).get();
return {
answer: this.answer.prop('value'),
answer: this.answer.map(function() {
return $(this).prop('value');
}).get(),
options_selected: optionsSelected
};
},
......
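A note on the TrainingExample change above: the answer accessor now collects every answer-part textarea, so the value it reports is an array of strings instead of a single string. A usage sketch, assuming a rendered training example is in the DOM and assuming the accessor is the description() method from this file:

// Sketch: reading a multi-part training example answer.
var example = new OpenAssessment.TrainingExample(
    $(".openassessment_training_example").get(0)
);
var desc = example.description();
// desc.answer was a single string; it is now an array, e.g.
// ["text of part one", "text of part two"]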
......@@ -25,9 +25,14 @@ OpenAssessment.StudioView = function(runtime, element, server) {
// Initialize the validation alert
this.alert = new OpenAssessment.ValidationAlert().install();
var studentTrainingListener = new OpenAssessment.StudentTrainingListener();
// Initialize the prompt tab view
this.promptView = new OpenAssessment.EditPromptView(
$("#oa_prompt_editor_wrapper", this.element).get(0)
this.promptsView = new OpenAssessment.EditPromptsView(
$("#oa_prompts_editor_wrapper", this.element).get(0),
new OpenAssessment.Notifier([
studentTrainingListener
])
);
// Initialize the settings tab view
......@@ -57,7 +62,7 @@ OpenAssessment.StudioView = function(runtime, element, server) {
this.rubricView = new OpenAssessment.EditRubricView(
$("#oa_rubric_editor_wrapper", this.element).get(0),
new OpenAssessment.Notifier([
new OpenAssessment.StudentTrainingListener()
studentTrainingListener
])
);
......@@ -185,7 +190,7 @@ OpenAssessment.StudioView.prototype = {
var view = this;
this.server.updateEditorContext({
prompt: view.promptView.promptText(),
prompts: view.promptsView.promptsDefinition(),
feedbackPrompt: view.rubricView.feedbackPrompt(),
feedback_default_text: view.rubricView.feedback_default_text(),
criteria: view.rubricView.criteriaDefinition(),
......@@ -236,7 +241,8 @@ OpenAssessment.StudioView.prototype = {
validate: function() {
var settingsValid = this.settingsView.validate();
var rubricValid = this.rubricView.validate();
return settingsValid && rubricValid;
var promptsValid = this.promptsView.validate();
return settingsValid && rubricValid && promptsValid;
},
/**
......@@ -249,7 +255,9 @@ OpenAssessment.StudioView.prototype = {
**/
validationErrors: function() {
return this.settingsView.validationErrors().concat(
this.rubricView.validationErrors()
this.rubricView.validationErrors().concat(
this.promptsView.validationErrors()
)
);
},
......@@ -259,6 +267,7 @@ OpenAssessment.StudioView.prototype = {
clearValidationErrors: function() {
this.settingsView.clearValidationErrors();
this.rubricView.clearValidationErrors();
this.promptsView.clearValidationErrors();
},
};
......
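The save path above now reads the prompts tab through promptsDefinition() before posting. A sketch of the round trip, with method names taken from this diff and values illustrative:

// Inside StudioView.save (sketch):
var prompts = view.promptsView.promptsDefinition();
// => [{ description: "First prompt" }, { description: "Second prompt" }]
view.server.updateEditorContext({
    prompts: prompts
    // ...title, criteria, assessments, and the other fields as before
});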
......@@ -421,7 +421,7 @@ OpenAssessment.EditStudentTrainingView.prototype = {
{
examples: [
{
answer: "I love pokemon",
answer: ["I love pokemon 1", "I love pokemon 2"],
options_selected: [
{
criterion: "brevity",
......
/**
Dynamically update student training examples based on
changes to the rubric.
changes to the prompts or the rubric.
**/
OpenAssessment.StudentTrainingListener = function() {
this.element = $('#oa_student_training_editor');
......@@ -8,6 +8,28 @@ OpenAssessment.StudentTrainingListener = function() {
};
OpenAssessment.StudentTrainingListener.prototype = {
/**
Add an answer part to the training examples when a prompt is added.
*/
promptAdd: function(data) {
var view = this.element;
$("#openassessment_training_example_part_template")
.children().first()
.clone()
.removeAttr('id')
.toggleClass('is--hidden', false)
.appendTo($(".openassessment_training_example_essay", view));
},
/**
Remove the answer part in the training examples when a prompt is removed.
*/
promptRemove: function(data) {
var view = this.element;
$(".openassessment_training_example_essay li:nth-child(" + (data.index + 1) + ")", view).remove();
},
/**
Event handler for updating training examples when a criterion option has
been updated.
......
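Because the prompts view and the rubric view now share one StudentTrainingListener, prompt edits keep the training-example textareas in sync. The flow, sketched end to end (assumes the Studio editor fixture is loaded):

// A Prompt's addHandler/removeHandler fire these notifications:
var listener = new OpenAssessment.StudentTrainingListener();
var notifier = new OpenAssessment.Notifier([listener]);

notifier.notificationFired("promptAdd", { index: 1 });
// -> clones the hidden part template into each example's essay list.

notifier.notificationFired("promptRemove", { index: 1 });
// -> removes the matching li; nth-child is 1-based, hence index + 1.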
/**
Editing interface for the rubric prompt.
Args:
element (DOM element): The DOM element representing this view.
Returns:
OpenAssessment.EditPromptView
**/
OpenAssessment.EditPromptView = function(element) {
this.element = element;
};
OpenAssessment.EditPromptView.prototype = {
/**
Get or set the text of the prompt.
Args:
text (string, optional): If provided, set the text of the prompt.
Returns:
string
**/
promptText: function(text) {
var sel = $('#openassessment_prompt_editor', this.element);
return OpenAssessment.Fields.stringField(sel, text);
},
};
\ No newline at end of file
/**
Editing interface for the prompts.
Args:
element (DOM element): The DOM element representing this view.
Returns:
OpenAssessment.EditPromptsView
**/
OpenAssessment.EditPromptsView = function(element, notifier) {
this.element = element;
this.promptsContainer = new OpenAssessment.Container(
OpenAssessment.Prompt, {
containerElement: $("#openassessment_prompts_list", this.element).get(0),
templateElement: $("#openassessment_prompt_template", this.element).get(0),
addButtonElement: $("#openassessment_prompts_add_prompt", this.element).get(0),
removeButtonClass: "openassessment_prompt_remove_button",
containerItemClass: "openassessment_prompt",
notifier: notifier
}
);
this.promptsContainer.addEventListeners();
};
OpenAssessment.EditPromptsView.prototype = {
/**
Construct a list of prompt definitions from the editor UI.
Returns:
list of prompt objects
Example usage:
>>> editPromptsView.promptsDefinition();
[
{
description: "Description",
},
...
]
**/
promptsDefinition: function() {
var prompts = this.promptsContainer.getItemValues();
return prompts;
},
/**
Add a new prompt.
Uses a client-side template to create the new prompt.
**/
addPrompt: function() {
this.promptsContainer.add();
},
/**
Remove a prompt.
Args:
item (OpenAssessment.Prompt): The prompt item to remove.
**/
removePrompt: function(item) {
this.promptsContainer.remove(item);
},
/**
Retrieve all prompts.
Returns:
Array of OpenAssessment.Prompt objects.
**/
getAllPrompts: function() {
return this.promptsContainer.getAllItems();
},
/**
Retrieve a prompt item from the prompts.
Args:
index (int): The index of the prompt, starting from 0.
Returns:
OpenAssessment.Prompt or null
**/
getPromptItem: function(index) {
return this.promptsContainer.getItem(index);
},
/**
Mark validation errors.
Returns:
Boolean indicating whether the view is valid.
**/
validate: function() {
return true;
},
/**
Return a list of validation errors visible in the UI.
Mainly useful for testing.
Returns:
list of string
**/
validationErrors: function() {
var errors = [];
return errors;
},
/**
Clear all validation errors from the UI.
**/
clearValidationErrors: function() {}
};
\ No newline at end of file
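A usage sketch for the new view, mirroring the Jasmine spec earlier (assumes the #oa_prompts_editor_wrapper fixture is in the DOM):

var element = $("#oa_prompts_editor_wrapper").get(0);
var view = new OpenAssessment.EditPromptsView(
    element, new OpenAssessment.Notifier([])
);
view.addPrompt();                         // clones the client-side template
view.promptsDefinition();                 // => [{ description: "" }]
view.removePrompt(view.getPromptItem(0)); // removes it again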
......@@ -331,26 +331,6 @@
-moz-transition: height 1s ease-in-out 0;
}
#openassessment_prompt_editor {
width: 100%;
height: 100%;
resize: none;
border: none;
@include border-radius(4px);
padding: 10px;
font-family: $f-sans-serif;
textarea{
font-size: 14px;
border: none;
overflow: auto;
outline: none;
-webkit-box-shadow: none;
-moz-box-shadow: none;
box-shadow: none;
}
}
#openassessment_rubric_editor {
width: 100%;
height: 100%;
......@@ -493,30 +473,19 @@
bottom: 0;
}
#oa_rubric_editor_wrapper{
.wrapper-comp-settings{
display: block;
}
#openassessment_rubric_instructions{
.openassessment_tab_instructions {
background-color: $edx-gray-t1;
border-bottom: 1px solid $edx-gray-d1;
padding: 10px;
}
.openassessment_criterion {
.openassessment_criterion_remove_button{
@extend .openassessment_rubric_remove_button;
}
.openassessment_criterion_header {
margin: 10px;
padding-bottom: 5px;
.openassessment_container_header {
margin: ($baseline-v/2) ($baseline-h/4);
padding-bottom: $baseline-v/4;
border-bottom: 1px solid;
overflow: auto;
color: $edx-gray-d2;
}
.action.expand-collapse {
@include float(left);
......@@ -530,30 +499,112 @@
}
}
.openassessment_criterion_header_title_box {
.openassessment_container_header_title_box {
@include float(left);
width: 80%;
display: inline-block;
}
.openassessment_criterion_header_title {
@include float(left);
.openassessment_container_header_title {
text-transform: uppercase;
width: 50%;
display: inline-block;
cursor: default;
padding-top: 2px;
}
.openassessment_criterion_guide {
.openassessment_container_guide {
@extend %t-small;
}
.openassessment_criterion_header_remove {
@extend .openassessment_rubric_remove_button;
.openassessment_container_remove_button{
cursor: pointer;
h2:after{
font-family: FontAwesome;
content: "\f00d";
display: inline-block;
color: inherit;
margin: 0 5px;
}
h2{
text-transform: uppercase;
font-size: 80%;
@include float(right);
display: inline-block;
@include padding(3px, 8px, 3px, 13px);
}
@include float(right);
}
.openassessment_container_remove_button:hover{
background-color: $edx-gray-d2;
border-radius: 4px;
color: white;
}
.openassessment_container_add_button {
h6:before{
font-family: "FontAwesome";
display: inline-block;
@include margin-left(5px);
@include margin-right(10px);
width: auto;
height: auto;
content: "\f067";
}
background-color: white;
border: 1px solid;
border-radius: 4px;
@include text-align(center);
color: #009fe6;
padding: 10px;
margin: 15px 10px;
}
.openassessment_container_add_button.openassessment_highlighted_field {
color: red;
border-width: 2px;
}
.openassessment_container_add_button:hover {
color: white;
background-color: #009fe6;
cursor: pointer;
}
#oa_prompts_editor_wrapper {
.wrapper-comp-settings {
display: block;
}
.openassessment_prompt_description {
width: 100%;
min-height: 100px;
resize: none;
border: 1px solid #b2b2b2;
border-radius: 4px;
padding: 10px;
font-family: $f-sans-serif;
font-size: 14px;
overflow: auto;
outline: none;
-webkit-box-shadow: none;
-moz-box-shadow: none;
box-shadow: none;
}
.openassessment_prompt_description.openassessment_highlighted_field {
border: 2px solid red;
}
}
#oa_rubric_editor_wrapper{
.wrapper-comp-settings{
display: block;
}
.openassessment_criterion {
.openassessment_criterion_add_option {
h2:before {
font-family: FontAwesome;
......@@ -653,7 +704,7 @@
@include padding(5px, 5px, 5px, 15px);
.openassessment_criterion_option_remove_button{
@extend .openassessment_rubric_remove_button;
@extend .openassessment_container_remove_button;
}
.openassessment_option_header{
......@@ -670,7 +721,7 @@
}
.openassessment_option_header_remove{
@extend .openassessment_rubric_remove_button;
@extend .openassessment_container_remove_button;
}
}
......@@ -767,30 +818,6 @@
outline: 0;
}
.openassessment_rubric_remove_button{
cursor: pointer;
h2:after{
font-family: FontAwesome;
content: "\f00d";
display: inline-block;
color: inherit;
margin: 0 5px;
}
h2{
text-transform: uppercase;
font-size: 80%;
@include float(right);
display: inline-block;
@include padding(3px, 8px, 3px, 13px);
}
@include float(right);
}
.openassessment_rubric_remove_button:hover{
background-color: $edx-gray-d2;
border-radius: 4px;
color: white;
}
#openassessment_rubric_feedback_wrapper{
padding: 0;
......@@ -813,39 +840,6 @@
@include float(right);
}
}
#openassessment_rubric_add_criterion{
h6:before{
font-family: "FontAwesome";
display: inline-block;
@include margin-left(5px);
@include margin-right(10px);
width: auto;
height: auto;
content: "\f067";
}
background-color: white;
border: 1px solid;
border-radius: 4px;
@include text-align(center);
color: #009fe6;
padding: 10px;
margin: 15px 10px;
}
#openassessment_rubric_add_criterion.openassessment_highlighted_field{
color: red;
border-width: 2px;
}
#openassessment_rubric_add_criterion:hover{
color: white;
background-color: #009fe6;
cursor: pointer;
}
}
......@@ -940,7 +934,7 @@
.openassessment_training_example_body {
@include padding(0, 15px, 15px, 15px);
position: relative;
overflow: hidden;
overflow: scroll;
.openassessment_training_example_essay_wrapper {
width: 58%;
display: inline-block;
......
"""
Studio editing view for OpenAssessment XBlock.
"""
import pkg_resources
import copy
import logging
import pkg_resources
from uuid import uuid4
from django.template import Context
from django.template.loader import get_template
from voluptuous import MultipleInvalid
......@@ -12,7 +14,7 @@ from xblock.fields import List, Scope
from xblock.fragment import Fragment
from openassessment.xblock.defaults import DEFAULT_EDITOR_ASSESSMENTS_ORDER, DEFAULT_RUBRIC_FEEDBACK_TEXT
from openassessment.xblock.validation import validator
from openassessment.xblock.data_conversion import create_rubric_dict, make_django_template_key
from openassessment.xblock.data_conversion import create_rubric_dict, make_django_template_key, update_assessments_format
from openassessment.xblock.schema import EDITOR_UPDATE_SCHEMA
from openassessment.xblock.resolve_dates import resolve_dates
from openassessment.xblock.xml import serialize_examples_to_xml_str, parse_examples_from_xml_str
......@@ -112,7 +114,7 @@ class StudioMixin(object):
feedback_default_text = DEFAULT_RUBRIC_FEEDBACK_TEXT
return {
'prompt': self.prompt,
'prompts': self.prompts,
'title': self.title,
'submission_due': submission_due,
'submission_start': submission_start,
......@@ -189,10 +191,14 @@ class StudioMixin(object):
)}
# This is where we default to EASE for problems which are edited in the GUI
assessment['algorithm_id'] = 'ease'
if assessment['name'] == 'student-training':
for example in assessment['examples']:
example['answer'] = {'parts': [{'text': text} for text in example['answer']]}
xblock_validator = validator(self, self._)
success, msg = xblock_validator(
create_rubric_dict(data['prompt'], data['criteria']),
create_rubric_dict(data['prompts'], data['criteria']),
data['assessments'],
submission_start=data['submission_start'],
submission_due=data['submission_due'],
......@@ -205,7 +211,7 @@ class StudioMixin(object):
# so we can safely modify the XBlock fields.
self.title = data['title']
self.display_name = data['title']
self.prompt = data['prompt']
self.prompts = data['prompts']
self.rubric_criteria = data['criteria']
self.rubric_assessments = data['assessments']
self.editor_assessments_order = data['editor_assessments_order']
......@@ -267,13 +273,20 @@ class StudioMixin(object):
# could be accomplished within the template, we are opting to remove logic from the template.
student_training_module = self.get_assessment_module('student-training')
student_training_template = {'answer': ""}
student_training_template = {
'answer': {
'parts': [
{'text': ''} for prompt in self.prompts
]
}
}
criteria_list = copy.deepcopy(self.rubric_criteria_with_labels)
for criterion in criteria_list:
criterion['option_selected'] = ""
student_training_template['criteria'] = criteria_list
if student_training_module:
student_training_module = update_assessments_format([student_training_module])[0]
example_list = []
# Adds each example to a modified version of the student training module dictionary.
for example in student_training_module['examples']:
......
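The editor submits each training-example answer as a flat list of strings, and update_editor_context above re-wraps it into the stored parts format. The same transformation, sketched in JavaScript for anyone following the client side of the exchange:

// What the GUI sends:
var fromEditor = { answer: ["part one", "part two"] };
// What studio.py stores (mirror of the Python list comprehension above):
var stored = {
    answer: {
        parts: fromEditor.answer.map(function(text) {
            return { text: text };
        })
    }
};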
{
"missing_feedback": {
"rubric": {
"prompt": "Test Prompt",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"label": "Test criterion label",
"prompt": "Test criterion prompt",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"label": "No label",
"explanation": "No explanation"
}
]
}
]
},
"prompt": "My new prompt.",
],
"prompts": [{"description": "My new prompt."}],
"submission_due": "4014-02-27T09:46:28",
"submission_start": "4014-02-10T09:46:28",
"title": "My new title.",
......@@ -36,9 +35,59 @@
"start": "",
"due": ""
}
]
},
"prompts_is_string": {
"title": "Foo",
"prompts": "My new prompt.",
"feedback_prompt": "Test Feedback Prompt",
"feedback_default_text": "Test default text...",
"submission_start": null,
"submission_due": null,
"allow_file_upload": true,
"leaderboard_show": true,
"allow_latex": false,
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"label": "Test criterion label",
"prompt": "Test criterion prompt",
"feedback": "optional",
"options": [
{
"order_num": 0,
"points": 0,
"label": "No label",
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"label": "Yes label",
"name": "Yes",
"explanation": "Yes explanation"
}
]
}
],
"assessments": [
{
"name": "peer-assessment",
"start": "2014-02-27T09:46:28",
"due": "2014-03-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment",
"start": "2014-04-01T00:00:00",
"due": "2014-06-01T00:00:00"
}
],
"expected-assessment": "peer-assessment",
"expected-criterion-prompt": "Test criterion prompt"
"editor_assessments_order": ["student-training", "peer-assessment", "self-assessment"]
}
}
{
"zero_criteria": {
"rubric": {
"prompt": "Test Prompt",
"prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": []
}
},
"negative_points": {
"rubric": {
"prompt": "Test Prompt",
"prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [
{
"order_num": 0,
......@@ -29,7 +29,7 @@
"duplicate_criteria_names": {
"rubric": {
"prompt": "Test Prompt",
"prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [
{
"order_num": 0,
......@@ -63,7 +63,7 @@
"duplicate_option_names": {
"rubric": {
"prompt": "Test Prompt",
"prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [
{
"order_num": 0,
......@@ -88,9 +88,62 @@
}
},
"change_prompts_number_after_release": {
"rubric": {
"prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}, {"description": "Test Prompt 3."}],
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
}
]
},
"current_rubric": {
"prompts": [{"description": "Test Prompt 3."}, {"description": "Test Prompt 4."}],
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
}
]
},
"is_released": true
},
"change_points_after_release": {
"rubric": {
"prompt": "Test Prompt",
"prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [
{
"order_num": 0,
......@@ -114,7 +167,7 @@
]
},
"current_rubric": {
"prompt": "Test Prompt",
"prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [
{
"order_num": 0,
......@@ -142,7 +195,7 @@
"add_criteria_after_release": {
"rubric": {
"prompt": "Test Prompt",
"prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [
{
"order_num": 0,
......@@ -166,7 +219,7 @@
]
},
"current_rubric": {
"prompt": "Test Prompt",
"prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [
{
"order_num": 0,
......@@ -213,7 +266,7 @@
"remove_criteria_after_release": {
"rubric": {
"prompt": "Test Prompt",
"prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [
{
"order_num": 0,
......@@ -256,7 +309,7 @@
]
},
"current_rubric": {
"prompt": "Test Prompt",
"prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [
{
"order_num": 0,
......@@ -284,7 +337,7 @@
"add_options_after_release": {
"rubric": {
"prompt": "Test Prompt",
"prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [
{
"order_num": 0,
......@@ -308,7 +361,7 @@
]
},
"current_rubric": {
"prompt": "Test Prompt",
"prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [
{
"order_num": 0,
......@@ -330,7 +383,7 @@
"remove_options_after_release": {
"rubric": {
"prompt": "Test Prompt",
"prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [
{
"order_num": 0,
......@@ -348,7 +401,7 @@
]
},
"current_rubric": {
"prompt": "Test Prompt",
"prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [
{
"order_num": 0,
......@@ -376,7 +429,7 @@
"rename_criterion_name_after_release": {
"rubric": {
"prompt": "Test Prompt",
"prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [
{
"order_num": 0,
......@@ -400,7 +453,7 @@
]
},
"current_rubric": {
"prompt": "Test Prompt",
"prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [
{
"order_num": 0,
......@@ -428,7 +481,7 @@
"rename_multiple_criteria_after_release": {
"rubric": {
"prompt": "Test Prompt",
"prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [
{
"order_num": 0,
......@@ -465,7 +518,7 @@
]
},
"current_rubric": {
"prompt": "Test Prompt",
"prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [
{
"order_num": 0,
......@@ -507,7 +560,7 @@
"example_based_duplicate_option_points": {
"is_example_based": true,
"rubric": {
"prompt": "Test Prompt",
"prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [
{
"order_num": 0,
......@@ -534,7 +587,7 @@
"zero_options_feedback_optional": {
"rubric": {
"prompt": "Test Prompt",
"prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [
{
"order_num": 0,
......@@ -549,7 +602,7 @@
"zero_options_feedback_disabled": {
"rubric": {
"prompt": "Test Prompt",
"prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [
{
"order_num": 0,
......@@ -564,7 +617,7 @@
"zero_options_no_feedback": {
"rubric": {
"prompt": "Test Prompt",
"prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [
{
"order_num": 0,
......
......@@ -90,7 +90,11 @@
"due": "2014-06-01T00:00:00",
"examples": [
{
"answer": "ẗëṡẗ äṅṡẅëṛ",
"answer": {
"parts": [
{"text": "ẗëṡẗ äṅṡẅëṛ"}
]
},
"options_selected": [
{
"criterion": "Test criterion",
......@@ -127,7 +131,11 @@
"due": "2014-06-01T00:00:00",
"examples": [
{
"answer": "ẗëṡẗ äṅṡẅëṛ",
"answer": {
"parts": [
{"text": "ẗëṡẗ äṅṡẅëṛ"}
]
},
"options_selected": [
{
"criterion": "Test criterion",
......@@ -140,7 +148,11 @@
]
},
{
"answer": "äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ",
"answer": {
"parts": [
{"text": "äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ"}
]
},
"options_selected": [
{
"criterion": "Another test criterion",
......
......@@ -10,7 +10,11 @@
],
"examples": [
{
"answer": "ẗëṡẗ äṅṡẅëṛ",
"answer": {
"parts": [
{"text": "ẗëṡẗ äṅṡẅëṛ"}
]
},
"options_selected": [
{
"criterion": "Test criterion",
......@@ -25,12 +29,18 @@
"xml": [
"<examples>",
"<example>",
"<answer>ẗëṡẗ äṅṡẅëṛ</answer>",
"<answer>",
"<part>ẗëṡẗ äṅṡẅëṛ 1</part>",
"<part>ẗëṡẗ äṅṡẅëṛ 2</part>",
"</answer>",
"<select criterion=\"Test criterion\" option=\"Yes\" />",
"<select criterion=\"Another test criterion\" option=\"No\" />",
"</example>",
"<example>",
"<answer>äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ</answer>",
"<answer>",
"<part>äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ 1</part>",
"<part>äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ 2</part>",
"</answer>",
"<select criterion=\"Another test criterion\" option=\"Yes\" />",
"<select criterion=\"Test criterion\" option=\"No\" />",
"</example>",
......@@ -38,7 +48,12 @@
],
"examples": [
{
"answer": "ẗëṡẗ äṅṡẅëṛ",
"answer": {
"parts": [
{"text": "ẗëṡẗ äṅṡẅëṛ 1"},
{"text": "ẗëṡẗ äṅṡẅëṛ 2"}
]
},
"options_selected": [
{
"criterion": "Test criterion",
......@@ -51,7 +66,12 @@
]
},
{
"answer": "äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ",
"answer": {
"parts": [
{"text": "äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ 1"},
{"text": "äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ 2"}
]
},
"options_selected": [
{
"criterion": "Another test criterion",
......
......@@ -1059,7 +1059,9 @@
"<assessments>",
"<assessment name=\"student-training\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\">",
"<example>",
"<answer>ẗëṡẗ äṅṡẅëṛ</answer>",
"<answer>",
"<part>ẗëṡẗ äṅṡẅëṛ</part>",
"</answer>",
"<select criterion=\"Test criterion\" option=\"No\" />",
"<select criterion=\"Another test criterion\" option=\"Yes\" />",
"</example>",
......@@ -1147,7 +1149,12 @@
"due": "2014-03-01T00:00:00",
"examples": [
{
"answer": "ẗëṡẗ äṅṡẅëṛ",
"answer": {
"parts": [
{"text": "ẗëṡẗ äṅṡẅëṛ 1"},
{"text": "ẗëṡẗ äṅṡẅëṛ 2"}
]
},
"options_selected": [
{
"criterion": "Test criterion",
......@@ -1160,7 +1167,12 @@
]
},
{
"answer": "śéćőńd téśt áńśẃéŕ",
"answer": {
"parts": [
{"text": "śéćőńd téśt áńśẃéŕ 1"},
{"text": "śéćőńd téśt áńśẃéŕ 2"}
]
},
"options_selected": [
{
"criterion": "Test criterion",
......@@ -1181,12 +1193,18 @@
"<assessments>",
"<assessment name=\"student-training\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\">",
"<example>",
"<answer>ẗëṡẗ äṅṡẅëṛ</answer>",
"<answer>",
"<part>ẗëṡẗ äṅṡẅëṛ 1</part>",
"<part>ẗëṡẗ äṅṡẅëṛ 2</part>",
"</answer>",
"<select criterion=\"Test criterion\" option=\"No\" />",
"<select criterion=\"Another test criterion\" option=\"Yes\" />",
"</example>",
"<example>",
"<answer>śéćőńd téśt áńśẃéŕ</answer>",
"<answer>",
"<part>śéćőńd téśt áńśẃéŕ 1</part>",
"<part>śéćőńd téśt áńśẃéŕ 2</part>",
"</answer>",
"<select criterion=\"Test criterion\" option=\"Yes\" />",
"<select criterion=\"Another test criterion\" option=\"No\" />",
"</example>",
......@@ -1292,11 +1310,15 @@
"<assessments>",
"<assessment name=\"example-based-assessment\" algorithm_id=\"sample-algorithm-id\">",
"<example>",
"<answer>тєѕт αηѕωєя</answer>",
"<answer>",
"<part>тєѕт αηѕωєя</part>",
"</answer>",
"<select criterion=\"Test criterion\" option=\"No\" />",
"</example>",
"<example>",
"<answer>тєѕт αηѕωєя TWO</answer>",
"<answer>",
"<part>тєѕт αηѕωєя TWO</part>",
"</answer>",
"<select criterion=\"Test criterion\" option=\"Yes\" />",
"</example>",
"</assessment>",
......
......@@ -25,7 +25,7 @@
"feedback": "required"
}
],
"prompt": "My new prompt.",
"prompts": [{"description": "My new prompt 1."}, {"description": "My new prompt 2."}],
"feedback_prompt": "Feedback prompt",
"feedback_default_text": "Feedback default text",
"submission_due": "4014-02-27T09:46",
......@@ -77,7 +77,7 @@
"feedback": "required"
}
],
"prompt": "Ṁÿ ṅëẅ ṗṛöṁṗẗ.",
"prompts": [{"description": "Ṁÿ ṅëẅ ṗṛöṁṗẗ 1."}, {"description": "Ṁÿ ṅëẅ ṗṛöṁṗẗ 2."}],
"feedback_prompt": "ḟëëḋḅäċḳ ṗṛöṁṗẗ",
"feedback_default_text": "Ṫëṡẗ ḋëḟäüḷẗ ẗëẍẗ",
"submission_due": "4014-02-27T09:46",
......@@ -129,7 +129,7 @@
"feedback": "required"
}
],
"prompt": "My new prompt.",
"prompts": [{"description": "My new prompt 1."}, {"description": "My new prompt 2."}],
"feedback_prompt": "Feedback prompt",
"feedback_default_text": "Feedback default text",
"submission_due": "4014-02-27T09:46",
......@@ -143,13 +143,13 @@
"name": "student-training",
"examples": [
{
"answer": "Ṫḧïṡ ïṡ äṅ äṅṡẅëṛ",
"answer": ["Ṫḧïṡ ïṡ äṅ äṅṡẅëṛ 1", "Ṫḧïṡ ïṡ äṅ äṅṡẅëṛ 2"],
"options_selected": [
{ "criterion": "тєѕт ¢яιтєяιση", "option": "Ṅö" }
]
},
{
"answer": "This is another answer",
"answer": ["This is another answer 1", "This is another answer 2"],
"options_selected": [
{ "criterion": "тєѕт ¢яιтєяιση", "option": "sǝʎ" }
]
......@@ -193,7 +193,7 @@
"feedback": "required"
}
],
"prompt": "My new prompt.",
"prompts": [{"description": "My new prompt 1."}, {"description": "My new prompt 2."}],
"feedback_prompt": "Feedback prompt",
"feedback_default_text": "Feedback default text",
"submission_due": "4014-02-27T09:46",
......
......@@ -18,7 +18,7 @@ class StudioViewTest(XBlockHandlerTestCase):
"""
UPDATE_EDITOR_DATA = {
"title": "Test title",
"prompt": "Test prompt",
"prompts": [{"description": "Test prompt"}],
"feedback_prompt": "Test feedback prompt",
"feedback_default_text": "Test feedback default text",
"submission_start": "4014-02-10T09:46",
......@@ -205,10 +205,13 @@ class StudioViewTest(XBlockHandlerTestCase):
# Store old XBlock fields for later verification
old_title = xblock.title
old_prompt = xblock.prompt
old_prompts = xblock.prompts
old_assessments = xblock.rubric_assessments
old_criteria = xblock.rubric_criteria
xblock.runtime.modulestore = MagicMock()
xblock.runtime.modulestore.has_published_version.return_value = False
# Verify the response fails
resp = self.request(xblock, 'update_editor_context', request, response_format='json')
self.assertFalse(resp['success'])
......@@ -218,7 +221,7 @@ class StudioViewTest(XBlockHandlerTestCase):
# We don't need to be exhaustive here, because we have other unit tests
# that verify this extensively.
self.assertEqual(xblock.title, old_title)
self.assertEqual(xblock.prompt, old_prompt)
self.assertEqual(xblock.prompts, old_prompts)
self.assertItemsEqual(xblock.rubric_assessments, old_assessments)
self.assertItemsEqual(xblock.rubric_criteria, old_criteria)
......
......@@ -81,7 +81,7 @@ class AssessmentValidationTest(TestCase):
class RubricValidationTest(TestCase):
@ddt.file_data('data/valid_rubrics.json')
def test_valid_assessment(self, data):
def test_valid_rubric(self, data):
current_rubric = data.get('current_rubric')
is_released = data.get('is_released', False)
is_example_based = data.get('is_example_based', False)
......@@ -92,7 +92,7 @@ class RubricValidationTest(TestCase):
self.assertEqual(msg, u'')
@ddt.file_data('data/invalid_rubrics.json')
def test_invalid_assessment(self, data):
def test_invalid_rubric(self, data):
current_rubric = data.get('current_rubric')
is_released = data.get('is_released', False)
is_example_based = data.get('is_example_based', False)
......
......@@ -11,7 +11,7 @@ import dateutil.parser
from django.test import TestCase
import ddt
from openassessment.xblock.data_conversion import create_prompts_list
from openassessment.xblock.data_conversion import create_prompts_list, update_assessments_format
from openassessment.xblock.openassessmentblock import OpenAssessmentBlock
from openassessment.xblock.xml import (
serialize_content, parse_from_xml_str, _parse_prompts_xml, parse_rubric_xml,
......@@ -120,7 +120,9 @@ class TestSerializeContent(TestCase):
self.oa_block.submission_start = data.get('submission_start')
self.oa_block.submission_due = data.get('submission_due')
self.oa_block.rubric_criteria = data.get('criteria', copy.deepcopy(self.BASIC_CRITERIA))
self.oa_block.rubric_assessments = data.get('assessments', copy.deepcopy(self.BASIC_ASSESSMENTS))
self.oa_block.rubric_assessments = update_assessments_format(
data.get('assessments', copy.deepcopy(self.BASIC_ASSESSMENTS))
)
self.oa_block.allow_file_upload = data.get('allow_file_upload')
self.oa_block.allow_latex = data.get('allow_latex')
self.oa_block.leaderboard_show = data.get('leaderboard_show', 0)
......@@ -183,7 +185,8 @@ class TestSerializeContent(TestCase):
for assessment in data['assessments']:
if 'student-training' == assessment['name'] and assessment['examples']:
xml_str = serialize_examples_to_xml_str(assessment)
self.assertIn(assessment['examples'][0]['answer'], xml_str)
for part in assessment['examples'][0]['answer']['parts']:
self.assertIn(part['text'], xml_str)
@ddt.file_data('data/serialize.json')
def test_serialize_assessments(self, data):
......@@ -228,7 +231,7 @@ class TestSerializeContent(TestCase):
def test_mutated_assessments_dict(self):
self._configure_xblock({})
for assessment_dict in self.BASIC_ASSESSMENTS:
for assessment_dict in update_assessments_format(self.BASIC_ASSESSMENTS):
for mutated_dict in self._dict_mutations(assessment_dict):
self.oa_block.rubric_assessments = [mutated_dict]
xml = serialize_content(self.oa_block)
......
......@@ -214,6 +214,10 @@ def validate_rubric(rubric_dict, current_rubric, is_released, is_example_based,
# but nothing that would change the point value of a rubric.
if is_released:
# Number of prompts must be the same
if len(rubric_dict['prompts']) != len(current_rubric['prompts']):
return (False, _(u'Prompts cannot be created or deleted after a problem is released.'))
# Number of criteria must be the same
if len(rubric_dict['criteria']) != len(current_rubric['criteria']):
return (False, _(u'The number of criteria cannot be changed after a problem is released.'))
......@@ -330,7 +334,7 @@ def validator(oa_block, _, strict_post_release=True):
# Rubric
is_example_based = 'example-based-assessment' in [asmnt.get('name') for asmnt in assessments]
current_rubric = {
'prompt': oa_block.prompt,
'prompts': oa_block.prompts,
'criteria': oa_block.rubric_criteria
}
success, msg = validate_rubric(rubric_dict, current_rubric, is_released, is_example_based, _)
......
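The released-problem rule added above is a pure count comparison: prompt text may still be edited, but prompts cannot be added or removed once the problem is live. Restated as a standalone predicate (a sketch; the real check lives in validate_rubric):

function promptCountUnchanged(newRubric, currentRubric) {
    return newRubric.prompts.length === currentRubric.prompts.length;
}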
......@@ -464,11 +464,21 @@ def parse_examples_xml(examples):
for example_el in examples:
example_dict = dict()
# Retrieve the answer from the training example
# Retrieve the answers from the training example
answers_list = list()
answer_elements = example_el.findall('answer')
if len(answer_elements) != 1:
raise UpdateFromXmlError(u'Each "example" element must contain exactly one "answer" element')
example_dict['answer'] = _safe_get_text(answer_elements[0])
answer_part_elements = answer_elements[0].findall('part')
if len(answer_part_elements) > 0:
for answer_part_element in answer_part_elements:
answers_list.append(_safe_get_text(answer_part_element))
else:
# Initially example answers had only one part.
answers_list.append(_safe_get_text(answer_elements[0]))
example_dict['answer'] = {"parts": [{"text": text} for text in answers_list]}
# Retrieve the options selected from the training example
example_dict['options_selected'] = []
......@@ -595,7 +605,9 @@ def serialize_training_examples(examples, assessment_el):
# Answer provided in the example (default to empty string)
answer_el = etree.SubElement(example_el, 'answer')
answer_el.text = unicode(example_dict.get('answer', ''))
for part in example_dict.get('answer', {}).get('parts', []):
part_el = etree.SubElement(answer_el, 'part')
part_el.text = unicode(part.get('text', u''))
# Options selected from the rubric
options_selected = example_dict.get('options_selected', [])
......
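For reference, parse_examples_xml now accepts both markups: an answer with part children yields one part per element, and a legacy single-text answer yields exactly one part. The parsed result, spelled out as data (values illustrative):

// <answer><part>part 1</part><part>part 2</part></answer> parses to:
var example = {
    answer: { parts: [{ text: "part 1" }, { text: "part 2" }] },
    options_selected: [{ criterion: "Test criterion", option: "Yes" }]
};
// Legacy <answer>single text</answer> parses to a single part, which keeps
// previously authored problems loading unchanged.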