Unverified Commit a96a9e4b by Bill Filler Committed by GitHub

Merge pull request #1088 from edx/wysiwyg-editor-for-prompt

WL-1459, OSPR-2164 | Wysiwyg editor for prompt
parents 85f044d0 6d84e47d
......@@ -152,6 +152,7 @@
"XBlock",
// ORA-specific globals
"OpenAssessment"
"OpenAssessment",
"rewriteStaticLinks"
]
}
......@@ -16,7 +16,7 @@
<ul class="list-input settings-list">
<li class="field comp-setting-entry openassessment_prompt_description_wrapper">
<div class="wrapper-comp-settings">
<textarea class="openassessment_prompt_description setting-input" maxlength="10000">{{ prompt_description }}</textarea>
<textarea class="openassessment_prompt_description setting-input" maxlength="10000" data-type="{{ prompts_type }}">{{ prompt_description }}</textarea>
</div>
</li>
</ul>
......
{% load i18n %}
{% spaceless %}
<div id="oa_prompts_editor_wrapper" class="oa_editor_content_wrapper">
<div id="openassessment_prompt_template" class="is--hidden">
<div id="openassessment_prompt_template" class="is--hidden" data-base-asset-url="{{ base_asset_url }}">
{% include "openassessmentblock/edit/oa_edit_prompt.html" with prompt_uuid="" prompt_description="" %}
</div>
......@@ -13,7 +13,7 @@
<ul id="openassessment_prompts_list">
{% for prompt in prompts %}
{% include "openassessmentblock/edit/oa_edit_prompt.html" with prompt_uuid=prompt.uuid prompt_description=prompt.description %}
{% include "openassessmentblock/edit/oa_edit_prompt.html" with prompt_uuid=prompt.uuid prompt_description=prompt.description prompts_type=prompts_type %}
{% endfor %}
</ul>
......
......@@ -9,7 +9,11 @@
</div>
<article class="submission__answer__part__prompt">
<div class="submission__answer__part__prompt__value">
{{ part.prompt.description|linebreaks }}
{% if prompts_type == 'html' %}
{{ part.prompt.description|safe }}
{% else %}
{{ part.prompt.description|linebreaks }}
{% endif %}
</div>
</article>
{% if part.text %}
......
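The two template hunks above branch on prompts_type: the `safe` filter renders stored HTML verbatim, while the old `linebreaks` path escapes markup and converts newlines. A minimal standalone sketch of that difference (assumes only that Django is installed; the prompt string is made up):

    import django
    from django.conf import settings

    settings.configure(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
    }])
    django.setup()

    from django.template import Context, Template

    description = '<strong>Question 123</strong>\nLine 2'

    # prompts_type == 'html': markup passes through untouched.
    print(Template('{{ d|safe }}').render(Context({'d': description})))
    # -> <strong>Question 123</strong> followed by a raw newline

    # prompts_type == 'text' (the legacy behavior): markup is escaped and
    # newlines become paragraph/line-break tags.
    print(Template('{{ d|linebreaks }}').render(Context({'d': description})))
    # -> <p>&lt;strong&gt;Question 123&lt;/strong&gt;<br>Line 2</p>
    #    (older Django versions emit <br /> instead of <br>)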
......@@ -75,7 +75,11 @@
<h5 class="submission__answer__part__text__title">{% trans "The prompt for this section" %}</h5>
<article class="submission__answer__part__prompt">
<div class="submission__answer__part__prompt__copy">
{{ part.prompt.description|linebreaks }}
{% if prompts_type == 'html' %}
{{ part.prompt.description|safe }}
{% else %}
{{ part.prompt.description|linebreaks }}
{% endif %}
</div>
</article>
......
......@@ -176,6 +176,12 @@ class OpenAssessmentBlock(MessageMixin,
help="The prompts to display to a student."
)
prompts_type = String(
default='text',
scope=Scope.content,
help="The type of prompt. html or text"
)
rubric_criteria = List(
default=DEFAULT_RUBRIC_CRITERIA,
scope=Scope.content,
......@@ -407,6 +413,7 @@ class OpenAssessmentBlock(MessageMixin,
context_dict = {
"title": self.title,
"prompts": self.prompts,
"prompts_type": self.prompts_type,
"rubric_assessments": ui_models,
"show_staff_area": self.is_course_staff and not self.in_studio_preview,
}
......@@ -697,6 +704,7 @@ class OpenAssessmentBlock(MessageMixin,
block.submission_due = config['submission_due']
block.title = config['title']
block.prompts = config['prompts']
block.prompts_type = config['prompts_type']
block.text_response = config['text_response']
block.file_upload_response = config['file_upload_response']
block.allow_file_upload = config['allow_file_upload']
......
......@@ -56,6 +56,12 @@ def datetime_validator(value):
raise Invalid(u"Could not parse datetime from value \"{val}\"".format(val=value))
PROMPTS_TYPES = [
u'text',
u'html',
]
NECESSITY_OPTIONS = [
u'required',
u'optional',
......@@ -83,6 +89,7 @@ EDITOR_UPDATE_SCHEMA = Schema({
Required('description'): utf8_validator,
})
],
Required('prompts_type', default='text'): Any(All(utf8_validator, In(PROMPTS_TYPES)), None),
Required('title'): utf8_validator,
Required('feedback_prompt'): utf8_validator,
Required('feedback_default_text'): utf8_validator,
......
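Taken in isolation, the new schema rule behaves as sketched below (assumes the voluptuous package; written Python 2-style to match the codebase, with the plain `unicode` type standing in for the repo's utf8_validator):

    from voluptuous import All, Any, In, Invalid, Required, Schema

    PROMPTS_TYPES = [u'text', u'html']

    schema = Schema({
        # unicode is an approximation of the repo's utf8_validator.
        Required('prompts_type', default='text'): Any(All(unicode, In(PROMPTS_TYPES)), None),
    })

    print(schema({}))                          # {'prompts_type': 'text'} -- default applied
    print(schema({'prompts_type': u'html'}))   # accepted unchanged
    print(schema({'prompts_type': None}))      # None is explicitly allowed
    try:
        schema({'prompts_type': u'markdown'})
    except Invalid as exc:
        print(exc)                             # rejected: not in PROMPTS_TYPES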
Two source diffs could not be displayed because they are too large.
......@@ -29,6 +29,10 @@ describe("OpenAssessment.EditPromptViews", function() {
view = new OpenAssessment.EditPromptsView(element, notifier);
});
afterEach(function() {
OpenAssessment.Prompt.prototype.tinyMCEEnabled = undefined;
});
it("reads prompts from the editor", function() {
// This assumes a particular structure of the DOM,
// which is set by the HTML fixture.
......@@ -59,6 +63,16 @@ describe("OpenAssessment.EditPromptViews", function() {
description: ""
});
});
it("creates new html prompts", function() {
OpenAssessment.Prompt.prototype.tinyMCEEnabled = true;
spyOn(OpenAssessment.Prompt.prototype, 'attachWysiwygToPrompt');
spyOn(OpenAssessment.Prompt.prototype, 'addHandler');
view.addPrompt();
expect(OpenAssessment.Prompt.prototype.attachWysiwygToPrompt).toHaveBeenCalled();
expect(OpenAssessment.Prompt.prototype.addHandler).toHaveBeenCalled();
});
});
describe("OpenAssessment.EditPromptViews after release", function() {
......
......@@ -434,6 +434,7 @@ if (typeof OpenAssessment.Server === "undefined" || !OpenAssessment.Server) {
var url = this.url('update_editor_context');
var payload = JSON.stringify({
prompts: options.prompts,
prompts_type: options.prompts_type,
feedback_prompt: options.feedbackPrompt,
feedback_default_text: options.feedback_default_text,
title: options.title,
......
......@@ -76,11 +76,47 @@ OpenAssessment.ItemUtilities = {
OpenAssessment.Prompt
**/
OpenAssessment.Prompt = function(element, notifier) {
if (this.tinyMCEEnabled) {
element = this.attachWysiwygToPrompt(element);
}
this.element = element;
this.notifier = notifier;
};
OpenAssessment.Prompt.prototype = {
tinyMCEEnabled: window.tinyMCE !== undefined,
/**
Attach Wysiwyg editor to the textarea field.
Args:
el (OpenAssessment.Container): The container that the prompt is a member of.
Returns:
Updated OpenAssessment.Container
**/
attachWysiwygToPrompt: function(el) {
var elId = $(el).find('textarea').attr('id');
if (!elId) {
/* jshint undef:false */
var textarea = $(el).find('textarea');
var text = $(textarea).val();
var type = $(textarea).data('type');
if (text && (type === 'text')) {
text = _.escape(text).replace(/(?:\r\n|\r|\n)/g, '<br />');
$(textarea).val(text);
}
var newElId = Date.now() + '-textarea-' + (Math.random() * 100);
$(textarea).attr('id', newElId).tinymce(oaTinyMCE(
{
base_asset_url: $('#openassessment_prompt_template').data("baseAssetUrl")
}
));
}
return $(el);
},
/**
Finds the values currently entered in the Prompts's fields, and returns them.
......@@ -109,7 +145,17 @@ OpenAssessment.Prompt.prototype = {
**/
description: function(text) {
var sel = $('.openassessment_prompt_description', this.element);
return OpenAssessment.Fields.stringField(sel, text);
if (!this.tinyMCEEnabled) {
return OpenAssessment.Fields.stringField(sel, text);
}
var tinyEl = window.tinyMCE.get(sel.attr('id'));
if (text) {
tinyEl.setContent(text);
} else {
return tinyEl.getContent();
}
},
addEventListeners: function() {},
......
......@@ -195,6 +195,7 @@ OpenAssessment.StudioView.prototype = {
this.server.updateEditorContext({
prompts: view.promptsView.promptsDefinition(),
prompts_type: view.promptsView.promptsType(),
feedbackPrompt: view.rubricView.feedbackPrompt(),
feedback_default_text: view.rubricView.feedback_default_text(),
criteria: view.rubricView.criteriaDefinition(),
......@@ -278,6 +279,133 @@ OpenAssessment.StudioView.prototype = {
}
};
/* Get tinyMCE config */
/* jshint unused:false */
function oaTinyMCE(options) {
var CUSTOM_FONTS, STANDARD_FONTS, _getFonts, _this = this;
CUSTOM_FONTS = "Default='Open Sans', Verdana, Arial, Helvetica, sans-serif;";
STANDARD_FONTS = "Andale Mono=andale mono,times;" +
"Arial=arial,helvetica,sans-serif;" +
"Arial Black=arial black,avant garde;" +
"Book Antiqua=book antiqua,palatino;" +
"Comic Sans MS=comic sans ms,sans-serif;" +
"Courier New=courier new,courier;" +
"Georgia=georgia,palatino;" +
"Helvetica=helvetica;" +
"Impact=impact,chicago;" +
"Symbol=symbol;" +
"Tahoma=tahoma,arial,helvetica,sans-serif;" +
"Terminal=terminal,monaco;" +
"Times New Roman=times new roman,times;" +
"Trebuchet MS=trebuchet ms,geneva;" +
"Verdana=verdana,geneva;" +
"Webdings=webdings;" +
"Wingdings=wingdings,zapf dingbats";
_getFonts = function() {
return CUSTOM_FONTS + STANDARD_FONTS;
};
this.initInstanceCallback = function(ed) {
return oaTinyMCE.prototype.initInstanceCallback.apply(_this, arguments);
};
this.saveLink = function(data) {
return oaTinyMCE.prototype.saveLink.apply(_this, arguments);
};
this.editLink = function(data) {
return oaTinyMCE.prototype.editLink.apply(_this, arguments);
};
this.saveImage = function(data) {
return oaTinyMCE.prototype.saveImage.apply(_this, arguments);
};
this.editImage = function(data) {
return oaTinyMCE.prototype.editImage.apply(_this, arguments);
};
this.setupTinyMCE = function(ed) {
return oaTinyMCE.prototype.setupTinyMCE.apply(_this, arguments);
};
oaTinyMCE.prototype.setupTinyMCE = function(ed) {
ed.on('SaveImage', this.saveImage);
ed.on('EditImage', this.editImage);
ed.on('SaveLink', this.saveLink);
ed.on('EditLink', this.editLink);
};
oaTinyMCE.prototype.editImage = function(data) {
if (data.src) {
/* jshint undef:false */
data.src = rewriteStaticLinks(data.src, this.base_asset_url, '/static/');
}
};
oaTinyMCE.prototype.saveImage = function(data) {
if (data.src) {
data.src = rewriteStaticLinks(data.src, '/static/', this.base_asset_url);
}
};
oaTinyMCE.prototype.editLink = function(data) {
if (data.href) {
data.href = rewriteStaticLinks(data.href, this.base_asset_url, '/static/');
}
};
oaTinyMCE.prototype.saveLink = function(data) {
if (data.href) {
data.href = rewriteStaticLinks(data.href, '/static/', this.base_asset_url);
}
};
oaTinyMCE.prototype.initInstanceCallback = function(ed) {
ed.setContent(rewriteStaticLinks(ed.getContent({
no_events: 1
}), '/static/', this.base_asset_url));
return ed.focus();
};
this.base_asset_url = options.base_asset_url;
return {
height: "300",
font_formats: _getFonts(),
theme: "modern",
skin: 'studio-tmce4',
schema: "html5",
convert_urls: false,
directionality: $(".wrapper-view, .window-wrap").prop('dir'),
formats: {
code: {inline: 'code'}
},
visual: false,
plugins: "textcolor, link, image, media",
image_advtab: true,
toolbar: "formatselect | fontselect | bold italic underline forecolor | " +
"bullist numlist outdent indent blockquote | link unlink image media",
block_formats: gettext("Paragraph") + "=p;" +
gettext("Preformatted") + "=pre;" +
gettext("Heading 3") + "=h3;" +
gettext("Heading 4") + "=h4;" +
gettext("Heading 5") + "=h5;" +
gettext("Heading 6") + "=h6",
menubar: false,
statusbar: false,
valid_children: "+body[style]",
valid_elements: "*[*]",
extended_valid_elements: "*[*]",
invalid_elements: "",
setup: this.setupTinyMCE,
init_instance_callback: this.initInstanceCallback,
browser_spellcheck: true
};
}
/* XBlock entry point for Studio view */
/* jshint unused:false */
function OpenAssessmentEditor(runtime, element, data) {
......
......@@ -368,7 +368,7 @@ OpenAssessment.EditSelfAssessmentView.prototype = {
clearValidationErrors: function() {
this.startDatetimeControl.clearValidationErrors();
this.dueDatetimeControl.clearValidationErrors();
},
}
};
/**
......
......@@ -53,6 +53,18 @@ OpenAssessment.EditPromptsView.prototype = {
},
/**
Get the prompts mode: "html" when tinyMCE is enabled, "text" otherwise.
Returns:
string: "html" or "text"
**/
promptsType: function() {
var firstPrompt = this.promptsContainer.getItem(0);
return (firstPrompt && firstPrompt.tinyMCEEnabled) ? 'html' : 'text';
},
/**
Add a new prompt.
Uses a client-side template to create the new prompt.
**/
......
......@@ -129,9 +129,11 @@ class StudioMixin(object):
feedback_default_text = copy.deepcopy(self.rubric_feedback_default_text)
if not feedback_default_text:
feedback_default_text = DEFAULT_RUBRIC_FEEDBACK_TEXT
course_id = self.location.course_key if hasattr(self, 'location') else None
return {
'prompts': self.prompts,
'prompts_type': self.prompts_type,
'title': self.title,
'submission_due': submission_due,
'submission_start': submission_start,
......@@ -150,6 +152,7 @@ class StudioMixin(object):
make_django_template_key(asmnt)
for asmnt in self.editor_assessments_order
],
'base_asset_url': self._get_base_url_path_for_course_assets(course_id),
'is_released': self.is_released(),
}
......@@ -228,6 +231,7 @@ class StudioMixin(object):
self.title = data['title']
self.display_name = data['title']
self.prompts = data['prompts']
self.prompts_type = data['prompts_type']
self.rubric_criteria = data['criteria']
self.rubric_assessments = data['assessments']
self.editor_assessments_order = data['editor_assessments_order']
......@@ -370,3 +374,17 @@ class StudioMixin(object):
for i in range(len(sorted_superset_indices)):
superset[sorted_superset_indices[i]] = subset[i]
return superset
def _get_base_url_path_for_course_assets(self, course_key):
"""
Returns the base URL path for course assets.
"""
if course_key is None:
return None
placeholder_id = uuid4().hex
# Create a dummy asset location with a fake but unique name, strip off the name, and return the base path.
url_path = unicode(course_key.make_asset_key('asset', placeholder_id).for_branch(None))
if not url_path.startswith('/'):
url_path = '/' + url_path
return url_path.replace(placeholder_id, '')
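For a concrete idea of what that helper returns, here is a sketch of the same steps (assumes the opaque-keys package; the course id is made up):

    from uuid import uuid4
    from opaque_keys.edx.keys import CourseKey

    course_key = CourseKey.from_string('course-v1:edX+DemoX+2017')
    placeholder_id = uuid4().hex
    url_path = unicode(course_key.make_asset_key('asset', placeholder_id).for_branch(None))
    print('/' + url_path.replace(placeholder_id, ''))
    # -> /asset-v1:edX+DemoX+2017+type@asset+block@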
......@@ -498,6 +498,7 @@ class SubmissionMixin(object):
"xblock_id": self.get_xblock_id(),
"text_response": self.text_response,
"file_upload_response": self.file_upload_response,
"prompts_type": self.prompts_type,
}
# Due dates can default to the distant future, in which case
......
<openassessment text_response="required" file_upload_response="" group_access="{&quot;381451918&quot;: [1179773159]}">
<openassessment text_response="required" file_upload_response="" group_access="{&quot;381451918&quot;: [1179773159]}" prompts_type="text">
<title>Open Assessment Test</title>
<prompts>
<prompt>
......
<openassessment text_response="required" file_upload_response="" group_access="{&quot;381451918&quot;: [1179773159]}" prompts_type="html">
<title>Open Assessment Test</title>
<prompts>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat poverty? Please answer in a short essay of 200-300 words.</description>
</prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric>
<criterion>
<name>Concise</name>
<prompt>How concise is it?</prompt>
<option points="0">
<name>Neal Stephenson (late)</name>
<explanation>Neal Stephenson explanation</explanation>
</option>
<option points="1">
<name>HP Lovecraft</name>
<explanation>HP Lovecraft explanation</explanation>
</option>
<option points="3">
<name>Robert Heinlein</name>
<explanation>Robert Heinlein explanation</explanation>
</option>
<option points="4">
<name>Neal Stephenson (early)</name>
<explanation>Neal Stephenson (early) explanation</explanation>
</option>
<option points="5">
<name>Earnest Hemingway</name>
<explanation>Earnest Hemingway</explanation>
</option>
</criterion>
<criterion>
<name>Clear-headed</name>
<prompt>How clear is the thinking?</prompt>
<option points="0">
<name>Yogi Berra</name>
<explanation>Yogi Berra explanation</explanation>
</option>
<option points="1">
<name>Hunter S. Thompson</name>
<explanation>Hunter S. Thompson explanation</explanation>
</option>
<option points="2">
<name>Robert Heinlein</name>
<explanation>Robert Heinlein explanation</explanation>
</option>
<option points="3">
<name>Isaac Asimov</name>
<explanation>Isaac Asimov explanation</explanation>
</option>
<option points="10">
<name>Spock</name>
<explanation>Spock explanation</explanation>
</option>
</criterion>
<criterion>
<name>Form</name>
<prompt>Lastly, how is its form? Punctuation, grammar, and spelling all count.</prompt>
<option points="0">
<name>lolcats</name>
<explanation>lolcats explanation</explanation>
</option>
<option points="1">
<name>Facebook</name>
<explanation>Facebook explanation</explanation>
</option>
<option points="2">
<name>Reddit</name>
<explanation>Reddit explanation</explanation>
</option>
<option points="3">
<name>metafilter</name>
<explanation>metafilter explanation</explanation>
</option>
<option points="4">
<name>Usenet, 1996</name>
<explanation>Usenet, 1996 explanation</explanation>
</option>
<option points="5">
<name>The Elements of Style</name>
<explanation>The Elements of Style explanation</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" />
<assessment name="self-assessment" />
</assessments>
</openassessment>
......@@ -58,7 +58,6 @@
"explanation": "Yes explanation"
}
],
"editor_assessments_order": ["student-training", "peer-assessment", "self-assessment", "staff-assessment"],
"feedback": "optional"
}
],
......@@ -77,6 +76,7 @@
"due": null
}
],
"editor_assessments_order": ["student-training", "peer-assessment", "self-assessment", "staff-assessment"],
"submission_due": "2014-02-27T09:46",
"submission_start": "2014-02-10T09:46"
},
......@@ -1629,5 +1629,60 @@
"submission_due": "2012-02-27T09:46",
"submission_start": "2012-02-10T09:46",
"expected_error": "error: in case if file upload response is disabled text response must be required"
},
"invalid_prompt_type": {
"prompts": [{"description": "My new prompt 1."}, {"description": "My new prompt 2."}],
"prompts_type": "not_text_or_html",
"feedback_prompt": "Feedback prompt",
"feedback_default_text": "Feedback default text",
"text_response": "required",
"file_upload_response": null,
"file_upload_type": null,
"allow_latex": false,
"leaderboard_show": 0,
"criteria": [
{
"order_num": 0,
"name": "0",
"label": "Test criterion",
"prompt": "Test criterion prompt",
"options": [
{
"order_num": 0,
"points": 0,
"name": "0",
"label": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "1",
"label": "Yes",
"explanation": "Yes explanation"
}
],
"feedback": "optional"
}
],
"editor_assessments_order": ["student-training", "peer-assessment", "self-assessment", "staff-assessment"],
"title": "My new title.",
"assessments": [
{
"name": "peer-assessment",
"must_grade": 5,
"must_be_graded_by": 3,
"start": null,
"due": null
},
{
"name": "self-assessment",
"start": null,
"due": null
}
],
"submission_due": "2014-02-27T09:46",
"submission_start": "2014-02-10T09:46"
}
}
<openassessment prompts_type="html">
<title>Open Assessment Test</title>
<rubric>
<prompt><![CDATA[<code><strong>Question 123</strong></code>]]></prompt>
<criterion>
<name>Concise</name>
<prompt>How concise is it?</prompt>
<option points="0">
<name>Neal Stephenson (late)</name>
<explanation>Neal Stephenson explanation</explanation>
</option>
<option points="1">
<name>HP Lovecraft</name>
<explanation>HP Lovecraft explanation</explanation>
</option>
<option points="3">
<name>Robert Heinlein</name>
<explanation>Robert Heinlein explanation</explanation>
</option>
<option points="4">
<name>Neal Stephenson (early)</name>
<explanation>Neal Stephenson (early) explanation</explanation>
</option>
<option points="5">
<name>Earnest Hemingway</name>
<explanation>Earnest Hemingway</explanation>
</option>
</criterion>
<criterion>
<name>Clear-headed</name>
<prompt>How clear is the thinking?</prompt>
<option points="0">
<name>Yogi Berra</name>
<explanation>Yogi Berra explanation</explanation>
</option>
<option points="1">
<name>Hunter S. Thompson</name>
<explanation>Hunter S. Thompson explanation</explanation>
</option>
<option points="2">
<name>Robert Heinlein</name>
<explanation>Robert Heinlein explanation</explanation>
</option>
<option points="3">
<name>Isaac Asimov</name>
<explanation>Isaac Asimov explanation</explanation>
</option>
<option points="10">
<name>Spock</name>
<explanation>Spock explanation</explanation>
</option>
</criterion>
<criterion>
<name>Form</name>
<prompt>Lastly, how is its form? Punctuation, grammar, and spelling all count.</prompt>
<option points="0">
<name>lolcats</name>
<explanation>lolcats explanation</explanation>
</option>
<option points="1">
<name>Facebook</name>
<explanation>Facebook explanation</explanation>
</option>
<option points="2">
<name>Reddit</name>
<explanation>Reddit explanation</explanation>
</option>
<option points="3">
<name>metafilter</name>
<explanation>metafilter explanation</explanation>
</option>
<option points="4">
<name>Usenet, 1996</name>
<explanation>Usenet, 1996 explanation</explanation>
</option>
<option points="5">
<name>The Elements of Style</name>
<explanation>The Elements of Style explanation</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" />
<assessment name="self-assessment" />
</assessments>
</openassessment>
......@@ -23,6 +23,7 @@ class StudioViewTest(XBlockHandlerTestCase):
"text_response": "required",
"file_upload_response": None,
"prompts": [{"description": "Test prompt"}],
"prompts_type": "html",
"feedback_prompt": "Test feedback prompt",
"feedback_default_text": "Test feedback default text",
"submission_start": "4014-02-10T09:46",
......@@ -176,6 +177,16 @@ class StudioViewTest(XBlockHandlerTestCase):
self.assertFalse(resp['success'])
self.assertIn(expected_error, resp['msg'].lower())
@scenario('data/basic_scenario_html_prompts_type.xml')
def test_update_context_with_prompts_type(self, xblock):
data = copy.deepcopy(self.UPDATE_EDITOR_DATA)
data['prompts_type'] = 'text'
xblock.runtime.modulestore = MagicMock()
xblock.runtime.modulestore.has_published_version.return_value = False
resp = self.request(xblock, 'update_editor_context', json.dumps(data), response_format='json')
self.assertTrue(resp['success'], msg=resp.get('msg'))
@file_data('data/invalid_rubric.json')
@scenario('data/basic_scenario.xml')
def test_update_rubric_invalid(self, xblock, data):
......
......@@ -124,10 +124,22 @@ class SubmissionTest(XBlockHandlerTestCase):
@scenario('data/line_breaks.xml')
def test_prompt_line_breaks(self, xblock):
# Verify that prompts with multiple lines retain line breaks
# (backward compatibility when prompts_type == 'text')
resp = self.request(xblock, 'render_submission', json.dumps(dict()))
expected_prompt = u"<p><br />Line 1</p><p>Line 2</p><p>Line 3<br /></p>"
self.assertIn(expected_prompt, resp)
@scenario('data/prompt_html.xml')
def test_prompt_html_to_text(self, xblock):
resp = self.request(xblock, 'render_submission', json.dumps(dict()))
expected_prompt = u"<code><strong>Question 123</strong></code>"
self.assertIn(expected_prompt, resp)
xblock.prompts_type = "text"
resp = self.request(xblock, 'render_submission', json.dumps(dict()))
expected_prompt = "&lt;code&gt;&lt;strong&gt;Question 123&lt;/strong&gt;&lt;/code&gt;"
self.assertIn(expected_prompt, resp)
@mock_s3
@override_settings(
AWS_ACCESS_KEY_ID='foobar',
......@@ -266,7 +278,8 @@ class SubmissionRenderTest(XBlockHandlerTestCase):
'submission_start': dt.datetime(4999, 4, 1).replace(tzinfo=pytz.utc),
'allow_latex': False,
'user_timezone': None,
'user_language': None
'user_language': None,
'prompts_type': 'text'
}
)
......@@ -292,7 +305,8 @@ class SubmissionRenderTest(XBlockHandlerTestCase):
'self_incomplete': True,
'allow_latex': False,
'user_timezone': None,
'user_language': None
'user_language': None,
'prompts_type': 'text'
}
)
......@@ -314,7 +328,8 @@ class SubmissionRenderTest(XBlockHandlerTestCase):
'submission_due': dt.datetime(2999, 5, 6).replace(tzinfo=pytz.utc),
'allow_latex': False,
'user_timezone': None,
'user_language': None
'user_language': None,
'prompts_type': 'text'
}
)
......@@ -335,7 +350,8 @@ class SubmissionRenderTest(XBlockHandlerTestCase):
'submit_enabled': False,
'allow_latex': False,
'user_timezone': None,
'user_language': None
'user_language': None,
'prompts_type': 'text'
}
)
......@@ -362,7 +378,8 @@ class SubmissionRenderTest(XBlockHandlerTestCase):
'submission_due': dt.datetime(2999, 5, 6).replace(tzinfo=pytz.utc),
'allow_latex': False,
'user_timezone': None,
'user_language': None
'user_language': None,
'prompts_type': 'text'
}
)
......@@ -389,7 +406,8 @@ class SubmissionRenderTest(XBlockHandlerTestCase):
'submission_due': dt.datetime(2999, 5, 6).replace(tzinfo=pytz.utc),
'allow_latex': False,
'user_timezone': None,
'user_language': None
'user_language': None,
'prompts_type': 'text'
}
)
......@@ -411,7 +429,8 @@ class SubmissionRenderTest(XBlockHandlerTestCase):
'self_incomplete': True,
'allow_latex': False,
'user_timezone': None,
'user_language': None
'user_language': None,
'prompts_type': 'text'
}
)
......@@ -446,7 +465,8 @@ class SubmissionRenderTest(XBlockHandlerTestCase):
'cancelled_by': mock_staff
},
'user_timezone': None,
'user_language': None
'user_language': None,
'prompts_type': 'text'
}
)
......@@ -475,7 +495,8 @@ class SubmissionRenderTest(XBlockHandlerTestCase):
'self_incomplete': True,
'allow_latex': False,
'user_timezone': None,
'user_language': None
'user_language': None,
'prompts_type': 'text'
}
)
......@@ -490,7 +511,8 @@ class SubmissionRenderTest(XBlockHandlerTestCase):
'submission_due': dt.datetime(2014, 4, 5).replace(tzinfo=pytz.utc),
'allow_latex': False,
'user_timezone': None,
'user_language': None
'user_language': None,
'prompts_type': 'text'
}
)
......@@ -512,7 +534,8 @@ class SubmissionRenderTest(XBlockHandlerTestCase):
'self_incomplete': True,
'allow_latex': False,
'user_timezone': None,
'user_language': None
'user_language': None,
'prompts_type': 'text'
}
)
......@@ -540,7 +563,8 @@ class SubmissionRenderTest(XBlockHandlerTestCase):
'file_upload_type': None,
'allow_latex': False,
'user_timezone': None,
'user_language': None
'user_language': None,
'prompts_type': 'text'
}
)
......@@ -568,7 +592,8 @@ class SubmissionRenderTest(XBlockHandlerTestCase):
'file_upload_type': None,
'allow_latex': False,
'user_timezone': None,
'user_language': None
'user_language': None,
'prompts_type': 'text'
}
)
......
......@@ -119,6 +119,7 @@ class TestSerializeContent(TestCase):
self.oa_block.file_upload_response = data.get('file_upload_response', None)
self.oa_block.prompt = data.get('prompt')
self.oa_block.prompts = create_prompts_list(data.get('prompt'))
self.oa_block.prompts_type = data.get('prompts_type', 'text')
self.oa_block.rubric_feedback_prompt = data.get('rubric_feedback_prompt')
self.oa_block.rubric_feedback_default_text = data.get('rubric_feedback_default_text')
self.oa_block.start = _parse_date(data.get('start'))
......
......@@ -738,6 +738,8 @@ def serialize_content_to_xml(oa_block, root):
prompts_root = etree.SubElement(root, 'prompts')
_serialize_prompts(prompts_root, oa_block.prompts)
root.set('prompts_type', unicode(oa_block.prompts_type))
# Rubric
rubric_root = etree.SubElement(root, 'rubric')
serialize_rubric(rubric_root, oa_block)
......@@ -893,6 +895,10 @@ def parse_from_xml(root):
# Retrieve the prompts
prompts = _parse_prompts_xml(root)
prompts_type = 'text'
if 'prompts_type' in root.attrib:
prompts_type = unicode(root.attrib['prompts_type'])
# Retrieve the leaderboard if it exists, otherwise set it to 0
leaderboard_show = 0
if 'leaderboard_show' in root.attrib:
......@@ -911,6 +917,7 @@ def parse_from_xml(root):
return {
'title': title,
'prompts': prompts,
'prompts_type': prompts_type,
'rubric_criteria': rubric['criteria'],
'rubric_assessments': assessments,
'rubric_feedback_prompt': rubric['feedbackprompt'],
......
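The parsing side stays backward compatible: XML without the attribute keeps rendering as plain text. A standalone sketch of the attribute handling (standard library only):

    import xml.etree.ElementTree as etree

    new_block = etree.fromstring('<openassessment prompts_type="html"/>')
    old_block = etree.fromstring('<openassessment/>')

    # Mirrors parse_from_xml: fall back to 'text' when the attribute is absent.
    print(new_block.attrib.get('prompts_type', 'text'))  # html
    print(old_block.attrib.get('prompts_type', 'text'))  # text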