Commit 56f21177 by Dmitry Viskov

WYSIWYG editor for prompts (in studio) - backward compatibility added

parent 0aaa92f2
...@@ -8,7 +8,11 @@ ...@@ -8,7 +8,11 @@
</div> </div>
<article class="submission__answer__part__prompt"> <article class="submission__answer__part__prompt">
<div class="submission__answer__part__prompt__value"> <div class="submission__answer__part__prompt__value">
{% autoescape off %}{{ part.prompt.description }}{% endautoescape %} {% if prompts_type == 'html' %}
{% autoescape off %}{{ part.prompt.description }}{% endautoescape %}
{% else %}
{{ part.prompt.description|linebreaks }}
{% endif %}
</div> </div>
</article> </article>
{% if part.text %} {% if part.text %}
......
...@@ -74,7 +74,11 @@ ...@@ -74,7 +74,11 @@
<h5 class="submission__answer__part__text__title">{% trans "The prompt for this section" %}</h5> <h5 class="submission__answer__part__text__title">{% trans "The prompt for this section" %}</h5>
<article class="submission__answer__part__prompt"> <article class="submission__answer__part__prompt">
<div class="submission__answer__part__prompt__copy"> <div class="submission__answer__part__prompt__copy">
{% autoescape off %}{{ part.prompt.description }}{% endautoescape %} {% if prompts_type == 'html' %}
{% autoescape off %}{{ part.prompt.description }}{% endautoescape %}
{% else %}
{{ part.prompt.description|linebreaks }}
{% endif %}
</div> </div>
</article> </article>
......
...@@ -2,14 +2,14 @@ ...@@ -2,14 +2,14 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
# pylint: disable=line-too-long # pylint: disable=line-too-long
DEFAULT_PROMPT = """<p> DEFAULT_PROMPT = """
Censorship in the Libraries<br /> Censorship in the Libraries
<br />
'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author<br /> 'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author
<br />
Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.<br /> Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.
<br />
Read for conciseness, clarity of thought, and form.</p> Read for conciseness, clarity of thought, and form.
""" """
DEFAULT_RUBRIC_CRITERIA = [ DEFAULT_RUBRIC_CRITERIA = [
......
...@@ -176,6 +176,12 @@ class OpenAssessmentBlock(MessageMixin, ...@@ -176,6 +176,12 @@ class OpenAssessmentBlock(MessageMixin,
help="The prompts to display to a student." help="The prompts to display to a student."
) )
prompts_type = String(
default='text',
scope=Scope.content,
help="The type of prompt (available options: 'text', 'html')."
)
rubric_criteria = List( rubric_criteria = List(
default=DEFAULT_RUBRIC_CRITERIA, default=DEFAULT_RUBRIC_CRITERIA,
scope=Scope.content, scope=Scope.content,
...@@ -407,6 +413,7 @@ class OpenAssessmentBlock(MessageMixin, ...@@ -407,6 +413,7 @@ class OpenAssessmentBlock(MessageMixin,
context_dict = { context_dict = {
"title": self.title, "title": self.title,
"prompts": self.prompts, "prompts": self.prompts,
"prompts_type": self.prompts_type,
"rubric_assessments": ui_models, "rubric_assessments": ui_models,
"show_staff_area": self.is_course_staff and not self.in_studio_preview, "show_staff_area": self.is_course_staff and not self.in_studio_preview,
} }
...@@ -697,6 +704,7 @@ class OpenAssessmentBlock(MessageMixin, ...@@ -697,6 +704,7 @@ class OpenAssessmentBlock(MessageMixin,
block.submission_due = config['submission_due'] block.submission_due = config['submission_due']
block.title = config['title'] block.title = config['title']
block.prompts = config['prompts'] block.prompts = config['prompts']
block.prompts_type = config['prompts_type']
block.text_response = config['text_response'] block.text_response = config['text_response']
block.file_upload_response = config['file_upload_response'] block.file_upload_response = config['file_upload_response']
block.allow_file_upload = config['allow_file_upload'] block.allow_file_upload = config['allow_file_upload']
......
...@@ -56,6 +56,12 @@ def datetime_validator(value): ...@@ -56,6 +56,12 @@ def datetime_validator(value):
raise Invalid(u"Could not parse datetime from value \"{val}\"".format(val=value)) raise Invalid(u"Could not parse datetime from value \"{val}\"".format(val=value))
PROMPTS_TYPES = [
u'text',
u'html',
]
NECESSITY_OPTIONS = [ NECESSITY_OPTIONS = [
u'required', u'required',
u'optional', u'optional',
...@@ -83,6 +89,7 @@ EDITOR_UPDATE_SCHEMA = Schema({ ...@@ -83,6 +89,7 @@ EDITOR_UPDATE_SCHEMA = Schema({
Required('description'): utf8_validator, Required('description'): utf8_validator,
}) })
], ],
Required('prompts_type', default='text'): Any(All(utf8_validator, In(PROMPTS_TYPES)), None),
Required('title'): utf8_validator, Required('title'): utf8_validator,
Required('feedback_prompt'): utf8_validator, Required('feedback_prompt'): utf8_validator,
Required('feedback_default_text'): utf8_validator, Required('feedback_default_text'): utf8_validator,
......
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
...@@ -434,6 +434,7 @@ if (typeof OpenAssessment.Server === "undefined" || !OpenAssessment.Server) { ...@@ -434,6 +434,7 @@ if (typeof OpenAssessment.Server === "undefined" || !OpenAssessment.Server) {
var url = this.url('update_editor_context'); var url = this.url('update_editor_context');
var payload = JSON.stringify({ var payload = JSON.stringify({
prompts: options.prompts, prompts: options.prompts,
prompts_type: options.prompts_type,
feedback_prompt: options.feedbackPrompt, feedback_prompt: options.feedbackPrompt,
feedback_default_text: options.feedback_default_text, feedback_default_text: options.feedback_default_text,
title: options.title, title: options.title,
......
...@@ -195,6 +195,7 @@ OpenAssessment.StudioView.prototype = { ...@@ -195,6 +195,7 @@ OpenAssessment.StudioView.prototype = {
this.server.updateEditorContext({ this.server.updateEditorContext({
prompts: view.promptsView.promptsDefinition(), prompts: view.promptsView.promptsDefinition(),
prompts_type: view.promptsView.promptsType(),
feedbackPrompt: view.rubricView.feedbackPrompt(), feedbackPrompt: view.rubricView.feedbackPrompt(),
feedback_default_text: view.rubricView.feedback_default_text(), feedback_default_text: view.rubricView.feedback_default_text(),
criteria: view.rubricView.criteriaDefinition(), criteria: view.rubricView.criteriaDefinition(),
......
...@@ -52,6 +52,11 @@ OpenAssessment.EditPromptsView.prototype = { ...@@ -52,6 +52,11 @@ OpenAssessment.EditPromptsView.prototype = {
return prompts; return prompts;
}, },
promptsType: function() {
var firstPrompt = this.promptsContainer.getItem(0);
return (firstPrompt && firstPrompt.tinyMCEEnabled) ? 'html': 'text';
},
/** /**
Add a new prompt. Add a new prompt.
Uses a client-side template to create the new prompt. Uses a client-side template to create the new prompt.
......
...@@ -132,6 +132,7 @@ class StudioMixin(object): ...@@ -132,6 +132,7 @@ class StudioMixin(object):
return { return {
'prompts': self.prompts, 'prompts': self.prompts,
'prompts_type': self.prompts_type,
'title': self.title, 'title': self.title,
'submission_due': submission_due, 'submission_due': submission_due,
'submission_start': submission_start, 'submission_start': submission_start,
...@@ -228,6 +229,7 @@ class StudioMixin(object): ...@@ -228,6 +229,7 @@ class StudioMixin(object):
self.title = data['title'] self.title = data['title']
self.display_name = data['title'] self.display_name = data['title']
self.prompts = data['prompts'] self.prompts = data['prompts']
self.prompts_type = data['prompts_type'] self.prompts_type = data['prompts_type']
self.rubric_criteria = data['criteria'] self.rubric_criteria = data['criteria']
self.rubric_assessments = data['assessments'] self.rubric_assessments = data['assessments']
self.editor_assessments_order = data['editor_assessments_order'] self.editor_assessments_order = data['editor_assessments_order']
......
...@@ -733,6 +733,8 @@ def serialize_content_to_xml(oa_block, root): ...@@ -733,6 +733,8 @@ def serialize_content_to_xml(oa_block, root):
prompts_root = etree.SubElement(root, 'prompts') prompts_root = etree.SubElement(root, 'prompts')
_serialize_prompts(prompts_root, oa_block.prompts) _serialize_prompts(prompts_root, oa_block.prompts)
root.set('prompts_type', unicode(oa_block.prompts_type))
# Rubric # Rubric
rubric_root = etree.SubElement(root, 'rubric') rubric_root = etree.SubElement(root, 'rubric')
serialize_rubric(rubric_root, oa_block) serialize_rubric(rubric_root, oa_block)
...@@ -884,6 +886,10 @@ def parse_from_xml(root): ...@@ -884,6 +886,10 @@ def parse_from_xml(root):
# Retrieve the prompts # Retrieve the prompts
prompts = _parse_prompts_xml(root) prompts = _parse_prompts_xml(root)
prompts_type = 'text'
if 'prompts_type' in root.attrib:
prompts_type = unicode(root.attrib['prompts_type'])
# Retrieve the leaderboard if it exists, otherwise set it to 0 # Retrieve the leaderboard if it exists, otherwise set it to 0
leaderboard_show = 0 leaderboard_show = 0
if 'leaderboard_show' in root.attrib: if 'leaderboard_show' in root.attrib:
...@@ -902,6 +908,7 @@ def parse_from_xml(root): ...@@ -902,6 +908,7 @@ def parse_from_xml(root):
return { return {
'title': title, 'title': title,
'prompts': prompts, 'prompts': prompts,
'prompts_type': prompts_type,
'rubric_criteria': rubric['criteria'], 'rubric_criteria': rubric['criteria'],
'rubric_assessments': assessments, 'rubric_assessments': assessments,
'rubric_feedback_prompt': rubric['feedbackprompt'], 'rubric_feedback_prompt': rubric['feedbackprompt'],
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment