Commit 30f052e0 by Stephen Sanchez

Merge pull request #409 from edx/sanchez/xml-validation-refactoring

Split XML serialization and validation contracts.
parents 041e5e43 a83b25cf
......@@ -22,7 +22,7 @@ from openassessment.xblock.lms_mixin import LmsCompatibilityMixin
from openassessment.xblock.self_assessment_mixin import SelfAssessmentMixin
from openassessment.xblock.submission_mixin import SubmissionMixin
from openassessment.xblock.studio_mixin import StudioMixin
from openassessment.xblock.xml import update_from_xml, serialize_content_to_xml
from openassessment.xblock.xml import parse_from_xml, serialize_content_to_xml
from openassessment.xblock.staff_info_mixin import StaffInfoMixin
from openassessment.xblock.workflow_mixin import WorkflowMixin
from openassessment.workflow import api as workflow_api
......@@ -324,8 +324,59 @@ class OpenAssessmentBlock(
"""
block = runtime.construct_xblock_from_class(cls, keys)
config = parse_from_xml(node)
rubric = {
"prompt": config["prompt"],
"feedbackprompt": config["rubric_feedback_prompt"],
"criteria": config["rubric_criteria"],
}
xblock_validator = validator(block, strict_post_release=False)
xblock_validator(
rubric,
{'due': config['submission_due']},
config['rubric_assessments']
)
block.update(
config['rubric_criteria'],
config['rubric_feedback_prompt'],
config['rubric_assessments'],
config['submission_due'],
config['submission_start'],
config['title'],
config['prompt']
)
return block
def update(self, criteria, feedback_prompt, assessments, submission_due,
           submission_start, title, prompt):
    """
    Update the XBlock configuration fields in place.

    This assumes the inputs have already been parsed and validated
    upstream (e.g. via `parse_from_xml` plus the XBlock validator);
    no validation is performed here.

    Args:
        criteria (list): A list of rubric criteria for this XBlock.
        feedback_prompt (str): The rubric-level feedback prompt.
        assessments (list): A list of assessment module configurations for
            this XBlock.
        submission_due (str): ISO formatted submission due date.
        submission_start (str): ISO formatted submission start date.
        title (str): The title of this XBlock.
        prompt (str): The prompt for this XBlock.

    Returns:
        None
    """
    # If we've gotten this far, then we've successfully parsed the XML
    # and validated the contents.  At long last, we can safely update the XBlock.
    self.title = title
    self.prompt = prompt
    self.rubric_criteria = criteria
    self.rubric_assessments = assessments
    self.rubric_feedback_prompt = feedback_prompt
    self.submission_start = submission_start
    self.submission_due = submission_due
@property
def valid_assessments(self):
......
......@@ -8,7 +8,7 @@ from django.template.loader import get_template
from django.utils.translation import ugettext as _
from xblock.core import XBlock
from xblock.fragment import Fragment
from openassessment.xblock.xml import serialize_content, update_from_xml_str, ValidationError, UpdateFromXmlError
from openassessment.xblock import xml
from openassessment.xblock.validation import validator
......@@ -37,13 +37,17 @@ class StudioMixin(object):
return frag
@XBlock.json_handler
def update_editor_context(self, data, suffix=''):
    """
    Update the XBlock's configuration.

    Args:
        data (dict): Data from the request; should have a value for the keys
            'rubric', 'settings' and 'prompt'. The 'rubric' should be an XML
            representation of the new rubric. The 'prompt' should be a plain
            text prompt. The 'settings' should be a dict of 'title',
            'submission_due', 'submission_start' and the XML configuration for
            all 'assessments'.

    Kwargs:
        suffix (str): Not used

    Returns:
        dict with keys 'success' (bool) and 'msg' (str)
    """
    # Reject the request up front if any of the required keys is absent.
    missing_keys = list({'rubric', 'settings', 'prompt'} - set(data.keys()))
    if missing_keys:
        # `warning` is the non-deprecated spelling of `logger.warn`.
        logger.warning(
            'Must specify the following keys in request JSON dict: {}'.format(missing_keys)
        )
        return {'success': False, 'msg': _('Error updating XBlock configuration')}

    settings = data['settings']

    try:
        rubric = xml.parse_rubric_xml_str(data['rubric'])
        assessments = xml.parse_assessments_xml_str(settings['assessments'])
        submission_due = settings["submission_due"]
    except xml.UpdateFromXmlError as ex:
        return {'success': False, 'msg': _('An error occurred while saving: {error}').format(error=ex)}
    # NOTE(review): a missing 'submission_due'/'assessments' key raises
    # KeyError, which is not caught here — confirm callers always send them.

    # Run the parsed configuration through the XBlock validator before
    # committing anything.
    xblock_validator = validator(self)
    success, msg = xblock_validator(rubric, {'due': submission_due}, assessments)
    if not success:
        return {'success': False, 'msg': _('Validation error: {error}').format(error=msg)}

    self.update(
        rubric['criteria'],
        rubric['feedbackprompt'],
        assessments,
        settings["submission_due"],
        settings["submission_start"],
        settings["title"],
        data["prompt"]
    )
    # Wrap in ugettext for consistency with every other message this
    # handler returns.
    return {'success': True, 'msg': _('Successfully updated OpenAssessment XBlock')}
@XBlock.json_handler
def editor_context(self, data, suffix=''):
    """
    Retrieve the XBlock's content definition, serialized as a JSON object
    containing all the configuration as it will be displayed for studio
    editing.

    Args:
        data (dict): Not used

    Kwargs:
        suffix (str): Not used

    Returns:
        dict with keys 'success' (bool), 'msg' (unicode),
        'rubric' (unicode), 'prompt' (unicode), and 'settings' (dict)
    """
    try:
        assessments = xml.serialize_assessments_to_xml_str(self)
        rubric = xml.serialize_rubric_to_xml_str(self)
    # We do not expect serialization to raise an exception,
    # but if it does, handle it gracefully.
    except Exception as ex:
        msg = _('An unexpected error occurred while loading the problem: {error}').format(error=ex)
        logger.error(msg)
        # NOTE(review): the 'xml' key looks like a leftover from the old
        # XML-based editor payload; confirm the JS client still reads it
        # before removing.
        return {'success': False, 'msg': msg, 'xml': u''}

    # Populates the context for the assessments section of the editing
    # panel. This will adjust according to the fields laid out in this
    # section.
    settings = {
        'submission_due': self.submission_due,
        'submission_start': self.submission_start,
        'title': self.title,
        'assessments': assessments
    }

    return {
        'success': True,
        'msg': '',
        'rubric': rubric,
        'prompt': self.prompt,
        'settings': settings
    }
@XBlock.json_handler
def check_released(self, data, suffix=''):
......
<openassessment>
<title>Foo</title>
<assessments>
<!-- assessment name not supported -->
<assessment name="unsupported-assessment" />
<assessment name="self-assessment" />
</assessments>
<rubric>
<prompt>Test prompt</prompt>
<criterion>
<name>Test criterion</name>
<prompt>Test criterion prompt</prompt>
<option points="0"><name>No</name><explanation>No explanation</explanation></option>
<option points="2"><name>Yes</name><explanation>Yes explanation</explanation></option>
</criterion>
</rubric>
</openassessment>
<openassessment>
<title>Open Assessment Test</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
</prompt>
<rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion>
<name>Concise</name>
<prompt>How concise is it?</prompt>
<option points="0">
<name>Neal Stephenson (late)</name>
<explanation>Neal Stephenson explanation</explanation>
</option>
<option points="1">
<name>HP Lovecraft</name>
<explanation>HP Lovecraft explanation</explanation>
</option>
<option points="3">
<name>Robert Heinlein</name>
<explanation>Robert Heinlein explanation</explanation>
</option>
<option points="4">
<name>Neal Stephenson (early)</name>
<explanation>Neal Stephenson (early) explanation</explanation>
</option>
<option points="5">
<name>Earnest Hemingway</name>
<explanation>Earnest Hemingway</explanation>
</option>
</criterion>
<criterion>
<name>Clear-headed</name>
<prompt>How clear is the thinking?</prompt>
<option points="0">
<name>Yogi Berra</name>
<explanation>Yogi Berra explanation</explanation>
</option>
<option points="1">
<name>Hunter S. Thompson</name>
<explanation>Hunter S. Thompson explanation</explanation>
</option>
<option points="2">
<name>Robert Heinlein</name>
<explanation>Robert Heinlein explanation</explanation>
</option>
<option points="3">
<name>Isaac Asimov</name>
<explanation>Isaac Asimov explanation</explanation>
</option>
<option points="10">
<name>Spock</name>
<explanation>Spock explanation</explanation>
</option>
</criterion>
<criterion>
<name>Form</name>
<prompt>Lastly, how is its form? Punctuation, grammar, and spelling all count.</prompt>
<option points="0">
<name>lolcats</name>
<explanation>lolcats explanation</explanation>
</option>
<option points="1">
<name>Facebook</name>
<explanation>Facebook explanation</explanation>
</option>
<option points="2">
<name>Reddit</name>
<explanation>Reddit explanation</explanation>
</option>
<option points="3">
<name>metafilter</name>
<explanation>metafilter explanation</explanation>
</option>
<option points="4">
<name>Usenet, 1996</name>
<explanation>Usenet, 1996 explanation</explanation>
</option>
<option points="5">
<name>The Elements of Style</name>
<explanation>The Elements of Style explanation</explanation>
</option>
</criterion>
</rubric>
<assessments>
<!-- start date is after due date -->
<assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" />
<assessment name="self-assessment" start="2010-01-01" due="2003-01-01"/>
</assessments>
</openassessment>
{
"simple": {
"rubric": [
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<!-- no options -->",
"</criterion>",
"</rubric>"
],
"prompt": "My new prompt.",
"settings": {
"title": "My new title.",
"assessments": [
"<assessments>",
"<assessment name=\"peer-assessment\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" />",
"</assessments>"
],
"submission_due": "2014-02-27T09:46:28",
"submission_start": "2014-02-10T09:46:28"
},
"expected-assessment": "peer-assessment",
"expected-criterion-prompt": "Test criterion prompt"
}
}
\ No newline at end of file
<openassessment>
<title>Foo</title>
<assessments>
<assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" />
<assessment name="self-assessment" />
</assessments>
<rubric>
<prompt>Test prompt</prompt>
<criterion>
<name>Test criterion</name>
<prompt>Test criterion prompt</prompt>
<!-- no options -->
</criterion>
</rubric>
</openassessment>
{
"no_rubric": {
"prompt": "My new prompt.",
"settings": {
"title": "My new title.",
"assessments": [
"<assessments>",
"<assessment name=\"peer-assessment\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" />",
"</assessments>"
],
"submission_due": "2014-02-27T09:46:28",
"submission_start": "2014-02-10T09:46:28"
},
"expected_error": "error"
},
"no_prompt": {
"rubric": [
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>"
],
"settings": {
"title": "My new title.",
"assessments": [
"<assessments>",
"<assessment name=\"peer-assessment\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" />",
"</assessments>"
],
"submission_due": "2014-02-27T09:46:28",
"submission_start": "2014-02-10T09:46:28"
},
"expected_error": "error"
},
"no_settings": {
"rubric": [
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>"
],
"prompt": "My new prompt.",
"expected_error": "error"
},
"invalid_dates": {
"rubric": [
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>"
],
"prompt": "My new prompt.",
"settings": {
"title": "My new title.",
"assessments": [
"<assessments>",
"<assessment name=\"peer-assessment\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" start=\"2010-01-01\" due=\"2003-01-01\"/>",
"</assessments>"
],
"submission_due": "2012-02-27T09:46:28",
"submission_start": "2015-02-10T09:46:28"
},
"expected_error": "cannot be later"
}
}
\ No newline at end of file
{
"simple": {
"xml": [
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" start=\"2014-04-01T00:00:00\" due=\"2014-06-01T00:00:00\" />",
"</assessments>"
],
"assessments": [
{
"name": "peer-assessment",
"start": "2014-02-27T09:46:28",
"due": "2014-03-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment",
"start": "2014-04-01T00:00:00",
"due": "2014-06-01T00:00:00"
}
]
},
"multiple_criteria": {
"xml": [
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>"
],
"assessments": [
{
"name": "peer-assessment",
"start": "2014-02-27T09:46:28",
"due": "2014-03-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
}
]
},
"no_dates_specified": {
"xml": [
"<assessments>",
"<assessment name=\"peer-assessment\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>"
],
"assessments": [
{
"name": "peer-assessment",
"start": null,
"due": null,
"must_grade": 5,
"must_be_graded_by": 3
}
]
},
"student_training_no_examples": {
"xml": [
"<assessments>",
"<assessment name=\"student-training\" start=\"2014-04-01T00:00:00\" due=\"2014-06-01T00:00:00\" />",
"</assessments>"
],
"assessments": [
{
"name": "student-training",
"start": "2014-04-01T00:00:00",
"due": "2014-06-01T00:00:00",
"examples": []
}
]
},
"student_training_one_example": {
"xml": [
"<assessments>",
"<assessment name=\"student-training\" start=\"2014-04-01T00:00:00\" due=\"2014-06-01T00:00:00\">",
"<example>",
"<answer>ẗëṡẗ äṅṡẅëṛ</answer>",
"<select criterion=\"Test criterion\" option=\"Yes\" />",
"</example>",
"</assessment>",
"</assessments>"
],
"assessments": [
{
"name": "student-training",
"start": "2014-04-01T00:00:00",
"due": "2014-06-01T00:00:00",
"examples": [
{
"answer": "ẗëṡẗ äṅṡẅëṛ",
"options_selected": [
{
"criterion": "Test criterion",
"option": "Yes"
}
]
}
]
}
]
},
"student_training_multiple_examples": {
"xml": [
"<assessments>",
"<assessment name=\"student-training\" start=\"2014-04-01T00:00:00\" due=\"2014-06-01T00:00:00\">",
"<example>",
"<answer>ẗëṡẗ äṅṡẅëṛ</answer>",
"<select criterion=\"Test criterion\" option=\"Yes\" />",
"<select criterion=\"Another test criterion\" option=\"No\" />",
"</example>",
"<example>",
"<answer>äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ</answer>",
"<select criterion=\"Another test criterion\" option=\"Yes\" />",
"<select criterion=\"Test criterion\" option=\"No\" />",
"</example>",
"</assessment>",
"</assessments>"
],
"assessments": [
{
"name": "student-training",
"start": "2014-04-01T00:00:00",
"due": "2014-06-01T00:00:00",
"examples": [
{
"answer": "ẗëṡẗ äṅṡẅëṛ",
"options_selected": [
{
"criterion": "Test criterion",
"option": "Yes"
},
{
"criterion": "Another test criterion",
"option": "No"
}
]
},
{
"answer": "äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ",
"options_selected": [
{
"criterion": "Another test criterion",
"option": "Yes"
},
{
"criterion": "Test criterion",
"option": "No"
}
]
}
]
}
]
}
}
{
"student_training_one_example": {
"xml": [
"<example>",
"<answer>ẗëṡẗ äṅṡẅëṛ</answer>",
"<select criterion=\"Test criterion\" option=\"Yes\" />",
"</example>"
],
"examples": [
{
"answer": "ẗëṡẗ äṅṡẅëṛ",
"options_selected": [
{
"criterion": "Test criterion",
"option": "Yes"
}
]
}
]
},
"student_training_multiple_examples": {
"xml": [
"<example>",
"<answer>ẗëṡẗ äṅṡẅëṛ</answer>",
"<select criterion=\"Test criterion\" option=\"Yes\" />",
"<select criterion=\"Another test criterion\" option=\"No\" />",
"</example>",
"<example>",
"<answer>äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ</answer>",
"<select criterion=\"Another test criterion\" option=\"Yes\" />",
"<select criterion=\"Test criterion\" option=\"No\" />",
"</example>"
],
"examples": [
{
"answer": "ẗëṡẗ äṅṡẅëṛ",
"options_selected": [
{
"criterion": "Test criterion",
"option": "Yes"
},
{
"criterion": "Another test criterion",
"option": "No"
}
]
},
{
"answer": "äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ",
"options_selected": [
{
"criterion": "Another test criterion",
"option": "Yes"
},
{
"criterion": "Test criterion",
"option": "No"
}
]
}
]
}
}
{
"simple": {
"xml": [
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>"
],
"prompt": "Test prompt",
"feedbackprompt": null,
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
}
]
},
"feedback_prompt": {
"xml": [
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"<feedbackprompt>This is the feedback prompt</feedbackprompt>",
"</rubric>"
],
"prompt": "Test prompt",
"feedbackprompt": "This is the feedback prompt",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
}
]
},
"promptless": {
"xml": [
"<rubric>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>"
],
"prompt": null,
"feedbackprompt": null,
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
}
]
},
"empty_prompt": {
"xml": [
"<rubric>",
"<prompt></prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>"
],
"prompt": "",
"feedbackprompt": null,
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
}
]
},
"unicode": {
"xml": [
"<rubric>",
"<prompt>ՇєรՇ קг๏๓קՇ</prompt>",
"<criterion>",
"<name>𝓣𝓮𝓼𝓽 𝓬𝓻𝓲𝓽𝓮𝓻𝓲𝓸𝓷</name>",
"<prompt>Ŧɇsŧ ȼɍɨŧɇɍɨøn ꝑɍømꝑŧ</prompt>",
"<option points=\"0\"><name>𝕹𝖔</name><explanation>𝕹𝖔 𝖊𝖝𝖕𝖑𝖆𝖓𝖆𝖙𝖎𝖔𝖓</explanation></option>",
"<option points=\"2\"><name>ﻉร</name><explanation>ﻉร ﻉซρɭคกคՇٱѻก</explanation></option>",
"</criterion>",
"</rubric>"
],
"prompt": "ՇєรՇ קг๏๓קՇ",
"feedbackprompt": null,
"criteria": [
{
"order_num": 0,
"name": "𝓣𝓮𝓼𝓽 𝓬𝓻𝓲𝓽𝓮𝓻𝓲𝓸𝓷",
"prompt": "Ŧɇsŧ ȼɍɨŧɇɍɨøn ꝑɍømꝑŧ",
"feedback": "disabled",
"options": [
{
"order_num": 0,
"points": 0,
"name": "𝕹𝖔",
"explanation": "𝕹𝖔 𝖊𝖝𝖕𝖑𝖆𝖓𝖆𝖙𝖎𝖔𝖓"
},
{
"order_num": 1,
"points": 2,
"name": "ﻉร",
"explanation": "ﻉร ﻉซρɭคกคՇٱѻก"
}
]
}
]
},
"multiple_criteria": {
"xml": [
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"<criterion>",
"<name>Second criterion</name>",
"<prompt>Second criterion prompt</prompt>",
"<option points=\"1\"><name>Maybe</name><explanation>Maybe explanation</explanation></option>",
"</criterion>",
"</rubric>"
],
"prompt": "Test prompt",
"feedbackprompt": null,
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
},
{
"order_num": 1,
"name": "Second criterion",
"prompt": "Second criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
"points": 1,
"name": "Maybe",
"explanation": "Maybe explanation"
}
]
}
]
},
"criterion_feedback_optional": {
"xml": [
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"<criterion feedback=\"optional\">",
"<name>Second criterion</name>",
"<prompt>Second criterion prompt</prompt>",
"<option points=\"1\"><name>Maybe</name><explanation>Maybe explanation</explanation></option>",
"</criterion>",
"</rubric>"
],
"prompt": "Test prompt",
"feedbackprompt": null,
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
},
{
"order_num": 1,
"name": "Second criterion",
"prompt": "Second criterion prompt",
"feedback": "optional",
"options": [
{
"order_num": 0,
"points": 1,
"name": "Maybe",
"explanation": "Maybe explanation"
}
]
}
]
}
}
{
"simple": {
"rubric": [
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>"
],
"prompt": "My new prompt.",
"settings": {
"title": "My new title.",
"assessments": [
"<assessments>",
"<assessment name=\"peer-assessment\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" />",
"</assessments>"
],
"submission_due": "4014-02-27T09:46:28",
"submission_start": "4014-02-10T09:46:28"
},
"expected-assessment": "peer-assessment",
"expected-criterion-prompt": "Test criterion prompt"
}
}
\ No newline at end of file
......@@ -7,7 +7,8 @@ import datetime as dt
import lxml.etree as etree
import mock
import pytz
from ddt import ddt, data
from ddt import ddt, data, file_data
from openassessment.xblock.xml import UpdateFromXmlError
from .base import scenario, XBlockHandlerTestCase
......@@ -23,92 +24,92 @@ class StudioViewTest(XBlockHandlerTestCase):
self.assertTrue(frag.body_html().find('openassessment-edit'))
@scenario('data/basic_scenario.xml')
def test_get_editor_context(self, xblock):
    """The editor context handler returns parseable rubric/assessments XML."""
    resp = self.request(xblock, 'editor_context', '""', response_format='json')
    self.assertTrue(resp['success'])
    self.assertEqual(resp['msg'], u'')

    # Verify that the rubric XML is parse-able and the root is <rubric>
    rubric = etree.fromstring(resp['rubric'])
    self.assertEqual(rubric.tag, 'rubric')

    # Verify that the assessments XML is parse-able and the root is <assessments>
    assessments = etree.fromstring(resp['settings']['assessments'])
    self.assertEqual(assessments.tag, 'assessments')
@mock.patch('openassessment.xblock.xml.serialize_rubric_to_xml_str')
@scenario('data/basic_scenario.xml')
def test_get_editor_context_error(self, xblock, mock_rubric_serializer):
    """Serialization failures surface as a graceful error response."""
    # Simulate an unexpected error while serializing the XBlock
    mock_rubric_serializer.side_effect = UpdateFromXmlError('Test error!')

    # Check that we get a failure message
    resp = self.request(xblock, 'editor_context', '""', response_format='json')
    self.assertFalse(resp['success'])
    self.assertIn(u'unexpected error', resp['msg'].lower())
@file_data('data/update_xblock.json')
@scenario('data/basic_scenario.xml')
def test_update_xblock(self, xblock, data):
    """A valid editor-context update request changes the XBlock fields."""
    # The fixture stores the XML as a list of lines; join into single strings.
    data['rubric'] = "".join(data['rubric'])
    data['settings']['assessments'] = "".join(data['settings']['assessments'])

    # Mark the block as unpublished so we are not restricted in what we can edit.
    xblock.published_date = None

    # Test that we can update the xblock with the expected configuration.
    request = json.dumps(data)

    # Verify the response is successful.
    # (Removed a leftover Python-2 debug `print` of resp['msg'] here.)
    resp = self.request(xblock, 'update_editor_context', request, response_format='json')
    self.assertTrue(resp['success'])
    self.assertIn('success', resp['msg'].lower())

    # Check that the XBlock fields were updated
    # We don't need to be exhaustive here, because we have other unit tests
    # that verify this extensively.
    self.assertEqual(xblock.title, data['settings']['title'])
    self.assertEqual(xblock.prompt, data['prompt'])
    self.assertEqual(xblock.rubric_assessments[0]['name'], data['expected-assessment'])
    self.assertEqual(xblock.rubric_criteria[0]['prompt'], data['expected-criterion-prompt'])
@file_data('data/update_xblock.json')
@scenario('data/basic_scenario.xml')
def test_update_context_post_release(self, xblock, data):
    """Updates that would change points are rejected once the block is open."""
    # The fixture stores the XML as a list of lines; join into single strings.
    data['rubric'] = "".join(data['rubric'])
    data['settings']['assessments'] = "".join(data['settings']['assessments'])

    # XBlock start date defaults to already open,
    # so we should get an error when trying to update anything that could
    # change the number of points.
    request = json.dumps(data)
    resp = self.request(xblock, 'update_editor_context', request, response_format='json')
    self.assertFalse(resp['success'])
@file_data('data/invalid_update_xblock.json')
@scenario('data/basic_scenario.xml')
def test_update_context_invalid_request_data(self, xblock, data):
    """Malformed or incomplete update requests are rejected with the expected error."""
    # Fixture cases may deliberately omit 'rubric' or 'settings' to exercise
    # missing-key handling, so only join the pieces that are present.
    if 'rubric' in data:
        data['rubric'] = "".join(data['rubric'])

    if 'settings' in data and 'assessments' in data['settings']:
        data['settings']['assessments'] = "".join(data['settings']['assessments'])

    # Unpublish so the failure we assert on comes from the invalid data,
    # not from post-release edit restrictions.
    xblock.published_date = None
    resp = self.request(xblock, 'update_editor_context', json.dumps(data), response_format='json')
    self.assertFalse(resp['success'])
    self.assertIn(data['expected_error'], resp['msg'].lower())
@data(('data/invalid_rubric.xml', 'rubric'), ('data/invalid_assessment.xml', 'assessment'))
@file_data('data/invalid_rubric.json')
@scenario('data/basic_scenario.xml')
def test_update_xml_invalid(self, xblock, data):
xml_path = data[0]
expected_msg = data[1]
def test_update_rubric_invalid(self, xblock, data):
# First, parse XML data into a single string.
data['rubric'] = "".join(data['rubric'])
data['settings']['assessments'] = "".join(data['settings']['assessments'])
request = json.dumps({'xml': self.load_fixture_str(xml_path)})
request = json.dumps(data)
# Store old XBlock fields for later verification
old_title = xblock.title
......@@ -117,9 +118,9 @@ class StudioViewTest(XBlockHandlerTestCase):
old_criteria = xblock.rubric_criteria
# Verify the response fails
resp = self.request(xblock, 'update_xml', request, response_format='json')
resp = self.request(xblock, 'update_editor_context', request, response_format='json')
self.assertFalse(resp['success'])
self.assertIn(expected_msg, resp['msg'].lower())
self.assertIn("not valid", resp['msg'].lower())
# Check that the XBlock fields were NOT updated
# We don't need to be exhaustive here, because we have other unit tests
......
......@@ -12,7 +12,10 @@ from django.test import TestCase
import ddt
from openassessment.xblock.openassessmentblock import OpenAssessmentBlock
from openassessment.xblock.xml import (
serialize_content, update_from_xml_str, ValidationError, UpdateFromXmlError
serialize_content, parse_from_xml_str, parse_rubric_xml_str,
parse_examples_xml_str, parse_assessments_xml_str,
serialize_rubric_to_xml_str, serialize_examples_to_xml_str,
serialize_assessments_to_xml_str, UpdateFromXmlError
)
......@@ -91,8 +94,8 @@ class TestSerializeContent(TestCase):
"""
self.oa_block = mock.MagicMock(OpenAssessmentBlock)
@ddt.file_data('data/serialize.json')
def test_serialize(self, data):
def _configure_xblock(self, data):
self.oa_block.title = data['title']
self.oa_block.prompt = data['prompt']
self.oa_block.rubric_feedback_prompt = data['rubric_feedback_prompt']
......@@ -102,6 +105,10 @@ class TestSerializeContent(TestCase):
self.oa_block.submission_due = data['submission_due']
self.oa_block.rubric_criteria = data['criteria']
self.oa_block.rubric_assessments = data['assessments']
@ddt.file_data('data/serialize.json')
def test_serialize(self, data):
self._configure_xblock(data)
xml = serialize_content(self.oa_block)
# Compare the XML with our expected output
......@@ -144,6 +151,23 @@ class TestSerializeContent(TestCase):
)
)
@ddt.file_data('data/serialize.json')
def test_serialize_rubric(self, data):
self._configure_xblock(data)
xml_str = serialize_rubric_to_xml_str(self.oa_block)
@ddt.file_data('data/serialize.json')
def test_serialize_examples(self, data):
self._configure_xblock(data)
for assessment in data['assessments']:
if 'student-training' == assessment['name']:
xml_str = serialize_examples_to_xml_str(assessment)
@ddt.file_data('data/serialize.json')
def test_serialize_assessments(self, data):
self._configure_xblock(data)
xml_str = serialize_assessments_to_xml_str(self.oa_block)
def test_mutated_criteria_dict(self):
self.oa_block.title = "Test title"
self.oa_block.rubric_assessments = self.BASIC_ASSESSMENTS
......@@ -300,6 +324,36 @@ class TestSerializeContent(TestCase):
mutated[key] = new_val
return mutated
@ddt.ddt
class TestParseRubricFromXml(TestCase):
@ddt.file_data("data/parse_rubric_xml.json")
def test_parse_rubric_from_xml(self, data):
rubric = parse_rubric_xml_str("".join(data['xml']))
self.assertEqual(rubric['prompt'], data['prompt'])
self.assertEqual(rubric['feedbackprompt'], data['feedbackprompt'])
self.assertEqual(rubric['criteria'], data['criteria'])
@ddt.ddt
class TestParseExamplesFromXml(TestCase):
@ddt.file_data("data/parse_examples_xml.json")
def test_parse_examples_from_xml(self, data):
examples = parse_examples_xml_str("".join(data['xml']))
self.assertEqual(examples, data['examples'])
@ddt.ddt
class TestParseAssessmentsFromXml(TestCase):
@ddt.file_data("data/parse_assessments_xml.json")
def test_parse_assessments_from_xml(self, data):
assessments = parse_assessments_xml_str("".join(data['xml']))
self.assertEqual(assessments, data['assessments'])
@ddt.ddt
class TestUpdateFromXml(TestCase):
......@@ -324,36 +378,20 @@ class TestUpdateFromXml(TestCase):
self.oa_block.submission_due = "2000-01-01T00:00:00"
@ddt.file_data('data/update_from_xml.json')
def test_update_from_xml(self, data):
def test_parse_from_xml(self, data):
# Update the block based on the fixture XML definition
returned_block = update_from_xml_str(self.oa_block, "".join(data['xml']))
# The block we passed in should be updated and returned
self.assertEqual(self.oa_block, returned_block)
config = parse_from_xml_str("".join(data['xml']))
# Check that the contents of the modified XBlock are correct
self.assertEqual(self.oa_block.title, data['title'])
self.assertEqual(self.oa_block.prompt, data['prompt'])
self.assertEqual(self.oa_block.start, _parse_date(data['start']))
self.assertEqual(self.oa_block.due, _parse_date(data['due']))
self.assertEqual(self.oa_block.submission_start, data['submission_start'])
self.assertEqual(self.oa_block.submission_due, data['submission_due'])
self.assertEqual(self.oa_block.rubric_criteria, data['criteria'])
self.assertEqual(self.oa_block.rubric_assessments, data['assessments'])
self.assertEqual(config['title'], data['title'])
self.assertEqual(config['prompt'], data['prompt'])
self.assertEqual(config['submission_start'], data['submission_start'])
self.assertEqual(config['submission_due'], data['submission_due'])
self.assertEqual(config['rubric_criteria'], data['criteria'])
self.assertEqual(config['rubric_assessments'], data['assessments'])
@ddt.file_data('data/update_from_xml_error.json')
def test_update_from_xml_error(self, data):
def test_parse_from_xml_error(self, data):
with self.assertRaises(UpdateFromXmlError):
update_from_xml_str(self.oa_block, "".join(data['xml']))
@ddt.file_data('data/update_from_xml.json')
def test_invalid(self, data):
# Plug in a rubric validator that always reports that the rubric dict is invalid.
# We need to back this up with an integration test that checks whether the XBlock
# provides an appropriate rubric validator.
with self.assertRaises(ValidationError):
update_from_xml_str(
self.oa_block, "".join(data['xml']),
validator=lambda *args: (False, '')
)
parse_from_xml_str("".join(data['xml']))
......@@ -124,7 +124,7 @@ def _serialize_criteria(criteria_root, criteria_list):
_serialize_options(criterion_el, options_list)
def _serialize_rubric(rubric_root, oa_block):
def serialize_rubric(rubric_root, oa_block):
"""
Serialize a rubric dictionary as XML, adding children to the XML
with root node `rubric_root`.
......@@ -156,7 +156,8 @@ def _serialize_rubric(rubric_root, oa_block):
feedback_prompt = etree.SubElement(rubric_root, 'feedbackprompt')
feedback_prompt.text = unicode(oa_block.rubric_feedback_prompt)
def _parse_date(date_str):
def parse_date(date_str):
"""
Attempt to parse a date string into ISO format (without milliseconds)
Returns `None` if this cannot be done.
......@@ -282,7 +283,7 @@ def _parse_criteria_xml(criteria_root):
return criteria_list
def _parse_rubric_xml(rubric_root):
def parse_rubric_xml(rubric_root):
"""
Parse <rubric> element in the OpenAssessment XBlock's content XML.
......@@ -320,7 +321,7 @@ def _parse_rubric_xml(rubric_root):
return rubric_dict
def _parse_examples_xml(examples):
def parse_examples_xml(examples):
"""
Parse <example> (training examples) from the XML.
......@@ -362,7 +363,7 @@ def _parse_examples_xml(examples):
return examples_list
def _parse_assessments_xml(assessments_root):
def parse_assessments_xml(assessments_root):
"""
Parse the <assessments> element in the OpenAssessment XBlock's content XML.
......@@ -390,7 +391,7 @@ def _parse_assessments_xml(assessments_root):
# Assessment start
if 'start' in assessment.attrib:
parsed_start = _parse_date(assessment.get('start'))
parsed_start = parse_date(assessment.get('start'))
if parsed_start is not None:
assessment_dict['start'] = parsed_start
else:
......@@ -400,7 +401,7 @@ def _parse_assessments_xml(assessments_root):
# Assessment due
if 'due' in assessment.attrib:
parsed_start = _parse_date(assessment.get('due'))
parsed_start = parse_date(assessment.get('due'))
if parsed_start is not None:
assessment_dict['due'] = parsed_start
else:
......@@ -431,7 +432,7 @@ def _parse_assessments_xml(assessments_root):
# Other assessment types ignore examples.
# Later, we can add AI assessment here.
if assessment_dict['name'] == 'student-training':
assessment_dict['examples'] = _parse_examples_xml(examples)
assessment_dict['examples'] = parse_examples_xml(examples)
# Update the list of assessments
assessments_list.append(assessment_dict)
......@@ -439,7 +440,7 @@ def _parse_assessments_xml(assessments_root):
return assessments_list
def _serialize_training_examples(examples, assessment_el):
def serialize_training_examples(examples, assessment_el):
"""
Serialize a training example to XML.
......@@ -466,34 +467,19 @@ def _serialize_training_examples(examples, assessment_el):
select_el.set('option', unicode(selected_dict.get('option', '')))
def serialize_content_to_xml(oa_block, root):
def serialize_assessments(assessments_root, oa_block):
"""
Serialize the OpenAssessment XBlock's content to XML.
Serialize the assessment modules for an OpenAssessment XBlock.
Args:
oa_block (OpenAssessmentBlock): The open assessment block to serialize.
root (etree.Element): The XML root node to update.
assessments_root (lxml.etree.Element): The <assessments> XML element.
oa_block (OpenAssessmentXBlock): The XBlock with configuration to
serialize.
Returns:
etree.Element
None
"""
root.tag = 'openassessment'
# Set the submission start date
if oa_block.submission_start is not None:
root.set('submission_start', unicode(oa_block.submission_start))
# Set submission due date
if oa_block.submission_due is not None:
root.set('submission_due', unicode(oa_block.submission_due))
# Open assessment displayed title
title = etree.SubElement(root, 'title')
title.text = unicode(oa_block.title)
# Assessment list
assessments_root = etree.SubElement(root, 'assessments')
for assessment_dict in oa_block.rubric_assessments:
assessment = etree.SubElement(assessments_root, 'assessment')
......@@ -517,11 +503,42 @@ def serialize_content_to_xml(oa_block, root):
examples = assessment_dict.get('examples', [])
if not isinstance(examples, list):
examples = []
_serialize_training_examples(examples, assessment)
serialize_training_examples(examples, assessment)
def serialize_content_to_xml(oa_block, root):
"""
Serialize the OpenAssessment XBlock's content to XML.
Args:
oa_block (OpenAssessmentBlock): The open assessment block to serialize.
root (etree.Element): The XML root node to update.
Returns:
etree.Element
"""
root.tag = 'openassessment'
# Set the submission start date
if oa_block.submission_start is not None:
root.set('submission_start', unicode(oa_block.submission_start))
# Set submission due date
if oa_block.submission_due is not None:
root.set('submission_due', unicode(oa_block.submission_due))
# Open assessment displayed title
title = etree.SubElement(root, 'title')
title.text = unicode(oa_block.title)
# Assessment list
assessments_root = etree.SubElement(root, 'assessments')
serialize_assessments(assessments_root, oa_block)
# Rubric
rubric_root = etree.SubElement(root, 'rubric')
_serialize_rubric(rubric_root, oa_block)
serialize_rubric(rubric_root, oa_block)
def serialize_content(oa_block):
......@@ -541,9 +558,59 @@ def serialize_content(oa_block):
return etree.tostring(root, pretty_print=True, encoding='utf-8')
DEFAULT_VALIDATOR = lambda *args: (True, '')
def serialize_rubric_to_xml_str(oa_block):
"""
Serialize the OpenAssessment XBlock's rubric into an XML string.
Args:
oa_block (OpenAssessmentBlock): The open assessment block to serialize
a rubric from.
Returns:
xml (unicode) representation of the Rubric.
def update_from_xml(oa_block, root, validator=DEFAULT_VALIDATOR):
"""
rubric_root = etree.Element('rubric')
serialize_rubric(rubric_root, oa_block)
return etree.tostring(rubric_root, pretty_print=True, encoding='utf-8')
def serialize_examples_to_xml_str(assessment):
"""
Serializes the OpenAssessment XBlock's training examples into an XML unicode
string.
Args:
assessment (dict): Dictionary representation of an Assessment Module's
configuration. If this contains a list of examples, the examples
will be returned serialized.
Returns:
A unicode string of the XML serialized examples.
"""
examples = assessment.get('examples', [])
if not isinstance(examples, list):
examples = []
examples_root = etree.Element('examples')
serialize_training_examples(examples, examples_root)
return etree.tostring(examples_root, pretty_print=True, encoding='utf-8')
def serialize_assessments_to_xml_str(oa_block):
"""
Serializes the OpenAssessment XBlock's assessment modules into an XML
unicode string.
Args:
oa_block (OpenAssessmentBlock
"""
assessments_root = etree.Element('assessments')
serialize_assessments(assessments_root, oa_block)
return etree.tostring(assessments_root, pretty_print=True, encoding='utf-8')
def parse_from_xml(root):
"""
Update the OpenAssessment XBlock's content from an XML definition.
......@@ -551,24 +618,13 @@ def update_from_xml(oa_block, root, validator=DEFAULT_VALIDATOR):
the XBlock to an invalid state (which will then be persisted).
Args:
oa_block (OpenAssessmentBlock): The open assessment block to update.
root (lxml.etree.Element): The XML definition of the XBlock's content.
Kwargs:
validator(callable): Function of the form:
(rubric_dict, submission_dict, assessments) -> (bool, unicode)
where the returned bool indicates whether the XML is semantically valid,
and the returned unicode is an error message.
`rubric_dict` is a serialized Rubric model
`submission_dict` contains a single key "due" which is an ISO-formatted date string.
`assessments` is a list of serialized Assessment models.
Returns:
OpenAssessmentBlock
A dictionary of all of the XBlock's content.
Raises:
UpdateFromXmlError: The XML definition is invalid or the XBlock could not be updated.
ValidationError: The validator indicated that the XML was not semantically valid.
UpdateFromXmlError: The XML definition is invalid
"""
# Check that the root has the correct tag
......@@ -579,7 +635,7 @@ def update_from_xml(oa_block, root, validator=DEFAULT_VALIDATOR):
# Set it to None by default; we will update it to the latest start date later on
submission_start = None
if 'submission_start' in root.attrib:
submission_start = _parse_date(unicode(root.attrib['submission_start']))
submission_start = parse_date(unicode(root.attrib['submission_start']))
if submission_start is None:
raise UpdateFromXmlError(_('The format for the submission start date is invalid. Make sure the date is formatted as YYYY-MM-DDTHH:MM:SS.'))
......@@ -587,7 +643,7 @@ def update_from_xml(oa_block, root, validator=DEFAULT_VALIDATOR):
# Set it to None by default; we will update it to the earliest deadline later on
submission_due = None
if 'submission_due' in root.attrib:
submission_due = _parse_date(unicode(root.attrib['submission_due']))
submission_due = parse_date(unicode(root.attrib['submission_due']))
if submission_due is None:
raise UpdateFromXmlError(_('The format for the submission due date is invalid. Make sure the date is formatted as YYYY-MM-DDTHH:MM:SS.'))
......@@ -603,59 +659,120 @@ def update_from_xml(oa_block, root, validator=DEFAULT_VALIDATOR):
if rubric_el is None:
raise UpdateFromXmlError(_('Every assessment must contain a "rubric" element.'))
else:
rubric = _parse_rubric_xml(rubric_el)
rubric = parse_rubric_xml(rubric_el)
# Retrieve the assessments
assessments_el = root.find('assessments')
if assessments_el is None:
raise UpdateFromXmlError(_('Every assessment must contain an "assessments" element.'))
else:
assessments = _parse_assessments_xml(assessments_el)
# Validate
success, msg = validator(rubric, {'due': submission_due}, assessments)
if not success:
raise ValidationError(msg)
assessments = parse_assessments_xml(assessments_el)
# If we've gotten this far, then we've successfully parsed the XML
# and validated the contents. At long last, we can safely update the XBlock.
oa_block.title = title
oa_block.prompt = rubric['prompt']
oa_block.rubric_criteria = rubric['criteria']
oa_block.rubric_assessments = assessments
oa_block.rubric_feedback_prompt = rubric['feedbackprompt']
oa_block.submission_start = submission_start
oa_block.submission_due = submission_due
return {
'title': title,
'prompt': rubric['prompt'],
'rubric_criteria': rubric['criteria'],
'rubric_assessments': assessments,
'rubric_feedback_prompt': rubric['feedbackprompt'],
'submission_start': submission_start,
'submission_due': submission_due,
}
return oa_block
def update_from_xml_str(oa_block, xml, **kwargs):
def parse_from_xml_str(xml):
"""
Update the OpenAssessment XBlock's content from an XML string definition.
Parses the string using a library that avoids some known security vulnerabilities in etree.
Create a dictionary for the OpenAssessment XBlock's content from an XML
string definition. Parses the string using a library that avoids some known
security vulnerabilities in etree.
Args:
oa_block (OpenAssessmentBlock): The open assessment block to update.
xml (unicode): The XML definition of the XBlock's content.
Kwargs:
same as `update_from_xml`
Returns:
A dictionary of all configuration values for the XBlock.
Raises:
UpdateFromXmlError: The XML definition is invalid.
InvalidRubricError: The rubric was not semantically valid.
InvalidAssessmentsError: The assessments are not semantically valid.
"""
return parse_from_xml(_unicode_to_xml(xml))
def parse_rubric_xml_str(xml):
"""
Create a dictionary representation of the OpenAssessment XBlock rubric from
the given XML string.
Args:
xml (unicode): The XML definition of the XBlock's rubric.
Returns:
OpenAssessmentBlock
A dictionary of all rubric configuration.
Raises:
UpdateFromXmlError: The XML definition is invalid or the XBlock could not be updated.
UpdateFromXmlError: The XML definition is invalid.
InvalidRubricError: The rubric was not semantically valid.
"""
return parse_rubric_xml(_unicode_to_xml(xml))
def parse_assessments_xml_str(xml):
"""
Create a dictionary representation of the OpenAssessment XBlock assessments
from the given XML string.
Args:
xml (unicode): The XML definition of the XBlock's assessments.
Returns:
A list of dictionaries representing the deserialized XBlock
configuration for each assessment module.
Raises:
UpdateFromXmlError: The XML definition is invalid.
InvalidAssessmentsError: The assessments are not semantically valid.
"""
return parse_assessments_xml(_unicode_to_xml(xml))
def parse_examples_xml_str(xml):
"""
Create a list representation of the OpenAssessment XBlock assessment
examples from the given XML string.
Args:
xml (unicode): The XML definition of the Assessment module's examples.
Returns:
A list of dictionaries representing the deserialized XBlock
configuration for each assessment example.
Raises:
UpdateFromXmlError: The XML definition is invalid.
"""
xml = u"<data>" + xml + u"</data>"
return parse_examples_xml(list(_unicode_to_xml(xml)))
def _unicode_to_xml(xml):
"""
Converts unicode string to XML node.
Args:
xml (unicode): The XML definition of some XBlock configuration.
Raises:
UpdateFromXmlError: Raised when the XML definition is invalid.
"""
# Parse the XML content definition
# Use the defusedxml library implementation to avoid known security vulnerabilities in ElementTree:
# http://docs.python.org/2/library/xml.html#xml-vulnerabilities
try:
root = safe_etree.fromstring(xml.encode('utf-8'))
return safe_etree.fromstring(xml.encode('utf-8'))
except (ValueError, safe_etree.ParseError):
raise UpdateFromXmlError(_("An error occurred while parsing the XML content."))
return update_from_xml(oa_block, root, **kwargs)
raise UpdateFromXmlError(_("An error occurred while parsing the XML content."))
\ No newline at end of file
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment