Commit 30f052e0 by Stephen Sanchez

Merge pull request #409 from edx/sanchez/xml-validation-refactoring

Split XML serialization and validation contracts.
parents 041e5e43 a83b25cf
@@ -22,7 +22,7 @@ from openassessment.xblock.lms_mixin import LmsCompatibilityMixin
from openassessment.xblock.self_assessment_mixin import SelfAssessmentMixin
from openassessment.xblock.submission_mixin import SubmissionMixin
from openassessment.xblock.studio_mixin import StudioMixin
from openassessment.xblock.xml import update_from_xml, serialize_content_to_xml
from openassessment.xblock.xml import parse_from_xml, serialize_content_to_xml
from openassessment.xblock.staff_info_mixin import StaffInfoMixin
from openassessment.xblock.workflow_mixin import WorkflowMixin
from openassessment.workflow import api as workflow_api
@@ -324,8 +324,59 @@ class OpenAssessmentBlock(
"""
block = runtime.construct_xblock_from_class(cls, keys)
config = parse_from_xml(node)
rubric = {
"prompt": config["prompt"],
"feedbackprompt": config["rubric_feedback_prompt"],
"criteria": config["rubric_criteria"],
}
xblock_validator = validator(block, strict_post_release=False)
xblock_validator(
rubric,
{'due': config['submission_due']},
config['rubric_assessments']
)
block.update(
config['rubric_criteria'],
config['rubric_feedback_prompt'],
config['rubric_assessments'],
config['submission_due'],
config['submission_start'],
config['title'],
config['prompt']
)
return block
def update(self, criteria, feedback_prompt, assessments, submission_due,
submission_start, title, prompt):
"""
Update the XBlock with the given configuration values.
Args:
criteria (list): A list of rubric criteria for this XBlock.
feedback_prompt (str): The rubric feedback prompt for this XBlock.
assessments (list): A list of assessment module configurations for
this XBlock.
submission_due (str): ISO formatted submission due date.
submission_start (str): ISO formatted submission start date.
title (str): The title of this XBlock
prompt (str): The prompt for this XBlock.
return update_from_xml(block, node, validator=validator(block, strict_post_release=False))
Returns:
None
"""
# If we've gotten this far, then we've successfully parsed the XML
# and validated the contents. At long last, we can safely update the XBlock.
self.title = title
self.prompt = prompt
self.rubric_criteria = criteria
self.rubric_assessments = assessments
self.rubric_feedback_prompt = feedback_prompt
self.submission_start = submission_start
self.submission_due = submission_due
@property
def valid_assessments(self):
......
@@ -8,7 +8,7 @@ from django.template.loader import get_template
from django.utils.translation import ugettext as _
from xblock.core import XBlock
from xblock.fragment import Fragment
from openassessment.xblock.xml import serialize_content, update_from_xml_str, ValidationError, UpdateFromXmlError
from openassessment.xblock import xml
from openassessment.xblock.validation import validator
@@ -37,13 +37,17 @@ class StudioMixin(object):
return frag
@XBlock.json_handler
def update_xml(self, data, suffix=''):
def update_editor_context(self, data, suffix=''):
"""
Update the XBlock's XML.
Update the XBlock's configuration.
Args:
data (dict): Data from the request; should have a value for the key 'xml'
containing the XML for this XBlock.
data (dict): Data from the request; should have a value for the keys
'rubric', 'settings' and 'prompt'. The 'rubric' should be an XML
representation of the new rubric. The 'prompt' should be a plain
text prompt. The 'settings' should be a dict of 'title',
'submission_due', 'submission_start' and the XML configuration for
all 'assessments'.
Kwargs:
suffix (str): Not used
@@ -51,26 +55,43 @@ class StudioMixin(object):
Returns:
dict with keys 'success' (bool) and 'msg' (str)
"""
if 'xml' in data:
try:
update_from_xml_str(self, data['xml'], validator=validator(self))
except ValidationError as ex:
return {'success': False, 'msg': _('Validation error: {error}').format(error=ex)}
except UpdateFromXmlError as ex:
return {'success': False, 'msg': _('An error occurred while saving: {error}').format(error=ex)}
else:
return {'success': True, 'msg': _('Successfully updated OpenAssessment XBlock')}
missing_keys = list({'rubric', 'settings', 'prompt'} - set(data.keys()))
if missing_keys:
logger.warn(
'Must specify the following keys in request JSON dict: {}'.format(missing_keys)
)
return {'success': False, 'msg': _('Error updating XBlock configuration')}
settings = data['settings']
try:
else:
return {'success': False, 'msg': _('Must specify "xml" in request JSON dict.')}
rubric = xml.parse_rubric_xml_str(data['rubric'])
assessments = xml.parse_assessments_xml_str(settings['assessments'])
submission_due = settings["submission_due"]
except xml.UpdateFromXmlError as ex:
return {'success': False, 'msg': _('An error occurred while saving: {error}').format(error=ex)}
xblock_validator = validator(self)
success, msg = xblock_validator(rubric, {'due': submission_due}, assessments)
if not success:
return {'success': False, 'msg': _('Validation error: {error}').format(error=msg)}
self.update(
rubric['criteria'],
rubric['feedbackprompt'],
assessments,
settings["submission_due"],
settings["submission_start"],
settings["title"],
data["prompt"]
)
return {'success': True, 'msg': _('Successfully updated OpenAssessment XBlock')}
@XBlock.json_handler
def xml(self, data, suffix=''):
def editor_context(self, data, suffix=''):
"""
Retrieve the XBlock's content definition, serialized as XML.
Retrieve the XBlock's content definition, serialized as a JSON object
containing all the configuration as it will be displayed for studio
editing.
Args:
data (dict): Not used
@@ -79,19 +100,37 @@ class StudioMixin(object):
suffix (str): Not used
Returns:
dict with keys 'success' (bool), 'message' (unicode), and 'xml' (unicode)
dict with keys 'success' (bool), 'message' (unicode),
'rubric' (unicode), 'prompt' (unicode), and 'settings' (dict)
"""
try:
xml = serialize_content(self)
# We do not expect `serialize_content` to raise an exception,
assessments = xml.serialize_assessments_to_xml_str(self)
rubric = xml.serialize_rubric_to_xml_str(self)
# We do not expect serialization to raise an exception,
# but if it does, handle it gracefully.
except Exception as ex:
msg = _('An unexpected error occurred while loading the problem: {error}').format(error=ex)
logger.error(msg)
return {'success': False, 'msg': msg, 'xml': u''}
else:
return {'success': True, 'msg': '', 'xml': xml}
# Populate the context for the settings section of the editing
# panel. This dict will need to be adjusted as fields are added
# to or removed from that section.
settings = {
'submission_due': self.submission_due,
'submission_start': self.submission_start,
'title': self.title,
'assessments': assessments
}
return {
'success': True,
'msg': '',
'rubric': rubric,
'prompt': self.prompt,
'settings': settings
}
@XBlock.json_handler
def check_released(self, data, suffix=''):
......
<openassessment>
<title>Foo</title>
<assessments>
<!-- assessment name not supported -->
<assessment name="unsupported-assessment" />
<assessment name="self-assessment" />
</assessments>
<rubric>
<prompt>Test prompt</prompt>
<criterion>
<name>Test criterion</name>
<prompt>Test criterion prompt</prompt>
<option points="0"><name>No</name><explanation>No explanation</explanation></option>
<option points="2"><name>Yes</name><explanation>Yes explanation</explanation></option>
</criterion>
</rubric>
</openassessment>
<openassessment>
<title>Open Assessment Test</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
</prompt>
<rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion>
<name>Concise</name>
<prompt>How concise is it?</prompt>
<option points="0">
<name>Neal Stephenson (late)</name>
<explanation>Neal Stephenson explanation</explanation>
</option>
<option points="1">
<name>HP Lovecraft</name>
<explanation>HP Lovecraft explanation</explanation>
</option>
<option points="3">
<name>Robert Heinlein</name>
<explanation>Robert Heinlein explanation</explanation>
</option>
<option points="4">
<name>Neal Stephenson (early)</name>
<explanation>Neal Stephenson (early) explanation</explanation>
</option>
<option points="5">
<name>Earnest Hemingway</name>
<explanation>Earnest Hemingway</explanation>
</option>
</criterion>
<criterion>
<name>Clear-headed</name>
<prompt>How clear is the thinking?</prompt>
<option points="0">
<name>Yogi Berra</name>
<explanation>Yogi Berra explanation</explanation>
</option>
<option points="1">
<name>Hunter S. Thompson</name>
<explanation>Hunter S. Thompson explanation</explanation>
</option>
<option points="2">
<name>Robert Heinlein</name>
<explanation>Robert Heinlein explanation</explanation>
</option>
<option points="3">
<name>Isaac Asimov</name>
<explanation>Isaac Asimov explanation</explanation>
</option>
<option points="10">
<name>Spock</name>
<explanation>Spock explanation</explanation>
</option>
</criterion>
<criterion>
<name>Form</name>
<prompt>Lastly, how is its form? Punctuation, grammar, and spelling all count.</prompt>
<option points="0">
<name>lolcats</name>
<explanation>lolcats explanation</explanation>
</option>
<option points="1">
<name>Facebook</name>
<explanation>Facebook explanation</explanation>
</option>
<option points="2">
<name>Reddit</name>
<explanation>Reddit explanation</explanation>
</option>
<option points="3">
<name>metafilter</name>
<explanation>metafilter explanation</explanation>
</option>
<option points="4">
<name>Usenet, 1996</name>
<explanation>Usenet, 1996 explanation</explanation>
</option>
<option points="5">
<name>The Elements of Style</name>
<explanation>The Elements of Style explanation</explanation>
</option>
</criterion>
</rubric>
<assessments>
<!-- start date is after due date -->
<assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" />
<assessment name="self-assessment" start="2010-01-01" due="2003-01-01"/>
</assessments>
</openassessment>
{
"simple": {
"rubric": [
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<!-- no options -->",
"</criterion>",
"</rubric>"
],
"prompt": "My new prompt.",
"settings": {
"title": "My new title.",
"assessments": [
"<assessments>",
"<assessment name=\"peer-assessment\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" />",
"</assessments>"
],
"submission_due": "2014-02-27T09:46:28",
"submission_start": "2014-02-10T09:46:28"
},
"expected-assessment": "peer-assessment",
"expected-criterion-prompt": "Test criterion prompt"
}
}
\ No newline at end of file
<openassessment>
<title>Foo</title>
<assessments>
<assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" />
<assessment name="self-assessment" />
</assessments>
<rubric>
<prompt>Test prompt</prompt>
<criterion>
<name>Test criterion</name>
<prompt>Test criterion prompt</prompt>
<!-- no options -->
</criterion>
</rubric>
</openassessment>
{
"no_rubric": {
"prompt": "My new prompt.",
"settings": {
"title": "My new title.",
"assessments": [
"<assessments>",
"<assessment name=\"peer-assessment\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" />",
"</assessments>"
],
"submission_due": "2014-02-27T09:46:28",
"submission_start": "2014-02-10T09:46:28"
},
"expected_error": "error"
},
"no_prompt": {
"rubric": [
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>"
],
"settings": {
"title": "My new title.",
"assessments": [
"<assessments>",
"<assessment name=\"peer-assessment\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" />",
"</assessments>"
],
"submission_due": "2014-02-27T09:46:28",
"submission_start": "2014-02-10T09:46:28"
},
"expected_error": "error"
},
"no_settings": {
"rubric": [
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>"
],
"prompt": "My new prompt.",
"expected_error": "error"
},
"invalid_dates": {
"rubric": [
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>"
],
"prompt": "My new prompt.",
"settings": {
"title": "My new title.",
"assessments": [
"<assessments>",
"<assessment name=\"peer-assessment\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" start=\"2010-01-01\" due=\"2003-01-01\"/>",
"</assessments>"
],
"submission_due": "2012-02-27T09:46:28",
"submission_start": "2015-02-10T09:46:28"
},
"expected_error": "cannot be later"
}
}
\ No newline at end of file
{
"simple": {
"xml": [
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" start=\"2014-04-01T00:00:00\" due=\"2014-06-01T00:00:00\" />",
"</assessments>"
],
"assessments": [
{
"name": "peer-assessment",
"start": "2014-02-27T09:46:28",
"due": "2014-03-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment",
"start": "2014-04-01T00:00:00",
"due": "2014-06-01T00:00:00"
}
]
},
"multiple_criteria": {
"xml": [
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>"
],
"assessments": [
{
"name": "peer-assessment",
"start": "2014-02-27T09:46:28",
"due": "2014-03-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
}
]
},
"no_dates_specified": {
"xml": [
"<assessments>",
"<assessment name=\"peer-assessment\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>"
],
"assessments": [
{
"name": "peer-assessment",
"start": null,
"due": null,
"must_grade": 5,
"must_be_graded_by": 3
}
]
},
"student_training_no_examples": {
"xml": [
"<assessments>",
"<assessment name=\"student-training\" start=\"2014-04-01T00:00:00\" due=\"2014-06-01T00:00:00\" />",
"</assessments>"
],
"assessments": [
{
"name": "student-training",
"start": "2014-04-01T00:00:00",
"due": "2014-06-01T00:00:00",
"examples": []
}
]
},
"student_training_one_example": {
"xml": [
"<assessments>",
"<assessment name=\"student-training\" start=\"2014-04-01T00:00:00\" due=\"2014-06-01T00:00:00\">",
"<example>",
"<answer>ẗëṡẗ äṅṡẅëṛ</answer>",
"<select criterion=\"Test criterion\" option=\"Yes\" />",
"</example>",
"</assessment>",
"</assessments>"
],
"assessments": [
{
"name": "student-training",
"start": "2014-04-01T00:00:00",
"due": "2014-06-01T00:00:00",
"examples": [
{
"answer": "ẗëṡẗ äṅṡẅëṛ",
"options_selected": [
{
"criterion": "Test criterion",
"option": "Yes"
}
]
}
]
}
]
},
"student_training_multiple_examples": {
"xml": [
"<assessments>",
"<assessment name=\"student-training\" start=\"2014-04-01T00:00:00\" due=\"2014-06-01T00:00:00\">",
"<example>",
"<answer>ẗëṡẗ äṅṡẅëṛ</answer>",
"<select criterion=\"Test criterion\" option=\"Yes\" />",
"<select criterion=\"Another test criterion\" option=\"No\" />",
"</example>",
"<example>",
"<answer>äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ</answer>",
"<select criterion=\"Another test criterion\" option=\"Yes\" />",
"<select criterion=\"Test criterion\" option=\"No\" />",
"</example>",
"</assessment>",
"</assessments>"
],
"assessments": [
{
"name": "student-training",
"start": "2014-04-01T00:00:00",
"due": "2014-06-01T00:00:00",
"examples": [
{
"answer": "ẗëṡẗ äṅṡẅëṛ",
"options_selected": [
{
"criterion": "Test criterion",
"option": "Yes"
},
{
"criterion": "Another test criterion",
"option": "No"
}
]
},
{
"answer": "äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ",
"options_selected": [
{
"criterion": "Another test criterion",
"option": "Yes"
},
{
"criterion": "Test criterion",
"option": "No"
}
]
}
]
}
]
}
}
{
"student_training_one_example": {
"xml": [
"<example>",
"<answer>ẗëṡẗ äṅṡẅëṛ</answer>",
"<select criterion=\"Test criterion\" option=\"Yes\" />",
"</example>"
],
"examples": [
{
"answer": "ẗëṡẗ äṅṡẅëṛ",
"options_selected": [
{
"criterion": "Test criterion",
"option": "Yes"
}
]
}
]
},
"student_training_multiple_examples": {
"xml": [
"<example>",
"<answer>ẗëṡẗ äṅṡẅëṛ</answer>",
"<select criterion=\"Test criterion\" option=\"Yes\" />",
"<select criterion=\"Another test criterion\" option=\"No\" />",
"</example>",
"<example>",
"<answer>äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ</answer>",
"<select criterion=\"Another test criterion\" option=\"Yes\" />",
"<select criterion=\"Test criterion\" option=\"No\" />",
"</example>"
],
"examples": [
{
"answer": "ẗëṡẗ äṅṡẅëṛ",
"options_selected": [
{
"criterion": "Test criterion",
"option": "Yes"
},
{
"criterion": "Another test criterion",
"option": "No"
}
]
},
{
"answer": "äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ",
"options_selected": [
{
"criterion": "Another test criterion",
"option": "Yes"
},
{
"criterion": "Test criterion",
"option": "No"
}
]
}
]
}
}
{
"simple": {
"rubric": [
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>"
],
"prompt": "My new prompt.",
"settings": {
"title": "My new title.",
"assessments": [
"<assessments>",
"<assessment name=\"peer-assessment\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" />",
"</assessments>"
],
"submission_due": "4014-02-27T09:46:28",
"submission_start": "4014-02-10T09:46:28"
},
"expected-assessment": "peer-assessment",
"expected-criterion-prompt": "Test criterion prompt"
}
}
\ No newline at end of file
@@ -7,7 +7,8 @@ import datetime as dt
import lxml.etree as etree
import mock
import pytz
from ddt import ddt, data
from ddt import ddt, data, file_data
from openassessment.xblock.xml import UpdateFromXmlError
from .base import scenario, XBlockHandlerTestCase
@@ -23,92 +24,92 @@ class StudioViewTest(XBlockHandlerTestCase):
self.assertTrue(frag.body_html().find('openassessment-edit'))
@scenario('data/basic_scenario.xml')
def test_get_xml(self, xblock):
resp = self.request(xblock, 'xml', '""', response_format='json')
def test_get_editor_context(self, xblock):
resp = self.request(xblock, 'editor_context', '""', response_format='json')
self.assertTrue(resp['success'])
self.assertEqual(resp['msg'], u'')
# Verify that the XML is parseable and the root is <openassessment>
root = etree.fromstring(resp['xml'])
self.assertEqual(root.tag, 'openassessment')
# Verify that the rubric XML is parseable and the root is <rubric>
rubric = etree.fromstring(resp['rubric'])
self.assertEqual(rubric.tag, 'rubric')
@mock.patch('openassessment.xblock.studio_mixin.serialize_content')
assessments = etree.fromstring(resp['settings']['assessments'])
self.assertEqual(assessments.tag, 'assessments')
@mock.patch('openassessment.xblock.xml.serialize_rubric_to_xml_str')
@scenario('data/basic_scenario.xml')
def test_get_xml_error(self, xblock, mock_serialize):
def test_get_editor_context_error(self, xblock, mock_rubric_serializer):
# Simulate an unexpected error while serializing the XBlock
mock_serialize.side_effect = ValueError('Test error!')
mock_rubric_serializer.side_effect = UpdateFromXmlError('Test error!')
# Check that we get a failure message
resp = self.request(xblock, 'xml', '""', response_format='json')
resp = self.request(xblock, 'editor_context', '""', response_format='json')
self.assertFalse(resp['success'])
self.assertIn(u'unexpected error', resp['msg'].lower())
@file_data('data/update_xblock.json')
@scenario('data/basic_scenario.xml')
def test_update_xml(self, xblock):
# Set the XBlock's release date to the future,
# so we are not restricted in what we can edit
xblock.start = dt.datetime(3000, 1, 1).replace(tzinfo=pytz.utc)
request = json.dumps({'xml': self.load_fixture_str('data/updated_block.xml')})
def test_update_xblock(self, xblock, data):
# First, join the XML fixture lines into a single string.
data['rubric'] = "".join(data['rubric'])
data['settings']['assessments'] = "".join(data['settings']['assessments'])
xblock.published_date = None
# Test that we can update the xblock with the expected configuration.
request = json.dumps(data)
# Verify the response is successful
resp = self.request(xblock, 'update_xml', request, response_format='json')
resp = self.request(xblock, 'update_editor_context', request, response_format='json')
print "ERROR IS {}".format(resp['msg'])
self.assertTrue(resp['success'])
self.assertIn('success', resp['msg'].lower())
# Check that the XBlock fields were updated
# We don't need to be exhaustive here, because we have other unit tests
# that verify this extensively.
self.assertEqual(xblock.title, u'Foo')
self.assertEqual(xblock.prompt, u'Test prompt')
self.assertEqual(xblock.rubric_assessments[0]['name'], 'peer-assessment')
self.assertEqual(xblock.rubric_criteria[0]['prompt'], 'Test criterion prompt')
self.assertEqual(xblock.title, data['settings']['title'])
self.assertEqual(xblock.prompt, data['prompt'])
self.assertEqual(xblock.rubric_assessments[0]['name'], data['expected-assessment'])
self.assertEqual(xblock.rubric_criteria[0]['prompt'], data['expected-criterion-prompt'])
@file_data('data/update_xblock.json')
@scenario('data/basic_scenario.xml')
def test_update_xml_post_release(self, xblock):
def test_update_context_post_release(self, xblock, data):
# First, join the XML fixture lines into a single string.
data['rubric'] = "".join(data['rubric'])
data['settings']['assessments'] = "".join(data['settings']['assessments'])
# XBlock start date defaults to already open,
# so we should get an error when trying to update anything that changes the number of points
request = json.dumps({'xml': self.load_fixture_str('data/updated_block.xml')})
request = json.dumps(data)
# Verify the response indicates failure
resp = self.request(xblock, 'update_xml', request, response_format='json')
resp = self.request(xblock, 'update_editor_context', request, response_format='json')
self.assertFalse(resp['success'])
@file_data('data/invalid_update_xblock.json')
@scenario('data/basic_scenario.xml')
def test_update_xml_invalid_request_data(self, xblock):
resp = self.request(xblock, 'update_xml', json.dumps({}), response_format='json')
self.assertFalse(resp['success'])
self.assertIn('xml', resp['msg'].lower())
def test_update_context_invalid_request_data(self, xblock, data):
# First, join any XML fixture lines into single strings.
if 'rubric' in data:
data['rubric'] = "".join(data['rubric'])
@scenario('data/basic_scenario.xml')
def test_update_xml_invalid_date_format(self, xblock):
request = json.dumps({'xml': self.load_fixture_str('data/invalid_dates.xml')})
resp = self.request(xblock, 'update_xml', request, response_format='json')
self.assertFalse(resp['success'])
self.assertIn("cannot be later than", resp['msg'].lower())
if 'settings' in data and 'assessments' in data['settings']:
data['settings']['assessments'] = "".join(data['settings']['assessments'])
# Test that we enforce that there are exactly two assessments,
# peer ==> self
# If and when we remove this restriction, this test can be deleted.
@data('data/invalid_assessment_combo_order.xml')
@scenario('data/basic_scenario.xml')
def test_update_xml_invalid_assessment_combo(self, xblock, invalid_workflow):
request = json.dumps(
{'xml': self.load_fixture_str(invalid_workflow)}
)
resp = self.request(xblock, 'update_xml', request, response_format='json')
xblock.published_date = None
resp = self.request(xblock, 'update_editor_context', json.dumps(data), response_format='json')
self.assertFalse(resp['success'])
self.assertIn("for this assignment", resp['msg'].lower())
self.assertIn(data['expected_error'], resp['msg'].lower())
@data(('data/invalid_rubric.xml', 'rubric'), ('data/invalid_assessment.xml', 'assessment'))
@file_data('data/invalid_rubric.json')
@scenario('data/basic_scenario.xml')
def test_update_xml_invalid(self, xblock, data):
xml_path = data[0]
expected_msg = data[1]
def test_update_rubric_invalid(self, xblock, data):
# First, join the XML fixture lines into a single string.
data['rubric'] = "".join(data['rubric'])
data['settings']['assessments'] = "".join(data['settings']['assessments'])
request = json.dumps({'xml': self.load_fixture_str(xml_path)})
request = json.dumps(data)
# Store old XBlock fields for later verification
old_title = xblock.title
@@ -117,9 +118,9 @@ class StudioViewTest(XBlockHandlerTestCase):
old_criteria = xblock.rubric_criteria
# Verify the response fails
resp = self.request(xblock, 'update_xml', request, response_format='json')
resp = self.request(xblock, 'update_editor_context', request, response_format='json')
self.assertFalse(resp['success'])
self.assertIn(expected_msg, resp['msg'].lower())
self.assertIn("not valid", resp['msg'].lower())
# Check that the XBlock fields were NOT updated
# We don't need to be exhaustive here, because we have other unit tests
......
@@ -12,7 +12,10 @@ from django.test import TestCase
import ddt
from openassessment.xblock.openassessmentblock import OpenAssessmentBlock
from openassessment.xblock.xml import (
serialize_content, update_from_xml_str, ValidationError, UpdateFromXmlError
serialize_content, parse_from_xml_str, parse_rubric_xml_str,
parse_examples_xml_str, parse_assessments_xml_str,
serialize_rubric_to_xml_str, serialize_examples_to_xml_str,
serialize_assessments_to_xml_str, UpdateFromXmlError
)
@@ -91,8 +94,8 @@ class TestSerializeContent(TestCase):
"""
self.oa_block = mock.MagicMock(OpenAssessmentBlock)
@ddt.file_data('data/serialize.json')
def test_serialize(self, data):
def _configure_xblock(self, data):
self.oa_block.title = data['title']
self.oa_block.prompt = data['prompt']
self.oa_block.rubric_feedback_prompt = data['rubric_feedback_prompt']
@@ -102,6 +105,10 @@ class TestSerializeContent(TestCase):
self.oa_block.submission_due = data['submission_due']
self.oa_block.rubric_criteria = data['criteria']
self.oa_block.rubric_assessments = data['assessments']
@ddt.file_data('data/serialize.json')
def test_serialize(self, data):
self._configure_xblock(data)
xml = serialize_content(self.oa_block)
# Compare the XML with our expected output
@@ -144,6 +151,23 @@ class TestSerializeContent(TestCase):
)
)
@ddt.file_data('data/serialize.json')
def test_serialize_rubric(self, data):
self._configure_xblock(data)
xml_str = serialize_rubric_to_xml_str(self.oa_block)
@ddt.file_data('data/serialize.json')
def test_serialize_examples(self, data):
self._configure_xblock(data)
for assessment in data['assessments']:
if 'student-training' == assessment['name']:
xml_str = serialize_examples_to_xml_str(assessment)
@ddt.file_data('data/serialize.json')
def test_serialize_assessments(self, data):
self._configure_xblock(data)
xml_str = serialize_assessments_to_xml_str(self.oa_block)
def test_mutated_criteria_dict(self):
self.oa_block.title = "Test title"
self.oa_block.rubric_assessments = self.BASIC_ASSESSMENTS
@@ -300,6 +324,36 @@ class TestSerializeContent(TestCase):
mutated[key] = new_val
return mutated
@ddt.ddt
class TestParseRubricFromXml(TestCase):
@ddt.file_data("data/parse_rubric_xml.json")
def test_parse_rubric_from_xml(self, data):
rubric = parse_rubric_xml_str("".join(data['xml']))
self.assertEqual(rubric['prompt'], data['prompt'])
self.assertEqual(rubric['feedbackprompt'], data['feedbackprompt'])
self.assertEqual(rubric['criteria'], data['criteria'])
@ddt.ddt
class TestParseExamplesFromXml(TestCase):
@ddt.file_data("data/parse_examples_xml.json")
def test_parse_examples_from_xml(self, data):
examples = parse_examples_xml_str("".join(data['xml']))
self.assertEqual(examples, data['examples'])
@ddt.ddt
class TestParseAssessmentsFromXml(TestCase):
@ddt.file_data("data/parse_assessments_xml.json")
def test_parse_assessments_from_xml(self, data):
assessments = parse_assessments_xml_str("".join(data['xml']))
self.assertEqual(assessments, data['assessments'])
@ddt.ddt
class TestUpdateFromXml(TestCase):
@@ -324,36 +378,20 @@ class TestUpdateFromXml(TestCase):
self.oa_block.submission_due = "2000-01-01T00:00:00"
@ddt.file_data('data/update_from_xml.json')
def test_update_from_xml(self, data):
def test_parse_from_xml(self, data):
# Update the block based on the fixture XML definition
returned_block = update_from_xml_str(self.oa_block, "".join(data['xml']))
# The block we passed in should be updated and returned
self.assertEqual(self.oa_block, returned_block)
config = parse_from_xml_str("".join(data['xml']))
# Check that the parsed configuration is correct
self.assertEqual(self.oa_block.title, data['title'])
self.assertEqual(self.oa_block.prompt, data['prompt'])
self.assertEqual(self.oa_block.start, _parse_date(data['start']))
self.assertEqual(self.oa_block.due, _parse_date(data['due']))
self.assertEqual(self.oa_block.submission_start, data['submission_start'])
self.assertEqual(self.oa_block.submission_due, data['submission_due'])
self.assertEqual(self.oa_block.rubric_criteria, data['criteria'])
self.assertEqual(self.oa_block.rubric_assessments, data['assessments'])
self.assertEqual(config['title'], data['title'])
self.assertEqual(config['prompt'], data['prompt'])
self.assertEqual(config['submission_start'], data['submission_start'])
self.assertEqual(config['submission_due'], data['submission_due'])
self.assertEqual(config['rubric_criteria'], data['criteria'])
self.assertEqual(config['rubric_assessments'], data['assessments'])
@ddt.file_data('data/update_from_xml_error.json')
def test_update_from_xml_error(self, data):
def test_parse_from_xml_error(self, data):
with self.assertRaises(UpdateFromXmlError):
update_from_xml_str(self.oa_block, "".join(data['xml']))
@ddt.file_data('data/update_from_xml.json')
def test_invalid(self, data):
# Plug in a rubric validator that always reports that the rubric dict is invalid.
# We need to back this up with an integration test that checks whether the XBlock
# provides an appropriate rubric validator.
with self.assertRaises(ValidationError):
update_from_xml_str(
self.oa_block, "".join(data['xml']),
validator=lambda *args: (False, '')
)
parse_from_xml_str("".join(data['xml']))