Commit 1ef7ddda by Will Daly

Merge pull request #57 from edx/will/studio-integration-full

Add editing view for Open Assessment block in Studio
parents b7417383 4f63fb69
@@ -30,6 +30,7 @@ pip-log.txt
 .coverage
 .tox
 nosetests.xml
+htmlcov
 # Translations
 *.mo
...
{% load i18n %}
<div id="openassessment-edit">
<textarea class="openassessment-editor"></textarea>
<input type="button" class="openassessment-save-button" value="{% trans 'Save' %}"/>
<input type="button" class="openassessment-cancel-button" value="{% trans 'Cancel' %}"/>
</div>
@@ -54,7 +54,6 @@
 <form id="peer-assessment--001__assessment" class="peer-assessment__assessment" method="post">
 <fieldset class="assessment__fields">
-<legend class="assessment__instruction">{{ rubric_instructions }}</legend>
 <ol class="list list--fields assessment__rubric">
 {% for criterion in rubric_criteria %}
...
@@ -16,6 +16,7 @@ from xblock.fragment import Fragment
 from openassessment.xblock.peer_assessment_mixin import PeerAssessmentMixin
 from openassessment.xblock.self_assessment_mixin import SelfAssessmentMixin
 from openassessment.xblock.submission_mixin import SubmissionMixin
+from openassessment.xblock.studio_mixin import StudioMixin
 from scenario_parser import ScenarioParser
@@ -33,105 +34,80 @@ DEFAULT_PROMPT = """
     movies, magazines, etc., should be removed from the shelves if they are
     found offensive? Support your position with convincing arguments from your
     own experience, observations, and/or reading.
-"""
-DEFAULT_RUBRIC_INSTRUCTIONS = "Read for conciseness, clarity of thought, and form."
+    Read for conciseness, clarity of thought, and form.
+"""
 DEFAULT_RUBRIC_CRITERIA = [
     {
         'name': "Ideas",
-        'instructions': "Determine if there is a unifying theme or main idea.",
-        'total_value': 5,
+        'prompt': "Determine if there is a unifying theme or main idea.",
+        'order_num': 0,
         'options': [
-            (0, "Poor", """Difficult for the reader to discern the main idea.
-                Too brief or too repetitive to establish or maintain a focus.""",),
-            (3, "Fair", """Presents a unifying theme or main idea, but may
-                include minor tangents. Stays somewhat focused on topic and
-                task.""",),
-            (5, "Good", """Presents a unifying theme or main idea without going
-                off on tangents. Stays completely focused on topic and task.""",),
+            {
+                'order_num': 0, 'points': 0, 'name': 'Poor',
+                'explanation': """Difficult for the reader to discern the main idea.
+                Too brief or too repetitive to establish or maintain a focus."""
+            },
+            {
+                'order_num': 1, 'points': 3, 'name': 'Fair',
+                'explanation': """Presents a unifying theme or main idea, but may
+                include minor tangents. Stays somewhat focused on topic and
+                task."""
+            },
+            {
+                'order_num': 2, 'points': 5, 'name': 'Good',
+                'explanation': """Presents a unifying theme or main idea without going
+                off on tangents. Stays completely focused on topic and task."""
+            },
         ],
     },
     {
         'name': "Content",
-        'instructions': "Assess the content of the submission",
-        'total_value': 5,
+        'prompt': "Assess the content of the submission",
+        'order_num': 0,
         'options': [
-            (0, "Poor", """Includes little information with few or no details or
-                unrelated details. Unsuccessful in attempts to explore any
-                facets of the topic.""",),
-            (1, "Fair", """Includes little information and few or no details.
-                Explores only one or two facets of the topic.""",),
-            (3, "Good", """Includes sufficient information and supporting
-                details. (Details may not be fully developed; ideas may be
-                listed.) Explores some facets of the topic.""",),
-            (5, "Excellent", """Includes in-depth information and exceptional
-                supporting details that are fully developed. Explores all
-                facets of the topic.""",),
+            {
+                'order_num': 0, 'points': 0, 'name': 'Poor',
+                'explanation': """Includes little information with few or no details or
+                unrelated details. Unsuccessful in attempts to explore any
+                facets of the topic."""
+            },
+            {
+                'order_num': 0, 'points': 1, 'name': 'Fair',
+                'explanation': """Includes little information and few or no details.
+                Explores only one or two facets of the topic."""
+            },
+            {
+                'order_num': 0, 'points': 3, 'name': 'Good',
+                'explanation': """Includes sufficient information and supporting
+                details. (Details may not be fully developed; ideas may be
+                listed.) Explores some facets of the topic."""
+            },
+            {
+                'order_num': 0, 'points': 3, 'name': 'Excellent',
+                'explanation': """Includes in-depth information and exceptional
+                supporting details that are fully developed. Explores all
+                facets of the topic."""
+            },
         ],
     },
-    {
-        'name': "Organization",
-        'instructions': "Determine if the submission is well organized.",
-        'total_value': 2,
-        'options': [
-            (0, "Poor", """Ideas organized illogically, transitions weak, and
-                response difficult to follow.""",),
-            (1, "Fair", """Attempts to logically organize ideas. Attempts to
-                progress in an order that enhances meaning, and demonstrates use
-                of transitions.""",),
-            (2, "Good", """Ideas organized logically. Progresses in an order
-                that enhances meaning. Includes smooth transitions.""",),
-        ],
-    },
-    {
-        'name': "Style",
-        'instructions': "Read for style.",
-        'total_value': 2,
-        'options': [
-            (0, "Poor", """Contains limited vocabulary, with many words used
-                incorrectly. Demonstrates problems with sentence patterns.""",),
-            (1, "Fair", """Contains basic vocabulary, with words that are
-                predictable and common. Contains mostly simple sentences
-                (although there may be an attempt at more varied sentence
-                patterns).""",),
-            (2, "Good", """Includes vocabulary to make explanations detailed and
-                precise. Includes varied sentence patterns, including complex
-                sentences.""",),
-        ],
-    },
-    {
-        'name': "Voice",
-        'instructions': "Read for style.",
-        'total_value': 2,
-        'options': [
-            (0, "Poor", """Demonstrates language and tone that may be
-                inappropriate to task and reader.""",),
-            (1, "Fair", """Demonstrates an attempt to adjust language and tone
-                to task and reader.""",),
-            (2, "Good", """Demonstrates effective adjustment of language and
-                tone to task and reader.""",),
-        ],
-    }
 ]
 UI_MODELS = {
     "submission": {
-        "assessment_type": "submission",
         "name": "submission",
         "class_id": "openassessment__response",
         "navigation_text": "Your response to this problem",
         "title": "Your Response"
     },
     "peer-assessment": {
-        "assessment_type": "peer-assessment",
         "name": "peer-assessment",
         "class_id": "openassessment__peer-assessment",
         "navigation_text": "Your assessment(s) of peer responses",
         "title": "Assess Peers' Responses"
     },
     "self-assessment": {
-        "assessment_type": "self-assessment",
         "name": "self-assessment",
         "class_id": "openassessment__self-assessment",
         "navigation_text": "Your assessment of your response",
@@ -145,7 +121,6 @@ configured. If no configuration is specified, this is the default assessment
 module(s) associated with the XBlock.
 """
 DEFAULT_PEER_ASSESSMENT = {
-    "assessment_type": "peer-assessment",
     "name": "peer-assessment",
     "start_datetime": datetime.datetime.now().isoformat(),
     "must_grade": 5,
@@ -164,7 +139,7 @@ def load(path):
     return data.decode("utf8")
-class OpenAssessmentBlock(XBlock, SubmissionMixin, PeerAssessmentMixin, SelfAssessmentMixin):
+class OpenAssessmentBlock(XBlock, SubmissionMixin, PeerAssessmentMixin, SelfAssessmentMixin, StudioMixin):
     """Displays a question and gives an area where students can compose a response."""
     start_datetime = String(
@@ -172,6 +147,7 @@ class OpenAssessmentBlock(XBlock, SubmissionMixin, PeerAssessmentMixin, SelfAsse
         scope=Scope.content,
         help="ISO-8601 formatted string representing the start date of this assignment."
     )
     due_datetime = String(
         default=None,
         scope=Scope.content,
@@ -183,31 +159,25 @@ class OpenAssessmentBlock(XBlock, SubmissionMixin, PeerAssessmentMixin, SelfAsse
         scope=Scope.content,
         help="A title to display to a student (plain text)."
     )
     prompt = String(
         default=DEFAULT_PROMPT,
         scope=Scope.content,
         help="A prompt to display to a student (plain text)."
     )
-    rubric = List(
-        default=[],
-        scope=Scope.content,
-        help="Instructions and criteria for students giving feedback."
-    )
-    rubric_instructions = String(
-        default=DEFAULT_RUBRIC_INSTRUCTIONS,
-        scope=Scope.content,
-        help="Instructions for self and peer assessment."
-    )
     rubric_criteria = List(
         default=DEFAULT_RUBRIC_CRITERIA,
         scope=Scope.content,
         help="The different parts of grading for students giving feedback."
     )
     rubric_assessments = List(
         default=DEFAULT_ASSESSMENT_MODULES,
         scope=Scope.content,
         help="The requested set of assessments and the order in which to apply them."
     )
     course_id = String(
         default=u"TestCourse",
         scope=Scope.content,
@@ -268,7 +238,6 @@ class OpenAssessmentBlock(XBlock, SubmissionMixin, PeerAssessmentMixin, SelfAsse
             "xblock_trace": trace,
             "title": self.title,
             "question": self.prompt,
-            "rubric_instructions": self.rubric_instructions,
             "rubric_criteria": self.rubric_criteria,
             "rubric_assessments": ui_models,
             "grade_state": grade_state,
@@ -293,7 +262,7 @@ class OpenAssessmentBlock(XBlock, SubmissionMixin, PeerAssessmentMixin, SelfAsse
         """
         ui_models = [UI_MODELS["submission"]]
         for assessment in self.rubric_assessments:
-            ui_model = UI_MODELS[assessment["assessment_type"]]
+            ui_model = UI_MODELS[assessment["name"]]
             ui_models.append(dict(assessment, **ui_model))
         return ui_models
@@ -316,23 +285,6 @@ class OpenAssessmentBlock(XBlock, SubmissionMixin, PeerAssessmentMixin, SelfAsse
             ),
         ]
-    @staticmethod
-    def studio_view(context=None):
-        """Determines how the XBlock is rendered for editing in Studio.
-        Displays the section where Editing can occur within Studio to modify
-        this XBlock instance.
-        Args:
-            context: Not actively used for this view.
-        Returns:
-            (Fragment): An HTML fragment for editing the configuration of this
-            XBlock.
-        """
-        return Fragment(u"<div>Edit the XBlock.</div>")
     @classmethod
     def parse_xml(cls, node, runtime, keys, id_generator):
         """Instantiate XBlock object from runtime XML definition.
...
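Note that the diff above also replaces the per-assessment "assessment_type" key with the existing "name" key, so create_ui_models now looks up the static UI data by name and overlays it onto the configured assessment. A minimal sketch of that merge, using data abbreviated from this diff (the merged result shown in the comments is an assumption about the intended output):

# Sketch of the merge performed in create_ui_models(); data abbreviated from the diff.
UI_MODELS = {
    "peer-assessment": {
        "name": "peer-assessment",
        "class_id": "openassessment__peer-assessment",
        "navigation_text": "Your assessment(s) of peer responses",
        "title": "Assess Peers' Responses"
    }
}

assessment = {"name": "peer-assessment", "must_grade": 5, "must_be_graded_by": 3}

# dict(assessment, **ui_model) copies the assessment dict, then overlays
# the static UI fields on top of it; "name" is now the only lookup key needed.
ui_model = UI_MODELS[assessment["name"]]
merged = dict(assessment, **ui_model)
# merged now holds both the grading config and the UI fields:
# {"name": "peer-assessment", "must_grade": 5, "must_be_graded_by": 3,
#  "class_id": "openassessment__peer-assessment", ...}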
@@ -72,7 +72,6 @@ class PeerAssessmentMixin(object):
         peer_sub = self.get_peer_submission(self.get_student_item_dict(), assessment)
         context_dict = {
             "peer_submission": peer_sub,
-            "rubric_instructions": self.rubric_instructions,
             "rubric_criteria": self.rubric_criteria
         }
         return self.render_assessment('openassessmentblock/oa_peer_assessment.html', context_dict)
...
@@ -69,7 +69,7 @@ class ScenarioParser(object):
         )
         rubric_criteria.append(crit)
-        return (e.text.strip(), rubric_criteria)
+        return rubric_criteria
     def get_assessments(self, assessments):
         """<assessments>
@@ -113,8 +113,7 @@ class ScenarioParser(object):
             if child.tag == 'prompt':
                 self.xblock.prompt = self.get_prompt(child)
             elif child.tag == 'rubric':
-                (self.xblock.rubric_instructions,
-                 self.xblock.rubric_criteria) = self.get_rubric(child)
+                self.xblock.rubric_criteria = self.get_rubric(child)
             elif child.tag == 'title':
                 self.xblock.title = self.get_title(child)
             elif child.tag == 'assessments':
...
/* JavaScript for Studio editing view of Open Assessment XBlock */
function OpenAssessmentBlock(runtime, element) {
function displayError(errorMsg) {
runtime.notify('error', {msg: errorMsg});
}
// Update editor with the XBlock's current content
function updateEditorFromXBlock(editor) {
$.ajax({
type: "POST",
url: runtime.handlerUrl(element, 'xml'),
data: "\"\"",
success: function(data) {
if (data.success) {
editor.setValue(data.xml);
}
else {
displayError(data.msg);
}
}
});
}
function initializeEditor() {
var textAreas = $(element).find('.openassessment-editor');
if (textAreas.length < 1) {
console.warn("Could not find element for OpenAssessmentBlock XML editor");
return null;
}
else {
return CodeMirror.fromTextArea(
textAreas[0], {mode: "xml", lineNumbers: true, lineWrapping: true}
);
}
}
function initializeSaveButton(editor) {
        var saveButtons = $(element).find('.openassessment-save-button');
if (saveButtons.length < 1) {
console.warn("Could not find element for OpenAssessmentBlock save button");
}
else {
saveButtons.click(function (eventObject) {
// Notify the client-side runtime that we are starting
// to save so it can show the "Saving..." notification
runtime.notify('save', {state: 'start'});
// POST the updated description to the XBlock
// The server-side code is responsible for validating and persisting
// the updated content.
$.ajax({
type: "POST",
url: runtime.handlerUrl(element, 'update_xml'),
data: JSON.stringify({ xml: editor.getValue() }),
success: function(data) {
// Notify the client-side runtime that we finished saving
// so it can hide the "Saving..." notification.
if (data.success) {
runtime.notify('save', {state: 'end'});
}
// Display an error alert if any errors occurred
else {
displayError(data.msg);
}
}
});
});
}
}
function initializeCancelButton(editor) {
        var cancelButtons = $(element).find('.openassessment-cancel-button');
if (cancelButtons.length < 1) {
console.warn("Could not find element for OpenAssessmentBlock cancel button");
}
else {
cancelButtons.click(function (eventObject) {
// Revert to the XBlock's current content
updateEditorFromXBlock(editor);
// Notify the client-side runtime so it will close the editing modal.
runtime.notify('cancel', {});
});
}
}
$(function ($) {
        var editor = initializeEditor();
if (editor) {
updateEditorFromXBlock(editor);
initializeSaveButton(editor);
initializeCancelButton(editor);
}
});
}
"""
Studio editing view for OpenAssessment XBlock.
"""
import pkg_resources
import logging
import dateutil.parser
from django.template.context import Context
from django.template.loader import get_template
from django.utils.translation import ugettext as _
from xblock.core import XBlock
from xblock.fragment import Fragment
from openassessment.xblock.xml import (
serialize_content, update_from_xml,
UpdateFromXmlError, InvalidRubricError
)
from openassessment.peer.serializers import (
rubric_from_dict, AssessmentSerializer, InvalidRubric
)
logger = logging.getLogger(__name__)
class StudioMixin(object):
"""
Studio editing view for OpenAssessment XBlock.
"""
def studio_view(self, context=None):
"""
Render the OpenAssessment XBlock for editing in Studio.
Args:
context: Not actively used for this view.
Returns:
(Fragment): An HTML fragment for editing the configuration of this XBlock.
"""
rendered_template = get_template('openassessmentblock/oa_edit.html').render(Context({}))
frag = Fragment(rendered_template)
frag.add_javascript(pkg_resources.resource_string(__name__, "static/js/src/oa_edit.js"))
frag.initialize_js('OpenAssessmentBlock')
return frag
@XBlock.json_handler
def update_xml(self, data, suffix=''):
"""
Update the XBlock's XML.
Args:
data (dict): Data from the request; should have a value for the key 'xml'
containing the XML for this XBlock.
Kwargs:
suffix (str): Not used
Returns:
dict with keys 'success' (bool) and 'msg' (str)
"""
if 'xml' in data:
try:
update_from_xml(
self, data['xml'],
rubric_validator=self._validate_rubric,
assessment_validator=self._validate_assessment
)
except InvalidRubricError:
return {'success': False, 'msg': _('Rubric definition was not valid.')}
except UpdateFromXmlError as ex:
return {'success': False, 'msg': _('An error occurred while saving: {error}').format(error=ex.message)}
else:
return {'success': True, 'msg': _('Successfully updated OpenAssessment XBlock')}
else:
return {'success': False, 'msg': _('Must specify "xml" in request JSON dict.')}
@XBlock.json_handler
def xml(self, data, suffix=''):
"""
Retrieve the XBlock's content definition, serialized as XML.
Args:
data (dict): Not used
Kwargs:
suffix (str): Not used
Returns:
            dict with keys 'success' (bool), 'msg' (unicode), and 'xml' (unicode)
"""
try:
xml = serialize_content(self)
# We do not expect `serialize_content` to raise an exception,
# but if it does, handle it gracefully.
except Exception as ex:
msg = _('An unexpected error occurred while loading the problem: {error}').format(error=ex.message)
logger.error(msg)
return {'success': False, 'msg': msg, 'xml': u''}
else:
return {'success': True, 'msg': '', 'xml': xml}
def _validate_rubric(self, rubric_dict):
"""
Check that the rubric is semantically valid.
Args:
rubric_dict (dict): Serialized Rubric model from the peer grading app.
Returns:
            (bool, unicode) tuple: whether the rubric is semantically valid, and an error message (empty if valid).
"""
try:
rubric_from_dict(rubric_dict)
except InvalidRubric as ex:
return (False, ex.message)
else:
return (True, u'')
def _validate_assessment(self, assessment_dict):
"""
Check that the assessment is semantically valid.
Args:
            assessment_dict (dict): Serialized Assessment model from the peer grading app.
Returns:
            (bool, unicode) tuple: whether the assessment is semantically valid, and an error message (empty if valid).
"""
        # Check that the assessment type is supported
        if assessment_dict.get('name') not in ['peer-assessment', 'self-assessment']:
return (False, _("Assessment type is not supported"))
        # The number of peers each student must grade must be at least
        # the number of peers who must grade each submission.
        if assessment_dict.get('must_grade') < assessment_dict.get('must_be_graded_by'):
            return (False, _('"must_grade" should be greater than or equal to "must_be_graded_by"'))
# Due date is after start date, if both are specified.
start_datetime = assessment_dict.get('start_datetime')
due_datetime = assessment_dict.get('due_datetime')
if start_datetime is not None and due_datetime is not None:
            start = dateutil.parser.parse(start_datetime)
            due = dateutil.parser.parse(due_datetime)
if start > due:
return (False, _('Due date must be after start date'))
return (True, u'')
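Both JSON handlers above share one response shape (a dict with 'success' and 'msg', plus 'xml' on the read path), and both validators return a (bool, message) tuple rather than raising. A hedged sketch of that contract; the payload literals are illustrative, not fixtures from this commit:

# The Studio JS editor POSTs to these handlers and branches on data.success:
#   update_xml request:  {"xml": "<openassessmentblock>...</openassessmentblock>"}
#   update_xml response: {"success": True, "msg": "Successfully updated OpenAssessment XBlock"}
#   xml response:        {"success": True, "msg": "", "xml": "<openassessmentblock>...</openassessmentblock>"}

def example_validator(assessment_dict):
    """Hypothetical validator: returns (is_valid, error_message), never raises."""
    if assessment_dict.get('must_grade', 0) < assessment_dict.get('must_be_graded_by', 0):
        return (False, u'"must_grade" should be greater than or equal to "must_be_graded_by"')
    return (True, u'')

print example_validator({'must_grade': 5, 'must_be_graded_by': 3})  # (True, u'')
print example_validator({'must_grade': 1, 'must_be_graded_by': 3})  # (False, u'"must_grade" ...')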
@@ -156,7 +156,6 @@ class SubmissionMixin(object):
                 assessment_ui_model["must_be_graded_by"]
             )
             context["peer_assessments"] = assessments
-            context["rubric_instructions"] = self.rubric_instructions
             context["rubric_criteria"] = self.rubric_criteria
             for criterion in context["rubric_criteria"]:
                 criterion["median_score"] = median_scores[criterion["name"]]
...
{
"simple": {
"title": "Foo",
"prompt": "Test prompt",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
}
],
"assessments": [
{
"name": "peer-assessment",
"start_datetime": "2014-02-27T09:46:28",
"due_datetime": "2014-03-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment",
"start_datetime": "2014-04-01T00:00:00",
"due_datetime": "2014-06-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
}
],
"expected_xml": [
"<openassessmentblock>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" start=\"2014-04-01T00:00:00\" due=\"2014-06-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessmentblock>"
]
},
"unicode": {
"title": "ƒσσ",
"prompt": "Ṫëṡẗ ṗṛöṁṗẗ",
"criteria": [
{
"order_num": 0,
"name": "ՇﻉรՇ ƈɼٱՇﻉɼٱѻก",
"prompt": "TɘꙅT ↄᴙiTɘᴙioᴎ qᴙomqT",
"options": [
{
"order_num": 0,
"points": 0,
"name": "ℕ𝕠",
"explanation": "ℕ𝕠 𝕖𝕩𝕡𝕝𝕒𝕟𝕒𝕥𝕚𝕠𝕟"
},
{
"order_num": 1,
"points": 2,
"name": "Чэѕ",
"explanation": "Чэѕ эхрlаиатіои"
}
]
}
],
"assessments": [
{
"name": "peer-assessment",
"start_datetime": "2014-02-27T09:46:28",
"due_datetime": "2014-03-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
}
],
"expected_xml": [
"<openassessmentblock>",
"<title>ƒσσ</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>",
"<rubric>",
"<prompt>Ṫëṡẗ ṗṛöṁṗẗ</prompt>",
"<criterion>",
"<name>ՇﻉรՇ ƈɼٱՇﻉɼٱѻก</name>",
"<prompt>TɘꙅT ↄᴙiTɘᴙioᴎ qᴙomqT</prompt>",
"<option points=\"0\"><name>ℕ𝕠</name><explanation>ℕ𝕠 𝕖𝕩𝕡𝕝𝕒𝕟𝕒𝕥𝕚𝕠𝕟</explanation></option>",
"<option points=\"2\"><name>Чэѕ</name><explanation>Чэѕ эхрlаиатіои</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessmentblock>"
]
},
"reverse_option_order": {
"title": "Foo",
"prompt": "Test prompt",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [
{
"order_num": 2,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 0,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
}
],
"assessments": [
{
"name": "peer-assessment",
"start_datetime": "2014-02-27T09:46:28",
"due_datetime": "2014-06-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
}
],
"expected_xml": [
"<openassessmentblock>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-06-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessmentblock>"
]
},
"reverse_criteria_order": {
"title": "Foo",
"prompt": "Test prompt",
"criteria": [
{
"order_num": 2,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [
{
"order_num": 2,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
},
{
"order_num": 1,
"name": "Second criterion",
"prompt": "Second criterion prompt",
"options": [
{
"order_num": 0,
"points": 1,
"name": "Maybe",
"explanation": "Maybe explanation"
}
]
}
],
"assessments": [
{
"name": "peer-assessment",
"start_datetime": "2014-02-27T09:46:28",
"due_datetime": "2014-06-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
}
],
"expected_xml": [
"<openassessmentblock>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-06-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Second criterion</name>",
"<prompt>Second criterion prompt</prompt>",
"<option points=\"1\"><name>Maybe</name><explanation>Maybe explanation</explanation></option>",
"</criterion>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessmentblock>"
]
},
"default_dates": {
"title": "Foo",
"prompt": "Test prompt",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
}
],
"assessments": [
{
"name": "peer-assessment",
"start_datetime": null,
"due_datetime": "2014-03-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment",
"start_datetime": "2014-04-01T00:00:00",
"due_datetime": null,
"must_grade": 5,
"must_be_graded_by": 3
}
],
"expected_xml": [
"<openassessmentblock>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" start=\"2014-04-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessmentblock>"
]
}
}
{
"simple": {
"xml": [
"<openassessmentblock>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" start=\"2014-04-01T00:00:00\" due=\"2014-06-01T00:00:00\" must_grade=\"2\" must_be_graded_by=\"1\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessmentblock>"
],
"title": "Foo",
"prompt": "Test prompt",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
}
],
"assessments": [
{
"name": "peer-assessment",
"start_datetime": "2014-02-27T09:46:28",
"due_datetime": "2014-03-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment",
"start_datetime": "2014-04-01T00:00:00",
"due_datetime": "2014-06-01T00:00:00",
"must_grade": 2,
"must_be_graded_by": 1
}
]
},
"unicode": {
"xml": [
"<openassessmentblock>",
"<title>िѻѻ</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>",
"<rubric>",
"<prompt>ՇєรՇ קг๏๓קՇ</prompt>",
"<criterion>",
"<name>𝓣𝓮𝓼𝓽 𝓬𝓻𝓲𝓽𝓮𝓻𝓲𝓸𝓷</name>",
"<prompt>Ŧɇsŧ ȼɍɨŧɇɍɨøn ꝑɍømꝑŧ</prompt>",
"<option points=\"0\"><name>𝕹𝖔</name><explanation>𝕹𝖔 𝖊𝖝𝖕𝖑𝖆𝖓𝖆𝖙𝖎𝖔𝖓</explanation></option>",
"<option points=\"2\"><name>ﻉร</name><explanation>ﻉร ﻉซρɭคกคՇٱѻก</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessmentblock>"
],
"title": "िѻѻ",
"prompt": "ՇєรՇ קг๏๓קՇ",
"criteria": [
{
"order_num": 0,
"name": "𝓣𝓮𝓼𝓽 𝓬𝓻𝓲𝓽𝓮𝓻𝓲𝓸𝓷",
"prompt": "Ŧɇsŧ ȼɍɨŧɇɍɨøn ꝑɍømꝑŧ",
"options": [
{
"order_num": 0,
"points": 0,
"name": "𝕹𝖔",
"explanation": "𝕹𝖔 𝖊𝖝𝖕𝖑𝖆𝖓𝖆𝖙𝖎𝖔𝖓"
},
{
"order_num": 1,
"points": 2,
"name": "ﻉร",
"explanation": "ﻉร ﻉซρɭคกคՇٱѻก"
}
]
}
],
"assessments": [
{
"name": "peer-assessment",
"start_datetime": "2014-02-27T09:46:28",
"due_datetime": "2014-03-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
}
]
},
"multiple_criteria": {
"xml": [
"<openassessmentblock>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"<criterion>",
"<name>Second criterion</name>",
"<prompt>Second criterion prompt</prompt>",
"<option points=\"1\"><name>Maybe</name><explanation>Maybe explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessmentblock>"
],
"title": "Foo",
"prompt": "Test prompt",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
},
{
"order_num": 1,
"name": "Second criterion",
"prompt": "Second criterion prompt",
"options": [
{
"order_num": 0,
"points": 1,
"name": "Maybe",
"explanation": "Maybe explanation"
}
]
}
],
"assessments": [
{
"name": "peer-assessment",
"start_datetime": "2014-02-27T09:46:28",
"due_datetime": "2014-03-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
}
]
},
"no_dates_specified": {
"xml": [
"<openassessmentblock>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessmentblock>"
],
"title": "Foo",
"prompt": "Test prompt",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
}
],
"assessments": [
{
"name": "peer-assessment",
"start_datetime": null,
"due_datetime": null,
"must_grade": 5,
"must_be_graded_by": 3
}
]
}
}
{
"empty_string": {"xml": [""]},
"invalid_syntax": {"xml": ["<openassessmentblock><div>no closing tag</openassessmentblock>"]},
"missing_root": {"xml": "<div>Incorrect</div>"},
"missing_assessment_name": {
"xml": [
"<openassessmentblock>",
"<title>Foo</title>",
"<assessments>",
"<assessment start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessmentblock>"
]
},
"missing_title": {
"xml": [
"<openassessmentblock>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessmentblock>"
]
},
"missing_rubric": {
"xml": [
"<openassessmentblock>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>",
"</openassessmentblock>"
]
},
"missing_assessments": {
"xml": [
"<openassessmentblock>",
"<title>Foo</title>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessmentblock>"
]
},
"non_numeric_points": {
"xml": [
"<openassessmentblock>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"non-numeric\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessmentblock>"
]
},
"non_numeric_must_grade": {
"xml": [
"<openassessmentblock>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"non-numeric\" must_be_graded_by=\"3\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"5\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessmentblock>"
]
},
"non_numeric_must_be_graded_by": {
"xml": [
"<openassessmentblock>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"2\" must_be_graded_by=\"non-numeric\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"5\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessmentblock>"
]
},
"invalid_start_date": {
"xml": [
"<openassessmentblock>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"non-date\" due=\"2014-03-01T00:00:00\" must_grade=\"2\" must_be_graded_by=\"5\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"5\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessmentblock>"
]
},
"invalid_due_date": {
"xml": [
"<openassessmentblock>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-03-01T00:00:00\" due=\"non-date\" must_grade=\"2\" must_be_graded_by=\"5\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"5\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessmentblock>"
]
}
}
...@@ -53,7 +53,7 @@ RUBRIC_CONFIG = """ ...@@ -53,7 +53,7 @@ RUBRIC_CONFIG = """
due="2014-12-21T22:22" due="2014-12-21T22:22"
must_grade="5" must_grade="5"
must_be_graded_by="3" /> must_be_graded_by="3" />
<self-assessment/> <self-assessment name="self-assessment"/>
</assessments> </assessments>
</openassessment> </openassessment>
""" """
......
@@ -43,10 +43,9 @@ class TestScenarioParser(TestCase):
             cit=criterion_prompt_text,
             coet=criterion_option_explain_text)
         rubric_xml = etree.fromstring(rubric_text)
-        rubric_prompt, rubric_criteria = self.test_parser.get_rubric(rubric_xml)
+        rubric_criteria = self.test_parser.get_rubric(rubric_xml)
         # Basic shape of the rubric: prompt and criteria
-        self.assertEqual(rubric_prompt, rubric_prompt_text)
         self.assertEqual(len(rubric_criteria), 1)
         # Look inside the criterion to make sure it's shaped correctly
...
"""
Tests for serializing to/from XML.
"""
import copy
import mock
import lxml.etree as etree
from django.test import TestCase
from ddt import ddt, data, file_data, unpack
from openassessment.xblock.openassessmentblock import OpenAssessmentBlock, UI_MODELS
from openassessment.xblock.xml import (
serialize_content, update_from_xml,
UpdateFromXmlError, InvalidRubricError, InvalidAssessmentError
)
@ddt
class TestSerializeContent(TestCase):
"""
Test serialization of OpenAssessment XBlock content to XML.
"""
BASIC_CRITERIA = [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [
{
"order_num": 0,
"points": 0,
"name": "Maybe",
"explanation": "Maybe explanation"
}
]
}
]
BASIC_ASSESSMENTS = [
{
"name": "peer-assessment",
"start_datetime": "2014-02-27T09:46:28.873926",
"due_datetime": "2014-05-30T00:00:00.92926",
"must_grade": 5,
"must_be_graded_by": 3,
},
{
"name": "self-assessment",
"start_datetime": '2014-04-01T00:00:00.000000',
"due_datetime": "2014-06-01T00:00:00.92926",
"must_grade": 5,
"must_be_graded_by": 3,
}
]
def setUp(self):
"""
Mock the OA XBlock.
"""
self.oa_block = mock.MagicMock(OpenAssessmentBlock)
@file_data('data/serialize.json')
def test_serialize(self, data):
self.oa_block.title = data['title']
self.oa_block.prompt = data['prompt']
self.oa_block.rubric_criteria = data['criteria']
self.oa_block.rubric_assessments = data['assessments']
xml = serialize_content(self.oa_block)
# Compare the XML with our expected output
# To make the comparison robust, first parse the actual and expected XML
# then compare elements/attributes in the tree.
try:
parsed_actual = etree.fromstring(xml)
except (ValueError, etree.XMLSyntaxError):
self.fail("Could not parse output XML:\n{}".format(xml))
# Assume that the test data XML is valid; if not, this will raise an error
# instead of a test failure.
parsed_expected = etree.fromstring("".join(data['expected_xml']))
# Pretty-print and reparse the expected XML
pretty_expected = etree.tostring(parsed_expected, pretty_print=True, encoding='utf-8')
parsed_expected = etree.fromstring(pretty_expected)
# Walk both trees, comparing elements and attributes
actual_elements = [el for el in parsed_actual.getiterator()]
expected_elements = [el for el in parsed_expected.getiterator()]
self.assertEqual(
len(actual_elements), len(expected_elements),
msg="Incorrect XML output:\nActual: {}\nExpected: {}".format(actual_elements, expected_elements)
)
for actual, expected in zip(actual_elements, expected_elements):
self.assertEqual(actual.tag, expected.tag)
self.assertEqual(
actual.text, expected.text,
msg="Incorrect text for {tag}".format(tag=actual.tag)
)
self.assertItemsEqual(
actual.items(), expected.items(),
msg="Incorrect attributes for {tag}".format(tag=actual.tag)
)
def test_mutated_criteria_dict(self):
self.oa_block.title = "Test title"
self.oa_block.rubric_assessments = self.BASIC_ASSESSMENTS
        # We have to be really permissive with the data we'll accept.
        # If the data we're retrieving is somehow corrupted,
        # Studio authors should still be able to retrieve an XML representation
        # so they can edit and fix the issue.
        # To test this, we systematically mutate a valid criteria dictionary,
        # then assert that we can still parse the generated XML.
for criteria_dict in self.BASIC_CRITERIA:
for mutated_dict in self._dict_mutations(criteria_dict):
self.oa_block.rubric_criteria = mutated_dict
xml = serialize_content(self.oa_block)
try:
etree.fromstring(xml)
except Exception as ex:
msg = "Could not parse mutated criteria dict {criteria}\n{ex}".format(criteria=mutated_dict, ex=ex)
self.fail(msg)
def test_mutated_assessments_dict(self):
self.oa_block.title = "Test title"
self.oa_block.rubric_criteria = self.BASIC_CRITERIA
for assessment_dict in self.BASIC_ASSESSMENTS:
for mutated_dict in self._dict_mutations(assessment_dict):
self.oa_block.rubric_assessments = [mutated_dict]
xml = serialize_content(self.oa_block)
try:
etree.fromstring(xml)
except Exception as ex:
msg = "Could not parse mutated assessment dict {assessment}\n{ex}".format(assessment=mutated_dict, ex=ex)
self.fail(msg)
@data("title", "prompt")
def test_mutated_field(self, field):
self.oa_block.rubric_criteria = self.BASIC_CRITERIA
self.oa_block.rubric_assessments = self.BASIC_ASSESSMENTS
for mutated_value in [0, u"\u9282", None]:
            setattr(self.oa_block, field, mutated_value)
xml = serialize_content(self.oa_block)
try:
etree.fromstring(xml)
except Exception as ex:
msg = "Could not parse mutated field {field} with value {value}\n{ex}".format(
field=field, value=mutated_value, ex=ex
)
self.fail(msg)
def _dict_mutations(self, input_dict):
"""
Iterator over mutations of a dictionary:
1) Remove keys
2) Empty lists/dictionaries
3) Change value to None
4) Change value to unicode
5) Change value to an integer
Args:
input_dict (dict): A JSON-serializable dictionary to traverse.
Yields:
dict
"""
for key, val in input_dict.iteritems():
# Mutation #1: Remove the key
print "== Removing key {}".format(key)
yield {k:v for k,v in input_dict.iteritems() if k != key}
if isinstance(val, dict):
# Mutation #2: Empty dict
print "== Emptying dict {}".format(key)
yield self._mutate_dict(input_dict, key, dict())
# Mutation #3-5: value mutations
for mutated in self._value_mutations(input_dict, key):
yield mutated
# Recursively mutate sub keys
for sub_mutation in self._dict_mutations(val):
yield self._mutate_dict(input_dict, key, sub_mutation)
elif isinstance(val, list):
# Mutation #2: Empty list
print "== Emptying list {}".format(key)
yield self._mutate_dict(input_dict, key, list())
# Mutation #3-5: value mutations
for mutated in self._value_mutations(input_dict, key):
yield mutated
# Recursively mutate sub-items
for item in val:
if isinstance(item, dict):
for sub_mutation in self._dict_mutations(item):
yield self._mutate_dict(input_dict, key, sub_mutation)
else:
# Mutation #3-5: value mutations
for mutated in self._value_mutations(input_dict, key):
yield mutated
def _value_mutations(self, input_dict, key):
"""
Iterate over mutations of the value for `key` in a dictionary.
Args:
input_dict (dict): The dictionary to mutate.
key (str): The key whose value will be mutated.
Yields:
dict
"""
print "== None value {}".format(key)
yield self._mutate_dict(input_dict, key, None)
print "== Unicode value {}".format(key)
yield self._mutate_dict(input_dict, key, u"\u9731")
print "== int value {}".format(key)
yield self._mutate_dict(input_dict, key, 0)
def _mutate_dict(self, input_dict, key, new_val):
"""
Copy and update a dictionary.
Args:
input_dict (dict): The dictionary to copy and update.
key (str): The key of the value to update.
new_val: The new value to set for the key
Returns:
A copy of the dictionary with the value for `key` set to `new_val`.
"""
mutated = copy.deepcopy(input_dict)
mutated[key] = new_val
return mutated
@ddt
class TestUpdateFromXml(TestCase):
"""
Test deserialization of OpenAssessment XBlock content from XML.
"""
maxDiff = None
def setUp(self):
"""
Mock the OA XBlock.
"""
self.oa_block = mock.MagicMock(OpenAssessmentBlock)
self.oa_block.title = ""
self.oa_block.prompt = ""
self.oa_block.rubric_criteria = dict()
self.oa_block.rubric_assessments = list()
@file_data('data/update_from_xml.json')
def test_update_from_xml(self, data):
# Update the block based on the fixture XML definition
returned_block = update_from_xml(self.oa_block, "".join(data['xml']))
# The block we passed in should be updated and returned
self.assertEqual(self.oa_block, returned_block)
# Check that the contents of the modified XBlock are correct
self.assertEqual(self.oa_block.title, data['title'])
self.assertEqual(self.oa_block.prompt, data['prompt'])
self.assertEqual(self.oa_block.rubric_criteria, data['criteria'])
self.assertEqual(self.oa_block.rubric_assessments, data['assessments'])
@file_data('data/update_from_xml_error.json')
def test_update_from_xml_error(self, data):
with self.assertRaises(UpdateFromXmlError):
update_from_xml(self.oa_block, "".join(data['xml']))
@file_data('data/update_from_xml.json')
def test_invalid_rubric(self, data):
# Plug in a rubric validator that always reports that the rubric dict is invalid.
# We need to back this up with an integration test that checks whether the XBlock
# provides an appropriate rubric validator.
with self.assertRaises(InvalidRubricError):
update_from_xml(
self.oa_block, "".join(data['xml']),
rubric_validator=lambda _: (False, '')
)
@file_data('data/update_from_xml.json')
def test_invalid_assessment(self, data):
# Plug in an assessment validator that always reports that the assessment dict is invalid.
with self.assertRaises(InvalidAssessmentError):
update_from_xml(
self.oa_block, "".join(data['xml']),
assessment_validator=lambda _: (False, '')
)
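The mutation tests above lean on _dict_mutations, which walks a dictionary and yields one corrupted copy per mutation. A small hand-worked illustration of the kinds of copies it yields (abbreviated; the exact ordering interleaves recursion into nested values):

# For the input {'points': 0, 'name': 'No'}, _dict_mutations yields copies such as:
#   {'name': 'No'}                         # key 'points' removed
#   {'points': None, 'name': 'No'}         # value replaced with None
#   {'points': u'\u9731', 'name': 'No'}    # value replaced with a unicode string
#   {'points': 0, 'name': 0}               # value replaced with an integer
# Each copy is assigned to the mocked block and serialized, asserting that
# serialize_content never produces unparseable XML, even for corrupted data.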
"""
Serialize and deserialize OpenAssessment XBlock content to/from XML.
"""
import lxml.etree as etree
import dateutil.parser
import defusedxml.ElementTree as safe_etree
from django.utils.translation import ugettext as _
class UpdateFromXmlError(Exception):
"""
Error occurred while deserializing the OpenAssessment XBlock content from XML.
"""
pass
class InvalidRubricError(UpdateFromXmlError):
"""
The rubric definition is semantically invalid.
"""
pass
class InvalidAssessmentError(UpdateFromXmlError):
"""
The assessment definition is semantically invalid.
"""
pass
def _sort_by_order_num(items):
"""
Sort dictionaries by the key "order_num".
If no order number is specified, assign an arbitrary order.
Ignores non-dict items in the list.
Args:
items (list): List of dictionaries to sort.
Returns:
        list
"""
return sorted([
el for el in items
if isinstance(el, dict)
], key=lambda el: el.get('order_num', 0)
)
def _safe_get_text(element):
"""
Retrieve the text from the element, safely handling empty elements.
Args:
element (lxml.etree.Element): The XML element.
Returns:
unicode
"""
return unicode(element.text) if element.text is not None else u""
def _serialize_options(options_root, options_list):
"""
Serialize rubric criterion options as XML, adding children to the XML
with root node `options_root`.
We don't make any assumptions about the contents of `options_list`,
and we handle unexpected inputs gracefully.
Args:
options_root (lxml.etree.Element): The root node of the tree.
options_list (list): List of options dictionaries.
Returns:
None
"""
# Sort the options by order number, then serialize as XML
for option in _sort_by_order_num(options_list):
option_el = etree.SubElement(options_root, 'option')
# Points (default to 0)
option_el.set('points', unicode(option.get('points', 0)))
# Name (default to empty str)
option_name = etree.SubElement(option_el, 'name')
option_name.text = unicode(option.get('name', u''))
# Explanation (default to empty str)
option_explanation = etree.SubElement(option_el, 'explanation')
option_explanation.text = unicode(option.get('explanation', u''))
def _serialize_criteria(criteria_root, criteria_list):
"""
Serialize rubric criteria as XML, adding children to the XML
with root node `criteria_root`.
We don't make any assumptions about the contents of `criteria_list`,
and we handle unexpected inputs gracefully.
Args:
critera_root (lxml.etree.Element): The root node of the tree.
criteria_list (list): List of criteria dictionaries.
Returns:
None
"""
# Sort the criteria by order number, then serialize as XML
for criterion in _sort_by_order_num(criteria_list):
criterion_el = etree.SubElement(criteria_root, 'criterion')
# Criterion name (default to empty string)
criterion_name = etree.SubElement(criterion_el, u'name')
criterion_name.text = unicode(criterion.get('name', ''))
# Criterion prompt (default to empty string)
criterion_prompt = etree.SubElement(criterion_el, 'prompt')
criterion_prompt.text = unicode(criterion.get('prompt', u''))
# Criterion options
options_list = criterion.get('options', None)
if isinstance(options_list, list):
_serialize_options(criterion_el, options_list)
def _serialize_rubric(rubric_root, oa_block):
"""
Serialize a rubric dictionary as XML, adding children to the XML
with root node `rubric_root`.
This is very liberal in what it accepts. If the rubric dict persisted
by the XBlock is invalid for some reason, we still want to generate XML
so that Studio authors can fix the error.
Args:
        rubric_root (lxml.etree.Element): The root node of the <rubric> tree.
        oa_block (OpenAssessmentBlock): The open assessment block whose prompt
            and rubric criteria will be serialized.
Returns:
None
"""
# Rubric prompt (default to empty text)
prompt = etree.SubElement(rubric_root, 'prompt')
prompt.text = unicode(oa_block.prompt)
# Criteria
criteria_list = oa_block.rubric_criteria
if isinstance(criteria_list, list):
_serialize_criteria(rubric_root, criteria_list)
def _parse_date(date_str):
"""
Attempt to parse a date string into ISO format (without milliseconds)
Returns `None` if this cannot be done.
Args:
date_str (str): The date string to parse.
Returns:
unicode in ISO format (without milliseconds) if the date string is parseable
None if parsing fails.
"""
try:
# Get the date into ISO format
parsed_date = dateutil.parser.parse(unicode(date_str))
formatted_date = parsed_date.strftime("%Y-%m-%dT%H:%M:%S")
return unicode(formatted_date)
except (TypeError, ValueError):
return None
def _parse_options_xml(options_root):
"""
Parse <options> element in the OpenAssessment XBlock's content XML.
Args:
options_root (lxml.etree.Element): The root of the tree.
Returns:
list of option dictionaries, as defined in the Rubric model of the peer grading app.
Raises:
UpdateFromXmlError: The XML definition is invalid or the XBlock could not be updated.
"""
options_list = []
order_num = 0
for option in options_root.findall('option'):
option_dict = dict()
# Option order number (sequential)
option_dict['order_num'] = order_num
order_num += 1
# Option points -- must be an integer!
if 'points' in option.attrib:
try:
option_dict['points'] = int(option.get('points'))
except ValueError:
raise UpdateFromXmlError(_("XML option points must be an integer."))
else:
raise UpdateFromXmlError(_("XML option definition must contain a 'points' attribute."))
# Option name
option_name = option.find('name')
if option_name is not None:
option_dict['name'] = _safe_get_text(option_name)
else:
raise UpdateFromXmlError(_("XML option definition must contain a <name> element."))
# Option explanation
option_explanation = option.find('explanation')
if option_explanation is not None:
option_dict['explanation'] = _safe_get_text(option_explanation)
else:
raise UpdateFromXmlError(_("XML option definition must contain an <explanation> element."))
# Add the options dictionary to the list
options_list.append(option_dict)
return options_list
def _parse_criteria_xml(criteria_root):
"""
Parse <criteria> element in the OpenAssessment XBlock's content XML.
Args:
criteria_root (lxml.etree.Element): The root node of the tree.
Returns:
list of criteria dictionaries, as defined in the Rubric model of the peer grading app.
Raises:
UpdateFromXmlError: The XML definition is invalid or the XBlock could not be updated.
"""
criteria_list = []
order_num = 0
for criterion in criteria_root.findall('criterion'):
criterion_dict = dict()
# Criterion order number (sequential)
criterion_dict['order_num'] = order_num
order_num += 1
# Criterion name
criterion_name = criterion.find('name')
if criterion_name is not None:
criterion_dict['name'] = _safe_get_text(criterion_name)
else:
raise UpdateFromXmlError(_("XML criterion definition must contain a <name> element."))
# Criterion prompt
criterion_prompt = criterion.find('prompt')
if criterion_prompt is not None:
criterion_dict['prompt'] = _safe_get_text(criterion_prompt)
else:
raise UpdateFromXmlError(_("XML criterion definition must contain a <prompt> element."))
# Criterion options
criterion_dict['options'] = _parse_options_xml(criterion)
# Add the newly constructed criterion dict to the list
criteria_list.append(criterion_dict)
return criteria_list
def _parse_rubric_xml(rubric_root, validator):
"""
Parse <rubric> element in the OpenAssessment XBlock's content XML.
Args:
rubric_root (lxml.etree.Element): The root of the <rubric> node in the tree.
validator (callable): Function that accepts a rubric dict and returns
a boolean indicating whether the rubric is semantically valid
and an error message string.
Returns:
dict, a serialized representation of a rubric, as defined by the peer grading serializers.
Raises:
UpdateFromXmlError: The XML definition is invalid or the XBlock could not be updated.
InvalidRubricError: The rubric was not semantically valid.
"""
rubric_dict = dict()
# Rubric prompt
prompt_el = rubric_root.find('prompt')
if prompt_el is not None:
rubric_dict['prompt'] = _safe_get_text(prompt_el)
else:
raise UpdateFromXmlError(_("XML rubric definition must contain a <prompt> element."))
# Criteria
rubric_dict['criteria'] = _parse_criteria_xml(rubric_root)
# Validate the rubric
success, msg = validator(rubric_dict)
if not success:
raise InvalidRubricError(msg)
return rubric_dict
def _parse_assessments_xml(assessments_root, validator):
"""
Parse the <assessments> element in the OpenAssessment XBlock's content XML.
Args:
assessments_root (lxml.etree.Element): The root of the <assessments> node in the tree.
validator (callable): Function that accepts an assessment dict and returns
a boolean indicating whether the assessment is semantically valid
and an error message.
Returns:
list of assessment dicts
Raises:
InvalidAssessmentError: Assessment definition was not semantically valid.
"""
assessments_list = []
for assessment in assessments_root.findall('assessment'):
assessment_dict = dict()
# Assessment name
if 'name' in assessment.attrib:
assessment_dict['name'] = unicode(assessment.get('name'))
else:
raise UpdateFromXmlError(_('XML assessment definition must have a "name" attribute'))
# Assessment start
if 'start' in assessment.attrib:
parsed_start = _parse_date(assessment.get('start'))
if parsed_start is not None:
assessment_dict['start_datetime'] = parsed_start
else:
raise UpdateFromXmlError(_("Could not parse 'start' attribute as a valid date time"))
else:
# If no start is specified, default to None, meaning always open
assessment_dict['start_datetime'] = None
# Assessment due
if 'due' in assessment.attrib:
            parsed_due = _parse_date(assessment.get('due'))
            if parsed_due is not None:
                assessment_dict['due_datetime'] = parsed_due
else:
raise UpdateFromXmlError(_("Could not parse 'due' attribute as a valid date time"))
else:
# If no due date is specified, default to None, meaning never due
assessment_dict['due_datetime'] = None
# Assessment must_grade
if 'must_grade' in assessment.attrib:
try:
assessment_dict['must_grade'] = int(assessment.get('must_grade'))
except ValueError:
raise UpdateFromXmlError(_('Assessment "must_grade" attribute must be an integer.'))
else:
raise UpdateFromXmlError(_('XML assessment definition must have a "must_grade" attribute'))
# Assessment must_be_graded_by
if 'must_be_graded_by' in assessment.attrib:
try:
assessment_dict['must_be_graded_by'] = int(assessment.get('must_be_graded_by'))
except ValueError:
raise UpdateFromXmlError(_('Assessment "must_be_graded_by" attribute must be an integer.'))
else:
raise UpdateFromXmlError(_('XML assessment definition must have a "must_be_graded_by" attribute'))
# Validate the semantics of the assessment definition
success, msg = validator(assessment_dict)
if not success:
raise InvalidAssessmentError(msg)
assessments_list.append(assessment_dict)
return assessments_list
def serialize_content(oa_block):
"""
Serialize the OpenAssessment XBlock's content to XML.
Args:
oa_block (OpenAssessmentBlock): The open assessment block to serialize.
Returns:
xml (unicode)
"""
root = etree.Element('openassessmentblock')
# Open assessment displayed title
title = etree.SubElement(root, 'title')
title.text = unicode(oa_block.title)
# Assessment list
assessments_root = etree.SubElement(root, 'assessments')
for assessment_dict in oa_block.rubric_assessments:
assessment = etree.SubElement(assessments_root, 'assessment')
# Set assessment attributes, defaulting to empty values
assessment.set('name', unicode(assessment_dict.get('name', '')))
assessment.set('must_grade', unicode(assessment_dict.get('must_grade', '')))
assessment.set('must_be_graded_by', unicode(assessment_dict.get('must_be_graded_by', '')))
# Start and due dates default to None, indicating always open / never closed respectively
start_datetime = assessment_dict.get('start_datetime')
due_datetime = assessment_dict.get('due_datetime')
if start_datetime is not None:
assessment.set('start', unicode(start_datetime))
if due_datetime is not None:
assessment.set('due', unicode(due_datetime))
# Rubric
rubric_root = etree.SubElement(root, 'rubric')
_serialize_rubric(rubric_root, oa_block)
# Return a UTF-8 representation of the XML
return etree.tostring(root, pretty_print=True, encoding='utf-8')
def update_from_xml(
oa_block, xml,
rubric_validator=lambda _: (True, ''),
assessment_validator=lambda _: (True, '')
):
"""
Update the OpenAssessment XBlock's content from an XML definition.
We need to be strict about the XML we accept, to avoid setting
the XBlock to an invalid state (which will then be persisted).
Args:
oa_block (OpenAssessmentBlock): The open assessment block to update.
xml (unicode): The XML definition of the XBlock's content.
Kwargs:
rubric_validator (callable): Function that accepts a rubric dict and returns
a boolean indicating whether the rubric is semantically valid and an error message.
The default implementation performs no validation.
assessment_validator (callable): Function that accepts an assessment dict and returns
a boolean indicating whether the assessment is semantically valid and an error message.
The default implementation performs no validation.
Returns:
OpenAssessmentBlock
Raises:
UpdateFromXmlError: The XML definition is invalid or the XBlock could not be updated.
InvalidRubricError: The rubric was not semantically valid.
"""
# Parse the XML content definition
# Use the defusedxml library implementation to avoid known security vulnerabilities in ElementTree:
# http://docs.python.org/2/library/xml.html#xml-vulnerabilities
try:
root = safe_etree.fromstring(xml.encode('utf-8'))
    except (ValueError, safe_etree.ParseError):
raise UpdateFromXmlError(_("An error occurred while parsing the XML content."))
# Check that the root has the correct tag
if root.tag != 'openassessmentblock':
raise UpdateFromXmlError(_("XML content must contain an <openassessmentblock> root element."))
# Retrieve the title
title_el = root.find('title')
if title_el is None:
raise UpdateFromXmlError(_("XML content must contain a <title> element."))
else:
title = _safe_get_text(title_el)
# Retrieve the rubric
rubric_el = root.find('rubric')
if rubric_el is None:
raise UpdateFromXmlError(_("XML content must contain a <rubric> element."))
else:
rubric = _parse_rubric_xml(rubric_el, rubric_validator)
# Retrieve the assessments
assessments_el = root.find('assessments')
if assessments_el is None:
raise UpdateFromXmlError(_("XML content must contain an <assessments> element."))
else:
assessments = _parse_assessments_xml(assessments_el, assessment_validator)
# If we've gotten this far, then we've successfully parsed the XML
# and validated the contents. At long last, we can safely update the XBlock.
oa_block.title = title
oa_block.prompt = rubric['prompt']
oa_block.rubric_criteria = rubric['criteria']
oa_block.rubric_assessments = assessments
return oa_block
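serialize_content and update_from_xml are designed as a round trip: Studio reads the block's content as XML, the author edits it, and the handler validates the result and writes it back. A minimal usage sketch with a stand-in object (a real OpenAssessmentBlock would normally be used; note that etree.tostring(..., encoding='utf-8') yields a UTF-8 byte string, so we decode before updating):

# Round-trip sketch using a stand-in for the XBlock.
from openassessment.xblock.xml import serialize_content, update_from_xml

class FakeBlock(object):
    title = u"Foo"
    prompt = u"Test prompt"
    rubric_criteria = [{
        'order_num': 0, 'name': u'Test criterion', 'prompt': u'Test criterion prompt',
        'options': [{'order_num': 0, 'points': 2, 'name': u'Yes',
                     'explanation': u'Yes explanation'}],
    }]
    rubric_assessments = [{
        'name': u'peer-assessment', 'start_datetime': None, 'due_datetime': None,
        'must_grade': 5, 'must_be_graded_by': 3,
    }]

block = FakeBlock()
xml = serialize_content(block)               # UTF-8 byte string of pretty-printed XML
update_from_xml(block, xml.decode('utf-8'))  # re-parses, validates, and writes the fields back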
@@ -3,6 +3,7 @@ git+https://github.com/edx/XBlock.git@923978c5#egg=XBlock
 git+https://github.com/ormsbee/xblock-sdk.git@295678ff#egg=xblock-sdk
 # Third Party Requirements
+defusedxml==0.4.1
 django==1.4.8
 django-extensions==1.2.5
 djangorestframework==2.3.5
...
 # Grab everything in base requirements
 -r base.txt
-ddt==0.4.0
+ddt==0.7.0
 django-nose==1.2
 mock==1.0.1
 nose==1.3.0
...