Commit 64e667a4 by Will Daly

Merge pull request #65 from edx/will/more-test-cleanup

More test cleanup
parents e85718ed 34893331
......@@ -5,6 +5,8 @@ scope of the Tim APIs.
"""
from copy import deepcopy
import dateutil.parser
from django.utils.translation import ugettext as _
from rest_framework import serializers
from openassessment.peer.models import (
Assessment, AssessmentPart, Criterion, CriterionOption, Rubric
......@@ -257,3 +259,51 @@ def rubric_from_dict(rubric_dict):
rubric = rubric_serializer.save()
return rubric
def validate_assessment_dict(assessment_dict):
"""
Check that the assessment dict is semantically valid.
Args:
assessment_dict (dict): Serialized Assessment model.
Returns:
tuple of (is_valid, msg), where is_valid is a bool and msg is a unicode error message (empty when valid).
"""
# Supported assessment
if assessment_dict.get('name') not in ['peer-assessment', 'self-assessment']:
return (False, _("Assessment type is not supported"))
# Number you need to grade is >= the number of people that need to grade you
must_grade = assessment_dict.get('must_grade')
must_be_graded_by = assessment_dict.get('must_be_graded_by')
if must_grade is None or must_grade < 1:
return (False, _('"must_grade" must be a positive integer'))
if must_be_graded_by is None or must_be_graded_by < 1:
return (False, _('"must_be_graded_by" must be a positive integer'))
if must_grade < must_be_graded_by:
return (False, _('"must_grade" should be greater than or equal to "must_be_graded_by"'))
return (True, u'')
def validate_rubric_dict(rubric_dict):
"""
Check that the rubric is semantically valid.
Args:
rubric_dict (dict): Serialized Rubric model
Returns:
tuple of (is_valid, msg), where is_valid is a bool and msg is a unicode error message (empty when valid).
"""
try:
rubric_from_dict(rubric_dict)
except InvalidRubric as ex:
return (False, ex.message)
else:
return (True, u'')
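Both validators return the same (is_valid, msg) pair, so callers can branch on the flag and surface the message as-is. A minimal usage sketch (the dicts below are illustrative, not drawn from the test fixtures):

# Illustrative sketch only: hypothetical assessment dicts, not fixture data.
is_valid, msg = validate_assessment_dict({
    'name': 'peer-assessment',
    'must_grade': 5,
    'must_be_graded_by': 3,
})
# is_valid == True, msg == u''

is_valid, msg = validate_assessment_dict({
    'name': 'peer-assessment',
    'must_grade': 2,
    'must_be_graded_by': 3,
})
# is_valid == False: must_grade must be >= must_be_graded_by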
{
"empty_dict": {
"assessment": {}
},
"must_be_graded_by_zero": {
"assessment": {
"name": "self-assessment",
"must_grade": 1,
"must_be_graded_by": 0
}
},
"unsupported_type": {
"assessment": {
"name": "unsupported-assessment",
"must_grade": 5,
"must_be_graded_by": 3
}
},
"no_type": {
"assessment": {
"must_grade": 5,
"must_be_graded_by": 3
}
},
"unsupported_unicode_type": {
"assessment": {
"name": "𝓹𝓮𝓮𝓻-𝓪𝓼𝓼𝓮𝓼𝓼𝓶𝓮𝓷𝓽",
"must_grade": 5,
"must_be_graded_by": 3
}
},
"no_must_grade": {
"assessment": {
"name": "peer-assessment",
"must_be_graded_by": 3
}
},
"no_must_be_graded_by": {
"assessment": {
"name": "peer-assessment",
"must_grade": 5
}
},
"must_grade_less_than_must_be_graded_by": {
"assessment": {
"name": "peer-assessment",
"must_grade": 4,
"must_be_graded_by": 5
}
},
"must_grade_zero": {
"assessment": {
"name": "self-assessment",
"must_grade": 0,
"must_be_graded_by": 0
}
}
}
{
"zero_criteria": {
"rubric": {
"prompt": "Test Prompt",
"criteria": []
}
},
"zero_options": {
"rubric": {
"prompt": "Test Prompt",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": []
}
]
}
},
"negative_points": {
"rubric": {
"prompt": "Test Prompt",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [
{
"order_num": 0,
"points": -1,
"name": "No",
"explanation": "No explanation"
}
]
}
]
}
}
}
{
"peer": {
"assessment": {
"name": "peer-assessment",
"must_grade": 5,
"must_be_graded_by": 3
}
},
"self": {
"assessment": {
"name": "self-assessment",
"must_grade": 2,
"must_be_graded_by": 1
}
},
"must_be_graded_by_equals_must_grade": {
"assessment": {
"name": "self-assessment",
"must_grade": 1,
"must_be_graded_by": 1
}
}
}
{
"simple": {
"rubric": {
"prompt": "Test Prompt",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
}
]
}
},
"unicode": {
"rubric": {
"prompt": "☃",
"criteria": [
{
"order_num": 0,
"name": "☃",
"prompt": "☃",
"options": [
{
"order_num": 0,
"points": 0,
"name": "☃",
"explanation": "☃"
},
{
"order_num": 1,
"points": 2,
"name": "☃",
"explanation": "☃"
}
]
}
]
}
}
}
......@@ -82,4 +82,3 @@ class TestCriterionOptionDeserialization(TestCase):
]
}
)
"""
Test validation of serialized models.
"""
import ddt
from django.test import TestCase
from openassessment.peer.serializers import validate_assessment_dict, validate_rubric_dict
@ddt.ddt
class AssessmentValidationTest(TestCase):
@ddt.file_data('data/valid_assessments.json')
def test_valid_assessment(self, data):
success, msg = validate_assessment_dict(data['assessment'])
self.assertTrue(success)
self.assertEqual(msg, u'')
@ddt.file_data('data/invalid_assessments.json')
def test_invalid_assessment(self, data):
success, msg = validate_assessment_dict(data['assessment'])
self.assertFalse(success)
self.assertGreater(len(msg), 0)
@ddt.ddt
class RubricValidationTest(TestCase):
@ddt.file_data('data/valid_rubrics.json')
def test_valid_rubric(self, data):
success, msg = validate_rubric_dict(data['rubric'])
self.assertTrue(success)
self.assertEqual(msg, u'')
@ddt.file_data('data/invalid_rubrics.json')
def test_invalid_rubric(self, data):
success, msg = validate_rubric_dict(data['rubric'])
self.assertFalse(success)
self.assertGreater(len(msg), 0)
......@@ -18,8 +18,8 @@ from openassessment.xblock.peer_assessment_mixin import PeerAssessmentMixin
from openassessment.xblock.self_assessment_mixin import SelfAssessmentMixin
from openassessment.xblock.submission_mixin import SubmissionMixin
from openassessment.xblock.studio_mixin import StudioMixin
from openassessment.xblock.xml import update_from_xml
from scenario_parser import ScenarioParser
DEFAULT_PROMPT = """
Censorship in the Libraries
......@@ -303,10 +303,7 @@ class OpenAssessmentBlock(XBlock, SubmissionMixin, PeerAssessmentMixin, SelfAsse
"""Recursively embed xblocks for nodes we don't recognize"""
block.runtime.add_node_as_child(block, child, id_generator)
block = runtime.construct_xblock_from_class(cls, keys)
sparser = ScenarioParser(block, node, unknown_handler)
block = sparser.parse()
return block
return update_from_xml(block, node)
def render_assessment(self, path, context_dict=None):
"""Render an Assessment Module's HTML
......
# -*- coding: utf-8 -*-
"""XBlock scenario parsing routines"""
class ScenarioParser(object):
"""Utility class to capture parsing of xml from runtime scenarios."""
def __init__(self, xblock, node, unknown_handler=lambda x, y: (x, y)):
"""Save the little bit of state we expect to re-use.
Args:
xblock (XBlock): The xblock instance whose fields we fill out.
node (lxml.etree): The root of the xml hierarchy we are to walk.
unknown_handler (function): A closure over some environmental data
from our caller, which is used when we encounter an unexpected
child node.
"""
self.xblock = xblock
self.root = node
self.unknown_handler = unknown_handler
def get_prompt(self, e):
"""<prompt>This tells you what you should write about. There should be only one prompt.</prompt>"""
return e.text.strip()
def get_title(self, e):
"""<title>The title of this block</title>
"""
return e.text.strip()
def get_rubric(self, e):
"""<rubric>
This text is general instructions relating to this rubric.
There should only be one set of instructions for the rubric.
<criterion name="myCrit">
This text is instructions for this criterion. There can be multiple criteria,
but each one should only have one set of instructions.
<option val="99">
This is some text labeling the criterion option worth 99 points
There can be multiple options per criterion.
<explain>
And this explains what the label for this option means. There can be only
one explanation per label.
</explain>
</option>
</criterion>
</rubric>"""
rubric_criteria = []
for criterion in e:
crit = {
'name': criterion.attrib.get('name', ''),
'prompt': criterion.text.strip(),
'total_value': criterion.attrib.get('total_value', None),
'options': [],
}
for option in criterion:
explanations = option.getchildren()
if explanations and len(explanations) == 1 and explanations[0].tag == 'explain':
explanation = explanations[0].text.strip()
else:
explanation = ''
crit['options'].append(
{
'name': option.text.strip(),
'points': int(option.attrib['val']),
'explanation': explanation,
}
)
rubric_criteria.append(crit)
return rubric_criteria
def get_assessments(self, assessments):
"""<assessments>
<!-- There can be multiple types of assessments given in any
arbitrary order, like this self assessment followed by a
peer assessment -->
<self-assessment />
<peer-assessment start="2014-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
</assessments>"""
assessment_list = []
for asmnt in assessments:
assessment = None
assessment_type = asmnt.tag
if 'peer-assessment' == assessment_type:
assessment = {
"must_grade": int(asmnt.attrib.get('must_grade', 1)),
"must_be_graded_by": int(asmnt.attrib.get('must_be_graded_by', 0))
}
elif 'self-assessment' == assessment_type:
assessment = {}
if assessment is not None:
assessment["assessment_type"] = assessment_type
assessment["name"] = asmnt.attrib.get('name', None)
assessment["start_datetime"] = asmnt.attrib.get('start', None)
assessment["due_datetime"] = asmnt.attrib.get('due', None)
assessment_list.append(assessment)
return assessment_list
def parse(self):
"""Instantiate xblock object from runtime XML definition."""
self.xblock.start_datetime = self.root.attrib.get('start', None)
self.xblock.due_datetime = self.root.attrib.get('due', None)
for child in self.root:
if child.tag == 'prompt':
self.xblock.prompt = self.get_prompt(child)
elif child.tag == 'rubric':
self.xblock.rubric_criteria = self.get_rubric(child)
elif child.tag == 'title':
self.xblock.title = self.get_title(child)
elif child.tag == 'assessments':
self.xblock.rubric_assessments = self.get_assessments(child)
else:
self.unknown_handler(self.xblock, child)
return self.xblock
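For reference, ScenarioParser can be exercised directly against a parsed node. A minimal sketch, assuming a bare object stands in for the XBlock (the real runtime supplies an actual block instance):

# Minimal sketch: DemoBlock is a hypothetical stand-in for a real XBlock.
from lxml import etree

class DemoBlock(object):
    pass

node = etree.fromstring("<openassessment><title>Demo title</title></openassessment>")
block = ScenarioParser(DemoBlock(), node).parse()
# block.title is now 'Demo title'; start_datetime and due_datetime default to None.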
......@@ -3,19 +3,17 @@ Studio editing view for OpenAssessment XBlock.
"""
import pkg_resources
import logging
import dateutil.parser
from django.template.context import Context
from django.template.loader import get_template
from django.utils.translation import ugettext as _
from xblock.core import XBlock
from xblock.fragment import Fragment
from openassessment.xblock.xml import (
serialize_content, update_from_xml,
serialize_content, update_from_xml_str,
UpdateFromXmlError, InvalidRubricError
)
from openassessment.peer.serializers import (
rubric_from_dict, AssessmentSerializer, InvalidRubric
)
from openassessment.peer.serializers import validate_assessment_dict, validate_rubric_dict
logger = logging.getLogger(__name__)
......@@ -60,10 +58,10 @@ class StudioMixin(object):
"""
if 'xml' in data:
try:
update_from_xml(
update_from_xml_str(
self, data['xml'],
rubric_validator=self._validate_rubric,
assessment_validator=self._validate_assessment
rubric_validator=validate_rubric_dict,
assessment_validator=validate_assessment_dict
)
except InvalidRubricError:
......@@ -103,51 +101,3 @@ class StudioMixin(object):
return {'success': False, 'msg': msg, 'xml': u''}
else:
return {'success': True, 'msg': '', 'xml': xml}
def _validate_rubric(self, rubric_dict):
"""
Check that the rubric is semantically valid.
Args:
rubric_dict (dict): Serialized Rubric model from the peer grading app.
Returns:
boolean indicating whether the rubric is semantically valid.
"""
try:
rubric_from_dict(rubric_dict)
except InvalidRubric as ex:
return (False, ex.message)
else:
return (True, u'')
def _validate_assessment(self, assessment_dict):
"""
Check that the assessment is semantically valid.
Args:
assessment (dict): Serialized Assessment model from the peer grading app.
Returns:
boolean indicating whether the assessment is semantically valid.
"""
# Supported assessment
if not assessment_dict.get('name') in ['peer-assessment', 'self-assessment']:
return (False, _("Assessment type is not supported"))
# Number you need to grade is >= the number of people that need to grade you
if assessment_dict.get('must_grade') < assessment_dict.get('must_be_graded_by'):
return (False, _('"must_grade" should be less than "must_be_graded_by"'))
# Due date is after start date, if both are specified.
start_datetime = assessment_dict.get('start_datetime')
due_datetime = assessment_dict.get('due_datetime')
if start_datetime is not None and due_datetime is not None:
start = dateutil.parser.parse(assessment_dict.get('start_datetime'))
due = dateutil.parser.parse(assessment_dict.get('due_datetime'))
if start > due:
return (False, _('Due date must be after start date'))
return (True, u'')
......@@ -92,12 +92,10 @@ class XBlockHandlerTestCase(TestCase):
Returns:
XBlock
"""
base_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(base_dir, xml_path)) as xml_file:
block_id = self.runtime.parse_xml_string(
xml_file.read(), self.runtime.id_generator
)
return self.runtime.get_block(block_id)
block_id = self.runtime.parse_xml_string(
self.load_fixture_str(xml_path), self.runtime.id_generator
)
return self.runtime.get_block(block_id)
def request(self, xblock, handler_name, content, response_format=None):
"""
......@@ -133,3 +131,18 @@ class XBlockHandlerTestCase(TestCase):
return json.loads(response.body)
else:
raise NotImplementedError("Response format '{format}' not supported".format(format=response_format))
@staticmethod
def load_fixture_str(path):
"""
Load data from a fixture file.
Args:
path (str): Path to the file.
Returns:
unicode: contents of the file.
"""
base_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(base_dir, path)) as file_handle:
return file_handle.read()
<openassessment start="2014-12-19T23:00:00" due="2014-12-21T23:00:00">
<openassessment>
<title>Open Assessment Test</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
</prompt>
<rubric>
Read for conciseness, clarity of thought, and form.
<criterion name="concise">
How concise is it?
<option val="0">Neal Stephenson (late)</option>
<option val="1">HP Lovecraft</option>
<option val="3">Robert Heinlein</option>
<option val="4">Neal Stephenson (early)</option>
<option val="5">Earnest Hemingway</option>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion>
<name>Concise</name>
<prompt>How concise is it?</prompt>
<option points="0">
<name>Neal Stephenson (late)</name>
<explanation>Neal Stephenson explanation</explanation>
</option>
<option points="1">
<name>HP Lovecraft</name>
<explanation>HP Lovecraft explanation</explanation>
</option>
<option points="3">
<name>Robert Heinlein</name>
<explanation>Robert Heinlein explanation</explanation>
</option>
<option points="4">
<name>Neal Stephenson (early)</name>
<explanation>Neal Stephenson (early) explanation</explanation>
</option>
<option points="5">
<name>Ernest Hemingway</name>
<explanation>Ernest Hemingway explanation</explanation>
</option>
</criterion>
<criterion name="clearheaded">
How clear is the thinking?
<option val="0">Yogi Berra</option>
<option val="1">Hunter S. Thompson</option>
<option val="2">Robert Heinlein</option>
<option val="3">Isaac Asimov</option>
<option val="10">Spock</option>
<criterion>
<name>Clear-headed</name>
<prompt>How clear is the thinking?</prompt>
<option points="0">
<name>Yogi Berra</name>
<explanation>Yogi Berra explanation</explanation>
</option>
<option points="1">
<name>Hunter S. Thompson</name>
<explanation>Hunter S. Thompson explanation</explanation>
</option>
<option points="2">
<name>Robert Heinlein</name>
<explanation>Robert Heinlein explanation</explanation>
</option>
<option points="3">
<name>Isaac Asimov</name>
<explanation>Isaac Asimov explanation</explanation>
</option>
<option points="10">
<name>Spock</name>
<explanation>Spock explanation</explanation>
</option>
</criterion>
<criterion name="form">
Lastly, how is its form? Punctuation, grammar, and spelling all count.
<option val="0">lolcats</option>
<option val="1">Facebook</option>
<option val="2">Reddit</option>
<option val="3">metafilter</option>
<option val="4">Usenet, 1996</option>
<option val="5">The Elements of Style</option>
<criterion>
<name>Form</name>
<prompt>Lastly, how is its form? Punctuation, grammar, and spelling all count.</prompt>
<option points="0">
<name>lolcats</name>
<explanation>lolcats explanation</explanation>
</option>
<option points="1">
<name>Facebook</name>
<explanation>Facebook explanation</explanation>
</option>
<option points="2">
<name>Reddit</name>
<explanation>Reddit explanation</explanation>
</option>
<option points="3">
<name>metafilter</name>
<explanation>metafilter explanation</explanation>
</option>
<option points="4">
<name>Usenet, 1996</name>
<explanation>Usenet, 1996 explanation</explanation>
</option>
<option points="5">
<name>The Elements of Style</name>
<explanation>The Elements of Style explanation</explanation>
</option>
</criterion>
</rubric>
<assessments>
......
<openassessment>
<title>Foo</title>
<assessments>
<!-- assessment name not supported -->
<assessment name="unsupported-assessment" start="2014-02-27T09:46:28" due="2014-03-01T00:00:00" must_grade="5" must_be_graded_by="3" />
<assessment name="self-assessment" start="2014-04-01T00:00:00" due="2014-06-01T00:00:00" must_grade="2" must_be_graded_by="1" />
</assessments>
<rubric>
<prompt>Test prompt</prompt>
<criterion>
<name>Test criterion</name>
<prompt>Test criterion prompt</prompt>
<option points="0"><name>No</name><explanation>No explanation</explanation></option>
<option points="2"><name>Yes</name><explanation>Yes explanation</explanation></option>
</criterion>
</rubric>
</openassessment>
<openassessment>
<title>Foo</title>
<assessments>
<assessment name="peer-assessment" start="2014-02-27T09:46:28" due="2014-03-01T00:00:00" must_grade="5" must_be_graded_by="3" />
<assessment name="self-assessment" start="2014-04-01T00:00:00" due="2014-06-01T00:00:00" must_grade="2" must_be_graded_by="1" />
</assessments>
<rubric>
<prompt>Test prompt</prompt>
<criterion>
<name>Test criterion</name>
<prompt>Test criterion prompt</prompt>
<!-- no options -->
</criterion>
</rubric>
</openassessment>
......@@ -40,7 +40,7 @@
}
],
"expected_xml": [
"<openassessmentblock>",
"<openassessment>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
......@@ -55,7 +55,7 @@
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessmentblock>"
"</openassessment>"
]
},
......@@ -93,7 +93,7 @@
}
],
"expected_xml": [
"<openassessmentblock>",
"<openassessment>",
"<title>ƒσσ</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
......@@ -107,7 +107,7 @@
"<option points=\"2\"><name>Чэѕ</name><explanation>Чэѕ эхрlаиатіои</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessmentblock>"
"</openassessment>"
]
},
......@@ -145,7 +145,7 @@
}
],
"expected_xml": [
"<openassessmentblock>",
"<openassessment>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-06-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
......@@ -159,7 +159,7 @@
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessmentblock>"
"</openassessment>"
]
},
......@@ -210,7 +210,7 @@
}
],
"expected_xml": [
"<openassessmentblock>",
"<openassessment>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-06-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
......@@ -229,7 +229,7 @@
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessmentblock>"
"</openassessment>"
]
},
......@@ -274,7 +274,7 @@
}
],
"expected_xml": [
"<openassessmentblock>",
"<openassessment>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
......@@ -289,7 +289,7 @@
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessmentblock>"
"</openassessment>"
]
}
}
{
"simple": {
"xml": [
"<openassessmentblock>",
"<openassessment>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
......@@ -16,7 +16,7 @@
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessmentblock>"
"</openassessment>"
],
"title": "Foo",
"prompt": "Test prompt",
......@@ -61,7 +61,7 @@
"unicode": {
"xml": [
"<openassessmentblock>",
"<openassessment>",
"<title>िѻѻ</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
......@@ -75,7 +75,7 @@
"<option points=\"2\"><name>ﻉร</name><explanation>ﻉร ﻉซρɭคกคՇٱѻก</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessmentblock>"
"</openassessment>"
],
"title": "िѻѻ",
"prompt": "ՇєรՇ קг๏๓קՇ",
......@@ -113,7 +113,7 @@
"multiple_criteria": {
"xml": [
"<openassessmentblock>",
"<openassessment>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
......@@ -132,7 +132,7 @@
"<option points=\"1\"><name>Maybe</name><explanation>Maybe explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessmentblock>"
"</openassessment>"
],
"title": "Foo",
"prompt": "Test prompt",
......@@ -183,7 +183,7 @@
"no_dates_specified": {
"xml": [
"<openassessmentblock>",
"<openassessment>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" must_grade=\"5\" must_be_graded_by=\"3\" />",
......@@ -197,7 +197,7 @@
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessmentblock>"
"</openassessment>"
],
"title": "Foo",
"prompt": "Test prompt",
......
<openassessment>
<title>Foo</title>
<assessments>
<assessment name="peer-assessment" start="2014-02-27T09:46:28" due="2014-03-01T00:00:00" must_grade="5" must_be_graded_by="3" />
<assessment name="self-assessment" start="2014-04-01T00:00:00" due="2014-06-01T00:00:00" must_grade="2" must_be_graded_by="1" />
</assessments>
<rubric>
<prompt>Test prompt</prompt>
<criterion>
<name>Test criterion</name>
<prompt>Test criterion prompt</prompt>
<option points="0"><name>No</name><explanation>No explanation</explanation></option>
<option points="2"><name>Yes</name><explanation>Yes explanation</explanation></option>
</criterion>
</rubric>
</openassessment>
"""Tests the Workbench Scenario Parser functionality."""
from lxml import etree
from django.test import TestCase
from openassessment.xblock.scenario_parser import ScenarioParser
class TestScenarioParser(TestCase):
"""Test the ScenarioParser XML parsing class, which turns xml into filled XBlocks.
This does the simplest possible set of tests, just calling the parser utility
methods and confirming that their return results are correct, have good types, etc."""
def setUp(self):
self.test_parser = ScenarioParser("Dummy XBlock", "Dummy XML")
def test_get_prompt(self):
"""Given a <prompt> node, return its text."""
prompt_text = "5de0ef7cc2c7469383b58febd2fdac29"
prompt_xml = etree.fromstring("<prompt>{words}</prompt>".format(words=prompt_text))
self.assertEqual(self.test_parser.get_prompt(prompt_xml), prompt_text)
def test_get_rubric(self):
"""Given a <rubric> tree, return a instructions and a list of criteria"""
rubric_prompt_text = "This text is general instructions relating to this rubric. There should only be one set of instructions for the rubric."
criterion_prompt_text = "This text is instructions for this criterion. There can be multiple criteria, but each one should only have one set of instructions."
criterion_option_explain_text = "And this explains what the label for this option means. There can be only one explanation per label."
rubric_text = """<rubric>
{rit}
<criterion name="myCrit">
{cit}
<option val="99">
This is some text labeling the criterion option worth 99 points
There can be multiple options per criterion.
<explain>
{coet}
</explain>
</option>
</criterion>
</rubric>""".format(rit=rubric_prompt_text,
cit=criterion_prompt_text,
coet=criterion_option_explain_text)
rubric_xml = etree.fromstring(rubric_text)
rubric_criteria = self.test_parser.get_rubric(rubric_xml)
# Basic shape of the rubric: exactly one criterion was parsed
self.assertEqual(len(rubric_criteria), 1)
# Look inside the criterion to make sure it's shaped correctly
criterion = rubric_criteria[0]
self.assertEqual(criterion['name'], 'myCrit')
self.assertEqual(criterion['prompt'], criterion_prompt_text)
self.assertEqual(len(criterion['options']), 1)
# And within the criterion, check that options appear to come out well-formed
option = criterion['options'][0]
self.assertEqual(option['points'], 99)
self.assertEqual(option['explanation'], criterion_option_explain_text)
def test_get_assessments(self):
"""Given an <assessments> list, return a list of assessment modules."""
assessments = """<assessments>
<self-assessment name='0382e03c808e4f2bb12dfdd2d45d5c4b'
must_grade="999"
must_be_graded_by="73" />
<peer-assessment start="2014-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
<self-assessment />
</assessments>"""
assessments_xml = etree.fromstring(assessments)
parsed_list = self.test_parser.get_assessments(assessments_xml)
# Self assessments take all the parameters, but mostly ignore them.
self.assertEqual(parsed_list[0]["assessment_type"], 'self-assessment')
self.assertEqual(parsed_list[0]["name"], '0382e03c808e4f2bb12dfdd2d45d5c4b')
# Peer assessments are more interesting
self.assertEqual(parsed_list[1]["assessment_type"], 'peer-assessment')
self.assertEqual(parsed_list[1]["must_grade"], 5)
self.assertEqual(parsed_list[1]["must_be_graded_by"], 3)
# We can parse arbitrary workflow descriptions as a list of assessments.
# Whether or not the workflow system can use them is another matter
self.assertEqual(parsed_list[2]["assessment_type"], 'self-assessment')
"""
View-level tests for Studio view of OpenAssessment XBlock.
"""
import json
import lxml.etree as etree
import mock
from ddt import ddt, data
from .base import scenario, XBlockHandlerTestCase
@ddt
class StudioViewTest(XBlockHandlerTestCase):
"""
Test the view and handlers for editing the OpenAssessment XBlock in Studio.
"""
@scenario('data/basic_scenario.xml')
def test_render_studio_view(self, xblock):
frag = self.runtime.render(xblock, 'studio_view')
self.assertIn('openassessment-edit', frag.body_html())
@scenario('data/basic_scenario.xml')
def test_get_xml(self, xblock):
resp = self.request(xblock, 'xml', '""', response_format='json')
self.assertTrue(resp['success'])
self.assertEqual(resp['msg'], u'')
# Verify that the XML is parseable and the root is <openassessment>
root = etree.fromstring(resp['xml'])
self.assertEqual(root.tag, 'openassessment')
@mock.patch('openassessment.xblock.studio_mixin.serialize_content')
@scenario('data/basic_scenario.xml')
def test_get_xml_error(self, xblock, mock_serialize):
# Simulate an unexpected error while serializing the XBlock
mock_serialize.side_effect = ValueError('Test error!')
# Check that we get a failure message
resp = self.request(xblock, 'xml', '""', response_format='json')
self.assertFalse(resp['success'])
self.assertIn(u'unexpected error', resp['msg'].lower())
@scenario('data/basic_scenario.xml')
def test_update_xml(self, xblock):
request = json.dumps({'xml': self.load_fixture_str('data/updated_block.xml')})
# Verify the response is successful
resp = self.request(xblock, 'update_xml', request, response_format='json')
self.assertTrue(resp['success'])
self.assertIn('success', resp['msg'].lower())
# Check that the XBlock fields were updated
# We don't need to be exhaustive here, because we have other unit tests
# that verify this extensively.
self.assertEqual(xblock.title, u'Foo')
self.assertEqual(xblock.prompt, u'Test prompt')
self.assertEqual(xblock.rubric_assessments[0]['name'], 'peer-assessment')
self.assertEqual(xblock.rubric_criteria[0]['prompt'], 'Test criterion prompt')
@scenario('data/basic_scenario.xml')
def test_update_xml_invalid_request_data(self, xblock):
resp = self.request(xblock, 'update_xml', json.dumps({}), response_format='json')
self.assertFalse(resp['success'])
self.assertIn('xml', resp['msg'].lower())
@data(('data/invalid_rubric.xml', 'rubric'), ('data/invalid_assessment.xml', 'assessment'))
@scenario('data/basic_scenario.xml')
def test_update_xml_invalid(self, xblock, data):
xml_path = data[0]
expected_msg = data[1]
request = json.dumps({'xml': self.load_fixture_str(xml_path)})
# Store old XBlock fields for later verification
old_title = xblock.title
old_prompt = xblock.prompt
old_assessments = xblock.rubric_assessments
old_criteria = xblock.rubric_criteria
# Verify the response fails
resp = self.request(xblock, 'update_xml', request, response_format='json')
self.assertFalse(resp['success'])
self.assertIn(expected_msg, resp['msg'].lower())
# Check that the XBlock fields were NOT updated
# We don't need to be exhaustive here, because we have other unit tests
# that verify this extensively.
self.assertEqual(xblock.title, old_title)
self.assertEqual(xblock.prompt, old_prompt)
self.assertItemsEqual(xblock.rubric_assessments, old_assessments)
self.assertItemsEqual(xblock.rubric_criteria, old_criteria)
......@@ -8,7 +8,7 @@ from django.test import TestCase
from ddt import ddt, data, file_data, unpack
from openassessment.xblock.openassessmentblock import OpenAssessmentBlock, UI_MODELS
from openassessment.xblock.xml import (
serialize_content, update_from_xml,
serialize_content, update_from_xml_str,
UpdateFromXmlError, InvalidRubricError, InvalidAssessmentError
)
......@@ -144,7 +144,7 @@ class TestSerializeContent(TestCase):
self.oa_block.rubric_assessments = self.BASIC_ASSESSMENTS
for mutated_value in [0, u"\u9282", None]:
setattr(self.oa_block, 'title', mutated_value)
setattr(self.oa_block, field, mutated_value)
xml = serialize_content(self.oa_block)
try:
......@@ -267,7 +267,7 @@ class TestUpdateFromXml(TestCase):
def test_update_from_xml(self, data):
# Update the block based on the fixture XML definition
returned_block = update_from_xml(self.oa_block, "".join(data['xml']))
returned_block = update_from_xml_str(self.oa_block, "".join(data['xml']))
# The block we passed in should be updated and returned
self.assertEqual(self.oa_block, returned_block)
......@@ -281,7 +281,7 @@ class TestUpdateFromXml(TestCase):
@file_data('data/update_from_xml_error.json')
def test_update_from_xml_error(self, data):
with self.assertRaises(UpdateFromXmlError):
update_from_xml(self.oa_block, "".join(data['xml']))
update_from_xml_str(self.oa_block, "".join(data['xml']))
@file_data('data/update_from_xml.json')
def test_invalid_rubric(self, data):
......@@ -289,7 +289,7 @@ class TestUpdateFromXml(TestCase):
# We need to back this up with an integration test that checks whether the XBlock
# provides an appropriate rubric validator.
with self.assertRaises(InvalidRubricError):
update_from_xml(
update_from_xml_str(
self.oa_block, "".join(data['xml']),
rubric_validator=lambda _: (False, '')
)
......@@ -298,7 +298,7 @@ class TestUpdateFromXml(TestCase):
def test_invalid_assessment(self, data):
# Plug in an assessment validator that always reports that the assessment dict is invalid.
with self.assertRaises(InvalidAssessmentError):
update_from_xml(
update_from_xml_str(
self.oa_block, "".join(data['xml']),
assessment_validator=lambda _: (False, '')
)
......@@ -397,7 +397,7 @@ def serialize_content(oa_block):
Returns:
xml (unicode)
"""
root = etree.Element('openassessmentblock')
root = etree.Element('openassessment')
# Open assessment displayed title
title = etree.SubElement(root, 'title')
......@@ -433,7 +433,7 @@ def serialize_content(oa_block):
def update_from_xml(
oa_block, xml,
oa_block, root,
rubric_validator=lambda _: (True, ''),
assessment_validator=lambda _: (True, '')
):
......@@ -445,7 +445,7 @@ def update_from_xml(
Args:
oa_block (OpenAssessmentBlock): The open assessment block to update.
xml (unicode): The XML definition of the XBlock's content.
root (lxml.etree.Element): The XML definition of the XBlock's content.
Kwargs:
rubric_validator (callable): Function that accepts a rubric dict and returns
......@@ -462,17 +462,10 @@ def update_from_xml(
UpdateFromXmlError: The XML definition is invalid or the XBlock could not be updated.
InvalidRubricError: The rubric was not semantically valid.
"""
# Parse the XML content definition
# Use the defusedxml library implementation to avoid known security vulnerabilities in ElementTree:
# http://docs.python.org/2/library/xml.html#xml-vulnerabilities
try:
root = safe_etree.fromstring(xml.encode('utf-8'))
except (ValueError, safe_etree.ParseError) as ex:
raise UpdateFromXmlError(_("An error occurred while parsing the XML content."))
# Check that the root has the correct tag
if root.tag != 'openassessmentblock':
raise UpdateFromXmlError(_("XML content must contain an 'openassessmentblock' root element."))
if root.tag != 'openassessment':
raise UpdateFromXmlError(_("XML content must contain an 'openassessment' root element."))
# Retrieve the title
title_el = root.find('title')
......@@ -503,3 +496,33 @@ def update_from_xml(
oa_block.rubric_assessments = assessments
return oa_block
def update_from_xml_str(oa_block, xml, **kwargs):
"""
Update the OpenAssessment XBlock's content from an XML string definition.
Parses the string using a library that avoids some known security vulnerabilities in etree.
Args:
oa_block (OpenAssessmentBlock): The open assessment block to update.
xml (unicode): The XML definition of the XBlock's content.
Kwargs:
Same as `update_from_xml`
Returns:
OpenAssessmentBlock
Raises:
UpdateFromXmlError: The XML definition is invalid or the XBlock could not be updated.
InvalidRubricError: The rubric was not semantically valid.
"""
# Parse the XML content definition
# Use the defusedxml library implementation to avoid known security vulnerabilities in ElementTree:
# http://docs.python.org/2/library/xml.html#xml-vulnerabilities
try:
root = safe_etree.fromstring(xml.encode('utf-8'))
except (ValueError, safe_etree.ParseError) as ex:
raise UpdateFromXmlError(_("An error occurred while parsing the XML content."))
return update_from_xml(oa_block, root, **kwargs)
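The Studio handler above reaches this code through update_from_xml_str, passing the shared validators. A hedged sketch of a direct call, assuming oa_block is an OpenAssessmentBlock and xml_string holds a complete <openassessment> definition:

# Sketch only: oa_block and xml_string are assumed to exist in the caller's scope.
from openassessment.peer.serializers import validate_assessment_dict, validate_rubric_dict

updated_block = update_from_xml_str(
    oa_block, xml_string,
    rubric_validator=validate_rubric_dict,
    assessment_validator=validate_assessment_dict,
)
# Raises UpdateFromXmlError on malformed XML or a wrong root tag;
# InvalidRubricError / InvalidAssessmentError when a validator rejects the content.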
......@@ -10,7 +10,8 @@ TEST_APPS = ('openassessment.peer', 'submissions', "openassessment.xblock")
# Configure nose
NOSE_ARGS = [
'--with-coverage',
'--cover-package=' + ",".join(TEST_APPS)
'--cover-package=' + ",".join(TEST_APPS),
'--cover-erase',
]
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
......