Commit 64b3df76 by Will Daly

Merge pull request #326 from edx/will/student-training-xml

Will/student training xml
parents 65463429 7a4e0ed1
......@@ -59,6 +59,11 @@ UI_MODELS = {
}
}
# Assessment types this XBlock recognizes.  `valid_assessments` filters the
# stored `rubric_assessments` field against this list, so that unknown
# assessment types (e.g. left in the problem definition after rolling back
# code that introduced a new type) are ignored gracefully.
# NOTE(review): "student-training" is parsed, serialized, and validated
# elsewhere in this change, but is not listed here — presumably deliberate
# until the feature is released, since `valid_assessments` would otherwise
# filter it out of rendering and workflow requirements. Confirm.
VALID_ASSESSMENT_TYPES = [
    "peer-assessment",
    "self-assessment",
]
def load(path):
"""Handy helper for getting resources from our kit."""
......@@ -252,7 +257,7 @@ class OpenAssessmentBlock(
"""
ui_models = [UI_MODELS["submission"]]
for assessment in self.rubric_assessments:
for assessment in self.valid_assessments:
ui_model = UI_MODELS[assessment["name"]]
ui_models.append(dict(assessment, **ui_model))
ui_models.append(UI_MODELS["grade"])
......@@ -305,8 +310,25 @@ class OpenAssessmentBlock(
return update_from_xml(block, node, validator=validator(block, strict_post_release=False))
@property
def valid_assessments(self):
    """
    Filter the stored assessment dictionaries down to recognized types.

    Unrecognized assessment types can end up in the XBlock field
    (e.g. because we roll back code after releasing a feature);
    dropping them here lets the block keep working gracefully.

    Returns:
        list: assessment dictionaries whose names are recognized.
    """
    recognized = []
    for assessment in self.rubric_assessments:
        if assessment.get('name') in VALID_ASSESSMENT_TYPES:
            recognized.append(assessment)
    return recognized
@property
def assessment_steps(self):
    """
    Return the names of the recognized assessment steps, in order.

    Uses `valid_assessments` (rather than the raw `rubric_assessments`
    field) so that unknown assessment types left over from a code
    roll-back are excluded from the workflow steps.

    Returns:
        list: assessment names (strings).
    """
    # The diff artifact left two return statements here; only the one
    # based on `valid_assessments` is the intended behavior — the stale
    # `rubric_assessments` version would bypass the type filtering.
    return [asmnt['name'] for asmnt in self.valid_assessments]
def render_assessment(self, path, context_dict=None):
"""Render an Assessment Module's HTML
......@@ -392,7 +414,7 @@ class OpenAssessmentBlock(
submission_range = (self.submission_start, self.submission_due)
assessment_ranges = [
(asmnt.get('start'), asmnt.get('due'))
for asmnt in self.rubric_assessments
for asmnt in self.valid_assessments
]
# Resolve unspecified dates and date strings to datetimes
......@@ -463,7 +485,7 @@ class OpenAssessmentBlock(
"must_be_graded_by": 3,
}
"""
for assessment in self.rubric_assessments:
for assessment in self.valid_assessments:
if assessment["name"] == mixin_name:
return assessment
......
......@@ -549,6 +549,252 @@
"must_be_graded_by": 3
}
]
}
},
"student_training_no_examples": {
"xml": [
"<openassessment>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"student-training\" start=\"2014-04-01T00:00:00\" due=\"2014-06-01T00:00:00\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
],
"title": "Foo",
"prompt": "Test prompt",
"start": "2000-01-01T00:00:00",
"due": "3000-01-01T00:00:00",
"submission_start": null,
"submission_due": null,
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
}
],
"assessments": [
{
"name": "student-training",
"start": "2014-04-01T00:00:00",
"due": "2014-06-01T00:00:00",
"examples": []
}
]
},
"student_training_one_example": {
"xml": [
"<openassessment>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"student-training\" start=\"2014-04-01T00:00:00\" due=\"2014-06-01T00:00:00\">",
"<example>",
"<answer>ẗëṡẗ äṅṡẅëṛ</answer>",
"<select criterion=\"Test criterion\" option=\"Yes\" />",
"</example>",
"</assessment>",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
],
"title": "Foo",
"prompt": "Test prompt",
"start": "2000-01-01T00:00:00",
"due": "3000-01-01T00:00:00",
"submission_start": null,
"submission_due": null,
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
}
],
"assessments": [
{
"name": "student-training",
"start": "2014-04-01T00:00:00",
"due": "2014-06-01T00:00:00",
"examples": [
{
"answer": "ẗëṡẗ äṅṡẅëṛ",
"options_selected": [
{
"criterion": "Test criterion",
"option": "Yes"
}
]
}
]
}
]
},
"student_training_multiple_examples": {
"xml": [
"<openassessment>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"student-training\" start=\"2014-04-01T00:00:00\" due=\"2014-06-01T00:00:00\">",
"<example>",
"<answer>ẗëṡẗ äṅṡẅëṛ</answer>",
"<select criterion=\"Test criterion\" option=\"Yes\" />",
"<select criterion=\"Another test criterion\" option=\"No\" />",
"</example>",
"<example>",
"<answer>äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ</answer>",
"<select criterion=\"Another test criterion\" option=\"Yes\" />",
"<select criterion=\"Test criterion\" option=\"No\" />",
"</example>",
"</assessment>",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"<criterion>",
"<name>Another test criterion</name>",
"<prompt>Another test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
],
"title": "Foo",
"prompt": "Test prompt",
"start": "2000-01-01T00:00:00",
"due": "3000-01-01T00:00:00",
"submission_start": null,
"submission_due": null,
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
},
{
"order_num": 1,
"name": "Another test criterion",
"prompt": "Another test criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
}
],
"assessments": [
{
"name": "student-training",
"start": "2014-04-01T00:00:00",
"due": "2014-06-01T00:00:00",
"examples": [
{
"answer": "ẗëṡẗ äṅṡẅëṛ",
"options_selected": [
{
"criterion": "Test criterion",
"option": "Yes"
},
{
"criterion": "Another test criterion",
"option": "No"
}
]
},
{
"answer": "äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ",
"options_selected": [
{
"criterion": "Another test criterion",
"option": "Yes"
},
{
"criterion": "Test criterion",
"option": "No"
}
]
}
]
}
]
}
}
......@@ -318,5 +318,79 @@
"</rubric>",
"</openassessment>"
]
},
"training_example_missing_answer": {
"xml": [
"<openassessment>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"student-training\">",
"<example>",
"<select criterion=\"Test criterion\" option=\"Yes\" />",
"</example>",
"</assessment>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
]
},
"training_example_select_missing_criterion": {
"xml": [
"<openassessment>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"student-training\">",
"<example>",
"<answer>Test answer</answer>",
"<select option=\"Yes\" />",
"</example>",
"</assessment>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
]
},
"training_example_select_missing_option": {
"xml": [
"<openassessment>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"student-training\">",
"<example>",
"<answer>Test answer</answer>",
"<select criterion=\"Test criterion\" />",
"</example>",
"</assessment>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
]
}
}
......@@ -156,6 +156,19 @@ class TestOpenAssessment(XBlockHandlerTestCase):
self.assertEqual(student_item['course_id'], 'test_course')
self.assertEqual(student_item['student_id'], 'test_student')
@scenario('data/basic_scenario.xml', user_id='Bob')
def test_ignore_unknown_assessment_types(self, xblock):
    """
    Verify that an unrecognized assessment type stored in the XBlock
    field (e.g. left behind by a code roll-back) is silently ignored
    and does not break rendering.
    """
    # If the XBlock contains an unknown assessment type
    # (perhaps after a roll-back), it should ignore it.
    xblock.rubric_assessments.append({'name': 'unknown'})

    # Check that the name is excluded from valid assessments
    self.assertNotIn({'name': 'unknown'}, xblock.valid_assessments)
    self.assertNotIn('unknown', xblock.assessment_steps)

    # Check that we can render the student view without error
    self.runtime.render(xblock, 'student_view')
class TestDates(XBlockHandlerTestCase):
......
# -*- coding: utf-8 -*-
"""
Test OpenAssessment XBlock validation.
"""
import copy
from datetime import datetime as dt
import mock
import pytz
import ddt
from django.test import TestCase
from openassessment.xblock.validation import validate_assessments, validate_rubric, validate_dates
from openassessment.xblock.openassessmentblock import OpenAssessmentBlock
from openassessment.xblock.validation import validator, validate_assessments, validate_rubric, validate_dates
@ddt.ddt
......@@ -33,14 +37,43 @@ class AssessmentValidationTest(TestCase):
# (peer -> self), and (self)
@ddt.file_data('data/assessment_combo.json')
def test_enforce_assessment_combo_restrictions(self, data):
success, msg = validate_assessments(data["assessments"], data["current_assessments"], data["is_released"])
self.assertEqual(success, data['valid'], msg=msg)
self._assert_validation(
data["assessments"], data["current_assessments"],
data["is_released"], data['valid']
)
@ddt.file_data('data/student_training_combo.json')
def test_student_training_combos(self, data):
self._assert_validation(
data["assessments"], data["current_assessments"],
data["is_released"], data['valid']
)
def _assert_validation(self, assessments, current_assessments, is_released, expected_is_valid):
    """
    Check that `validate_assessments` gives the expected result.

    When validation fails, also verify that the error message is
    non-empty, so users always get an explanation.

    Args:
        assessments (list): The updated list of assessments
        current_assessments (list): The current assessments in the problem definition.
        is_released (bool): Whether the problem has been released yet.
        expected_is_valid (bool): Whether the inputs should be marked valid or invalid

    Returns:
        None

    Raises:
        AssertionError
    """
    is_valid, message = validate_assessments(assessments, current_assessments, is_released)
    self.assertEqual(is_valid, expected_is_valid, msg=message)
    # Invalid configurations must carry a human-readable explanation.
    if not is_valid:
        self.assertGreater(len(message), 0)
@ddt.ddt
class RubricValidationTest(TestCase):
......@@ -131,3 +164,117 @@ class DateValidationTest(TestCase):
success, _ = validate_dates(valid, valid, [(valid, "invalid")])
self.assertFalse(success)
class ValidationIntegrationTest(TestCase):
    """
    Each validation function is combined into a single function
    used by the OA XBlock itself.

    This tests the combined function, rather than the
    individual validation functions.
    """

    # Two options ("Poor" / "Good") shared by both rubric criteria below.
    CRITERION_OPTIONS = [
        {
            "order_num": 0,
            "points": 0,
            "name": "Poor",
            "explanation": "Poor job!"
        },
        {
            "order_num": 1,
            "points": 1,
            "name": "Good",
            "explanation": "Good job!"
        }
    ]

    # Rubric with two criteria ("vocabulary" and "grammar"); the training
    # examples below must select one option per criterion.
    RUBRIC = {
        "criteria": [
            {
                "order_num": 0,
                "name": "vocabulary",
                "prompt": "How good is the vocabulary?",
                "options": CRITERION_OPTIONS
            },
            {
                "order_num": 1,
                "name": "grammar",
                "prompt": "How good is the grammar?",
                "options": CRITERION_OPTIONS
            }
        ]
    }

    # Minimal submission definition (no due date).
    SUBMISSION = {
        "due": None
    }

    # A valid sequence: student training (with one example covering both
    # criteria) followed by peer assessment.
    ASSESSMENTS = [
        {
            "name": "student-training",
            "start": None,
            "due": None,
            "examples": [
                {
                    "answer": "ẗëṡẗ äṅṡẅëṛ",
                    "options_selected": [
                        {
                            "criterion": "vocabulary",
                            "option": "Good"
                        },
                        {
                            "criterion": "grammar",
                            "option": "Poor"
                        }
                    ]
                }
            ]
        },
        {
            "name": "peer-assessment",
            "start": None,
            "due": None,
            "must_grade": 5,
            "must_be_graded_by": 3
        }
    ]

    def setUp(self):
        """
        Mock the OA XBlock and create a validator function.
        """
        self.oa_block = mock.MagicMock(OpenAssessmentBlock)
        # Unreleased problem, so post-release restrictions do not apply.
        self.oa_block.is_released.return_value = False
        # NOTE(review): `rubric_assessments` is read as a plain attribute by
        # the validator, so setting `.return_value` here leaves the attribute
        # itself a MagicMock rather than an empty list — presumably harmless
        # because the problem is unreleased; confirm.
        self.oa_block.rubric_assessments.return_value = []
        self.oa_block.prompt = ""
        self.oa_block.rubric_criteria = []
        self.oa_block.start = None
        self.oa_block.due = None
        self.validator = validator(self.oa_block)

    def test_student_training_examples_match_rubric(self):
        # Examples agree with the rubric, so validation succeeds with no message.
        is_valid, msg = self.validator(self.RUBRIC, self.SUBMISSION, self.ASSESSMENTS)
        self.assertTrue(is_valid, msg=msg)
        self.assertEqual(msg, "")

    def test_student_training_examples_invalid_criterion(self):
        # Mutate the assessment training examples so the criterion names don't match the rubric
        mutated_assessments = copy.deepcopy(self.ASSESSMENTS)
        mutated_assessments[0]['examples'][0]['options_selected'][0]['criterion'] = 'Invalid criterion!'

        # Expect a validation error
        is_valid, msg = self.validator(self.RUBRIC, self.SUBMISSION, mutated_assessments)
        self.assertFalse(is_valid)
        self.assertEqual(msg, u'Example 1 has an extra option for "Invalid criterion!"\nExample 1 is missing an option for "vocabulary"')

    def test_student_training_examples_invalid_option(self):
        # Mutate the assessment training examples so the option names don't match the rubric
        mutated_assessments = copy.deepcopy(self.ASSESSMENTS)
        mutated_assessments[0]['examples'][0]['options_selected'][0]['option'] = 'Invalid option!'

        # Expect a validation error
        is_valid, msg = self.validator(self.RUBRIC, self.SUBMISSION, mutated_assessments)
        self.assertFalse(is_valid)
        self.assertEqual(msg, u'Example 1 has an invalid option for "vocabulary": "Invalid option!"')
# -*- coding: utf-8 -*-
"""
Tests for serializing to/from XML.
"""
......@@ -8,8 +9,8 @@ import lxml.etree as etree
import pytz
import dateutil.parser
from django.test import TestCase
from ddt import ddt, data, file_data, unpack
from openassessment.xblock.openassessmentblock import OpenAssessmentBlock, UI_MODELS
import ddt
from openassessment.xblock.openassessmentblock import OpenAssessmentBlock
from openassessment.xblock.xml import (
serialize_content, update_from_xml_str, ValidationError, UpdateFromXmlError
)
......@@ -31,7 +32,7 @@ def _parse_date(value):
return dateutil.parser.parse(value).replace(tzinfo=pytz.utc)
@ddt
@ddt.ddt
class TestSerializeContent(TestCase):
"""
Test serialization of OpenAssessment XBlock content to XML.
......@@ -55,6 +56,22 @@ class TestSerializeContent(TestCase):
BASIC_ASSESSMENTS = [
{
"name": "student-training",
"start": "2014-02-27T09:46:28.873926",
"due": "2014-05-30T00:00:00.92926",
"examples": [
{
"answer": u"ẗëṡẗ äṅṡẅëṛ",
"options_selected": [
{
"criterion": "Test criterion",
"option": "Maybe"
}
]
}
]
},
{
"name": "peer-assessment",
"start": "2014-02-27T09:46:28.873926",
"due": "2014-05-30T00:00:00.92926",
......@@ -65,8 +82,6 @@ class TestSerializeContent(TestCase):
"name": "self-assessment",
"start": '2014-04-01T00:00:00.000000',
"due": "2014-06-01T00:00:00.92926",
"must_grade": 5,
"must_be_graded_by": 3,
}
]
......@@ -76,8 +91,7 @@ class TestSerializeContent(TestCase):
"""
self.oa_block = mock.MagicMock(OpenAssessmentBlock)
@file_data('data/serialize.json')
@ddt.file_data('data/serialize.json')
def test_serialize(self, data):
self.oa_block.title = data['title']
self.oa_block.prompt = data['prompt']
......@@ -112,7 +126,7 @@ class TestSerializeContent(TestCase):
self.assertEqual(
len(actual_elements), len(expected_elements),
msg="Incorrect XML output:\nActual: {}\nExpected: {}".format(actual_elements, expected_elements)
msg="Incorrect XML output:\nActual: {}\nExpected: {}".format(xml, pretty_expected)
)
for actual, expected in zip(actual_elements, expected_elements):
......@@ -151,7 +165,7 @@ class TestSerializeContent(TestCase):
try:
etree.fromstring(xml)
except Exception as ex:
except Exception as ex: # pylint:disable=W0703
msg = "Could not parse mutated criteria dict {criteria}\n{ex}".format(criteria=mutated_dict, ex=ex)
self.fail(msg)
......@@ -170,11 +184,11 @@ class TestSerializeContent(TestCase):
try:
etree.fromstring(xml)
except Exception as ex:
except Exception as ex: # pylint:disable=W0703
msg = "Could not parse mutated assessment dict {assessment}\n{ex}".format(assessment=mutated_dict, ex=ex)
self.fail(msg)
@data("title", "prompt", "start", "due", "submission_due", "submission_start")
@ddt.data("title", "prompt", "start", "due", "submission_due", "submission_start")
def test_mutated_field(self, field):
self.oa_block.rubric_criteria = self.BASIC_CRITERIA
self.oa_block.rubric_assessments = self.BASIC_ASSESSMENTS
......@@ -189,7 +203,7 @@ class TestSerializeContent(TestCase):
try:
etree.fromstring(xml)
except Exception as ex:
except Exception as ex: # pylint:disable=W0703
msg = "Could not parse mutated field {field} with value {value}\n{ex}".format(
field=field, value=mutated_value, ex=ex
)
......@@ -214,7 +228,7 @@ class TestSerializeContent(TestCase):
# Mutation #1: Remove the key
print "== Removing key {}".format(key)
yield {k:v for k,v in input_dict.iteritems() if k != key}
yield {k:v for k, v in input_dict.iteritems() if k != key}
if isinstance(val, dict):
......@@ -287,7 +301,7 @@ class TestSerializeContent(TestCase):
return mutated
@ddt
@ddt.ddt
class TestUpdateFromXml(TestCase):
"""
Test deserialization of OpenAssessment XBlock content from XML.
......@@ -309,7 +323,7 @@ class TestUpdateFromXml(TestCase):
self.oa_block.submission_start = "2000-01-01T00:00:00"
self.oa_block.submission_due = "2000-01-01T00:00:00"
@file_data('data/update_from_xml.json')
@ddt.file_data('data/update_from_xml.json')
def test_update_from_xml(self, data):
# Update the block based on the fixture XML definition
......@@ -328,12 +342,12 @@ class TestUpdateFromXml(TestCase):
self.assertEqual(self.oa_block.rubric_criteria, data['criteria'])
self.assertEqual(self.oa_block.rubric_assessments, data['assessments'])
@file_data('data/update_from_xml_error.json')
@ddt.file_data('data/update_from_xml_error.json')
def test_update_from_xml_error(self, data):
with self.assertRaises(UpdateFromXmlError):
update_from_xml_str(self.oa_block, "".join(data['xml']))
@file_data('data/update_from_xml.json')
@ddt.file_data('data/update_from_xml.json')
def test_invalid(self, data):
# Plug in a rubric validator that always reports that the rubric dict is invalid.
# We need to back this up with an integration test that checks whether the XBlock
......
......@@ -4,6 +4,7 @@ Validate changes to an XBlock before it is updated.
from collections import Counter
from django.utils.translation import ugettext as _
from openassessment.assessment.serializers import rubric_from_dict, InvalidRubric
from openassessment.assessment.api.student_training import validate_training_examples
from openassessment.xblock.resolve_dates import resolve_dates, DateValidationError, InvalidDateFormat
......@@ -43,6 +44,31 @@ def _duplicates(items):
return set(x for x in items if counts[x] > 1)
def _is_valid_assessment_sequence(assessments):
"""
Check whether the sequence of assessments is valid.
For example, we currently allow self-assessment after peer-assessment,
but do not allow peer-assessment before self-assessment.
Args:
assessments (list of dict): List of assessment dictionaries.
Returns:
bool
"""
valid_sequences = [
['self-assessment'],
['peer-assessment'],
['peer-assessment', 'self-assessment'],
['student-training', 'peer-assessment'],
['student-training', 'peer-assessment', 'self-assessment'],
]
sequence = [asmnt.get('name') for asmnt in assessments]
return sequence in valid_sequences
def validate_assessments(assessments, current_assessments, is_released):
"""
Check that the assessment dict is semantically valid.
......@@ -66,29 +92,18 @@ def validate_assessments(assessments, current_assessments, is_released):
is_valid is a boolean indicating whether the assessment is semantically valid
and msg describes any validation errors found.
"""
def _only_peer_or_self(assessments):
return (len(assessments) == 1
and (assessments[0].get('name') == 'self-assessment'
or assessments[0].get('name') == 'peer-assessment'))
def _peer_then_self(assessments):
return (
len(assessments) == 2 and
assessments[0].get('name') == 'peer-assessment' and
assessments[1].get('name') == 'self-assessment'
)
if len(assessments) == 0:
return (False, _("This problem must include at least one assessment."))
# Right now, there are two allowed scenarios: (peer -> self) and (self)
if not (_only_peer_or_self(assessments) or _peer_then_self(assessments)):
return (
False,
_("For this assignment, you can set a peer assessment only, a self "
"assessment only, or a peer assessment followed by a self "
"assessment.")
if not _is_valid_assessment_sequence(assessments):
msg = _(
"For this assignment, you can set a peer assessment only, a self "
"assessment only, or a peer assessment followed by a self "
"assessment. Student training is allowed only immediately before "
"peer assessment."
)
return (False, msg)
for assessment_dict in assessments:
# Number you need to grade is >= the number of people that need to grade you
......@@ -196,6 +211,46 @@ def validate_dates(start, end, date_ranges):
return (True, u'')
def _validate_assessment_examples(rubric_dict, assessments):
    """
    Validate assessment training examples.

    Args:
        rubric_dict (dict): The serialized rubric model.
        assessments (list of dict): List of assessment dictionaries.

    Returns:
        tuple (is_valid, msg) where
            is_valid is a boolean indicating whether the assessment is semantically valid
            and msg describes any validation errors found.
    """
    for assessment in assessments:
        if assessment['name'] != 'student-training':
            continue

        # The problem definition stores the options selected as an ordered
        # list, but the student training API expects an unordered mapping of
        # criterion name -> option name, so convert each example.
        converted_examples = []
        for example in assessment['examples']:
            selected = {}
            for selection in example['options_selected']:
                selected[selection['criterion']] = selection['option']
            converted_examples.append({
                'answer': example['answer'],
                'options_selected': selected,
            })

        # Delegate to the student training API to validate the
        # examples against the rubric; report the first failure found.
        errors = validate_training_examples(rubric_dict, converted_examples)
        if errors:
            return (False, "\n".join(errors))

    return (True, u'')
def validator(oa_block, strict_post_release=True):
"""
Return a validator function configured for the XBlock.
......@@ -213,32 +268,37 @@ def validator(oa_block, strict_post_release=True):
"""
def _inner(rubric_dict, submission_dict, assessments):
is_released = strict_post_release and oa_block.is_released()
# Assessments
current_assessments = oa_block.rubric_assessments
success, msg = validate_assessments(
assessments,
current_assessments,
strict_post_release and oa_block.is_released()
)
success, msg = validate_assessments(assessments, current_assessments, is_released)
if not success:
return (False, msg)
# Rubric
current_rubric = {
'prompt': oa_block.prompt,
'criteria': oa_block.rubric_criteria
}
success, msg = validate_rubric(
rubric_dict, current_rubric,
strict_post_release and oa_block.is_released()
)
success, msg = validate_rubric(rubric_dict, current_rubric, is_released)
if not success:
return (False, msg)
# Training examples
success, msg = _validate_assessment_examples(rubric_dict, assessments)
if not success:
return (False, msg)
# Dates
submission_dates = [(oa_block.start, submission_dict['due'])]
assessment_dates = [(asmnt['start'], asmnt['due']) for asmnt in assessments]
success, msg = validate_dates(oa_block.start, oa_block.due, submission_dates + assessment_dates)
if not success:
return (False, msg)
# Success!
return (True, u'')
return _inner
......@@ -24,7 +24,7 @@ class WorkflowMixin(object):
# standardize.
return [
_convert_rubric_assessment_name(ra["name"])
for ra in self.rubric_assessments
for ra in self.valid_assessments
]
def workflow_requirements(self):
......
......@@ -320,6 +320,48 @@ def _parse_rubric_xml(rubric_root):
return rubric_dict
def _parse_examples_xml(examples):
    """
    Parse <example> (training examples) from the XML.

    Args:
        examples (list of lxml.etree.Element): The <example> elements to parse.

    Returns:
        list of example dicts

    Raises:
        UpdateFromXmlError
    """
    parsed = []

    for example_el in examples:
        # Each example must contain exactly one answer.
        answers = example_el.findall('answer')
        if len(answers) != 1:
            raise UpdateFromXmlError(_(u'Each "example" element must contain exactly one "answer" element'))
        answer_text = _safe_get_text(answers[0])

        # Each <select> must name both the criterion and the chosen option.
        selections = []
        for select_el in example_el.findall('select'):
            if 'criterion' not in select_el.attrib:
                raise UpdateFromXmlError(_(u'Each "select" element must have a "criterion" attribute'))
            if 'option' not in select_el.attrib:
                raise UpdateFromXmlError(_(u'Each "select" element must have an "option" attribute'))
            selections.append({
                'criterion': unicode(select_el.get('criterion')),
                'option': unicode(select_el.get('option')),
            })

        parsed.append({
            'answer': answer_text,
            'options_selected': selections,
        })

    return parsed
def _parse_assessments_xml(assessments_root):
"""
Parse the <assessments> element in the OpenAssessment XBlock's content XML.
......@@ -331,7 +373,8 @@ def _parse_assessments_xml(assessments_root):
list of assessment dicts
Raises:
InvalidAssessmentsError: Assessment definitions were not semantically valid.
UpdateFromXmlError
"""
assessments_list = []
......@@ -343,7 +386,7 @@ def _parse_assessments_xml(assessments_root):
if 'name' in assessment.attrib:
assessment_dict['name'] = unicode(assessment.get('name'))
else:
raise UpdateFromXmlError(_('All "criterion" and "option" elements must contain a "name" element.'))
raise UpdateFromXmlError(_('All "assessment" elements must contain a "name" element.'))
# Assessment start
if 'start' in assessment.attrib:
......@@ -379,12 +422,50 @@ def _parse_assessments_xml(assessments_root):
except ValueError:
raise UpdateFromXmlError(_('The "must_be_graded_by" value must be a positive integer.'))
# Training examples
examples = assessment.findall('example')
# Student training should always have examples set, even if it's an empty list.
# (Validation rules, applied later, are responsible for
# ensuring that users specify at least one example).
# Other assessment types ignore examples.
# Later, we can add AI assessment here.
if assessment_dict['name'] == 'student-training':
assessment_dict['examples'] = _parse_examples_xml(examples)
# Update the list of assessments
assessments_list.append(assessment_dict)
return assessments_list
def _serialize_training_examples(examples, assessment_el):
    """
    Serialize a training example to XML.

    Args:
        examples (list of dict): List of example dictionaries.
        assessment_el (lxml.etree.Element): The <assessment> XML element.

    Returns:
        None
    """
    for example in examples:
        example_el = etree.SubElement(assessment_el, 'example')

        # Answer provided in the example (default to empty string)
        etree.SubElement(example_el, 'answer').text = unicode(example.get('answer', ''))

        # One <select> element per rubric option chosen in the example.
        for selection in example.get('options_selected', []):
            select_el = etree.SubElement(example_el, 'select')
            select_el.set('criterion', unicode(selection.get('criterion', '')))
            select_el.set('option', unicode(selection.get('option', '')))
def serialize_content_to_xml(oa_block, root):
"""
Serialize the OpenAssessment XBlock's content to XML.
......@@ -432,6 +513,12 @@ def serialize_content_to_xml(oa_block, root):
if assessment_dict.get('due') is not None:
assessment.set('due', unicode(assessment_dict['due']))
# Training examples
examples = assessment_dict.get('examples', [])
if not isinstance(examples, list):
examples = []
_serialize_training_examples(examples, assessment)
# Rubric
rubric_root = etree.SubElement(root, 'rubric')
_serialize_rubric(rubric_root, oa_block)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment