Commit 22732a0f by Will Daly

Merge pull request #365 from edx/will/ai-grading-xml-rules

Will/ai grading xml rules
parents 11a59fd7 548a01c4
{
"student_training_examples_invalid_criterion": {
"rubric": {
"criteria": [
{
"order_num": 0,
"name": "vocabulary",
"prompt": "How good is the vocabulary?",
"options": [
{
"order_num": 0,
"points": 0,
"name": "Poor",
"explanation": "Poor job!"
},
{
"order_num": 1,
"points": 1,
"name": "Good",
"explanation": "Good job!"
}
]
},
{
"order_num": 1,
"name": "grammar",
"prompt": "How good is the grammar?",
"options": [
{
"order_num": 0,
"points": 0,
"name": "Poor",
"explanation": "Poor job!"
},
{
"order_num": 1,
"points": 1,
"name": "Good",
"explanation": "Good job!"
}
]
}
]
},
"assessments": [
{
"name": "student-training",
"start": null,
"due": null,
"examples": [
{
"answer": "ẗëṡẗ äṅṡẅëṛ",
"options_selected": [
{
"criterion": "Invalid criterion!",
"option": "Good"
},
{
"criterion": "grammar",
"option": "Poor"
}
]
}
]
},
{
"name": "peer-assessment",
"start": null,
"due": null,
"must_grade": 5,
"must_be_graded_by": 3
}
]
},
"example_based_no_training_examples": {
"rubric": {
"criteria": [
{
"order_num": 0,
"name": "vocabulary",
"prompt": "how good is the vocabulary?",
"options": [
{
"order_num": 0,
"points": 0,
"name": "poor",
"explanation": "poor job!"
},
{
"order_num": 1,
"points": 1,
"name": "good",
"explanation": "good job!"
}
]
},
{
"order_num": 1,
"name": "grammar",
"prompt": "how good is the grammar?",
"options": [
{
"order_num": 0,
"points": 0,
"name": "poor",
"explanation": "poor job!"
},
{
"order_num": 1,
"points": 1,
"name": "good",
"explanation": "good job!"
}
]
}
]
},
"assessments": [
{
"name": "example-based-assessment",
"start": null,
"due": null,
"algorithm_id": "ease",
"examples": []
}
]
}
}
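These fixtures are consumed one top-level key at a time by the ddt-driven tests later in this commit: each key becomes one generated test case, with its value passed in as `data`. A minimal sketch of that pattern, assuming the file above is the `data/invalid_assessment_examples.json` referenced by the test code further down:

import ddt
from django.test import TestCase

from openassessment.xblock.validation import validate_assessment_examples


@ddt.ddt
class InvalidExamplesSketch(TestCase):

    # One test case is generated per top-level fixture key, e.g.
    # "student_training_examples_invalid_criterion" above.
    @ddt.file_data('data/invalid_assessment_examples.json')
    def test_invalid_examples(self, data):
        success, msg = validate_assessment_examples(data['rubric'], data['assessments'])
        self.assertFalse(success)
        self.assertGreater(len(msg), 0)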
@@ -158,5 +158,38 @@
}
],
"is_released": true
},
"example_based_algorithm_id_is_not_ease": {
"assessments": [
{
"name": "example-based-assessment",
"start": null,
"due": null,
"algorithm_id": "NOT_EASE",
"examples": [
{
"answer": "тєѕт αηѕωєя",
"options_selected": [
{
"criterion": "Test criterion",
"option": "No"
}
]
},
{
"answer": "тєѕт αηѕωєя TWO",
"options_selected": [
{
"criterion": "Test criterion",
"option": "Yes"
}
]
}
]
}
],
"current_assessments": null,
"is_released": false
}
}
@@ -3,9 +3,7 @@
"rubric": {
"prompt": "Test Prompt",
"criteria": []
-},
-"current_rubric": null,
-"is_released": false
+}
},
"zero_options": { "zero_options": {
...@@ -19,9 +17,7 @@ ...@@ -19,9 +17,7 @@
"options": [] "options": []
} }
] ]
}, }
"current_rubric": null,
"is_released": false
}, },
"negative_points": { "negative_points": {
...@@ -42,9 +38,7 @@ ...@@ -42,9 +38,7 @@
] ]
} }
] ]
}, }
"current_rubric": null,
"is_released": false
}, },
"duplicate_criteria_names": { "duplicate_criteria_names": {
...@@ -78,9 +72,7 @@ ...@@ -78,9 +72,7 @@
] ]
} }
] ]
}, }
"current_rubric": null,
"is_released": false
}, },
"duplicate_option_names": { "duplicate_option_names": {
...@@ -107,9 +99,7 @@ ...@@ -107,9 +99,7 @@
] ]
} }
] ]
}, }
"current_rubric": null,
"is_released": false
}, },
"change_points_after_release": { "change_points_after_release": {
...@@ -396,5 +386,33 @@ ...@@ -396,5 +386,33 @@
] ]
}, },
"is_released": true "is_released": true
},
"example_based_duplicate_option_points": {
"is_example_based": true,
"rubric": {
"prompt": "Test Prompt",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [
{
"order_num": 0,
"points": 2,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
}
]
}
}
}
@@ -796,5 +796,175 @@
]
}
]
},
"example_based_assessment": {
"xml": [
"<openassessment>",
"<title>foo</title>",
"<assessments>",
"<assessment name=\"example-based-assessment\" algorithm_id=\"ease\">",
"<example>",
"<answer>тєѕт αηѕωєя</answer>",
"<select criterion=\"Test criterion\" option=\"No\" />",
"</example>",
"<example>",
"<answer>тєѕт αηѕωєя TWO</answer>",
"<select criterion=\"Test criterion\" option=\"Yes\" />",
"</example>",
"</assessment>",
"</assessments>",
"<rubric>",
"<prompt>test prompt</prompt>",
"<criterion>",
"<name>test criterion</name>",
"<prompt>test criterion prompt</prompt>",
"<option points=\"0\"><name>no</name><explanation>no explanation</explanation></option>",
"<option points=\"2\"><name>yes</name><explanation>yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
],
"title": "foo",
"prompt": "test prompt",
"start": "2000-01-01t00:00:00",
"due": "3000-01-01t00:00:00",
"submission_start": null,
"submission_due": null,
"criteria": [
{
"order_num": 0,
"name": "test criterion",
"prompt": "test criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
"points": 0,
"name": "no",
"explanation": "no explanation"
},
{
"order_num": 1,
"points": 2,
"name": "yes",
"explanation": "yes explanation"
}
]
}
],
"assessments": [
{
"name": "example-based-assessment",
"start": null,
"due": null,
"algorithm_id": "ease",
"examples": [
{
"answer": "тєѕт αηѕωєя",
"options_selected": [
{
"criterion": "Test criterion",
"option": "No"
}
]
},
{
"answer": "тєѕт αηѕωєя TWO",
"options_selected": [
{
"criterion": "Test criterion",
"option": "Yes"
}
]
}
]
}
]
},
"example_based_default_algorithm_id": {
"xml": [
"<openassessment>",
"<title>foo</title>",
"<assessments>",
"<assessment name=\"example-based-assessment\">",
"<example>",
"<answer>тєѕт αηѕωєя</answer>",
"<select criterion=\"Test criterion\" option=\"No\" />",
"</example>",
"<example>",
"<answer>тєѕт αηѕωєя TWO</answer>",
"<select criterion=\"Test criterion\" option=\"Yes\" />",
"</example>",
"</assessment>",
"</assessments>",
"<rubric>",
"<prompt>test prompt</prompt>",
"<criterion>",
"<name>test criterion</name>",
"<prompt>test criterion prompt</prompt>",
"<option points=\"0\"><name>no</name><explanation>no explanation</explanation></option>",
"<option points=\"2\"><name>yes</name><explanation>yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
],
"title": "foo",
"prompt": "test prompt",
"start": "2000-01-01t00:00:00",
"due": "3000-01-01t00:00:00",
"submission_start": null,
"submission_due": null,
"criteria": [
{
"order_num": 0,
"name": "test criterion",
"prompt": "test criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
"points": 0,
"name": "no",
"explanation": "no explanation"
},
{
"order_num": 1,
"points": 2,
"name": "yes",
"explanation": "yes explanation"
}
]
}
],
"assessments": [
{
"name": "example-based-assessment",
"start": null,
"due": null,
"algorithm_id": "ease",
"examples": [
{
"answer": "тєѕт αηѕωєя",
"options_selected": [
{
"criterion": "Test criterion",
"option": "No"
}
]
},
{
"answer": "тєѕт αηѕωєя TWO",
"options_selected": [
{
"criterion": "Test criterion",
"option": "Yes"
}
]
}
]
}
]
}
}
@@ -392,5 +392,63 @@
"</rubric>",
"</openassessment>"
]
},
"example_based_start_date": {
"xml": [
"<openassessment>",
"<title>foo</title>",
"<assessments>",
"<assessment name=\"example-based-assessment\" start=\"2020-01-01\">",
"<example>",
"<answer>тєѕт αηѕωєя</answer>",
"<select criterion=\"Test criterion\" option=\"No\" />",
"</example>",
"<example>",
"<answer>тєѕт αηѕωєя TWO</answer>",
"<select criterion=\"Test criterion\" option=\"Yes\" />",
"</example>",
"</assessment>",
"</assessments>",
"<rubric>",
"<prompt>test prompt</prompt>",
"<criterion>",
"<name>test criterion</name>",
"<prompt>test criterion prompt</prompt>",
"<option points=\"0\"><name>no</name><explanation>no explanation</explanation></option>",
"<option points=\"2\"><name>yes</name><explanation>yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
]
},
"example_based_due_date": {
"xml": [
"<openassessment>",
"<title>foo</title>",
"<assessments>",
"<assessment name=\"example-based-assessment\" due=\"2020-01-01\">",
"<example>",
"<answer>тєѕт αηѕωєя</answer>",
"<select criterion=\"Test criterion\" option=\"No\" />",
"</example>",
"<example>",
"<answer>тєѕт αηѕωєя TWO</answer>",
"<select criterion=\"Test criterion\" option=\"Yes\" />",
"</example>",
"</assessment>",
"</assessments>",
"<rubric>",
"<prompt>test prompt</prompt>",
"<criterion>",
"<name>test criterion</name>",
"<prompt>test criterion prompt</prompt>",
"<option points=\"0\"><name>no</name><explanation>no explanation</explanation></option>",
"<option points=\"2\"><name>yes</name><explanation>yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
]
}
}
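The "xml" values in these fixtures are lists of strings, so a test has to join them into a single document before parsing. A minimal sketch of that step, using lxml here as an assumption (only the fixture format above is given by the diff):

from lxml import etree

def fixture_to_xml(xml_lines):
    """Join a fixture's line list into one XML string and parse it."""
    return etree.fromstring("".join(xml_lines))

# Usage: fixture_to_xml(data["xml"]) yields the <openassessment> root
# element, which the update-from-XML logic under test can then consume.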
{
"student_training_examples_match_rubric": {
"rubric": {
"criteria": [
{
"order_num": 0,
"name": "vocabulary",
"prompt": "How good is the vocabulary?",
"options": [
{
"order_num": 0,
"points": 0,
"name": "Poor",
"explanation": "Poor job!"
},
{
"order_num": 1,
"points": 1,
"name": "Good",
"explanation": "Good job!"
}
]
},
{
"order_num": 1,
"name": "grammar",
"prompt": "How good is the grammar?",
"options": [
{
"order_num": 0,
"points": 0,
"name": "Poor",
"explanation": "Poor job!"
},
{
"order_num": 1,
"points": 1,
"name": "Good",
"explanation": "Good job!"
}
]
}
]
},
"assessments": [
{
"name": "student-training",
"start": null,
"due": null,
"examples": [
{
"answer": "ẗëṡẗ äṅṡẅëṛ",
"options_selected": [
{
"criterion": "vocabulary",
"option": "Good"
},
{
"criterion": "grammar",
"option": "Poor"
}
]
}
]
},
{
"name": "peer-assessment",
"start": null,
"due": null,
"must_grade": 5,
"must_be_graded_by": 3
}
]
},
"example_based_assessment_matches_rubric": {
"rubric": {
"criteria": [
{
"order_num": 0,
"name": "vocabulary",
"prompt": "How good is the vocabulary?",
"options": [
{
"order_num": 0,
"points": 0,
"name": "Poor",
"explanation": "Poor job!"
},
{
"order_num": 1,
"points": 1,
"name": "Good",
"explanation": "Good job!"
}
]
},
{
"order_num": 1,
"name": "grammar",
"prompt": "How good is the grammar?",
"options": [
{
"order_num": 0,
"points": 0,
"name": "Poor",
"explanation": "Poor job!"
},
{
"order_num": 1,
"points": 1,
"name": "Good",
"explanation": "Good job!"
}
]
}
]
},
"assessments": [
{
"name": "example-based-assessment",
"start": null,
"due": null,
"algorithm_id": "ease",
"examples": [
{
"answer": "ẗëṡẗ äṅṡẅëṛ",
"options_selected": [
{
"criterion": "vocabulary",
"option": "Good"
},
{
"criterion": "grammar",
"option": "Poor"
}
]
}
]
}
]
}
}
@@ -13,6 +13,7 @@
"current_assessments": null,
"is_released": false
},
"self_only": { "self_only": {
"assessments": [ "assessments": [
{ {
@@ -22,6 +23,7 @@
"current_assessments": null,
"is_released": false
},
"must_be_graded_by_equals_must_grade": { "must_be_graded_by_equals_must_grade": {
"assessments": [ "assessments": [
{ {
@@ -35,5 +37,38 @@
],
"current_assessments": null,
"is_released": false
},
"example_based_algorithm_id_is_ease": {
"assessments": [
{
"name": "example-based-assessment",
"start": null,
"due": null,
"algorithm_id": "ease",
"examples": [
{
"answer": "тєѕт αηѕωєя",
"options_selected": [
{
"criterion": "Test criterion",
"option": "No"
}
]
},
{
"answer": "тєѕт αηѕωєя TWO",
"options_selected": [
{
"criterion": "Test criterion",
"option": "Yes"
}
]
}
]
}
],
"current_assessments": null,
"is_released": false
}
}
@@ -23,9 +23,7 @@
]
}
]
-},
-"current_rubric": null,
-"is_released": false
+}
},
"unicode": { "unicode": {
...@@ -52,9 +50,7 @@ ...@@ -52,9 +50,7 @@
] ]
} }
] ]
}, }
"current_rubric": null,
"is_released": false
}, },
"change_points_before_release": { "change_points_before_release": {
...@@ -293,10 +289,7 @@ ...@@ -293,10 +289,7 @@
] ]
} }
] ]
}, }
"current_rubric": null,
"is_released": false
}, },
"remove_options_before_release": { "remove_options_before_release": {
......
@@ -10,7 +10,10 @@ import pytz
import ddt
from django.test import TestCase
from openassessment.xblock.openassessmentblock import OpenAssessmentBlock
-from openassessment.xblock.validation import validator, validate_assessments, validate_rubric, validate_dates
+from openassessment.xblock.validation import (
+    validator, validate_assessments, validate_rubric,
+    validate_dates, validate_assessment_examples
+)
@ddt.ddt
@@ -79,13 +82,39 @@ class RubricValidationTest(TestCase):
    @ddt.file_data('data/valid_rubrics.json')
    def test_valid_assessment(self, data):
-        success, msg = validate_rubric(data['rubric'], data['current_rubric'], data['is_released'])
+        current_rubric = data.get('current_rubric')
+        is_released = data.get('is_released', False)
+        is_example_based = data.get('is_example_based', False)
+        success, msg = validate_rubric(
+            data['rubric'], current_rubric, is_released, is_example_based
+        )
        self.assertTrue(success)
        self.assertEqual(msg, u'')
    @ddt.file_data('data/invalid_rubrics.json')
    def test_invalid_assessment(self, data):
-        success, msg = validate_rubric(data['rubric'], data['current_rubric'], data['is_released'])
+        current_rubric = data.get('current_rubric')
+        is_released = data.get('is_released', False)
+        is_example_based = data.get('is_example_based', False)
+        success, msg = validate_rubric(
+            data['rubric'], current_rubric, is_released, is_example_based
+        )
        self.assertFalse(success)
        self.assertGreater(len(msg), 0)
@ddt.ddt
class AssessmentExamplesValidationTest(TestCase):

    @ddt.file_data('data/valid_assessment_examples.json')
    def test_valid_assessment_examples(self, data):
        success, msg = validate_assessment_examples(data['rubric'], data['assessments'])
        self.assertTrue(success)
        self.assertEqual(msg, u'')

    @ddt.file_data('data/invalid_assessment_examples.json')
    def test_invalid_assessment_examples(self, data):
        success, msg = validate_assessment_examples(data['rubric'], data['assessments'])
        self.assertFalse(success)
        self.assertGreater(len(msg), 0)
@@ -211,26 +240,35 @@ class ValidationIntegrationTest(TestCase):
        "due": None
    }
    EXAMPLES = [
        {
            "answer": "ẗëṡẗ äṅṡẅëṛ",
            "options_selected": [
                {
                    "criterion": "vocabulary",
                    "option": "Good"
                },
                {
                    "criterion": "grammar",
                    "option": "Poor"
                }
            ]
        }
    ]
    ASSESSMENTS = [
        {
            "name": "example-based-assessment",
            "start": None,
            "due": None,
            "examples": EXAMPLES,
            "algorithm_id": "ease"
        },
        {
            "name": "student-training",
            "start": None,
            "due": None,
-            "examples": [
-                {
-                    "answer": "ẗëṡẗ äṅṡẅëṛ",
-                    "options_selected": [
-                        {
-                            "criterion": "vocabulary",
-                            "option": "Good"
-                        },
-                        {
-                            "criterion": "grammar",
-                            "option": "Poor"
-                        }
-                    ]
-                }
-            ]
+            "examples": EXAMPLES,
        },
        {
            "name": "peer-assessment",
@@ -254,7 +292,7 @@ class ValidationIntegrationTest(TestCase):
        self.oa_block.due = None
        self.validator = validator(self.oa_block)

-    def test_student_training_examples_match_rubric(self):
+    def test_validates_successfully(self):
        is_valid, msg = self.validator(self.RUBRIC, self.SUBMISSION, self.ASSESSMENTS)
        self.assertTrue(is_valid, msg=msg)
        self.assertEqual(msg, "")
@@ -278,3 +316,22 @@ class ValidationIntegrationTest(TestCase):
        is_valid, msg = self.validator(self.RUBRIC, self.SUBMISSION, mutated_assessments)
        self.assertFalse(is_valid)
        self.assertEqual(msg, u'Example 1 has an invalid option for "vocabulary": "Invalid option!"')
    def test_example_based_assessment_duplicate_point_values(self):
        # Mutate the rubric so that two options have the same point value
        # for a particular criterion.
        # This should cause a validation error with example-based assessment.
        mutated_rubric = copy.deepcopy(self.RUBRIC)
        for option in mutated_rubric['criteria'][0]['options']:
            option['points'] = 1

        # Expect a validation error
        is_valid, msg = self.validator(mutated_rubric, self.SUBMISSION, self.ASSESSMENTS)
        self.assertFalse(is_valid)
        self.assertEqual(msg, u'Example-based assessments cannot have duplicate point values.')

        # But it should be okay if we don't have example-based assessment
        no_example_based = copy.deepcopy(self.ASSESSMENTS)[1:]
        is_valid, msg = self.validator(mutated_rubric, self.SUBMISSION, no_example_based)
        self.assertTrue(is_valid)
        self.assertEqual(msg, u'')
@@ -64,6 +64,7 @@ def _is_valid_assessment_sequence(assessments):
        ['peer-assessment', 'self-assessment'],
        ['student-training', 'peer-assessment'],
        ['student-training', 'peer-assessment', 'self-assessment'],
        ['example-based-assessment'],
        ['example-based-assessment', 'self-assessment'],
        ['example-based-assessment', 'peer-assessment'],
        ['example-based-assessment', 'peer-assessment', 'self-assessment'],
@@ -126,6 +127,12 @@ def validate_assessments(assessments, current_assessments, is_released):
        if must_grade < must_be_graded_by:
            return (False, _('The "must_grade" value must be greater than or equal to the "must_be_graded_by" value.'))

        # Example-based assessment MUST specify 'ease' as the algorithm ID,
        # at least for now. Later, we may make this more flexible.
        if assessment_dict.get('name') == 'example-based-assessment':
            if assessment_dict.get('algorithm_id') not in ['ease', 'fake']:
                return (False, _('The "algorithm_id" value must be set to "ease" or "fake"'))

    if is_released:
        if len(assessments) != len(current_assessments):
            return (False, _("The number of assessments cannot be changed after the problem has been released."))

@@ -138,7 +145,7 @@
    return (True, u'')
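A hypothetical call (not part of this commit) showing the new rule in action, borrowing the shape of the example_based_algorithm_id_is_not_ease fixture above:

assessments = [{
    'name': 'example-based-assessment',
    'start': None,
    'due': None,
    'algorithm_id': 'NOT_EASE',
    'examples': [],
}]
# The sequence ['example-based-assessment'] is now a valid sequence, so
# validation fails on the algorithm ID check rather than the sequence check.
success, msg = validate_assessments(assessments, current_assessments=None, is_released=False)
assert not success  # msg: the "algorithm_id" value must be "ease" or "fake"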
-def validate_rubric(rubric_dict, current_rubric, is_released):
+def validate_rubric(rubric_dict, current_rubric, is_released, is_example_based):
""" """
Check that the rubric is semantically valid. Check that the rubric is semantically valid.
...@@ -146,6 +153,7 @@ def validate_rubric(rubric_dict, current_rubric, is_released): ...@@ -146,6 +153,7 @@ def validate_rubric(rubric_dict, current_rubric, is_released):
rubric_dict (dict): Serialized Rubric model representing the updated state of the rubric. rubric_dict (dict): Serialized Rubric model representing the updated state of the rubric.
current_rubric (dict): Serialized Rubric model representing the current state of the rubric. current_rubric (dict): Serialized Rubric model representing the current state of the rubric.
is_released (bool): True if and only if the problem has been released. is_released (bool): True if and only if the problem has been released.
is_example_based (bool): True if and only if this is an example-based assessment.
Returns: Returns:
tuple (is_valid, msg) where tuple (is_valid, msg) where
@@ -160,7 +168,7 @@
    # No duplicate criteria names
    duplicates = _duplicates([criterion['name'] for criterion in rubric_dict['criteria']])
    if len(duplicates) > 0:
-        msg = u"Criteria duplicate name(s): {duplicates}".format(
+        msg = _(u"Criteria duplicate name(s): {duplicates}").format(
            duplicates=", ".join(duplicates)
        )
        return (False, msg)
@@ -169,28 +177,37 @@
    for criterion in rubric_dict['criteria']:
        duplicates = _duplicates([option['name'] for option in criterion['options']])
        if len(duplicates) > 0:
-            msg = u"Options in '{criterion}' have duplicate name(s): {duplicates}".format(
+            msg = _(u"Options in '{criterion}' have duplicate name(s): {duplicates}").format(
                criterion=criterion['name'], duplicates=", ".join(duplicates)
            )
            return (False, msg)

        # Example-based assessments impose the additional restriction
        # that the point values for options must be unique within
        # a particular rubric criterion.
        if is_example_based:
            duplicates = _duplicates([option['points'] for option in criterion['options']])
            if len(duplicates) > 0:
                msg = _(u"Example-based assessments cannot have duplicate point values.")
                return (False, msg)
    # After a problem is released, authors are allowed to change text,
    # but nothing that would change the point value of a rubric.
    if is_released:

        # Number of criteria must be the same
        if len(rubric_dict['criteria']) != len(current_rubric['criteria']):
-            return (False, u'The number of criteria cannot be changed after a problem is released.')
+            return (False, _(u'The number of criteria cannot be changed after a problem is released.'))

        # Number of options for each criterion must be the same
        for new_criterion, old_criterion in _match_by_order(rubric_dict['criteria'], current_rubric['criteria']):
            if len(new_criterion['options']) != len(old_criterion['options']):
-                return (False, u'The number of options cannot be changed after a problem is released.')
+                return (False, _(u'The number of options cannot be changed after a problem is released.'))
            else:
                for new_option, old_option in _match_by_order(new_criterion['options'], old_criterion['options']):
                    if new_option['points'] != old_option['points']:
-                        return (False, u'Point values cannot be changed after a problem is released.')
+                        return (False, _(u'Point values cannot be changed after a problem is released.'))

    return (True, u'')
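validate_rubric leans on a _duplicates helper that falls outside this diff; a minimal sketch of what it plausibly does (an assumption, not the repository's actual code):

import collections

def _duplicates(items):
    """Return the set of values that occur more than once in `items`."""
    counts = collections.Counter(items)
    return set(item for item, count in counts.items() if count > 1)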
@@ -217,7 +234,7 @@ def validate_dates(start, end, date_ranges):
    return (True, u'')
-def _validate_assessment_examples(rubric_dict, assessments):
+def validate_assessment_examples(rubric_dict, assessments):
""" """
Validate assessment training examples. Validate assessment training examples.
...@@ -232,10 +249,14 @@ def _validate_assessment_examples(rubric_dict, assessments): ...@@ -232,10 +249,14 @@ def _validate_assessment_examples(rubric_dict, assessments):
""" """
for asmnt in assessments: for asmnt in assessments:
if asmnt['name'] == 'student-training': if asmnt['name'] == 'student-training' or asmnt['name'] == 'example-based-assessment':
examples = convert_training_examples_list_to_dict(asmnt['examples']) examples = convert_training_examples_list_to_dict(asmnt['examples'])
# Must have at least one training example
if len(examples) == 0:
return False, _(u"Student training and example-based assessments must have at least one training example")
# Delegate to the student training API to validate the # Delegate to the student training API to validate the
# examples against the rubric. # examples against the rubric.
errors = validate_training_examples(rubric_dict, examples) errors = validate_training_examples(rubric_dict, examples)
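convert_training_examples_list_to_dict is imported elsewhere in this module and not shown in the diff; under the assumption that it reshapes the XML-style list of selections into the mapping the training API expects, it would look roughly like:

def convert_training_examples_list_to_dict(examples_list):
    """Convert each example's [{'criterion': ..., 'option': ...}] list
    into a {criterion: option} mapping."""
    return [
        {
            'answer': example['answer'],
            'options_selected': {
                select['criterion']: select['option']
                for select in example['options_selected']
            },
        }
        for example in examples_list
    ]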
@@ -272,16 +293,17 @@ def validator(oa_block, strict_post_release=True):
        return (False, msg)

    # Rubric
    is_example_based = 'example-based-assessment' in [asmnt.get('name') for asmnt in assessments]
    current_rubric = {
        'prompt': oa_block.prompt,
        'criteria': oa_block.rubric_criteria
    }
-    success, msg = validate_rubric(rubric_dict, current_rubric, is_released)
+    success, msg = validate_rubric(rubric_dict, current_rubric, is_released, is_example_based)
    if not success:
        return (False, msg)

    # Training examples
-    success, msg = _validate_assessment_examples(rubric_dict, assessments)
+    success, msg = validate_assessment_examples(rubric_dict, assessments)
    if not success:
        return (False, msg)
...
@@ -390,6 +390,11 @@ def _parse_assessments_xml(assessments_root):
        # Assessment start
        if 'start' in assessment.attrib:

            # Example-based assessment is NOT allowed to have a start date
            if assessment_dict['name'] == 'example-based-assessment':
                raise UpdateFromXmlError(_('Example-based assessment cannot have a start date'))

            # Other assessment types CAN have a start date
            parsed_start = _parse_date(assessment.get('start'))

            if parsed_start is not None:
                assessment_dict['start'] = parsed_start
@@ -400,6 +405,11 @@
        # Assessment due
        if 'due' in assessment.attrib:

            # Example-based assessment is NOT allowed to have a due date
            if assessment_dict['name'] == 'example-based-assessment':
                raise UpdateFromXmlError(_('Example-based assessment cannot have a due date'))

            # Other assessment types CAN have a due date
            parsed_start = _parse_date(assessment.get('due'))

            if parsed_start is not None:
                assessment_dict['due'] = parsed_start
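_parse_date is also outside this diff; a minimal sketch under the assumption that it accepts ISO-8601 strings and returns a UTC datetime (or None when the value is missing):

from dateutil import parser
import pytz

def _parse_date(value):
    """Parse an ISO-8601 date string into a UTC datetime, or return None."""
    if value is None:
        return None
    return parser.parse(value).replace(tzinfo=pytz.utc)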
@@ -434,7 +444,7 @@
        if assessment_dict['name'] == 'example-based-assessment':
            assessment_dict['examples'] = _parse_examples_xml(examples)
-            assessment_dict['algorithm_id'] = unicode(assessment.get('algorithm_id'))
+            assessment_dict['algorithm_id'] = unicode(assessment.get('algorithm_id', 'ease'))

        # Update the list of assessments
        assessments_list.append(assessment_dict)
...