Commit d6012733 by gradyward

Addressed code review comments

parent 0a573a17
@@ -168,12 +168,12 @@ def create_assessment(
# This will raise an `InvalidRubricSelection` if the selected options do not match the rubric.
AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)
_log_assessment(assessment, submission)
except InvalidRubric:
msg = "Invalid rubric definition"
except InvalidRubric as ex:
msg = "Invalid rubric definition: " + str(ex)
logger.warning(msg, exc_info=True)
raise SelfAssessmentRequestError(msg)
except InvalidRubricSelection:
msg = "Selected options do not match the rubric"
except InvalidRubricSelection as ex:
msg = "Selected options do not match the rubric: " + str(ex)
logger.warning(msg, exc_info=True)
raise SelfAssessmentRequestError(msg)
......
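Note: the hunk above keeps the detail from the low-level `InvalidRubric` / `InvalidRubricSelection` exception in the message of the re-raised `SelfAssessmentRequestError`, while `exc_info=True` preserves the traceback in the log. A rough, self-contained sketch of that pattern (the class and function names here are placeholders, not the ORA2 API):

```python
import logging

logger = logging.getLogger(__name__)

class SelfAssessmentRequestError(Exception):
    """Placeholder for the API-level error surfaced to callers."""

def validate_selection(options_selected):
    # Placeholder for the rubric-matching check done by AssessmentPart.
    if not options_selected:
        raise ValueError("selected options do not match the rubric")

def create_assessment_sketch(options_selected):
    try:
        validate_selection(options_selected)
    except ValueError as ex:
        # Appending str(ex) keeps the specific cause visible to the caller,
        # not just a generic message; exc_info=True logs the full traceback.
        msg = "Selected options do not match the rubric: " + str(ex)
        logger.warning(msg, exc_info=True)
        raise SelfAssessmentRequestError(msg)
```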
@@ -809,5 +809,5 @@ class AssessmentPart(models.Model):
missing_criteria = zero_option_criteria_missing_feedback | optioned_criteria_missing_selection
if len(missing_criteria) > 0:
msg = u"Missing selections for criteria: {missing}".format(missing=missing_criteria)
msg = u"Missing selections for criteria: {missing}".format(missing=', '.join(missing_criteria))
raise InvalidRubricSelection(msg)
\ No newline at end of file
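The hunk above fixes how the set of missing criteria is rendered: formatting the set object directly puts its Python repr into the message, while `', '.join(...)` produces a readable list. A quick illustration (outputs are approximate; set ordering is not guaranteed):

```python
missing_criteria = {"Clarity", "Accuracy"}

# Before: the set's repr leaks into the error message, e.g. on Python 2:
#   u"Missing selections for criteria: set(['Accuracy', 'Clarity'])"
msg_before = u"Missing selections for criteria: {missing}".format(missing=missing_criteria)

# After: a plain comma-separated list, e.g.:
#   u"Missing selections for criteria: Accuracy, Clarity"
msg_after = u"Missing selections for criteria: {missing}".format(missing=', '.join(missing_criteria))
```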
{
"No Option Selected, Has Options, No Feedback": {
"has_option_selected": false,
"has_zero_options": false,
"has_feedback": false,
"expected_error": true
},
"No Option Selected, Has Options, Has Feedback": {
"has_option_selected": false,
"has_zero_options": false,
"has_feedback": true,
"expected_error": true
},
"No Option Selected, No Options, No Feedback": {
"has_option_selected": false,
"has_zero_options": true,
"has_feedback": false,
"expected_error": true
},
"No Option Selected, No Options, Has Feedback": {
"has_option_selected": false,
"has_zero_options": true,
"has_feedback": true,
"expected_error": false
},
"Has Option Selected, Has Options, No Feedback": {
"has_option_selected": true,
"has_zero_options": false,
"has_feedback": false,
"expected_error": false
},
"Has Option Selected, No Options, Has Feedback": {
"has_option_selected": true,
"has_zero_options": true,
"has_feedback": true,
"expected_error": true
},
"Has Option Selected, No Options, No Feedback": {
"has_option_selected": true,
"has_zero_options": true,
"has_feedback": false,
"expected_error": true
},
"Has Option Selected, Has Options, Has Feedback": {
"has_option_selected": true,
"has_zero_options": false,
"has_feedback": true,
"expected_error": false
}
}
\ No newline at end of file
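This new JSON fixture enumerates all eight combinations of (option selected, zero-option criterion, feedback given) together with whether `create_assessment` should reject the combination. The test in the next file loads it via `@ddt.file_data('data/models_check_criteria_assessed.json')`, which generates one test case per top-level key and passes the value in as `data`. Conceptually it behaves roughly like the hand-rolled sketch below (`check_case` is a hypothetical stand-in for the real test body, not part of ddt):

```python
import json

def check_case(name, data):
    # Stand-in for the real test body shown in the diff below.
    print("%s -> expected_error=%s" % (name, data['expected_error']))

with open('data/models_check_criteria_assessed.json') as data_file:
    for case_name, case_data in json.load(data_file).items():
        check_case(case_name, case_data)
```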
@@ -2,13 +2,16 @@
"""
Tests for the assessment Django models.
"""
import copy
import copy, ddt
from openassessment.test_utils import CacheResetTest
from openassessment.assessment.serializers import rubric_from_dict
from openassessment.assessment.models import Assessment, AssessmentPart, InvalidRubricSelection
from .constants import RUBRIC
import mock
from openassessment.assessment.api.self import create_assessment
from submissions.api import create_submission
from openassessment.assessment.errors import SelfAssessmentRequestError
@ddt.ddt
class AssessmentTest(CacheResetTest):
"""
Tests for the `Assessment` and `AssessmentPart` models.
@@ -148,41 +151,65 @@ class AssessmentTest(CacheResetTest):
criterion['options'] = []
return rubric_from_dict(rubric_dict)
def test_check_all_criteria_assessed(self):
"""
Runs a problem with 8 criteria (representing the 8 permutations of possible states)
through this validator. Represented as:
A -- Has an option selected for it.
B -- Has Zero Options
C -- Has Feedback given
"""
all_criteria = ['---','--C','-B-','-BC','A--','A-C','AB-','ABC']
selected_criteria = [crit for crit in all_criteria if ('A' in crit)]
zero_option_criteria_names = [crit for crit in all_criteria if ('B' in crit)]
feedback_given_criteria = [crit for crit in all_criteria if ('C' in crit)]
zero_option_criteria = []
for zoc in zero_option_criteria_names:
a = mock.Mock()
a.name = zoc
zero_option_criteria.append(a)
fake_rubric_index = mock.Mock()
fake_rubric_index.find_criteria_without_options = mock.Mock(return_value=zero_option_criteria)
fake_rubric_index.find_missing_criteria = mock.Mock(return_value=(set(all_criteria) - set(selected_criteria)))
expected_not_assessed = {'---','--C','-B-','AB-'}
expected_assessed = set(all_criteria) - expected_not_assessed
@ddt.file_data('data/models_check_criteria_assessed.json')
def test_check_all_criteria_assessed(self, data):
student_item = {
'student_id': u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
'item_id': 'test_item',
'course_id': 'test_course',
'item_type': 'test_type'
}
submission = create_submission(student_item, "Test answer")
rubric, options_selected, criterion_feedback = self._create_data_structures_with_criterion_properties(
has_option_selected=data['has_option_selected'],
has_zero_options=data['has_zero_options'],
has_feedback=data['has_feedback']
)
error = False
try:
AssessmentPart._check_all_criteria_assessed(fake_rubric_index, selected_criteria, feedback_given_criteria)
except InvalidRubricSelection as ex:
for criterion in expected_not_assessed:
self.assertTrue(criterion in str(ex))
for criterion in expected_assessed:
self.assertFalse(criterion in str(ex))
create_assessment(
submission['uuid'], student_item['student_id'], options_selected,
criterion_feedback, "overall feedback", rubric
)
except SelfAssessmentRequestError:
error = True
self.assertTrue(data['expected_error'] == error)
def _create_data_structures_with_criterion_properties(
self,
has_option_selected=True,
has_zero_options=True,
has_feedback=True
):
"""
Generates a dummy set of criterion definition structures that allows us to specify a particular combination
of criterion attributes for a test case.
"""
options = []
if not has_zero_options:
options = [{
"name": "Okay",
"points": 1,
"description": "It was okay I guess."
}]
rubric = {
'criteria': [
{
"name": "Quality",
"prompt": "How 'good' was it?",
"options": options
}
]
}
options_selected = {}
if has_option_selected:
options_selected['Quality'] = 'Okay'
criterion_feedback = {}
if has_feedback:
criterion_feedback['Quality'] = "This was an assignment of average quality."
self.assertTrue(error)
\ No newline at end of file
return rubric, options_selected, criterion_feedback
\ No newline at end of file
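For reference, here is what the new helper returns for one of the JSON cases ("No Option Selected, No Options, Has Feedback"), reconstructed from the code above; this is a usage example inside `AssessmentTest`, not additional production code:

```python
rubric, options_selected, criterion_feedback = self._create_data_structures_with_criterion_properties(
    has_option_selected=False,
    has_zero_options=True,
    has_feedback=True
)
# rubric             == {'criteria': [{'name': 'Quality',
#                                      'prompt': "How 'good' was it?",
#                                      'options': []}]}
# options_selected   == {}
# criterion_feedback == {'Quality': "This was an assignment of average quality."}
```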
@@ -56,8 +56,10 @@ class TestSelfApi(CacheResetTest):
"accuracy": "Like my sister's cutting comments about my weight, I may not have enjoyed the piece, but I cannot fault it for its factual nature."
}
OVERALL_FEEDBACK = (u"Unfortunately, the nature of being is too complex to comment, judge, or discern any one"
u"arbitrary set of things over another.")
OVERALL_FEEDBACK = (
u"Unfortunately, the nature of being is too complex to comment, judge, or discern any one"
u"arbitrary set of things over another."
)
def test_create_assessment(self):
# Initially, there should be no submission or self assessment
......
@@ -264,7 +264,6 @@ class GradeMixin(object):
for criterion in criteria:
criterion_name = criterion['name']
criterion['peer_feedback'] = peer_criteria_feedback[criterion_name]
if self_criteria_feedback.get(criterion_name, False):
criterion['self_feedback'] = self_criteria_feedback.get(criterion_name)
criterion['self_feedback'] = self_criteria_feedback.get(criterion_name)
return criteria
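The last hunk drops the truthiness guard: `criterion['self_feedback']` is now always assigned, relying on `dict.get` returning `None` when no self-assessment feedback exists for that criterion. A minimal illustration of the behavior being relied on:

```python
self_criteria_feedback = {"Clarity": "Well organized."}

self_criteria_feedback.get("Clarity")   # -> "Well organized."
self_criteria_feedback.get("Accuracy")  # -> None (missing key, no KeyError)
```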