Commit fdbbe88c by David Ormsbee

Properly create Assessment Parts based on selected options.

parent dddf9194
@@ -11,7 +11,9 @@ import math
 from django.db import DatabaseError
 from openassessment.peer.models import Assessment
-from openassessment.peer.serializers import AssessmentSerializer
+from openassessment.peer.serializers import (
+    AssessmentSerializer, RubricSerializer, rubric_from_dict
+)
 from submissions import api as submission_api
 from submissions.models import Submission, StudentItem, Score
 from submissions.serializers import SubmissionSerializer, StudentItemSerializer
@@ -70,7 +72,7 @@ def create_assessment(
         required_assessments_for_student,
         required_assessments_for_submission,
         assessment_dict,
-        # rubric_dict,
+        rubric_dict,
         scored_at=None):
     """Creates an assessment on the given submission.
@@ -123,18 +125,22 @@ def create_assessment(
     """
     try:
         submission = Submission.objects.get(uuid=submission_uuid)
+        rubric = rubric_from_dict(rubric_dict)
+        option_ids = rubric.options_ids(assessment_dict["options_selected"])
         peer_assessment = {
+            "rubric": rubric.id,
             "scorer_id": scorer_id,
             "submission": submission.pk,
             "points_earned": sum(assessment_dict["points_earned"]),
             "points_possible": assessment_dict["points_possible"],
             "score_type": PEER_TYPE,
-            "feedback": assessment_dict["feedback"],
+            "parts": [{"option": option_id} for option_id in option_ids]
         }
         if scored_at:
             peer_assessment["scored_at"] = scored_at
-        peer_serializer = AssessmentSerializer(data=peer_evaluation)
+        peer_serializer = AssessmentSerializer(data=peer_assessment)
         if not peer_serializer.is_valid():
             raise PeerAssessmentRequestError(peer_serializer.errors)
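
With this change the caller supplies the rubric definition alongside the scores,
and the API resolves the scorer's selected option names into concrete
CriterionOption rows (the Assessment Parts). A minimal sketch of the new calling
convention, reusing the fixtures from the tests below (the import alias is an
assumption, not part of this commit):

    from openassessment.peer import api as peer_api

    # assessment_dict carries the scorer's choices and points;
    # rubric_dict lets the API resolve option names to database ids.
    assessment = peer_api.create_assessment(
        submission["uuid"],   # submission being assessed
        "Tim",                # scorer_id
        REQUIRED_GRADED,      # assessments this student must complete
        REQUIRED_GRADED_BY,   # assessments this submission must receive
        ASSESSMENT_DICT,
        RUBRIC_DICT,
    )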
@@ -42,7 +42,8 @@ class Rubric(models.Model):
     @property
     def points_possible(self):
-        return sum(crit.points_possible for crit in self.criteria.all())
+        criteria_points = [crit.points_possible for crit in self.criteria.all()]
+        return sum(criteria_points) if criteria_points else 0

     @staticmethod
     def content_hash_for_rubric_dict(rubric_dict):
@@ -59,10 +60,29 @@ class Rubric(models.Model):
         return sha1(canonical_form).hexdigest()

+    def options_ids(self, crit_to_opt_names):
+        """Given a mapping of criterion names to selected option names,
+        return the ids of the matching CriterionOptions for this Rubric.
+        """
+        # TODO: Cache this lookup
+        crit_to_all_opts = {
+            crit.name: {
+                option.name: option.id for option in crit.options.all()
+            }
+            for crit in self.criteria.all()
+        }
+        return [
+            crit_to_all_opts[crit][opt]
+            for crit, opt in crit_to_opt_names.items()
+        ]
+

 class Criterion(models.Model):
     # All Rubrics have at least one Criterion
     rubric = models.ForeignKey(Rubric, related_name="criteria")
+    name = models.CharField(max_length=100, blank=False)

     # 0-based order in the Rubric
     order_num = models.PositiveIntegerField()
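
The new options_ids helper converts the {criterion name: selected option name}
mapping sent by the client into the option ids needed to build Assessment Parts.
A sketch of the intended behavior against the test rubric defined further down
(ids are illustrative):

    rubric = rubric_from_dict(RUBRIC_DICT)
    rubric.options_ids({"secret": "yes", "safe": "no"})
    # -> [<id of "yes" on "secret">, <id of "no" on "safe">]

Note the result follows the iteration order of the argument dict, which this
Python 2 era code does not guarantee, so parts should not be assumed to arrive
in rubric order.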
@@ -128,15 +148,22 @@ class Assessment(models.Model):
     @property
     def points_earned(self):
-        return sum(part.points_earned for part in self.parts.all())
+        parts = [part.points_earned for part in self.parts.all()]
+        return sum(parts) if parts else 0

     @property
     def points_possible(self):
         return self.rubric.points_possible

+    @property
+    def submission_uuid(self):
+        return self.submission.uuid
+

 class AssessmentPart(models.Model):
     assessment = models.ForeignKey(Assessment, related_name='parts')
+    # criterion = models.ForeignKey(Criterion)
     option = models.ForeignKey(CriterionOption)  # TODO: no reverse

     @property
@@ -63,7 +63,7 @@ class CriterionSerializer(NestedModelSerializer):
     class Meta:
         model = Criterion
-        fields = ('order_num', 'prompt', 'options')
+        fields = ('order_num', 'name', 'prompt', 'options')

     def validate_options(self, attrs, source):
@@ -77,10 +77,11 @@ class CriterionSerializer(NestedModelSerializer):
 class RubricSerializer(NestedModelSerializer):
     criteria = CriterionSerializer(required=True, many=True)
+    points_possible = serializers.Field(source='points_possible')

     class Meta:
         model = Rubric
-        fields = ('id', 'content_hash', 'criteria')
+        fields = ('id', 'content_hash', 'criteria', 'points_possible')

     def validate_criteria(self, attrs, source):
@@ -103,19 +104,39 @@ class RubricSerializer(NestedModelSerializer):
 class AssessmentPartSerializer(serializers.ModelSerializer):
-    option = CriterionOptionSerializer()
+    # criterion = CriterionSerializer()
+    # option = CriterionOptionSerializer()

     class Meta:
         model = AssessmentPart
+        # fields = ('criterion', 'option')
         fields = ('option',)


 class AssessmentSerializer(serializers.ModelSerializer):
+    submission_uuid = serializers.Field(source='submission_uuid')
     parts = AssessmentPartSerializer(required=True, many=True)
+    points_earned = serializers.Field(source='points_earned')
+    points_possible = serializers.Field(source='points_possible')

     class Meta:
         model = Assessment
-        fields = ('submission', 'rubric', 'scored_at', 'scorer_id', 'score_type')
+        fields = (
+            'submission',  # will go away shortly
+            'rubric',
+            'scored_at',
+            'scorer_id',
+            'score_type',
+
+            # Foreign Key
+            'parts',
+
+            # Computed, not part of the model
+            'submission_uuid',
+            'points_earned',
+            'points_possible',
+        )
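
With the read-only Field entries above, a serialized Assessment now exposes the
computed values alongside the model fields. Roughly the shape to expect (all
values illustrative; with no nested serializer declared, each part renders as an
option primary key):

    {
        "submission": 1,
        "rubric": 1,
        "scored_at": "2014-02-24 00:00:00",
        "scorer_id": "Tim",
        "score_type": "PE",
        "parts": [{"option": 2}, {"option": 4}],
        "submission_uuid": "a8150e79-...",
        "points_earned": 6,
        "points_possible": 14,
    }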
@@ -133,6 +154,11 @@ def rubric_from_dict(rubric_dict):
         rubric = Rubric.objects.get(content_hash=content_hash)
     except Rubric.DoesNotExist:
         rubric_dict["content_hash"] = content_hash
+        for crit_idx, criterion in enumerate(rubric_dict["criteria"]):
+            criterion["order_num"] = crit_idx
+            for opt_idx, option in enumerate(criterion["options"]):
+                option["order_num"] = opt_idx
         rubric_serializer = RubricSerializer(data=rubric_dict)
         if not rubric_serializer.is_valid():
             raise InvalidRubric(rubric_serializer.errors)
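
Because rubric_from_dict only falls into the creation branch when no Rubric row
matches the content hash, identical rubric definitions are shared rather than
duplicated, and the order_num stamping above spares callers from supplying
explicit ordering. A sketch of the intended dedup behavior:

    rubric_a = rubric_from_dict(RUBRIC_DICT)
    rubric_b = rubric_from_dict(RUBRIC_DICT)  # same content hash, no new row
    assert rubric_a.id == rubric_b.id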
@@ -15,8 +15,64 @@ from submissions.tests.test_api import STUDENT_ITEM, ANSWER_ONE
 ASSESSMENT_DICT = dict(
     points_earned=[1, 0, 3, 2],
-    points_possible=12,
+    points_possible=14,
     feedback="Your submission was thrilling.",
+    options_selected={
+        "secret": "yes",
+        "safe": "no",
+        "giveup": "reluctant",
+        "singing": "no",
+    }
+)
+
+RUBRIC_DICT = dict(
+    criteria=[
+        dict(
+            name="secret",
+            prompt="Did the writer keep it secret?",
+            options=[
+                dict(name="no", points="0", explanation=""),
+                dict(name="yes", points="1", explanation="")
+            ]
+        ),
+        dict(
+            name="safe",
+            prompt="Did the writer keep it safe?",
+            options=[
+                dict(name="no", points="0", explanation=""),
+                dict(name="yes", points="1", explanation="")
+            ]
+        ),
+        dict(
+            name="giveup",
+            prompt="How willing is the writer to give up the ring?",
+            options=[
+                dict(
+                    name="unwilling",
+                    points="0",
+                    explanation="Likely to use force to keep it."
+                ),
+                dict(
+                    name="reluctant",
+                    points="3",
+                    explanation="May argue, but will give it up voluntarily."
+                ),
+                dict(
+                    name="eager",
+                    points="10",
+                    explanation="Happy to give it up."
+                )
+            ]
+        ),
+        dict(
+            name="singing",
+            prompt="Did the writer break into tedious elvish lyrics?",
+            options=[
+                dict(name="no", points="2", explanation=""),
+                dict(name="yes", points="0", explanation="")
+            ]
+        ),
+    ]
 )

 REQUIRED_GRADED = 5
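
The fixture's points_possible moves from 12 to 14 to match the rubric: the best
option on each criterion is worth 1 (secret) + 1 (safe) + 10 (giveup) +
2 (singing) = 14. The same figure can be recomputed from RUBRIC_DICT directly:

    points_possible = sum(
        max(int(opt["points"]) for opt in criterion["options"])
        for criterion in RUBRIC_DICT["criteria"]
    )
    assert points_possible == 14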
@@ -37,7 +93,8 @@ class TestApi(TestCase):
             STUDENT_ITEM["student_id"],
             REQUIRED_GRADED,
             REQUIRED_GRADED_BY,
-            ASSESSMENT_DICT
+            ASSESSMENT_DICT,
+            RUBRIC_DICT,
         )
         self._assert_evaluation(evaluation, **ASSESSMENT_DICT)
@@ -49,7 +106,8 @@ class TestApi(TestCase):
             STUDENT_ITEM["student_id"],
             REQUIRED_GRADED,
             REQUIRED_GRADED_BY,
-            assessment_dict
+            assessment_dict,
+            RUBRIC_DICT,
         )
         evaluations = api.get_assessments(submission["uuid"])
         self.assertEqual(1, len(evaluations))
@@ -64,6 +122,7 @@ class TestApi(TestCase):
             REQUIRED_GRADED,
             REQUIRED_GRADED_BY,
             assessment_dict,
+            RUBRIC_DICT,
             MONDAY
         )
         evaluations = api.get_assessments(submission["uuid"])
@@ -86,22 +145,23 @@ class TestApi(TestCase):
         self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
         api.create_assessment(
-            bob["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
+            bob["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
         )
         api.create_assessment(
-            sally["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
+            sally["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
         )
         self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
         api.create_assessment(
-            jim["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
+            jim["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
         )
         self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
         api.create_assessment(
-            buffy["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
+            buffy["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
         )
         self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
         api.create_assessment(
-            xander["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
+            xander["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
         )
         self.assertTrue(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
@@ -111,13 +171,13 @@ class TestApi(TestCase):
         self.assertFalse(scores)
         api.create_assessment(
-            tim["uuid"], "Bob", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
+            tim["uuid"], "Bob", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
         )
         api.create_assessment(
-            tim["uuid"], "Sally", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
+            tim["uuid"], "Sally", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
         )
         api.create_assessment(
-            tim["uuid"], "Jim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
+            tim["uuid"], "Jim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
         )

         # Tim has met the criteria, and should now have a score.
@@ -165,6 +225,7 @@ class TestApi(TestCase):
             REQUIRED_GRADED,
             REQUIRED_GRADED_BY,
             ASSESSMENT_DICT,
+            RUBRIC_DICT,
             MONDAY
         )
@@ -178,6 +239,7 @@ class TestApi(TestCase):
             REQUIRED_GRADED,
             REQUIRED_GRADED_BY,
             ASSESSMENT_DICT,
+            RUBRIC_DICT,
             MONDAY
         )
         mock_filter.side_effect = DatabaseError("Bad things happened")
@@ -201,8 +263,10 @@ class TestApi(TestCase):
         return sub_api.create_submission(new_student_item, answer, date)

     def _assert_evaluation(self, evaluation, points_earned, points_possible,
-                           feedback):
+                           feedback, options_selected):
+        print evaluation
         self.assertIsNotNone(evaluation)
         self.assertEqual(evaluation["points_earned"], sum(points_earned))
         self.assertEqual(evaluation["points_possible"], points_possible)
-        self.assertEqual(evaluation["feedback"], feedback)
+        # self.assertEqual(evaluation["feedback"], feedback)
@@ -122,7 +122,6 @@ DEFAULT_ASSESSMENT_MODULES = [
     DEFAULT_PEER_ASSESSMENT,
 ]

-
 def load(path):
     """Handy helper for getting resources from our kit."""
     data = pkg_resources.resource_string(__name__, path)
@@ -224,6 +223,21 @@ class OpenAssessmentBlock(XBlock, SubmissionMixin, PeerAssessmentMixin, SelfAssessmentMixin):
             "grade_state": grade_state,
         }

+        rubric_dict = {
+            'criteria': self.rubric_criteria
+        }
+        assessment = peer_api.create_assessment(
+            data["submission_uuid"],
+            student_item_dict["student_id"],
+            int(peer_eval["must_grade"]),
+            int(peer_eval["must_be_graded_by"]),
+            assessment_dict,
+            rubric_dict,
+        )
+
+        # Temp kludge until we fix JSON serialization for datetime
+        assessment["scored_at"] = str(assessment["scored_at"])
+
         template = get_template("static/html/oa_base.html")
         context = Context(context_dict)
         frag = Fragment(template.render(context))
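
The kludge is needed because the stdlib json encoder rejects datetime objects
outright; casting to str sidesteps that until a real encoder is wired in. A
quick illustration:

    import datetime
    import json

    scored_at = datetime.datetime(2014, 2, 24)
    json.dumps({"scored_at": scored_at})       # raises TypeError
    json.dumps({"scored_at": str(scored_at)})  # '{"scored_at": "2014-02-24 00:00:00"}'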
@@ -48,20 +48,27 @@ class ScenarioParser(object):
         </rubric>"""
         rubric_criteria = []
         for criterion in e:
-            crit = {'name': criterion.attrib.get('name', ''),
-                    'instructions': criterion.text.strip(),
-                    'total_value': 0,
-                    'options': [],
-            }
+            crit = {
+                'name': criterion.attrib.get('name', ''),
+                'prompt': criterion.text.strip(),
+                'options': [],
+            }
             for option in criterion:
                 explanations = option.getchildren()
                 if explanations and len(explanations) == 1 and explanations[0].tag == 'explain':
                     explanation = explanations[0].text.strip()
                 else:
                     explanation = ''
-                crit['options'].append((option.attrib['val'], option.text.strip(), explanation))
-            crit['total_value'] = max(int(x[0]) for x in crit['options'])
+                crit['options'].append(
+                    {
+                        'name': option.text.strip(),
+                        'points': int(option.attrib['val']),
+                        'explanation': explanation,
+                    }
+                )
             rubric_criteria.append(crit)
         return (e.text.strip(), rubric_criteria)

     def get_assessments(self, assessments):
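
The parser now emits option dicts in the same shape the rubric serializers
consume, instead of (val, name, explanation) tuples plus a computed total_value.
A sketch of one round trip, with the XML shape inferred from the parsing code
rather than shown in this commit:

    criterion_xml = """
    <criterion name="secret">Did the writer keep it secret?
        <option val="0">no</option>
        <option val="1">yes</option>
    </criterion>
    """
    # get_rubric() would turn this element into:
    # {
    #     'name': 'secret',
    #     'prompt': 'Did the writer keep it secret?',
    #     'options': [
    #         {'name': 'no', 'points': 0, 'explanation': ''},
    #         {'name': 'yes', 'points': 1, 'explanation': ''},
    #     ],
    # }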