Commit f467ab54 by Stephen Sanchez

Updating the API and tests.

parent 5d73dc4b
@@ -11,10 +11,11 @@ from django.db import DatabaseError
 from openassessment.peer.models import PeerEvaluation
 from openassessment.peer.serializers import PeerEvaluationSerializer
+from submissions.models import Submission, StudentItem

 logger = logging.getLogger(__name__)

-PEER_TYPE = "Peer"
+PEER_TYPE = "PE"


 class PeerEvaluationError(Exception):
@@ -97,7 +98,7 @@ def create_evaluation(submission_id, scorer_id, assessment_dict,
         >>>     points_possible=12,
         >>>     feedback="Your submission was thrilling.",
         >>> )
-        >>> create_evaluation("submission_one", "Tim", assessment_dict)
+        >>> create_evaluation("1", "Tim", assessment_dict)
         {
             'points_earned': 6,
             'points_possible': 12,
@@ -107,18 +108,19 @@ def create_evaluation(submission_id, scorer_id, assessment_dict,
         }

     """
-    peer_evaluation = {
-        "scorer_id": scorer_id,
-        "submission_id": submission_id,
-        "points_earned": sum(assessment_dict["points_earned"]),
-        "points_possible": assessment_dict["points_possible"],
-        "score_type": PEER_TYPE,
-        "feedback": assessment_dict["feedback"],
-    }
-    if scored_at:
-        peer_evaluation["scored_at"] = scored_at
     try:
+        submission = Submission.objects.get(pk=submission_id)
+        peer_evaluation = {
+            "scorer_id": scorer_id,
+            "submission": submission.pk,
+            "points_earned": sum(assessment_dict["points_earned"]),
+            "points_possible": assessment_dict["points_possible"],
+            "score_type": PEER_TYPE,
+            "feedback": assessment_dict["feedback"],
+        }
+        if scored_at:
+            peer_evaluation["scored_at"] = scored_at
         peer_serializer = PeerEvaluationSerializer(data=peer_evaluation)
         if not peer_serializer.is_valid():
             raise PeerEvaluationRequestError(peer_serializer.errors)
@@ -126,7 +128,7 @@ def create_evaluation(submission_id, scorer_id, assessment_dict,
         return peer_serializer.data
     except DatabaseError:
         error_message = u"An error occurred while creating evaluation {} for submission: {} by: {}".format(
-            peer_evaluation,
+            assessment_dict,
             submission_id,
             scorer_id
         )
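Note: Submission.objects.get raises Submission.DoesNotExist for an unknown id, and only DatabaseError is caught here, so a bad submission_id will propagate to the caller. A minimal usage sketch, assuming a submission with pk "1" already exists (e.g. via submissions.api.create_submission):

    # Hypothetical driver code, not part of this commit.
    from openassessment.peer.api import create_evaluation

    assessment_dict = dict(
        points_earned=[1, 0, 3, 2],
        points_possible=12,
        feedback="Your submission was thrilling.",
    )
    evaluation = create_evaluation("1", "Tim", assessment_dict)
    assert evaluation["points_earned"] == 6  # sum([1, 0, 3, 2])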
@@ -192,7 +194,7 @@ def get_evaluations(submission_id):
             while retrieving the evaluations associated with this submission.

     Examples:
-        >>> get_evaluations("submission_one")
+        >>> get_evaluations("1")
         [
             {
                 'points_earned': 6,
@@ -211,10 +213,20 @@ def get_evaluations(submission_id):
         ]

     """
-    pass
+    try:
+        submission = Submission.objects.get(pk=submission_id)
+        evaluations = PeerEvaluation.objects.filter(submission=submission)
+        serializer = PeerEvaluationSerializer(evaluations, many=True)
+        return serializer.data
+    except DatabaseError:
+        error_message = (
+            u"Error getting evaluations for submission {}".format(submission_id)
+        )
+        logger.exception(error_message)
+        raise PeerEvaluationInternalError(error_message)
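Note: with many=True the serializer returns a list of dicts, one per evaluation. A short sketch following on from the example above:

    # Hypothetical driver code, not part of this commit.
    from openassessment.peer.api import get_evaluations

    for evaluation in get_evaluations("1"):
        print(evaluation["points_earned"], evaluation["feedback"])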
-def get_submission_to_evaluate(student_item, scorer_id):
+def get_submission_to_evaluate(student_item_dict):
     """Get a submission to peer evaluate.

     Retrieves a submission for evaluation for the given student_item. This will
@@ -224,7 +236,7 @@ def get_submission_to_evaluate(student_item, scorer_id):
     submissions from students who are not as active in the evaluation process.

     Args:
-        student_item (dict):
+        student_item_dict (dict):
         scorer_id (str):

     Returns:
@@ -239,9 +251,10 @@ def get_submission_to_evaluate(student_item, scorer_id):
         >>> student_item_dict = dict(
         >>>     item_id="item_1",
         >>>     course_id="course_1",
-        >>>     item_type="type_one"
+        >>>     item_type="type_one",
+        >>>     student_id="Bob",
         >>> )
-        >>> get_submission_to_evaluate(student_item_dict, "Bob")
+        >>> get_submission_to_evaluate(student_item_dict)
         {
             'student_item': 2,
             'attempt_number': 1,
@@ -253,4 +266,10 @@ def get_submission_to_evaluate(student_item, scorer_id):
     """
-    pass
+    student_items = StudentItem.objects.filter(
+        course_id=student_item_dict["course_id"],
+        item_id=student_item_dict["item_id"],
+    ).exclude(student_id=student_item_dict["student_id"])
+
+    # TODO: We need a priority queue.
+    return Submission.objects.filter(student_item__in=student_items)[:1]
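Note: the trailing [:1] returns a QuerySet of at most one Submission, not a single model instance, so callers have to index into the result or test for emptiness:

    # Hypothetical driver code, not part of this commit.
    from openassessment.peer.api import get_submission_to_evaluate

    submissions = get_submission_to_evaluate(dict(
        course_id="course_1",
        item_id="item_1",
        student_id="Bob",  # Bob's own submissions are excluded
    ))
    submission = submissions[0] if submissions else None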
@@ -6,9 +6,11 @@ be a lot here, like rubrics and such.
 from django.db import models
 from django.utils.timezone import now
+from submissions.models import Submission


 class PeerEvaluation(models.Model):
-    # submission = models.ForeignKey(Submission)
+    submission = models.ForeignKey(Submission)
     points_earned = models.PositiveIntegerField(default=0)
     points_possible = models.PositiveIntegerField(default=0)
     scored_at = models.DateTimeField(default=now, db_index=True)
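Note: uncommenting the ForeignKey adds a submission_id column, so the evaluation table needs a schema update before this model can be used. It also gives Submission the default reverse accessor:

    # Sketch of the relation this field creates (default Django naming).
    from submissions.models import Submission

    submission = Submission.objects.get(pk=1)
    evaluations = submission.peerevaluation_set.all()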
@@ -10,6 +10,7 @@ class PeerEvaluationSerializer(serializers.ModelSerializer):
     class Meta:
         model = PeerEvaluation
         fields = (
+            'submission',
             'points_earned',
             'points_possible',
             'scored_at',
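Note: listing 'submission' in fields makes the ModelSerializer map the ForeignKey to a primary-key field, which is why create_evaluation above passes submission.pk rather than the model instance. A rough validation sketch (only fields visible in this hunk are shown; the full field list is an assumption):

    # Hypothetical sketch, not part of this commit.
    from openassessment.peer.serializers import PeerEvaluationSerializer

    serializer = PeerEvaluationSerializer(data={
        "submission": 1,  # Submission pk, not a model instance
        "points_earned": 6,
        "points_possible": 12,
    })
    serializer.is_valid()  # also checks that Submission pk 1 exists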
 from ddt import ddt
 from django.test import TestCase

+from openassessment.peer.api import create_evaluation, get_evaluations
+from submissions.api import create_submission
+from submissions.tests.test_api import STUDENT_ITEM, ANSWER_ONE
+
+ASSESSMENT_DICT = dict(
+    points_earned=[1, 0, 3, 2],
+    points_possible=12,
+    feedback="Your submission was thrilling.",
+)
+

 @ddt
 class TestApi(TestCase):
     def test_create_evaluation(self):
-        pass
+        create_submission(STUDENT_ITEM, ANSWER_ONE)
+        evaluation = create_evaluation(
+            "1",
+            STUDENT_ITEM["student_id"],
+            ASSESSMENT_DICT
+        )
+        self._assert_evaluation(evaluation, **ASSESSMENT_DICT)

     def test_get_evaluations(self):
-        pass
+        create_submission(STUDENT_ITEM, ANSWER_ONE)
+        create_evaluation("1", STUDENT_ITEM["student_id"], ASSESSMENT_DICT)
+        evaluations = get_evaluations("1")
+        self.assertEqual(1, len(evaluations))
+        self._assert_evaluation(evaluations[0], **ASSESSMENT_DICT)

     def test_get_submission_to_evaluate(self):
         pass

     def test_concurrent_evaluators(self):
         pass
+
+    def _assert_evaluation(self, evaluation, points_earned, points_possible, feedback):
+        self.assertIsNotNone(evaluation)
+        self.assertEqual(evaluation["points_earned"], sum(points_earned))
+        self.assertEqual(evaluation["points_possible"], points_possible)
+        self.assertEqual(evaluation["feedback"], feedback)
\ No newline at end of file
@@ -129,7 +129,7 @@ def create_submission(student_item_dict, answer, submitted_at=None,
             u"Submission answer could not be properly decoded to unicode.")

     model_kwargs = {
-        "student_item": student_item_model,
+        "student_item": student_item_model.pk,
         "answer": answer,
         "attempt_number": attempt_number,
     }
@@ -137,12 +137,7 @@ def create_submission(student_item_dict, answer, submitted_at=None,
         model_kwargs["submitted_at"] = submitted_at

     try:
-        # Serializer validation requires the student item primary key, rather
-        # than the student item model itself. Create a copy of the submission
-        # kwargs and replace the student item model with it's primary key.
-        validation_data = model_kwargs.copy()
-        validation_data["student_item"] = student_item_model.pk
-        submission_serializer = SubmissionSerializer(data=validation_data)
+        submission_serializer = SubmissionSerializer(data=model_kwargs)
         if not submission_serializer.is_valid():
             raise SubmissionRequestError(submission_serializer.errors)
         submission_serializer.save()
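Note: this drops the copy-and-swap of validation_data by storing the pk in model_kwargs from the start; on save, the serializer resolves the pk back into a StudentItem instance. A condensed sketch of the resulting flow (variables as in create_submission; the SubmissionSerializer import path is an assumption):

    # Hypothetical sketch, not part of this commit.
    from submissions.serializers import SubmissionSerializer

    model_kwargs = {
        "student_item": student_item_model.pk,  # pk, not the model
        "answer": answer,
        "attempt_number": attempt_number,
    }
    serializer = SubmissionSerializer(data=model_kwargs)
    if serializer.is_valid():
        submission = serializer.save()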