Commit 7efd0f18 by Stephen Sanchez

Updating the APIs with additional tests.

parent f467ab54
......@@ -12,6 +12,7 @@ from openassessment.peer.models import PeerEvaluation
from openassessment.peer.serializers import PeerEvaluationSerializer
from submissions.models import Submission, StudentItem
from submissions.serializers import SubmissionSerializer
logger = logging.getLogger(__name__)
def has_finished_required_evaluating(student_id, required_evaluations):
    """Check whether a student has performed enough peer evaluations.

    Args:
        student_id: Identifier of the student acting as a scorer.
        required_evaluations (int): Number of evaluations the student is
            required to complete. Must be non-negative.

    Returns:
        bool: True when the student has scored at least
        ``required_evaluations`` peer evaluations.

    Raises:
        PeerEvaluationRequestError: If ``required_evaluations`` is negative.
    """
    if required_evaluations < 0:
        # NOTE(review): the merged diff carried this raise twice (old
        # one-line form plus the wrapped form); only one is kept.
        raise PeerEvaluationRequestError(
            "Required Evaluation count must be a positive integer.")
    return PeerEvaluation.objects.filter(
        scorer_id=student_id
    ).count() >= required_evaluations
......@@ -272,4 +274,11 @@ def get_submission_to_evaluate(student_item_dict):
).exclude(student_id=student_item_dict["student_id"])
# TODO: We need a priority queue.
return Submission.objects.filter(student_item__in=student_items)[:1]
submission = Submission.objects.filter(student_item__in=student_items).order_by(
"submitted_at",
"attempt_number")[:1]
if not submission:
raise PeerEvaluationWorkflowError(
"There are no submissions available for evaluation."
)
return SubmissionSerializer(submission[0]).data
......@@ -17,3 +17,17 @@ class PeerEvaluation(models.Model):
scorer_id = models.CharField(max_length=255, db_index=True)
score_type = models.CharField(max_length=2)
feedback = models.TextField(max_length=10000, default="")
def __repr__(self):
return repr(dict(
submission=self.submission,
points_earned=self.points_earned,
points_possible=self.points_possible,
scored_at=self.scored_at,
scorer_id=self.scorer_id,
score_type=self.score_type,
feedback=self.feedback,
))
class Meta:
    # Default queryset ordering: most recently scored evaluations first.
    ordering = ["-scored_at"]
from ddt import ddt
import pytz
import datetime
from django.test import TestCase
from openassessment.peer.api import create_evaluation, get_evaluations
from nose.tools import raises
from openassessment.peer.api import create_evaluation, get_evaluations, has_finished_required_evaluating, PeerEvaluationRequestError, get_submission_to_evaluate
from submissions.api import create_submission
from submissions.tests.test_api import STUDENT_ITEM, ANSWER_ONE
......@@ -29,11 +32,44 @@ class TestApi(TestCase):
self.assertEqual(1, len(evaluations))
self._assert_evaluation(evaluations[0], **ASSESSMENT_DICT)
def test_student_finished_evaluating(self):
    """A student is finished only after scoring the required three evaluations."""
    self._create_student_and_submission("Bob", "Bob's answer")
    self._create_student_and_submission("Sally", "Sally's answer")
    self._create_student_and_submission("Jim", "Jim's answer")

    # No evaluations yet, then two: still short of the required three.
    self.assertFalse(has_finished_required_evaluating("Tim", 3))
    for submission_id in ("1", "2"):
        create_evaluation(submission_id, "Tim", ASSESSMENT_DICT)
    self.assertFalse(has_finished_required_evaluating("Tim", 3))

    # The third evaluation satisfies the requirement.
    create_evaluation("3", "Tim", ASSESSMENT_DICT)
    self.assertTrue(has_finished_required_evaluating("Tim", 3))
def test_bad_configuration(self):
    """A negative required-evaluation count is rejected with a request error."""
    with self.assertRaises(PeerEvaluationRequestError):
        has_finished_required_evaluating("Tim", -1)
def test_get_submission_to_evaluate(self):
    """The oldest submission from another student is offered for evaluation.

    The merged diff left the old stub body (``pass``) in front of the real
    test body; the dead statement is removed here.
    """
    monday = datetime.datetime(2007, 9, 12, 0, 0, 0, 0, pytz.UTC)
    tuesday = datetime.datetime(2007, 9, 13, 0, 0, 0, 0, pytz.UTC)
    wednesday = datetime.datetime(2007, 9, 15, 0, 0, 0, 0, pytz.UTC)
    thursday = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC)
    self._create_student_and_submission("Tim", "Tim's answer", monday)
    self._create_student_and_submission("Bob", "Bob's answer", tuesday)
    self._create_student_and_submission("Sally", "Sally's answer", wednesday)
    self._create_student_and_submission("Jim", "Jim's answer", thursday)

    submission = get_submission_to_evaluate(STUDENT_ITEM)
    self.assertIsNotNone(submission)
    # Bob's Tuesday submission is expected: presumably STUDENT_ITEM
    # identifies Tim, whose earlier Monday submission is excluded as his
    # own — TODO confirm against submissions.tests.test_api.STUDENT_ITEM.
    self.assertEqual(submission["answer"], u"Bob's answer")
    self.assertEqual(submission["student_item"], 2)
    self.assertEqual(submission["attempt_number"], 1)
def test_concurrent_evaluators(self):
    # Placeholder: behavior when several students evaluate the same
    # submission concurrently is not implemented or tested yet.
    pass
@staticmethod
def _create_student_and_submission(student, answer, date=None):
    """Submit *answer* for *student* using a copy of the shared STUDENT_ITEM."""
    student_item = dict(STUDENT_ITEM)
    student_item["student_id"] = student
    create_submission(student_item, answer, date)
def _assert_evaluation(self, evaluation, points_earned, points_possible, feedback):
self.assertIsNotNone(evaluation)
......
......@@ -9,5 +9,6 @@ coverage==3.7.1
lettuce==0.2.19
pep8==1.4.6
pylint<1.0
pytz==2013.9
sure==1.2.3
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment