Commit 58733b0d by Stephen Sanchez

Added a few additional tests.

Added some additional documentation. Some quick cleanup.
parent 7efd0f18
@@ -238,14 +238,20 @@ def get_submission_to_evaluate(student_item_dict):
     submissions from students who are not as active in the evaluation process.

     Args:
-        student_item_dict (dict):
-        scorer_id (str):
+        student_item_dict (dict): The student item information from the student
+            requesting a submission for evaluation. The dict contains an
+            item_id, course_id, and item_type, used to identify the unique
+            question for the review, while the student_id is used to explicitly
+            avoid giving the student their own submission.

     Returns:
-        dict:
+        dict: A peer submission for evaluation. This contains 'student_item',
+            'attempt_number', 'submitted_at', 'created_at', and 'answer' fields
+            to be used for evaluation.

     Raises:
-        PeerEvaluationRequestError:
+        PeerEvaluationRequestError: Raised when the request parameters are
+            invalid.
         PeerEvaluationInternalError:
         PeerEvaluationWorkflowError:
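
The expanded docstring spells out the call contract. For orientation, usage looks roughly like this (a minimal sketch; the student_item values below are illustrative placeholders, not taken from this repository):

    from openassessment.peer import api

    student_item = {
        "student_id": "Tim",             # the requester; never handed their own work
        "item_id": "peer_question_1",    # hypothetical identifiers for the question
        "course_id": "demo/course/2013",
        "item_type": "openassessment",
    }

    try:
        submission = api.get_submission_to_evaluate(student_item)
        # Per the Returns section: 'student_item', 'attempt_number',
        # 'submitted_at', 'created_at', and 'answer' fields.
        print(submission["answer"])
    except api.PeerEvaluationWorkflowError:
        # Per the Raises section: no submissions are available to evaluate.
        pass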
@@ -276,7 +282,7 @@ def get_submission_to_evaluate(student_item_dict):
     # TODO: We need a priority queue.
     submission = Submission.objects.filter(student_item__in=student_items).order_by(
         "submitted_at",
-        "attempt_number")[:1]
+        "-attempt_number")[:1]
     if not submission:
         raise PeerEvaluationWorkflowError(
             "There are no submissions available for evaluation."
......
-from ddt import ddt
+from ddt import ddt, file_data
+from django.db import DatabaseError
 import pytz
 import datetime
 from django.test import TestCase
 from nose.tools import raises
-from openassessment.peer.api import create_evaluation, get_evaluations, has_finished_required_evaluating, PeerEvaluationRequestError, get_submission_to_evaluate
+from mock import patch
+from openassessment.peer import api
+from openassessment.peer.models import PeerEvaluation
 from submissions.api import create_submission
 from submissions.models import Submission
 from submissions.tests.test_api import STUDENT_ITEM, ANSWER_ONE

 ASSESSMENT_DICT = dict(
@@ -13,65 +19,106 @@ ASSESSMENT_DICT = dict(
     feedback="Your submission was thrilling.",
 )

+MONDAY = datetime.datetime(2007, 9, 12, 0, 0, 0, 0, pytz.UTC)
+TUESDAY = datetime.datetime(2007, 9, 13, 0, 0, 0, 0, pytz.UTC)
+WEDNESDAY = datetime.datetime(2007, 9, 15, 0, 0, 0, 0, pytz.UTC)
+THURSDAY = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC)

 @ddt
 class TestApi(TestCase):

     def test_create_evaluation(self):
         create_submission(STUDENT_ITEM, ANSWER_ONE)
-        evaluation = create_evaluation(
+        evaluation = api.create_evaluation(
             "1",
             STUDENT_ITEM["student_id"],
             ASSESSMENT_DICT
         )
         self._assert_evaluation(evaluation, **ASSESSMENT_DICT)
-    def test_get_evaluations(self):
+    @file_data('test_valid_evaluations.json')
+    def test_get_evaluations(self, assessment_dict):
         create_submission(STUDENT_ITEM, ANSWER_ONE)
-        create_evaluation("1", STUDENT_ITEM["student_id"], ASSESSMENT_DICT)
-        evaluations = get_evaluations("1")
+        api.create_evaluation("1", STUDENT_ITEM["student_id"], assessment_dict)
+        evaluations = api.get_evaluations("1")
         self.assertEqual(1, len(evaluations))
-        self._assert_evaluation(evaluations[0], **ASSESSMENT_DICT)
+        self._assert_evaluation(evaluations[0], **assessment_dict)
+
+    @file_data('test_valid_evaluations.json')
+    def test_get_evaluations_with_date(self, assessment_dict):
+        create_submission(STUDENT_ITEM, ANSWER_ONE)
+        api.create_evaluation(
+            "1", STUDENT_ITEM["student_id"], assessment_dict, MONDAY
+        )
+        evaluations = api.get_evaluations("1")
+        self.assertEqual(1, len(evaluations))
+        self._assert_evaluation(evaluations[0], **assessment_dict)
+        self.assertEqual(evaluations[0]["scored_at"], MONDAY)
     def test_student_finished_evaluating(self):
         self._create_student_and_submission("Bob", "Bob's answer")
         self._create_student_and_submission("Sally", "Sally's answer")
         self._create_student_and_submission("Jim", "Jim's answer")
-        self.assertFalse(has_finished_required_evaluating("Tim", 3))
-        create_evaluation("1", "Tim", ASSESSMENT_DICT)
-        create_evaluation("2", "Tim", ASSESSMENT_DICT)
-        self.assertFalse(has_finished_required_evaluating("Tim", 3))
-        create_evaluation("3", "Tim", ASSESSMENT_DICT)
-        self.assertTrue(has_finished_required_evaluating("Tim", 3))
+        self.assertFalse(api.has_finished_required_evaluating("Tim", 3))
+        api.create_evaluation("1", "Tim", ASSESSMENT_DICT)
+        api.create_evaluation("2", "Tim", ASSESSMENT_DICT)
+        self.assertFalse(api.has_finished_required_evaluating("Tim", 3))
+        api.create_evaluation("3", "Tim", ASSESSMENT_DICT)
+        self.assertTrue(api.has_finished_required_evaluating("Tim", 3))
-    @raises(PeerEvaluationRequestError)
+    @raises(api.PeerEvaluationRequestError)
     def test_bad_configuration(self):
-        has_finished_required_evaluating("Tim", -1)
+        api.has_finished_required_evaluating("Tim", -1)
     def test_get_submission_to_evaluate(self):
-        monday = datetime.datetime(2007, 9, 12, 0, 0, 0, 0, pytz.UTC)
-        tuesday = datetime.datetime(2007, 9, 13, 0, 0, 0, 0, pytz.UTC)
-        wednesday = datetime.datetime(2007, 9, 15, 0, 0, 0, 0, pytz.UTC)
-        thursday = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC)
-        self._create_student_and_submission("Tim", "Tim's answer", monday)
-        self._create_student_and_submission("Bob", "Bob's answer", tuesday)
-        self._create_student_and_submission("Sally", "Sally's answer", wednesday)
-        self._create_student_and_submission("Jim", "Jim's answer", thursday)
+        self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
+        self._create_student_and_submission("Bob", "Bob's answer", TUESDAY)
+        self._create_student_and_submission("Sally", "Sally's answer",
+                                            WEDNESDAY)
+        self._create_student_and_submission("Jim", "Jim's answer", THURSDAY)

-        submission = get_submission_to_evaluate(STUDENT_ITEM)
+        submission = api.get_submission_to_evaluate(STUDENT_ITEM)
         self.assertIsNotNone(submission)
         self.assertEqual(submission["answer"], u"Bob's answer")
         self.assertEqual(submission["student_item"], 2)
         self.assertEqual(submission["attempt_number"], 1)
+    @raises(api.PeerEvaluationWorkflowError)
+    def test_no_submissions_to_evaluate_for_tim(self):
+        self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
+        api.get_submission_to_evaluate(STUDENT_ITEM)
"""
Some Error Checking Tests against DB failures.
"""
@patch.object(Submission.objects, 'get')
@raises(api.PeerEvaluationInternalError)
def test_error_on_evaluation_creation(self, mock_filter):
mock_filter.side_effect = DatabaseError("Bad things happened")
create_submission(STUDENT_ITEM, ANSWER_ONE)
api.create_evaluation(
"1", STUDENT_ITEM["student_id"], ASSESSMENT_DICT, MONDAY
)
+    @patch.object(PeerEvaluation.objects, 'filter')
+    @raises(api.PeerEvaluationInternalError)
+    def test_error_on_get_evaluation(self, mock_filter):
+        create_submission(STUDENT_ITEM, ANSWER_ONE)
+        api.create_evaluation("1", STUDENT_ITEM["student_id"], ASSESSMENT_DICT,
+                              MONDAY)
+        mock_filter.side_effect = DatabaseError("Bad things happened")
+        api.get_evaluations("1")
     @staticmethod
     def _create_student_and_submission(student, answer, date=None):
         new_student_item = STUDENT_ITEM.copy()
         new_student_item["student_id"] = student
         create_submission(new_student_item, answer, date)

-    def _assert_evaluation(self, evaluation, points_earned, points_possible, feedback):
+    def _assert_evaluation(self, evaluation, points_earned, points_possible,
+                           feedback):
         self.assertIsNotNone(evaluation)
         self.assertEqual(evaluation["points_earned"], sum(points_earned))
         self.assertEqual(evaluation["points_possible"], points_possible)
......
+{
+    "unicode_evaluation": {
+        "points_earned": [10, 0, 24, 36],
+        "points_possible": 12,
+        "feedback": "这是中国"
+    },
+    "basic_evaluation": {
+        "points_earned": [1, 0, 3, 2],
+        "points_possible": 12,
+        "feedback": "Your submission was thrilling."
+    }
+}
\ No newline at end of file
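
Since _assert_evaluation compares evaluation["points_earned"] against sum(points_earned), the fixture entries above imply these totals (plain arithmetic, not part of the commit):

    print(sum([10, 0, 24, 36]))  # 70 for unicode_evaluation
    print(sum([1, 0, 3, 2]))     # 6 for basic_evaluation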