Commit 807710f2 by Stephen Sanchez

Some changes to the API based on review comments

parent 413ce1de
@@ -8,6 +8,7 @@ import copy
import logging
from django.db import DatabaseError
import math
from openassessment.peer.models import PeerEvaluation
from openassessment.peer.serializers import PeerEvaluationSerializer
@@ -194,7 +195,7 @@ def _check_if_finished_and_create_score(student_item,
student_item.student_id,
required_evaluations_for_student
)
evaluations = PeerEvaluation.objects.filter(submission=submission).order_by("-points_earned")
evaluations = PeerEvaluation.objects.filter(submission=submission)
submission_finished = evaluations.count() >= required_evaluations_for_submission
scores = []
for evaluation in evaluations:
@@ -217,12 +218,14 @@ def _calculate_final_score(scores):
"""
total_scores = len(scores)
scores = sorted(scores)
median = int(math.ceil(total_scores / float(2)))
if total_scores == 0:
return 0
elif total_scores % 2:
return scores[total_scores / 2]
return scores[median-1]
else:
return (scores[total_scores / 2 - 1] + scores[total_scores / 2]) / 2
return int(math.ceil(sum(scores[median-1:median+1])/float(2)))
def has_finished_required_evaluating(student_id, required_evaluations):
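
For reference, here is a standalone sketch of the revised median logic above; it is not part of the commit, and the sample inputs are taken from the test assertions further down in this diff:

import math

def median_score(scores):
    """Median of a list of integer scores; halves round up (math.ceil)."""
    total_scores = len(scores)
    scores = sorted(scores)
    median = int(math.ceil(total_scores / float(2)))
    if total_scores == 0:
        return 0
    elif total_scores % 2:
        # Odd count: the middle element is the median.
        return scores[median - 1]
    else:
        # Even count: average the two middle elements, rounding up.
        return int(math.ceil(sum(scores[median - 1:median + 1]) / float(2)))

# Values matching test_calculate_final_score below:
# median_score([5, 6])                       -> 6   (ceil of 5.5)
# median_score([5, 6, 12, 16, 22, 53])       -> 14  ((12 + 16) / 2)
# median_score([5, 6, 12, 16, 22, 53, 102])  -> 16  (middle element)
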
@@ -352,7 +355,7 @@ def get_submission_to_evaluate(student_item_dict, required_num_evaluations):
>>> item_type="type_one",
>>> student_id="Bob",
>>> )
>>> get_submission_to_evaluate(student_item_dict)
>>> get_submission_to_evaluate(student_item_dict, 3)
{
'student_item': 2,
'attempt_number': 1,
......
@@ -9,7 +9,7 @@ from mock import patch
from openassessment.peer import api
from openassessment.peer.models import PeerEvaluation
from submissions.api import create_submission, SubmissionInternalError
from submissions import api as sub_api
from submissions.models import Submission
from submissions.tests.test_api import STUDENT_ITEM, ANSWER_ONE
@@ -31,7 +31,7 @@ THURSDAY = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC)
@ddt
class TestApi(TestCase):
def test_create_evaluation(self):
submission = create_submission(STUDENT_ITEM, ANSWER_ONE)
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
evaluation = api.create_evaluation(
submission["uuid"],
STUDENT_ITEM["student_id"],
@@ -43,7 +43,7 @@ class TestApi(TestCase):
@file_data('test_valid_evaluations.json')
def test_get_evaluations(self, assessment_dict):
submission = create_submission(STUDENT_ITEM, ANSWER_ONE)
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
api.create_evaluation(
submission["uuid"],
STUDENT_ITEM["student_id"],
@@ -57,7 +57,7 @@ class TestApi(TestCase):
@file_data('test_valid_evaluations.json')
def test_get_evaluations_with_date(self, assessment_dict):
submission = create_submission(STUDENT_ITEM, ANSWER_ONE)
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
api.create_evaluation(
submission["uuid"],
STUDENT_ITEM["student_id"],
@@ -71,36 +71,61 @@ class TestApi(TestCase):
self._assert_evaluation(evaluations[0], **assessment_dict)
self.assertEqual(evaluations[0]["scored_at"], MONDAY)
def test_student_finished_evaluating(self):
bob = self._create_student_and_submission("Tim", "Tim's answer")
def test_peer_evaluation_workflow(self):
tim = self._create_student_and_submission("Tim", "Tim's answer")
bob = self._create_student_and_submission("Bob", "Bob's answer")
sally = self._create_student_and_submission("Sally", "Sally's answer")
jim = self._create_student_and_submission("Jim", "Jim's answer")
buffy = self._create_student_and_submission("Buffy", "Buffy's answer")
xander = self._create_student_and_submission("Xander", "Xander's answer")
self.assertFalse(api.has_finished_required_evaluating("Tim", 3))
# Tim should not have a score, because he has not evaluated enough
# peer submissions.
scores = sub_api.get_score(STUDENT_ITEM)
self.assertFalse(scores)
self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
api.create_evaluation(
bob["uuid"],
"Tim",
REQUIRED_GRADED,
REQUIRED_GRADED_BY,
ASSESSMENT_DICT
bob["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
api.create_evaluation(
sally["uuid"],
"Tim",
REQUIRED_GRADED,
REQUIRED_GRADED_BY,
ASSESSMENT_DICT
sally["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
self.assertFalse(api.has_finished_required_evaluating("Tim", 3))
self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
api.create_evaluation(
jim["uuid"],
"Tim",
REQUIRED_GRADED,
REQUIRED_GRADED_BY,
ASSESSMENT_DICT
jim["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
api.create_evaluation(
buffy["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
self.assertTrue(api.has_finished_required_evaluating("Tim", 3))
self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
api.create_evaluation(
xander["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
self.assertTrue(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
# Tim should not have a score, because his submission does not have
# enough evaluations.
scores = sub_api.get_score(STUDENT_ITEM)
self.assertFalse(scores)
api.create_evaluation(
tim["uuid"], "Bob", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
api.create_evaluation(
tim["uuid"], "Sally", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
api.create_evaluation(
tim["uuid"], "Jim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
# Tim has met the criteria, and should now have a score.
scores = sub_api.get_score(STUDENT_ITEM)
self.assertTrue(scores)
self.assertEqual(6, scores[0]["points_earned"])
self.assertEqual(12, scores[0]["points_possible"])
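
The workflow above gates score creation on two counts. The following condensed sketch (hypothetical helper name and parameters, not the actual code in peer/api.py) summarizes that condition:

def _is_ready_for_score(evaluations_given, evaluations_received,
                        required_graded, required_graded_by):
    # Hypothetical sketch: a score is created only once the student has
    # evaluated enough peers AND the student's own submission has received
    # enough peer evaluations (REQUIRED_GRADED / REQUIRED_GRADED_BY above).
    return (evaluations_given >= required_graded and
            evaluations_received >= required_graded_by)
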
@raises(api.PeerEvaluationRequestError)
def test_bad_configuration(self):
@@ -133,7 +158,7 @@ class TestApi(TestCase):
@raises(api.PeerEvaluationInternalError)
def test_error_on_evaluation_creation(self, mock_filter):
mock_filter.side_effect = DatabaseError("Bad things happened")
submission = create_submission(STUDENT_ITEM, ANSWER_ONE)
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
api.create_evaluation(
submission["uuid"],
STUDENT_ITEM["student_id"],
@@ -144,9 +169,9 @@ class TestApi(TestCase):
)
@patch.object(PeerEvaluation.objects, 'filter')
@raises(SubmissionInternalError)
@raises(sub_api.SubmissionInternalError)
def test_error_on_get_evaluation(self, mock_filter):
submission = create_submission(STUDENT_ITEM, ANSWER_ONE)
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
api.create_evaluation(
submission["uuid"],
STUDENT_ITEM["student_id"],
@@ -162,16 +187,18 @@ class TestApi(TestCase):
self.assertEqual(0, api._calculate_final_score([]))
self.assertEqual(5, api._calculate_final_score([5]))
# median of 5 and 6 is 5.5, rounded up.
self.assertEqual(5, api._calculate_final_score([5, 6]))
self.assertEqual(6, api._calculate_final_score([5, 6]))
self.assertEqual(14, api._calculate_final_score([5, 6, 12, 16, 22, 53]))
self.assertEqual(14, api._calculate_final_score([6, 5, 12, 53, 16, 22]))
self.assertEqual(16, api._calculate_final_score([5, 6, 12, 16, 22, 53, 102]))
self.assertEqual(16, api._calculate_final_score([16, 6, 12, 102, 22, 53, 5]))
@staticmethod
def _create_student_and_submission(student, answer, date=None):
new_student_item = STUDENT_ITEM.copy()
new_student_item["student_id"] = student
return create_submission(new_student_item, answer, date)
return sub_api.create_submission(new_student_item, answer, date)
def _assert_evaluation(self, evaluation, points_earned, points_possible,
feedback):
......
@@ -107,7 +107,6 @@ MIDDLEWARE_CLASSES = (
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django_pdb.middleware.PdbMiddleware', # Needed to enable shell-on-crash behavior
)
ROOT_URLCONF = 'urls'
@@ -131,7 +130,6 @@ INSTALLED_APPS = (
# Third party
'django_extensions',
'django_pdb', # Allows post-mortem debugging on exceptions
# XBlock
'workbench',
......
"""
Dev-specific Django settings.
"""
# Inherit from base settings
from .base import *
MIDDLEWARE_CLASSES += (
'django_pdb.middleware.PdbMiddleware', # Needed to enable shell-on-crash behavior
)
INSTALLED_APPS += (
'django_pdb', # Allows post-mortem debugging on exceptions
)
\ No newline at end of file