Commit 01fa7f2a by David Ormsbee

Fix broken Peer API tests

parent a6249bc1
@@ -3,6 +3,7 @@
"criteria": [
{
"order_num": 0,
"name": "realistic",
"prompt": "Is the deadline realistic?",
"options": [
{
@@ -27,6 +28,7 @@
},
{
"order_num": 1,
"name": "architecture",
"prompt": "Describe the architecture.",
"options": [
]
@@ -3,10 +3,12 @@
"criteria": [
{
"order_num": 0,
"name": "realistic",
"prompt": "Is the deadline realistic?"
},
{
"order_num": 1,
"name": "architecture",
"prompt": "Describe the architecture.",
"options": [
{
@@ -15,10 +15,60 @@ from submissions import api as sub_api
from submissions.models import Submission
from submissions.tests.test_api import STUDENT_ITEM, ANSWER_ONE
# Possible points: 14
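# (top-scoring option per criterion: secret=1, ⓢⓐⓕⓔ=1, giveup=10, singing=2)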
RUBRIC_DICT = {
"criteria": [
{
"name": "secret",
"prompt": "Did the writer keep it secret?",
"options": [
{"name": "no", "points": "0", "explanation": ""},
{"name": "yes", "points": "1", "explanation": ""},
]
},
{
"name": u"ⓢⓐⓕⓔ",
"prompt": "Did the writer keep it safe?",
"options": [
{"name": "no", "points": "0", "explanation": ""},
{"name": "yes", "points": "1", "explanation": ""},
]
},
{
"name": "giveup",
"prompt": "How willing is the writer to give up the ring?",
"options": [
{
"name": "unwilling",
"points": "0",
"explanation": "Likely to use force to keep it."
},
{
"name": "reluctant",
"points": "3",
"explanation": "May argue, but will give it up voluntarily."
},
{
"name": "eager",
"points": "10",
"explanation": "Happy to give it up."
}
]
},
{
"name": "singing",
"prompt": "Did the writer break into tedious elvish lyrics?",
"options": [
{"name": "no", "points": "2", "explanation": ""},
{"name": "yes", "points": "0", "explanation": ""}
]
},
]
}
# Answers are against RUBRIC_DICT -- this is worth 6 points
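# (secret: "yes"=1, ⓢⓐⓕⓔ: "no"=0, giveup: "reluctant"=3, singing: "no"=2)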
ASSESSMENT_DICT = dict(
points_earned=[1, 0, 3, 2],
points_possible=14,
feedback="Your submission was thrilling.",
feedback=u"这是中国",
options_selected={
"secret": "yes",
u"ⓢⓐⓕⓔ": "no",
@@ -27,56 +77,6 @@ ASSESSMENT_DICT = dict(
}
)
RUBRIC_DICT = dict(
criteria=[
dict(
name="secret",
prompt="Did the writer keep it secret?",
options=[
dict(name="no", points="0", explanation=""),
dict(name="yes", points="1", explanation="")
]
),
dict(
name=u"ⓢⓐⓕⓔ",
prompt="Did the writer keep it safe?",
options=[
dict(name="no", points="0", explanation=""),
dict(name="yes", points="1", explanation="")
]
),
dict(
name="giveup",
prompt="How willing is the writer to give up the ring?",
options=[
dict(
name="unwilling",
points="0",
explanation="Likely to use force to keep it."
),
dict(
name="reluctant",
points="3",
explanation="May argue, but will give it up voluntarily."
),
dict(
name="eager",
points="10",
explanation="Happy to give it up."
)
]
),
dict(
name="singing",
prompt="Did the writer break into tedious elvish lyrics?",
options=[
dict(name="no", points="2", explanation=""),
dict(name="yes", points="0", explanation="")
]
),
]
)
REQUIRED_GRADED = 5
REQUIRED_GRADED_BY = 3
@@ -88,9 +88,9 @@ THURSDAY = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC)
@ddt
class TestApi(TestCase):
def test_create_evaluation(self):
def test_create_assessment(self):
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
evaluation = peer_api.create_assessment(
assessment = peer_api.create_assessment(
submission["uuid"],
STUDENT_ITEM["student_id"],
REQUIRED_GRADED,
@@ -98,10 +98,11 @@ class TestApi(TestCase):
ASSESSMENT_DICT,
RUBRIC_DICT,
)
self._assert_evaluation(evaluation, **ASSESSMENT_DICT)
self.assertEqual(assessment["points_earned"], 6)
self.assertEqual(assessment["points_possible"], 14)
@file_data('test_valid_evaluations.json')
def test_get_evaluations(self, assessment_dict):
@file_data('valid_assessments.json')
def test_get_assessments(self, assessment_dict):
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
peer_api.create_assessment(
submission["uuid"],
@@ -111,12 +112,11 @@ class TestApi(TestCase):
assessment_dict,
RUBRIC_DICT,
)
evaluations = peer_api.get_assessments(submission["uuid"])
self.assertEqual(1, len(evaluations))
self._assert_evaluation(evaluations[0], **assessment_dict)
assessments = peer_api.get_assessments(submission["uuid"])
self.assertEqual(1, len(assessments))
@file_data('test_valid_evaluations.json')
def test_get_evaluations_with_date(self, assessment_dict):
@file_data('valid_assessments.json')
def test_get_assessments_with_date(self, assessment_dict):
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
peer_api.create_assessment(
submission["uuid"],
@@ -127,12 +127,11 @@ class TestApi(TestCase):
RUBRIC_DICT,
MONDAY
)
evaluations = peer_api.get_assessments(submission["uuid"])
self.assertEqual(1, len(evaluations))
self._assert_evaluation(evaluations[0], **assessment_dict)
self.assertEqual(evaluations[0]["scored_at"], MONDAY)
assessments = peer_api.get_assessments(submission["uuid"])
self.assertEqual(1, len(assessments))
self.assertEqual(assessments[0]["scored_at"], MONDAY)
def test_peer_evaluation_workflow(self):
def test_peer_assessment_workflow(self):
tim = self._create_student_and_submission("Tim", "Tim's answer")
bob = self._create_student_and_submission("Bob", "Bob's answer")
sally = self._create_student_and_submission("Sally", "Sally's answer")
@@ -168,7 +167,7 @@ class TestApi(TestCase):
self.assertTrue(peer_api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
# Tim should not have a score, because his submission does not have
# enough evaluations.
# enough assessments.
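# (i.e., fewer than REQUIRED_GRADED_BY peers have assessed it so far)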
scores = sub_api.get_score(STUDENT_ITEM)
self.assertFalse(scores)
@@ -186,7 +185,7 @@ class TestApi(TestCase):
scores = sub_api.get_score(STUDENT_ITEM)
self.assertTrue(scores)
self.assertEqual(6, scores[0]["points_earned"])
self.assertEqual(12, scores[0]["points_possible"])
self.assertEqual(14, scores[0]["points_possible"])
@raises(peer_api.PeerAssessmentRequestError)
@@ -212,13 +211,10 @@ class TestApi(TestCase):
self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
peer_api.get_submission_to_assess(STUDENT_ITEM, 3)
"""
Some Error Checking Tests against DB failures.
"""
@patch.object(Submission.objects, 'get')
@raises(peer_api.PeerAssessmentInternalError)
def test_error_on_evaluation_creation(self, mock_filter):
def test_error_on_assessment_creation(self, mock_filter):
mock_filter.side_effect = DatabaseError("Bad things happened")
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
peer_api.create_assessment(
@@ -233,7 +229,7 @@ class TestApi(TestCase):
@patch.object(Assessment.objects, 'filter')
@raises(sub_api.SubmissionInternalError)
def test_error_on_get_evaluation(self, mock_filter):
def test_error_on_get_assessment(self, mock_filter):
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
peer_api.create_assessment(
submission["uuid"],
@@ -263,12 +259,3 @@ class TestApi(TestCase):
new_student_item = STUDENT_ITEM.copy()
new_student_item["student_id"] = student
return sub_api.create_submission(new_student_item, answer, date)
def _assert_evaluation(self, evaluation, points_earned, points_possible,
feedback, options_selected):
print evaluation
self.assertIsNotNone(evaluation)
self.assertEqual(evaluation["points_earned"], sum(points_earned))
self.assertEqual(evaluation["points_possible"], points_possible)
# self.assertEqual(evaluation["feedback"], feedback)
{
"unicode_evaluation": {
"points_earned": [10, 0, 24, 36],
"points_possible": 12,
"feedback": "这是中国",
"options_selected": {
"secret": "yes",
"ⓢⓐⓕⓔ": "no",
"giveup": "reluctant",
"singing": "no"
}
},
"basic_evaluation": {
"points_earned": [1, 0, 3, 2],
"points_possible": 12,
"feedback": "Your submission was thrilling.",
"options_selected": {
"secret": "yes",
"ⓢⓐⓕⓔ": "no",
"giveup": "reluctant",
"singing": "no"
}
}
}
\ No newline at end of file