Commit d504cf67 by Stephen Sanchez

Fix for TIM-449. Do not use Peer UUID

Conflicts:
	apps/openassessment/assessment/peer_api.py
parent 60ac0ee2
@@ -138,7 +138,7 @@ def get_score(submission_uuid, requirements):


 def create_assessment(
-        submission_uuid,
+        scorer_submission_uuid,
         scorer_id,
         assessment_dict,
         rubric_dict,
@@ -150,9 +150,9 @@ def create_assessment(
     rubric.

     Args:
-        submission_uuid (str): The submission uuid this assessment is associated
-            with. The submission uuid is required and must already exist in the
-            Submission model.
+        scorer_submission_uuid (str): The submission uuid for the Scorer's
+            workflow. The submission being assessed can be determined via the
+            peer workflow of the grading student.
         scorer_id (str): The user ID for the user giving this assessment. This
             is required to create an assessment on a submission.
         assessment_dict (dict): All related information for the assessment. An
@@ -185,7 +185,6 @@ def create_assessment(
         >>> create_assessment("1", "Tim", assessment_dict, rubric_dict)
     """
     try:
-        submission = sub_api.get_submission_and_student(submission_uuid)
         rubric = rubric_from_dict(rubric_dict)

         # Validate that the selected options matched the rubric
@@ -196,11 +195,26 @@ def create_assessment(
             msg = _("Selected options do not match the rubric: {error}").format(error=ex.message)
             raise PeerAssessmentRequestError(msg)

+        scorer_workflow = PeerWorkflow.objects.get(submission_uuid=scorer_submission_uuid)
+
+        open_items = list(scorer_workflow.graded.filter(
+            assessment__isnull=True).order_by("-started_at", "-id")[:1])
+        if not open_items:
+            message = _(
+                u"There are no open assessments associated with the scorer's "
+                u"submission UUID {}.".format(scorer_submission_uuid)
+            )
+            logger.error(message)
+            raise PeerAssessmentWorkflowError(message)
+        item = open_items[0]
+
         feedback = assessment_dict.get('feedback', u'')
         peer_assessment = {
             "rubric": rubric.id,
             "scorer_id": scorer_id,
-            "submission_uuid": submission_uuid,
+            "submission_uuid": item.submission_uuid,
             "score_type": PEER_TYPE,
             "feedback": feedback,
         }
@@ -220,30 +234,26 @@ def create_assessment(
         # option to do validation. We already validated these options above.
         AssessmentPart.add_to_assessment(assessment, option_ids)

-        student_item = submission['student_item']
-        scorer_item = copy.deepcopy(student_item)
-        scorer_item['student_id'] = scorer_id
-
-        scorer_workflow = _get_latest_workflow(scorer_item)
-
-        if not scorer_workflow:
-            raise PeerAssessmentWorkflowError(_(
-                "You must make a submission before assessing another student."))
-
         # Close the active assessment
-        _close_active_assessment(scorer_workflow, submission_uuid, assessment, num_required_grades)
+        _close_active_assessment(scorer_workflow, item.submission_uuid, assessment, num_required_grades)

         assessment_dict = full_assessment_dict(assessment)
-        _log_assessment(assessment, student_item, scorer_item)
+        _log_assessment(assessment, scorer_workflow)

         return assessment_dict
     except DatabaseError:
         error_message = _(
-            u"An error occurred while creating assessment {} for submission: "
-            u"{} by: {}"
-            .format(assessment_dict, submission_uuid, scorer_id)
+            u"An error occurred while creating assessment {} by: {}"
+            .format(assessment_dict, scorer_id)
         )
         logger.exception(error_message)
         raise PeerAssessmentInternalError(error_message)
+    except PeerWorkflow.DoesNotExist:
+        message = _(
+            u"There is no Peer Workflow associated with the given "
+            u"submission UUID {}.".format(scorer_submission_uuid)
+        )
+        logger.error(message)
+        raise PeerAssessmentWorkflowError(message)


 def get_rubric_max_scores(submission_uuid):
@@ -1016,7 +1026,7 @@ def _num_peers_graded(workflow):
     return workflow.graded.filter(assessment__isnull=False).count()


-def _log_assessment(assessment, student_item, scorer_item):
+def _log_assessment(assessment, scorer_workflow):
     """
     Log the creation of a peer assessment.

@@ -1030,23 +1040,22 @@ def _log_assessment(assessment, student_item, scorer_item):
     """
     logger.info(
-        u"Created peer-assessment {assessment_id} for student {user} on "
-        u"submission {submission_uuid}, course {course_id}, item {item_id} "
+        u"Created peer-assessment {assessment_id} for submission "
+        u"{submission_uuid}, course {course_id}, item {item_id} "
         u"with rubric {rubric_content_hash}; scored by {scorer}"
         .format(
             assessment_id=assessment.id,
-            user=student_item['student_id'],
             submission_uuid=assessment.submission_uuid,
-            course_id=student_item['course_id'],
-            item_id=student_item['item_id'],
+            course_id=scorer_workflow.course_id,
+            item_id=scorer_workflow.item_id,
             rubric_content_hash=assessment.rubric.content_hash,
-            scorer=scorer_item['student_id'],
+            scorer=scorer_workflow.student_id,
         )
     )

     tags = [
-        u"course_id:{course_id}".format(course_id=student_item['course_id']),
-        u"item_id:{item_id}".format(item_id=student_item['item_id']),
+        u"course_id:{course_id}".format(course_id=scorer_workflow.course_id),
+        u"item_id:{item_id}".format(item_id=scorer_workflow.item_id),
         u"type:peer",
     ]
...
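The hunks above change the contract of `peer_api.create_assessment`: callers now pass the scorer's own submission UUID, and the API resolves which submission is being assessed from the scorer's `PeerWorkflow` (the most recently started open item), raising `PeerAssessmentWorkflowError` when the scorer has nothing open to grade. A minimal sketch of the new calling convention; the helper name `create_student_and_submission` and the fixtures `ASSESSMENT_DICT`, `RUBRIC_DICT`, and `REQUIRED_GRADED_BY` are assumptions mirroring the tests below, and the import path is assumed from the repo layout:

    # Sketch only; helper and fixture names are assumed.
    from openassessment.assessment import peer_api

    bob_sub, bob = create_student_and_submission("Bob", "Bob's answer")

    # Pulling a peer submission opens a workflow item for Bob; the API
    # later resolves that item server-side instead of trusting a UUID
    # posted by the client.
    peer_api.get_submission_to_assess(bob, REQUIRED_GRADED_BY)

    # Note the first argument: Bob's OWN submission UUID, not the UUID
    # of the submission he is grading.
    assessment = peer_api.create_assessment(
        bob_sub["uuid"],
        bob["student_id"],
        ASSESSMENT_DICT,
        RUBRIC_DICT,
        REQUIRED_GRADED_BY,
    )

The net effect is that the grading target can no longer be spoofed from the browser; it is always derived from server-side workflow state.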
@@ -127,7 +127,7 @@ class TestPeerApi(CacheResetTest):
         bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
         sub = peer_api.get_submission_to_assess(bob, 1)
         assessment = peer_api.create_assessment(
-            sub["uuid"],
+            bob_sub["uuid"],
             bob["student_id"],
             ASSESSMENT_DICT,
             RUBRIC_DICT,
@@ -143,7 +143,7 @@ class TestPeerApi(CacheResetTest):
         sub = peer_api.get_submission_to_assess(bob, 1)
         with self.assertRaises(peer_api.PeerAssessmentRequestError):
             peer_api.create_assessment(
-                sub["uuid"],
+                bob_sub["uuid"],
                 bob["student_id"],
                 ASSESSMENT_DICT_PASS_HUGE,
                 RUBRIC_DICT,
@@ -156,7 +156,7 @@ class TestPeerApi(CacheResetTest):
         bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
         sub = peer_api.get_submission_to_assess(bob, 3)
         peer_api.create_assessment(
-            sub["uuid"],
+            bob_sub["uuid"],
             bob["student_id"],
             assessment_dict,
             RUBRIC_DICT,
@@ -171,7 +171,7 @@ class TestPeerApi(CacheResetTest):
         bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
         sub = peer_api.get_submission_to_assess(bob, 3)
         peer_api.create_assessment(
-            sub["uuid"],
+            bob_sub["uuid"],
             bob["student_id"],
             assessment_dict,
             RUBRIC_DICT,
@@ -195,7 +195,7 @@ class TestPeerApi(CacheResetTest):
         self.assertFalse(finished)
         self.assertEqual(count, 0)
         peer_api.create_assessment(
-            sub["uuid"], bob["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            bob_sub["uuid"], bob["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
             REQUIRED_GRADED_BY,
         )
         finished, count = peer_api.has_finished_required_evaluating(bob, 1)
@@ -227,7 +227,7 @@ class TestPeerApi(CacheResetTest):
             self.assertEquals((False, i), peer_api.has_finished_required_evaluating(STUDENT_ITEM, REQUIRED_GRADED))
             sub = peer_api.get_submission_to_assess(tim, REQUIRED_GRADED)
             peer_api.create_assessment(
-                sub["uuid"], tim["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+                tim_sub["uuid"], tim["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
                 REQUIRED_GRADED_BY,
             )
@@ -240,21 +240,21 @@ class TestPeerApi(CacheResetTest):
         sub = peer_api.get_submission_to_assess(bob, REQUIRED_GRADED)
         self.assertEqual(sub["uuid"], tim_sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], bob["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            bob_sub["uuid"], bob["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
             REQUIRED_GRADED_BY,
         )

         sub = peer_api.get_submission_to_assess(sally, REQUIRED_GRADED)
         self.assertEqual(sub["uuid"], tim_sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], sally["student_id"], ASSESSMENT_DICT_FAIL, RUBRIC_DICT,
+            sally_sub["uuid"], sally["student_id"], ASSESSMENT_DICT_FAIL, RUBRIC_DICT,
             REQUIRED_GRADED_BY,
         )

         sub = peer_api.get_submission_to_assess(jim, REQUIRED_GRADED)
         self.assertEqual(sub["uuid"], tim_sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], jim["student_id"], ASSESSMENT_DICT_PASS, RUBRIC_DICT,
+            jim_sub["uuid"], jim["student_id"], ASSESSMENT_DICT_PASS, RUBRIC_DICT,
             REQUIRED_GRADED_BY,
         )
@@ -339,19 +339,19 @@ class TestPeerApi(CacheResetTest):
         # 10) Buffy goes on to review Bob, Sally, and Jim, but needs two more.
         peer_api.create_assessment(
-            sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            buffy_sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
             REQUIRED_GRADED_BY,
         )
         sub = peer_api.get_submission_to_assess(buffy, REQUIRED_GRADED_BY)
         self.assertEquals(sally_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            buffy_sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
             REQUIRED_GRADED_BY,
         )
         sub = peer_api.get_submission_to_assess(buffy, REQUIRED_GRADED_BY)
         self.assertEquals(jim_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            buffy_sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
             REQUIRED_GRADED_BY,
         )
@@ -366,19 +366,19 @@ class TestPeerApi(CacheResetTest):
         sub = peer_api.get_submission_to_assess(xander, REQUIRED_GRADED_BY)
         self.assertEquals(bob_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            xander_sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
             REQUIRED_GRADED_BY,
         )
         sub = peer_api.get_submission_to_assess(xander, REQUIRED_GRADED_BY)
         self.assertEquals(sally_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            xander_sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
             REQUIRED_GRADED_BY,
         )
         sub = peer_api.get_submission_to_assess(xander, REQUIRED_GRADED_BY)
         self.assertEquals(jim_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            xander_sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
             REQUIRED_GRADED_BY,
         )
@@ -396,7 +396,7 @@ class TestPeerApi(CacheResetTest):
         sub = peer_api.get_submission_to_assess(buffy, REQUIRED_GRADED_BY)
         self.assertEquals(xander_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            buffy_sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
             REQUIRED_GRADED_BY,
         )
@@ -407,32 +407,32 @@ class TestPeerApi(CacheResetTest):
         sub = peer_api.get_submission_to_assess(spike, REQUIRED_GRADED_BY)
         self.assertEquals(bob_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            spike_sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
             REQUIRED_GRADED_BY,
         )

         sub = peer_api.get_submission_to_assess(spike, REQUIRED_GRADED_BY)
         self.assertEquals(sally_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            spike_sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
             REQUIRED_GRADED_BY,
         )
         sub = peer_api.get_submission_to_assess(spike, REQUIRED_GRADED_BY)
         self.assertEquals(jim_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            spike_sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
             REQUIRED_GRADED_BY,
         )
         sub = peer_api.get_submission_to_assess(spike, REQUIRED_GRADED_BY)
         self.assertEquals(buffy_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            spike_sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
             REQUIRED_GRADED_BY,
         )
         sub = peer_api.get_submission_to_assess(spike, REQUIRED_GRADED_BY)
         self.assertEquals(xander_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            spike_sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
             REQUIRED_GRADED_BY,
         )
@@ -440,7 +440,7 @@ class TestPeerApi(CacheResetTest):
         sub = peer_api.get_submission_to_assess(buffy, REQUIRED_GRADED_BY)
         self.assertEquals(spike_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            buffy_sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
             REQUIRED_GRADED_BY,
         )
@@ -451,7 +451,7 @@ class TestPeerApi(CacheResetTest):
         sub = peer_api.get_submission_to_assess(willow, REQUIRED_GRADED_BY)
         self.assertEquals(buffy_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], willow["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            willow_sub["uuid"], willow["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
             REQUIRED_GRADED_BY,
         )
@@ -459,7 +459,7 @@ class TestPeerApi(CacheResetTest):
         sub = peer_api.get_submission_to_assess(xander, REQUIRED_GRADED_BY)
         self.assertEquals(buffy_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            xander_sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
             REQUIRED_GRADED_BY,
         )
@@ -539,7 +539,7 @@ class TestPeerApi(CacheResetTest):
         bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
         sub = peer_api.get_submission_to_assess(bob, 1)
         assessment = peer_api.create_assessment(
-            sub["uuid"],
+            bob_sub["uuid"],
             bob["student_id"],
             ASSESSMENT_DICT,
             RUBRIC_DICT,
@@ -547,7 +547,7 @@ class TestPeerApi(CacheResetTest):
         )
         sub = peer_api.get_submission_to_assess(tim, 1)
         peer_api.create_assessment(
-            sub["uuid"],
+            tim_sub["uuid"],
             tim["student_id"],
             ASSESSMENT_DICT,
             RUBRIC_DICT,
@@ -595,7 +595,7 @@ class TestPeerApi(CacheResetTest):
         self.assertEqual(xander_answer["uuid"], submission["uuid"])
         assessment_dict = peer_api.create_assessment(
-            xander_answer["uuid"], "Buffy", ASSESSMENT_DICT, RUBRIC_DICT,
+            buffy_answer["uuid"], "Buffy", ASSESSMENT_DICT, RUBRIC_DICT,
             REQUIRED_GRADED_BY,
         )
         assessment = Assessment.objects.filter(
@@ -696,7 +696,7 @@ class TestPeerApi(CacheResetTest):
         bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
         sub = peer_api.get_submission_to_assess(bob, 1)
         assessment = peer_api.create_assessment(
-            sub["uuid"],
+            bob_sub["uuid"],
             bob["student_id"],
             ASSESSMENT_DICT,
             RUBRIC_DICT,
@@ -710,6 +710,18 @@ class TestPeerApi(CacheResetTest):
         self.assertEqual(max_scores['secret'], 1)
         self.assertEqual(max_scores['giveup'], 10)

+    @raises(peer_api.PeerAssessmentWorkflowError)
+    def test_no_open_assessment(self):
+        self._create_student_and_submission("Tim", "Tim's answer")
+        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
+        peer_api.create_assessment(
+            bob_sub['uuid'],
+            bob['student_id'],
+            ASSESSMENT_DICT,
+            RUBRIC_DICT,
+            1
+        )
+
     @patch.object(Assessment.objects, 'filter')
     @raises(peer_api.PeerAssessmentInternalError)
     def test_max_score_db_error(self, mock_filter):
@@ -753,7 +765,7 @@ class TestPeerApi(CacheResetTest):
         bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
         sub = peer_api.get_submission_to_assess(bob, 3)
         peer_api.create_assessment(
-            sub["uuid"],
+            bob_sub["uuid"],
             bob["student_id"],
             ASSESSMENT_DICT,
             RUBRIC_DICT,
...
@@ -85,7 +85,7 @@ class Command(BaseCommand):
             # The scorer needs to make a submission before assessing
             scorer_student_item = copy.copy(student_item)
             scorer_student_item['student_id'] = scorer_id
-            self._create_dummy_submission(scorer_student_item)
+            scorer_submission_uuid = self._create_dummy_submission(scorer_student_item)

             # Retrieve the submission we want to score
             # Note that we are NOT using the priority queue here, since we know
@@ -98,7 +98,7 @@ class Command(BaseCommand):
                 'feedback': " ".join(loremipsum.get_paragraphs(2))
             }
             peer_api.create_assessment(
-                submission_uuid,
+                scorer_submission_uuid,
                 scorer_id,
                 assessment,
                 rubric,
...
@@ -66,7 +66,7 @@ class PeerAssessmentMixin(object):
         try:
             assessment = peer_api.create_assessment(
-                data["submission_uuid"],
+                self.submission_uuid,
                 self.get_student_item_dict()["student_id"],
                 assessment_dict,
                 rubric_dict,
...
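This mixin change is the caller-side half of the fix: the handler stops reading the target submission UUID out of the client-posted `data` and instead passes the XBlock's own `submission_uuid`. A hedged sketch of the resulting handler shape; apart from `self.submission_uuid`, `self.rubric_criteria`, `get_student_item_dict`, and `get_assessment_module` (all visible elsewhere in this commit), the names and payload keys are illustrative:

    # Sketch only; payload keys and return shape are assumptions.
    def peer_assess(self, data, suffix=''):
        rubric_dict = {'criteria': self.rubric_criteria}
        assessment_dict = {
            'options_selected': data['options_selected'],
            'feedback': data.get('feedback', u''),
        }
        assessment = peer_api.create_assessment(
            self.submission_uuid,    # server-side state, not client input
            self.get_student_item_dict()['student_id'],
            assessment_dict,
            rubric_dict,
            self.get_assessment_module('peer-assessment')['must_be_graded_by'],
        )
        return {'success': True, 'msg': u''}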
@@ -106,7 +106,7 @@ class SelfAssessmentMixin(object):
         try:
             assessment = self_api.create_assessment(
-                data['submission_uuid'],
+                self.submission_uuid,
                 self.get_student_item_dict()['student_id'],
                 data['options_selected'],
                 {"criteria": self.rubric_criteria}
...
@@ -198,7 +198,7 @@ class TestGrade(XBlockHandlerTestCase):
             # Create an assessment of the user's submission
             if not waiting_for_peer:
                 peer_api.create_assessment(
-                    submission['uuid'], scorer_name,
+                    scorer_sub['uuid'], scorer_name,
                     assessment, {'criteria': xblock.rubric_criteria},
                     xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
                 )
@@ -207,7 +207,7 @@ class TestGrade(XBlockHandlerTestCase):
         for asmnt in peer_assessments:
             new_submission = peer_api.get_submission_to_assess(student_item, len(peers))
             peer_api.create_assessment(
-                new_submission['uuid'], student_id, asmnt, {'criteria': xblock.rubric_criteria},
+                submission['uuid'], student_id, asmnt, {'criteria': xblock.rubric_criteria},
                 xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
             )
...
@@ -41,7 +41,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         sub = peer_api.get_submission_to_assess(hal_student_item, 1)
         assessment['submission_uuid'] = sub['uuid']
         peer_api.create_assessment(
-            sub['uuid'],
+            hal_submission['uuid'],
             hal_student_item['student_id'],
             assessment,
             {'criteria': xblock.rubric_criteria},
@@ -53,7 +53,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         sub = peer_api.get_submission_to_assess(sally_student_item, 1)
         assessment['submission_uuid'] = sub['uuid']
         peer_api.create_assessment(
-            sub['uuid'],
+            sally_submission['uuid'],
             sally_student_item['student_id'],
             assessment,
             {'criteria': xblock.rubric_criteria},
@@ -123,16 +123,16 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         # Create a submission for this problem from another user
         student_item = xblock.get_student_item_dict()
         student_item['student_id'] = 'Sally'
-        submission = xblock.create_submission(student_item, self.SUBMISSION)
+        xblock.create_submission(student_item, self.SUBMISSION)

         # Create a submission for the scorer (required before assessing another student)
         another_student = copy.deepcopy(student_item)
         another_student['student_id'] = "Bob"
-        xblock.create_submission(another_student, self.SUBMISSION)
+        another_submission = xblock.create_submission(another_student, self.SUBMISSION)

         # Submit an assessment, but mutate the options selected so they do NOT match the rubric
         assessment = copy.deepcopy(self.ASSESSMENT)
-        assessment['submission_uuid'] = submission['uuid']
+        assessment['submission_uuid'] = another_submission['uuid']
         assessment['options_selected']['invalid'] = 'not a part of the rubric!'
         resp = self.request(xblock, 'peer_assess', json.dumps(assessment), response_format='json')
@@ -183,7 +183,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         sally_sub = peer_api.get_submission_to_assess(hal_student_item, 1)
         assessment['submission_uuid'] = sally_sub['uuid']
         peer_api.create_assessment(
-            sally_sub['uuid'],
+            hal_submission['uuid'],
             hal_student_item['student_id'],
             assessment,
             {'criteria': xblock.rubric_criteria},
@@ -195,7 +195,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         hal_sub = peer_api.get_submission_to_assess(sally_student_item, 1)
         assessment['submission_uuid'] = hal_sub['uuid']
         peer_api.create_assessment(
-            hal_sub['uuid'],
+            sally_submission['uuid'],
             sally_student_item['student_id'],
             assessment,
             {'criteria': xblock.rubric_criteria},
@@ -217,16 +217,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         hal_response = "Hal".encode('utf-8') in peer_response.body
         sally_response = "Sally".encode('utf-8') in peer_response.body

-        # Validate Peer Rendering.
-        if hal_response:
-            peer_uuid = hal_sub['uuid']
-        elif sally_response:
-            peer_uuid = sally_sub['uuid']
-        else:
-            self.fail("Response was neither Hal or Sally's submission.")
-
         peer_api.create_assessment(
-            peer_uuid,
+            submission['uuid'],
             student_item['student_id'],
             assessment,
             {'criteria': xblock.rubric_criteria},
@@ -240,17 +232,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         self.assertIsNotNone(peer_response)
         self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)

-        # Validate Peer Rendering. Check that if Sally or Hal were selected
-        # the first time around, the other is selected this time.
-        if not hal_response and "Hal".encode('utf-8') in peer_response.body:
-            peer_uuid = hal_sub['uuid']
-        elif not sally_response and "Sally".encode('utf-8') in peer_response.body:
-            peer_uuid = sally_sub['uuid']
-        else:
-            self.fail("Response was neither Hal or Sally's submission.")
-
         peer_api.create_assessment(
-            peer_uuid,
+            submission['uuid'],
             student_item['student_id'],
             assessment,
             {'criteria': xblock.rubric_criteria},
...