Commit d504cf67 by Stephen Sanchez

Fix for TIM-449. Do not use Peer UUID

Conflicts:
	apps/openassessment/assessment/peer_api.py
parent 60ac0ee2
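
In short: create_assessment previously took the UUID of the peer submission being assessed, which the caller had to name. After this change it takes the UUID of the scorer's own submission and resolves the submission under assessment from the scorer's peer workflow. A minimal before/after sketch of a call site (import path inferred from the file layout in this commit; the surrounding setup and the trailing required-grades argument are assumptions):

    from openassessment.assessment import peer_api

    # Before this commit: the caller named the peer's submission directly.
    # assessment = peer_api.create_assessment(
    #     submission_uuid,        # UUID of the submission being assessed
    #     scorer_id, assessment_dict, rubric_dict, num_required_grades)

    # After this commit: the caller passes the scorer's own submission UUID;
    # the API finds the open item in the scorer's workflow and assesses that.
    assessment = peer_api.create_assessment(
        scorer_submission_uuid,   # UUID of the scorer's submission
        scorer_id, assessment_dict, rubric_dict, num_required_grades)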
@@ -138,7 +138,7 @@ def get_score(submission_uuid, requirements):
 def create_assessment(
-        submission_uuid,
+        scorer_submission_uuid,
         scorer_id,
         assessment_dict,
         rubric_dict,
@@ -150,9 +150,9 @@ def create_assessment(
     rubric.
 
     Args:
-        submission_uuid (str): The submission uuid this assessment is associated
-            with. The submission uuid is required and must already exist in the
-            Submission model.
+        scorer_submission_uuid (str): The submission uuid for the Scorer's
+            workflow. The submission being assessed can be determined via the
+            peer workflow of the grading student.
         scorer_id (str): The user ID for the user giving this assessment. This
             is required to create an assessment on a submission.
         assessment_dict (dict): All related information for the assessment. An
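
The assessment_dict description is truncated in this hunk; from the implementation below it carries at least an 'options_selected' mapping (validated against the rubric) and an optional 'feedback' string. A hypothetical shape, with placeholder criterion and option names:

    assessment_dict = {
        # Criterion name -> selected option name; these keys are placeholders.
        'options_selected': {'clarity': 'clear', 'accuracy': 'accurate'},
        # Optional; create_assessment falls back to u'' when absent.
        'feedback': u'Strong thesis, but cite your sources.',
    }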
@@ -185,7 +185,6 @@ def create_assessment(
         >>> create_assessment("1", "Tim", assessment_dict, rubric_dict)
     """
     try:
-        submission = sub_api.get_submission_and_student(submission_uuid)
         rubric = rubric_from_dict(rubric_dict)
         # Validate that the selected options matched the rubric
@@ -196,11 +195,26 @@ def create_assessment(
             msg = _("Selected options do not match the rubric: {error}").format(error=ex.message)
             raise PeerAssessmentRequestError(msg)
 
+        scorer_workflow = PeerWorkflow.objects.get(submission_uuid=scorer_submission_uuid)
+
+        open_items = list(scorer_workflow.graded.filter(
+            assessment__isnull=True).order_by("-started_at", "-id")[:1])
+        if not open_items:
+            message = _(
+                u"There are no open assessments associated with the scorer's "
+                u"submission UUID {}.".format(scorer_submission_uuid)
+            )
+            logger.error(message)
+            raise PeerAssessmentWorkflowError(message)
+
+        item = open_items[0]
+
         feedback = assessment_dict.get('feedback', u'')
         peer_assessment = {
             "rubric": rubric.id,
             "scorer_id": scorer_id,
-            "submission_uuid": submission_uuid,
+            "submission_uuid": item.submission_uuid,
             "score_type": PEER_TYPE,
             "feedback": feedback,
         }
@@ -220,30 +234,26 @@ def create_assessment(
         # option to do validation. We already validated these options above.
         AssessmentPart.add_to_assessment(assessment, option_ids)
 
-        student_item = submission['student_item']
-        scorer_item = copy.deepcopy(student_item)
-        scorer_item['student_id'] = scorer_id
-
-        scorer_workflow = _get_latest_workflow(scorer_item)
-
-        if not scorer_workflow:
-            raise PeerAssessmentWorkflowError(_(
-                "You must make a submission before assessing another student."))
-
         # Close the active assessment
-        _close_active_assessment(scorer_workflow, submission_uuid, assessment, num_required_grades)
+        _close_active_assessment(scorer_workflow, item.submission_uuid, assessment, num_required_grades)
         assessment_dict = full_assessment_dict(assessment)
-        _log_assessment(assessment, student_item, scorer_item)
+        _log_assessment(assessment, scorer_workflow)
 
         return assessment_dict
     except DatabaseError:
         error_message = _(
-            u"An error occurred while creating assessment {} for submission: "
-            u"{} by: {}"
-            .format(assessment_dict, submission_uuid, scorer_id)
+            u"An error occurred while creating assessment {} by: {}"
+            .format(assessment_dict, scorer_id)
         )
         logger.exception(error_message)
         raise PeerAssessmentInternalError(error_message)
+    except PeerWorkflow.DoesNotExist:
+        message = _(
+            u"There is no Peer Workflow associated with the given "
+            u"submission UUID {}.".format(scorer_submission_uuid)
+        )
+        logger.error(message)
+        raise PeerAssessmentWorkflowError(message)
 
 
 def get_rubric_max_scores(submission_uuid):
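
Callers now have two extra failure modes to consider: PeerWorkflow.DoesNotExist (surfaced as PeerAssessmentWorkflowError) when no workflow matches the scorer's UUID, and the no-open-items case added above. A sketch of defensive handling at a call site (exception names come from this diff; the import paths and logger setup are assumptions):

    import logging

    from openassessment.assessment import peer_api
    from openassessment.assessment.peer_api import (
        PeerAssessmentInternalError,
        PeerAssessmentRequestError,
        PeerAssessmentWorkflowError,
    )

    logger = logging.getLogger(__name__)

    try:
        assessment = peer_api.create_assessment(
            scorer_submission_uuid, scorer_id,
            assessment_dict, rubric_dict, num_required_grades)
    except PeerAssessmentRequestError:
        # The selected options did not match the rubric definition.
        logger.warning("Malformed peer assessment request", exc_info=True)
    except PeerAssessmentWorkflowError:
        # No PeerWorkflow for this UUID, or no open (unassessed) item in it,
        # e.g. the scorer never checked out a submission to grade.
        logger.warning("Scorer has no open assessment", exc_info=True)
    except PeerAssessmentInternalError:
        # Database failure while persisting the assessment.
        logger.exception("Peer assessment failed internally")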
@@ -1016,7 +1026,7 @@ def _num_peers_graded(workflow):
     return workflow.graded.filter(assessment__isnull=False).count()
 
 
-def _log_assessment(assessment, student_item, scorer_item):
+def _log_assessment(assessment, scorer_workflow):
     """
     Log the creation of a peer assessment.
@@ -1030,23 +1040,22 @@ def _log_assessment(assessment, student_item, scorer_item):
     """
     logger.info(
-        u"Created peer-assessment {assessment_id} for student {user} on "
-        u"submission {submission_uuid}, course {course_id}, item {item_id} "
+        u"Created peer-assessment {assessment_id} for submission "
+        u"{submission_uuid}, course {course_id}, item {item_id} "
         u"with rubric {rubric_content_hash}; scored by {scorer}"
         .format(
             assessment_id=assessment.id,
-            user=student_item['student_id'],
             submission_uuid=assessment.submission_uuid,
-            course_id=student_item['course_id'],
-            item_id=student_item['item_id'],
+            course_id=scorer_workflow.course_id,
+            item_id=scorer_workflow.item_id,
             rubric_content_hash=assessment.rubric.content_hash,
-            scorer=scorer_item['student_id'],
+            scorer=scorer_workflow.student_id,
         )
     )
 
     tags = [
-        u"course_id:{course_id}".format(course_id=student_item['course_id']),
-        u"item_id:{item_id}".format(item_id=student_item['item_id']),
+        u"course_id:{course_id}".format(course_id=scorer_workflow.course_id),
+        u"item_id:{item_id}".format(item_id=scorer_workflow.item_id),
         u"type:peer",
     ]
......
@@ -85,7 +85,7 @@ class Command(BaseCommand):
             # The scorer needs to make a submission before assessing
             scorer_student_item = copy.copy(student_item)
             scorer_student_item['student_id'] = scorer_id
-            self._create_dummy_submission(scorer_student_item)
+            scorer_submission_uuid = self._create_dummy_submission(scorer_student_item)
 
             # Retrieve the submission we want to score
             # Note that we are NOT using the priority queue here, since we know
@@ -98,7 +98,7 @@
                 'feedback': " ".join(loremipsum.get_paragraphs(2))
             }
             peer_api.create_assessment(
-                submission_uuid,
+                scorer_submission_uuid,
                 scorer_id,
                 assessment,
                 rubric,
......
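
The management command follows the same pattern as the API change: create the scorer's dummy submission first, keep the returned UUID, and pass that UUID, rather than the assessed submission's, to create_assessment. Condensed (helper names come from the diff; the elided steps and the trailing required-grades argument are assumptions):

    # The scorer must have a submission of their own before assessing.
    scorer_submission_uuid = self._create_dummy_submission(scorer_student_item)

    # ... fetch a peer submission to score and build `assessment`/`rubric` ...

    peer_api.create_assessment(
        scorer_submission_uuid,   # the scorer's UUID, not the peer's
        scorer_id, assessment, rubric, num_required_grades)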
@@ -66,7 +66,7 @@ class PeerAssessmentMixin(object):
         try:
             assessment = peer_api.create_assessment(
-                data["submission_uuid"],
+                self.submission_uuid,
                 self.get_student_item_dict()["student_id"],
                 assessment_dict,
                 rubric_dict,
......
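
The practical effect in the XBlock handlers: the server stops trusting the submission UUID posted by the browser (data["submission_uuid"]) and instead uses state it already holds (self.submission_uuid), so a client can no longer name an arbitrary submission to assess. A condensed, hypothetical handler shape (the method name and the omitted validation are illustrative; the must_be_graded_by lookup appears in the test diffs below):

    def peer_assess(self, data, suffix=''):
        # assessment_dict and rubric_dict are built from `data` and the
        # block's rubric definition (elided here).
        assessment = peer_api.create_assessment(
            self.submission_uuid,   # server-side state, not client input
            self.get_student_item_dict()['student_id'],
            assessment_dict,
            rubric_dict,
            self.get_assessment_module('peer-assessment')['must_be_graded_by'],
        )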
@@ -106,7 +106,7 @@ class SelfAssessmentMixin(object):
         try:
             assessment = self_api.create_assessment(
-                data['submission_uuid'],
+                self.submission_uuid,
                 self.get_student_item_dict()['student_id'],
                 data['options_selected'],
                 {"criteria": self.rubric_criteria}
......
@@ -198,7 +198,7 @@ class TestGrade(XBlockHandlerTestCase):
             # Create an assessment of the user's submission
             if not waiting_for_peer:
                 peer_api.create_assessment(
-                    submission['uuid'], scorer_name,
+                    scorer_sub['uuid'], scorer_name,
                     assessment, {'criteria': xblock.rubric_criteria},
                     xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
                 )
@@ -207,7 +207,7 @@
         for asmnt in peer_assessments:
             new_submission = peer_api.get_submission_to_assess(student_item, len(peers))
             peer_api.create_assessment(
-                new_submission['uuid'], student_id, asmnt, {'criteria': xblock.rubric_criteria},
+                submission['uuid'], student_id, asmnt, {'criteria': xblock.rubric_criteria},
                 xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
             )
......
@@ -41,7 +41,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         sub = peer_api.get_submission_to_assess(hal_student_item, 1)
         assessment['submission_uuid'] = sub['uuid']
         peer_api.create_assessment(
-            sub['uuid'],
+            hal_submission['uuid'],
             hal_student_item['student_id'],
             assessment,
             {'criteria': xblock.rubric_criteria},
@@ -53,7 +53,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         sub = peer_api.get_submission_to_assess(sally_student_item, 1)
         assessment['submission_uuid'] = sub['uuid']
         peer_api.create_assessment(
-            sub['uuid'],
+            sally_submission['uuid'],
             sally_student_item['student_id'],
             assessment,
             {'criteria': xblock.rubric_criteria},
@@ -123,16 +123,16 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         # Create a submission for this problem from another user
         student_item = xblock.get_student_item_dict()
         student_item['student_id'] = 'Sally'
-        submission = xblock.create_submission(student_item, self.SUBMISSION)
+        xblock.create_submission(student_item, self.SUBMISSION)
 
         # Create a submission for the scorer (required before assessing another student)
         another_student = copy.deepcopy(student_item)
         another_student['student_id'] = "Bob"
-        xblock.create_submission(another_student, self.SUBMISSION)
+        another_submission = xblock.create_submission(another_student, self.SUBMISSION)
 
         # Submit an assessment, but mutate the options selected so they do NOT match the rubric
         assessment = copy.deepcopy(self.ASSESSMENT)
-        assessment['submission_uuid'] = submission['uuid']
+        assessment['submission_uuid'] = another_submission['uuid']
         assessment['options_selected']['invalid'] = 'not a part of the rubric!'
 
         resp = self.request(xblock, 'peer_assess', json.dumps(assessment), response_format='json')
@@ -183,7 +183,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         sally_sub = peer_api.get_submission_to_assess(hal_student_item, 1)
         assessment['submission_uuid'] = sally_sub['uuid']
         peer_api.create_assessment(
-            sally_sub['uuid'],
+            hal_submission['uuid'],
             hal_student_item['student_id'],
             assessment,
             {'criteria': xblock.rubric_criteria},
@@ -195,7 +195,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         hal_sub = peer_api.get_submission_to_assess(sally_student_item, 1)
         assessment['submission_uuid'] = hal_sub['uuid']
         peer_api.create_assessment(
-            hal_sub['uuid'],
+            sally_submission['uuid'],
             sally_student_item['student_id'],
             assessment,
             {'criteria': xblock.rubric_criteria},
@@ -217,16 +217,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         hal_response = "Hal".encode('utf-8') in peer_response.body
         sally_response = "Sally".encode('utf-8') in peer_response.body
 
-        # Validate Peer Rendering.
-        if hal_response:
-            peer_uuid = hal_sub['uuid']
-        elif sally_response:
-            peer_uuid = sally_sub['uuid']
-        else:
-            self.fail("Response was neither Hal or Sally's submission.")
-
         peer_api.create_assessment(
-            peer_uuid,
+            submission['uuid'],
             student_item['student_id'],
             assessment,
             {'criteria': xblock.rubric_criteria},
@@ -240,17 +232,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         self.assertIsNotNone(peer_response)
         self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)
 
-        # Validate Peer Rendering. Check that if Sally or Hal were selected
-        # the first time around, the other is selected this time.
-        if not hal_response and "Hal".encode('utf-8') in peer_response.body:
-            peer_uuid = hal_sub['uuid']
-        elif not sally_response and "Sally".encode('utf-8') in peer_response.body:
-            peer_uuid = sally_sub['uuid']
-        else:
-            self.fail("Response was neither Hal or Sally's submission.")
-
         peer_api.create_assessment(
-            peer_uuid,
+            submission['uuid'],
             student_item['student_id'],
             assessment,
             {'criteria': xblock.rubric_criteria},
......