Commit 313a6c1e by Stephen Sanchez

Merge pull request #264 from edx/sanchez/TIM-449-No-Sub-Matching-UUID

Fix for TIM-449. Do not use Peer UUID
parents e90a7b73 904f0749
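
In short, `peer_api.create_assessment` no longer receives the UUID of the submission being assessed; it now receives the scorer's own submission UUID and resolves the submission to grade from the scorer's peer workflow. A minimal before/after sketch of the call (not part of the diff; it assumes `scorer_id`, `assessment_dict`, `rubric_dict`, and `num_required_grades` are already in scope, built the same way the XBlock mixin and tests below build them):

# Before this commit: callers passed the UUID of the peer submission being
# assessed (in the XBlock handler, a value sent back by the client).
assessment = peer_api.create_assessment(
    submission_uuid,          # UUID of the submission being assessed
    scorer_id,
    assessment_dict,
    rubric_dict,
    num_required_grades,
)

# After this commit: callers pass the scorer's OWN submission UUID; the
# submission being assessed is looked up server-side from the scorer's
# peer workflow (the most recently started item with no assessment yet).
assessment = peer_api.create_assessment(
    scorer_submission_uuid,
    scorer_id,
    assessment_dict,
    rubric_dict,
    num_required_grades,
)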
@@ -138,7 +138,7 @@ def get_score(submission_uuid, requirements):


 def create_assessment(
-        submission_uuid,
+        scorer_submission_uuid,
         scorer_id,
         assessment_dict,
         rubric_dict,
@@ -150,9 +150,9 @@ def create_assessment(
     rubric.

     Args:
-        submission_uuid (str): The submission uuid this assessment is associated
-            with. The submission uuid is required and must already exist in the
-            Submission model.
+        scorer_submission_uuid (str): The submission uuid for the Scorer's
+            workflow. The submission being assessed can be determined via the
+            peer workflow of the grading student.
         scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
         assessment_dict (dict): All related information for the assessment. An
@@ -185,7 +185,6 @@ def create_assessment(
         >>> create_assessment("1", "Tim", assessment_dict, rubric_dict)
     """
     try:
-        submission = sub_api.get_submission_and_student(submission_uuid)
         rubric = rubric_from_dict(rubric_dict)

         # Validate that the selected options matched the rubric
@@ -196,11 +195,26 @@ def create_assessment(
             msg = _("Selected options do not match the rubric: {error}").format(error=ex.message)
             raise PeerAssessmentRequestError(msg)

+        scorer_workflow = PeerWorkflow.objects.get(submission_uuid=scorer_submission_uuid)
+
+        open_items = list(scorer_workflow.graded.filter(
+            assessment__isnull=True).order_by("-started_at", "-id")[:1])
+
+        if not open_items:
+            message = _(
+                u"There are no open assessments associated with the scorer's "
+                u"submission UUID {}.".format(scorer_submission_uuid)
+            )
+            logger.error(message)
+            raise PeerAssessmentWorkflowError(message)
+
+        item = open_items[0]
+
         feedback = assessment_dict.get('feedback', u'')
         peer_assessment = {
             "rubric": rubric.id,
             "scorer_id": scorer_id,
-            "submission_uuid": submission_uuid,
+            "submission_uuid": item.submission_uuid,
             "score_type": PEER_TYPE,
             "feedback": feedback,
         }
@@ -220,31 +234,26 @@ def create_assessment(
         # option to do validation. We already validated these options above.
         AssessmentPart.add_to_assessment(assessment, option_ids)

-        student_item = submission['student_item']
-        scorer_item = copy.deepcopy(student_item)
-        scorer_item['student_id'] = scorer_id
-
-        scorer_workflow = _get_latest_workflow(scorer_item)
-
-        if not scorer_workflow:
-            raise PeerAssessmentWorkflowError(
-                _("You must submit a response before you can complete a peer assessment.")
-            )
-
         # Close the active assessment
-        _close_active_assessment(scorer_workflow, submission_uuid, assessment, num_required_grades)
+        _close_active_assessment(scorer_workflow, item.submission_uuid, assessment, num_required_grades)

         assessment_dict = full_assessment_dict(assessment)
-        _log_assessment(assessment, student_item, scorer_item)
+        _log_assessment(assessment, scorer_workflow)

         return assessment_dict
     except DatabaseError:
         error_message = _(
-            u"An error occurred while creating assessment {} for submission: "
-            u"{} by: {}"
-            .format(assessment_dict, submission_uuid, scorer_id)
+            u"An error occurred while creating assessment {} by: {}"
+            .format(assessment_dict, scorer_id)
         )
         logger.exception(error_message)
         raise PeerAssessmentInternalError(error_message)
+    except PeerWorkflow.DoesNotExist:
+        message = _(
+            u"There is no Peer Workflow associated with the given "
+            u"submission UUID {}.".format(scorer_submission_uuid)
+        )
+        logger.error(message)
+        raise PeerAssessmentWorkflowError(message)


 def get_rubric_max_scores(submission_uuid):
@@ -1017,7 +1026,7 @@ def _num_peers_graded(workflow):
     return workflow.graded.filter(assessment__isnull=False).count()


-def _log_assessment(assessment, student_item, scorer_item):
+def _log_assessment(assessment, scorer_workflow):
     """
     Log the creation of a peer assessment.
@@ -1031,23 +1040,22 @@ def _log_assessment(assessment, student_item, scorer_item):
     """
     logger.info(
-        u"Created peer-assessment {assessment_id} for student {user} on "
-        u"submission {submission_uuid}, course {course_id}, item {item_id} "
+        u"Created peer-assessment {assessment_id} for submission "
+        u"{submission_uuid}, course {course_id}, item {item_id} "
         u"with rubric {rubric_content_hash}; scored by {scorer}"
         .format(
             assessment_id=assessment.id,
-            user=student_item['student_id'],
             submission_uuid=assessment.submission_uuid,
-            course_id=student_item['course_id'],
-            item_id=student_item['item_id'],
+            course_id=scorer_workflow.course_id,
+            item_id=scorer_workflow.item_id,
             rubric_content_hash=assessment.rubric.content_hash,
-            scorer=scorer_item['student_id'],
+            scorer=scorer_workflow.student_id,
         )
     )
     tags = [
-        u"course_id:{course_id}".format(course_id=student_item['course_id']),
-        u"item_id:{item_id}".format(item_id=student_item['item_id']),
+        u"course_id:{course_id}".format(course_id=scorer_workflow.course_id),
+        u"item_id:{item_id}".format(item_id=scorer_workflow.item_id),
         u"type:peer",
     ]
......
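A note on the error paths above (commentary, not part of the diff): `create_assessment` now raises `PeerAssessmentWorkflowError` both when no `PeerWorkflow` exists for the given scorer submission UUID and when the scorer has no open (ungraded) item in their workflow, while a `DatabaseError` still surfaces as `PeerAssessmentInternalError`. A hypothetical caller-side sketch; the import path, the `status` handling, and the in-scope variables are illustrative assumptions, not code from this repository:

# Hypothetical caller -- the import path is an assumption; adjust it to
# wherever peer_api and its exception classes actually live.
from openassessment.assessment import peer_api
from openassessment.assessment.peer_api import (
    PeerAssessmentWorkflowError,
    PeerAssessmentInternalError,
)

try:
    assessment = peer_api.create_assessment(
        scorer_submission_uuid,
        scorer_id,
        assessment_dict,
        rubric_dict,
        num_required_grades,
    )
    status = {"success": True}
except PeerAssessmentWorkflowError as ex:
    # No PeerWorkflow for scorer_submission_uuid, or the scorer has no open
    # (ungraded) assessment item in their workflow.
    status = {"success": False, "msg": ex.message}
except PeerAssessmentInternalError as ex:
    # A DatabaseError occurred while the assessment was being created.
    status = {"success": False, "msg": ex.message}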
@@ -85,7 +85,7 @@ class Command(BaseCommand):
             # The scorer needs to make a submission before assessing
             scorer_student_item = copy.copy(student_item)
             scorer_student_item['student_id'] = scorer_id
-            self._create_dummy_submission(scorer_student_item)
+            scorer_submission_uuid = self._create_dummy_submission(scorer_student_item)

             # Retrieve the submission we want to score
             # Note that we are NOT using the priority queue here, since we know
@@ -98,7 +98,7 @@ class Command(BaseCommand):
                 'feedback': " ".join(loremipsum.get_paragraphs(2))
             }
             peer_api.create_assessment(
-                submission_uuid,
+                scorer_submission_uuid,
                 scorer_id,
                 assessment,
                 rubric,
......
@@ -66,7 +66,7 @@ class PeerAssessmentMixin(object):
         try:
             assessment = peer_api.create_assessment(
-                data["submission_uuid"],
+                self.submission_uuid,
                 self.get_student_item_dict()["student_id"],
                 assessment_dict,
                 rubric_dict,
......
@@ -106,7 +106,7 @@ class SelfAssessmentMixin(object):
         try:
             assessment = self_api.create_assessment(
-                data['submission_uuid'],
+                self.submission_uuid,
                 self.get_student_item_dict()['student_id'],
                 data['options_selected'],
                 {"criteria": self.rubric_criteria}
......
@@ -198,7 +198,7 @@ class TestGrade(XBlockHandlerTestCase):
             # Create an assessment of the user's submission
             if not waiting_for_peer:
                 peer_api.create_assessment(
-                    submission['uuid'], scorer_name,
+                    scorer_sub['uuid'], scorer_name,
                     assessment, {'criteria': xblock.rubric_criteria},
                     xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
                 )
@@ -207,7 +207,7 @@ class TestGrade(XBlockHandlerTestCase):
         for asmnt in peer_assessments:
             new_submission = peer_api.get_submission_to_assess(student_item, len(peers))
             peer_api.create_assessment(
-                new_submission['uuid'], student_id, asmnt, {'criteria': xblock.rubric_criteria},
+                submission['uuid'], student_id, asmnt, {'criteria': xblock.rubric_criteria},
                 xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
             )
......
@@ -41,7 +41,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         sub = peer_api.get_submission_to_assess(hal_student_item, 1)
         assessment['submission_uuid'] = sub['uuid']
         peer_api.create_assessment(
-            sub['uuid'],
+            hal_submission['uuid'],
             hal_student_item['student_id'],
             assessment,
             {'criteria': xblock.rubric_criteria},
@@ -53,7 +53,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         sub = peer_api.get_submission_to_assess(sally_student_item, 1)
         assessment['submission_uuid'] = sub['uuid']
         peer_api.create_assessment(
-            sub['uuid'],
+            sally_submission['uuid'],
             sally_student_item['student_id'],
             assessment,
             {'criteria': xblock.rubric_criteria},
@@ -123,16 +123,16 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         # Create a submission for this problem from another user
         student_item = xblock.get_student_item_dict()
         student_item['student_id'] = 'Sally'
-        submission = xblock.create_submission(student_item, self.SUBMISSION)
+        xblock.create_submission(student_item, self.SUBMISSION)

         # Create a submission for the scorer (required before assessing another student)
         another_student = copy.deepcopy(student_item)
         another_student['student_id'] = "Bob"
-        xblock.create_submission(another_student, self.SUBMISSION)
+        another_submission = xblock.create_submission(another_student, self.SUBMISSION)

         # Submit an assessment, but mutate the options selected so they do NOT match the rubric
         assessment = copy.deepcopy(self.ASSESSMENT)
-        assessment['submission_uuid'] = submission['uuid']
+        assessment['submission_uuid'] = another_submission['uuid']
         assessment['options_selected']['invalid'] = 'not a part of the rubric!'
         resp = self.request(xblock, 'peer_assess', json.dumps(assessment), response_format='json')
@@ -183,7 +183,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         sally_sub = peer_api.get_submission_to_assess(hal_student_item, 1)
         assessment['submission_uuid'] = sally_sub['uuid']
         peer_api.create_assessment(
-            sally_sub['uuid'],
+            hal_submission['uuid'],
             hal_student_item['student_id'],
             assessment,
             {'criteria': xblock.rubric_criteria},
@@ -195,7 +195,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         hal_sub = peer_api.get_submission_to_assess(sally_student_item, 1)
         assessment['submission_uuid'] = hal_sub['uuid']
         peer_api.create_assessment(
-            hal_sub['uuid'],
+            sally_submission['uuid'],
             sally_student_item['student_id'],
             assessment,
             {'criteria': xblock.rubric_criteria},
@@ -217,16 +217,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         hal_response = "Hal".encode('utf-8') in peer_response.body
         sally_response = "Sally".encode('utf-8') in peer_response.body

-        # Validate Peer Rendering.
-        if hal_response:
-            peer_uuid = hal_sub['uuid']
-        elif sally_response:
-            peer_uuid = sally_sub['uuid']
-        else:
-            self.fail("Response was neither Hal or Sally's submission.")
-
         peer_api.create_assessment(
-            peer_uuid,
+            submission['uuid'],
             student_item['student_id'],
             assessment,
             {'criteria': xblock.rubric_criteria},
@@ -240,17 +232,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         self.assertIsNotNone(peer_response)
         self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)

-        # Validate Peer Rendering. Check that if Sally or Hal were selected
-        # the first time around, the other is selected this time.
-        if not hal_response and "Hal".encode('utf-8') in peer_response.body:
-            peer_uuid = hal_sub['uuid']
-        elif not sally_response and "Sally".encode('utf-8') in peer_response.body:
-            peer_uuid = sally_sub['uuid']
-        else:
-            self.fail("Response was neither Hal or Sally's submission.")
-
         peer_api.create_assessment(
-            peer_uuid,
+            submission['uuid'],
             student_item['student_id'],
             assessment,
             {'criteria': xblock.rubric_criteria},
......