Commit 418da06d by Stephen Sanchez

Do not use the submission_uuid from a response for peer

The Workflow API was still using the submission_uuid from within a peer response. Removed this usage and updated the tests accordingly.
parent d504cf67
......@@ -51,9 +51,6 @@ class PeerAssessmentMixin(object):
if 'options_selected' not in data:
return {'success': False, 'msg': _('Must provide options selected in the assessment')}
if 'submission_uuid' not in data:
return {'success': False, 'msg': _('Must provide submission uuid for the assessment')}
assessment_ui_model = self.get_assessment_module('peer-assessment')
if assessment_ui_model:
rubric_dict = {
......@@ -106,7 +103,8 @@ class PeerAssessmentMixin(object):
# Update both the workflow that the submission we're assessing
# belongs to, as well as our own (e.g. have we evaluated enough?)
try:
self.update_workflow_status(submission_uuid=data["submission_uuid"])
if assessment:
self.update_workflow_status(submission_uuid=assessment['submission_uuid'])
self.update_workflow_status()
except workflow_api.AssessmentWorkflowError:
msg = _('Could not update workflow status.')
......
......@@ -16,7 +16,6 @@ from .base import XBlockHandlerTestCase, scenario
class TestPeerAssessment(XBlockHandlerTestCase):
ASSESSMENT = {
'submission_uuid': None,
'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
'feedback': u'єאςєɭɭєภՇ ฬ๏гк!',
}
......@@ -39,7 +38,6 @@ class TestPeerAssessment(XBlockHandlerTestCase):
# Now Hal will assess Sally.
assessment = copy.deepcopy(self.ASSESSMENT)
sub = peer_api.get_submission_to_assess(hal_student_item, 1)
assessment['submission_uuid'] = sub['uuid']
peer_api.create_assessment(
hal_submission['uuid'],
hal_student_item['student_id'],
......@@ -51,7 +49,6 @@ class TestPeerAssessment(XBlockHandlerTestCase):
# Now Sally will assess Hal.
assessment = copy.deepcopy(self.ASSESSMENT)
sub = peer_api.get_submission_to_assess(sally_student_item, 1)
assessment['submission_uuid'] = sub['uuid']
peer_api.create_assessment(
sally_submission['uuid'],
sally_student_item['student_id'],
......@@ -95,14 +92,14 @@ class TestPeerAssessment(XBlockHandlerTestCase):
# Submit an assessment and expect a successful response
assessment = copy.deepcopy(self.ASSESSMENT)
assessment['submission_uuid'] = submission['uuid']
resp = self.request(xblock, 'peer_assess', json.dumps(assessment), response_format='json')
self.assertTrue(resp['success'])
# Retrieve the assessment and check that it matches what we sent
actual = peer_api.get_assessments(submission['uuid'], scored_only=False)
self.assertEqual(len(actual), 1)
self.assertEqual(actual[0]['submission_uuid'], assessment['submission_uuid'])
self.assertEqual(actual[0]['submission_uuid'], submission['uuid'])
self.assertEqual(actual[0]['points_earned'], 5)
self.assertEqual(actual[0]['points_possible'], 6)
self.assertEqual(actual[0]['scorer_id'], 'Bob')
......@@ -118,6 +115,41 @@ class TestPeerAssessment(XBlockHandlerTestCase):
self.assertEqual(actual[0]['feedback'], assessment['feedback'])
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_submission_uuid_input_regression(self, xblock):
    """Regression check: a client-supplied submission_uuid is ignored.

    The peer_assess handler must determine which submission is being
    assessed from the server-side workflow, never from the request body.
    """
    # Another learner ("Sally") submits an answer to this problem.
    sally_item = xblock.get_student_item_dict()
    sally_item['student_id'] = 'Sally'
    sally_submission = xblock.create_submission(sally_item, self.SUBMISSION)

    # The scorer ("Bob") must submit his own answer before he is
    # allowed to assess another student.
    scorer_item = copy.deepcopy(sally_item)
    scorer_item['student_id'] = "Bob"
    xblock.create_submission(scorer_item, self.SUBMISSION)
    peer_api.get_submission_to_assess(scorer_item, 3)

    # Send an assessment whose submission_uuid is deliberately bogus.
    # This does not exercise any current code path, but guards against
    # regressions where an externally supplied submission_uuid is trusted.
    payload = copy.deepcopy(self.ASSESSMENT)
    payload['submission_uuid'] = "Complete and Random Junk."
    resp = self.request(xblock, 'peer_assess', json.dumps(payload), response_format='json')
    self.assertTrue(resp['success'])

    # The stored assessment must not carry the junk uuid from the request.
    stored = peer_api.get_assessments(sally_submission['uuid'], scored_only=False)
    self.assertEqual(len(stored), 1)
    self.assertNotEqual(stored[0]['submission_uuid'], payload['submission_uuid'])
    self.assertEqual(stored[0]['points_earned'], 5)
    self.assertEqual(stored[0]['points_possible'], 6)
    self.assertEqual(stored[0]['scorer_id'], 'Bob')
    self.assertEqual(stored[0]['score_type'], 'PE')
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_peer_assess_rubric_option_mismatch(self, xblock):
# Create a submission for this problem from another user
......@@ -132,7 +164,6 @@ class TestPeerAssessment(XBlockHandlerTestCase):
# Submit an assessment, but mutate the options selected so they do NOT match the rubric
assessment = copy.deepcopy(self.ASSESSMENT)
assessment['submission_uuid'] = another_submission['uuid']
assessment['options_selected']['invalid'] = 'not a part of the rubric!'
resp = self.request(xblock, 'peer_assess', json.dumps(assessment), response_format='json')
......@@ -141,7 +172,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_missing_keys_in_request(self, xblock):
for missing in ['feedback', 'submission_uuid', 'options_selected']:
for missing in ['feedback', 'options_selected']:
assessment = copy.deepcopy(self.ASSESSMENT)
del assessment[missing]
resp = self.request(xblock, 'peer_assess', json.dumps(assessment), response_format='json')
......@@ -181,7 +212,6 @@ class TestPeerAssessment(XBlockHandlerTestCase):
# Now Hal will assess Sally.
assessment = copy.deepcopy(self.ASSESSMENT)
sally_sub = peer_api.get_submission_to_assess(hal_student_item, 1)
assessment['submission_uuid'] = sally_sub['uuid']
peer_api.create_assessment(
hal_submission['uuid'],
hal_student_item['student_id'],
......@@ -193,7 +223,6 @@ class TestPeerAssessment(XBlockHandlerTestCase):
# Now Sally will assess Hal.
assessment = copy.deepcopy(self.ASSESSMENT)
hal_sub = peer_api.get_submission_to_assess(sally_student_item, 1)
assessment['submission_uuid'] = hal_sub['uuid']
peer_api.create_assessment(
sally_submission['uuid'],
sally_student_item['student_id'],
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment