Commit 855f4d55 by stv

Verify submission_uuid when creating assessments

This corrects an issue where assessments were sometimes associated with
the wrong response.

The bug occurred because the POST request to the `peer_assess` handler
contained only the feedback text/options and did _not_ identify which
response was being assessed. If the current workflow item is updated on
the server _after_ the page first loads but _before_ the assessment is
submitted, the assessment is incorrectly associated with the following
response.
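
In essence, the failure mode and the fix look like this (a minimal
sketch; `get_current_peer_submission` and `create_assessment` are
hypothetical stand-ins for the real workflow calls, not the actual
handler code):

```python
def peer_assess_before_fix(data):
    # The target submission is resolved at POST time, so if the workflow
    # item advanced after the page loaded, the feedback lands on the
    # *next* peer's response.
    submission = get_current_peer_submission()
    return create_assessment(submission["uuid"], data)

def peer_assess_after_fix(data):
    # The client now echoes back the UUID it actually displayed; reject
    # the assessment when it no longer matches the server's current item.
    submission = get_current_peer_submission()
    if submission["uuid"] != data.get("submission_uuid"):
        return {"success": False, "msg": "This feedback has already been submitted."}
    return create_assessment(submission["uuid"], data)
```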

Steps to reproduce (sketched in code after the list):
- Submit a response (User A).
- Wait for peers to submit responses (User B and User C).
- Load page with first peer response to be assessed (do not submit).
- Open the same page again in a new tab.
- In the 1st tab, assess and submit an assessment for User B.
- In the 2nd tab (still showing User B's response), assess and submit again.
- Notice that your assessment for User B's response is also applied to
  User C's response.

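Roughly, the two tabs amount to posting the same stale `submission_uuid`
twice; with this change the second POST is rejected. A sketch of that
flow, where `post_json` and `user_b_submission_uuid` are hypothetical
placeholders standing in for the test harness's `self.request`:

```python
import json

# Both tabs captured User B's submission UUID at page load.
payload = {
    "options_selected": {"Form": "Fair"},
    "criterion_feedback": {},
    "overall_feedback": "Nice work!",
    "submission_uuid": user_b_submission_uuid,  # now sent explicitly
}

first = post_json("peer_assess", json.dumps(payload))   # tab 1: accepted
second = post_json("peer_assess", json.dumps(payload))  # tab 2: stale UUID
assert first["success"] is True
assert second["success"] is False  # rejected instead of hitting User C's response
```
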
This changeset does _not_ restore data retroactively.

Fix TNL-680
parent 9e0218e9
......@@ -10,3 +10,4 @@ Ned Batchelder <ned@nedbatchelder.com>
David Baumgold <david@davidbaumgold.com>
Grady Ward <gward@brandeis.edu>
Andrew Dekker <a.dekker@uq.edu.au>
Steven Burch <stv@stanford.edu>
......
{% load tz %}
{% load i18n %}
{% block list_item %}
<li id="openassessment__peer-assessment" class="openassessment__steps__step step--peer-assessment ui-toggle-visibility {% if allow_latex %} allow--latex {%endif%}">
<li
    id="openassessment__peer-assessment"
    class="openassessment__steps__step step--peer-assessment ui-toggle-visibility {% if allow_latex %} allow--latex {%endif%}"
    data-submission-uuid="{{ peer_submission.uuid }}"
>
{% endblock %}
{% spaceless %}
......
......@@ -2,7 +2,11 @@
{% load i18n %}
{% block list_item %}
<li id="openassessment__peer-assessment" class="openassessment__steps__step step--peer-assessment ui-toggle-visibility is--complete">
<li
    id="openassessment__peer-assessment"
    class="openassessment__steps__step step--peer-assessment ui-toggle-visibility is--complete"
    data-submission-uuid="{{ peer_submission.uuid }}"
>
{% endblock %}
{% block title %}
......
......@@ -61,6 +61,20 @@ class PeerAssessmentMixin(object):
        if self.submission_uuid is None:
            return {'success': False, 'msg': self._('You must submit a response before you can peer-assess.')}

        uuid_server, uuid_client = self._get_server_and_client_submission_uuids(data)
        if uuid_server != uuid_client:
            logger.warning(
                'Irrelevant assessment submission: '
                'expected "{uuid_server}", got "{uuid_client}"'.format(
                    uuid_server=uuid_server,
                    uuid_client=uuid_client,
                )
            )
            return {
                'success': False,
                'msg': self._('This feedback has already been submitted.'),
            }

        assessment_ui_model = self.get_assessment_module('peer-assessment')
        if assessment_ui_model:
            try:
......@@ -273,3 +287,24 @@ class PeerAssessmentMixin(object):
            logger.exception(err)
        return peer_submission

    def _get_server_and_client_submission_uuids(self, data=None):
        """
        Retrieve the server and client submission UUIDs.

        Args:
            data (dict): A dictionary containing new peer assessment data.
                This dict should have the following attributes:
                - `submission_uuid` (string): Unique identifier for the submission being assessed
                - `options_selected` (dict): Map criterion names to option values
                - `feedback` (unicode): Written feedback for the submission

        Returns:
            tuple: (uuid_server, uuid_client)
        """
        data = data or {}
        student_item = self.get_student_item_dict()
        assessment = self.get_assessment_module('peer-assessment')
        submission = self.get_peer_submission(student_item, assessment) or {}
        uuid_server = submission.get('uuid', None)
        uuid_client = data.get('submission_uuid', None)
        return uuid_server, uuid_client
......@@ -84,7 +84,7 @@ describe("OpenAssessment.PeerView", function() {
            // Expect that the peer assessment was sent to the server
            // with the options and feedback we selected
            expect(server.peerAssess).toHaveBeenCalledWith(
                optionsSelected, criterionFeedback, overallFeedback
                optionsSelected, criterionFeedback, overallFeedback, ''
            );
        });
......
......@@ -189,6 +189,8 @@ OpenAssessment.PeerView.prototype = {
    **/
    peerAssessRequest: function(successFunction) {
        var view = this;
        var uuid = $('#openassessment__peer-assessment').data('submission-uuid');

        view.baseView.toggleActionError('peer', null);
        view.peerSubmitEnabled(false);
......@@ -196,7 +198,8 @@ OpenAssessment.PeerView.prototype = {
        this.server.peerAssess(
            this.rubric.optionsSelected(),
            this.rubric.criterionFeedback(),
            this.rubric.overallFeedback()
            this.rubric.overallFeedback(),
            uuid
        ).done(
            successFunction
        ).fail(function(errMsg) {
......
......@@ -258,12 +258,13 @@ if (typeof OpenAssessment.Server == "undefined" || !OpenAssessment.Server) {
        function(errorMsg) { console.log(errorMsg); }
    );
    **/
    peerAssess: function(optionsSelected, criterionFeedback, overallFeedback) {
    peerAssess: function(optionsSelected, criterionFeedback, overallFeedback, uuid) {
        var url = this.url('peer_assess');
        var payload = JSON.stringify({
            options_selected: optionsSelected,
            criterion_feedback: criterionFeedback,
            overall_feedback: overallFeedback
            overall_feedback: overallFeedback,
            submission_uuid: uuid
        });
        return $.Deferred(function(defer) {
            $.ajax({ type: "POST", url: url, data: payload }).done(
......
......@@ -557,7 +557,7 @@ class TestPeerAssessHandler(XBlockHandlerTestCase):
        'overall_feedback': u'єאςєɭɭєภՇ ฬ๏гк!',
    }

    ASSESSMENT_WITH_SUBMISSION_UUID = {
    ASSESSMENT_WITH_INVALID_SUBMISSION_UUID = {
        'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
        'criterion_feedback': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ı ʇɥonƃɥʇ ʇɥıs ʍɐs ʌǝɹʎ ɔouɔısǝ.'},
        'overall_feedback': u'єאςєɭɭєภՇ ฬ๏гк!',
......@@ -641,16 +641,15 @@ class TestPeerAssessHandler(XBlockHandlerTestCase):
    @scenario('data/peer_assessment_scenario.xml', user_id='Bob')
    def test_submission_uuid_input_regression(self, xblock):
        # Submit a peer assessment
        submission_uuid, assessment = self._submit_peer_assessment(
            xblock, 'Sally', 'Bob', self.ASSESSMENT_WITH_SUBMISSION_UUID
        assessment = self._submit_peer_assessment(
            xblock,
            'Sally',
            'Bob',
            self.ASSESSMENT_WITH_INVALID_SUBMISSION_UUID,
            expect_failure=True,
        )

        # Retrieve the assessment and check that it matches what we sent
        self.assertEqual(assessment['submission_uuid'], submission_uuid)
        self.assertEqual(assessment['points_earned'], 5)
        self.assertEqual(assessment['points_possible'], 6)
        self.assertEqual(assessment['scorer_id'], 'Bob')
        self.assertEqual(assessment['score_type'], 'PE')
        self.assertIsNone(assessment)

    @scenario('data/peer_assessment_scenario.xml', user_id='Bob')
    def test_peer_assess_rubric_option_mismatch(self, xblock):
......@@ -712,6 +711,7 @@ class TestPeerAssessHandler(XBlockHandlerTestCase):
        # Submit an assessment and expect a successful response
        assessment = copy.deepcopy(assessment)
        assessment['submission_uuid'] = assessment.get('submission_uuid', submission.get('uuid', None))

        resp = self.request(xblock, 'peer_assess', json.dumps(assessment), response_format='json')

        if expect_failure:
......