Commit d1ec220f by Joe Blaylock

TIM-346: Server-and-client-side size validation

Inputs in any user-editable text box that are too large will be rejected
without an AJAX call.

Inputs delivered via Python to the API which are too large will be
rejected with an appropriate API error.

TODO:
 * @talbs .message__content is styled very badly everywhere
 * Sizes are way too big, currently defaulting to 1MB for API and 256KB
   for js
 * Sizes are configured in too many places, when they should come in via
   settings or something
parent 84ace36e
......@@ -237,6 +237,8 @@ class Assessment(models.Model):
objects that map to each :class:`Criterion` in the :class:`Rubric` we're
assessing against.
"""
MAXSIZE = 1024*1024
submission_uuid = models.CharField(max_length=128, db_index=True)
rubric = models.ForeignKey(Rubric)
......@@ -429,6 +431,8 @@ class AssessmentFeedback(models.Model):
as well as zero or more feedback options
("Please select the statements below that reflect what you think of this peer grading experience")
"""
MAXSIZE = 1024*1024
submission_uuid = models.CharField(max_length=128, unique=True, db_index=True)
assessments = models.ManyToManyField(Assessment, related_name='assessment_feedback', default=None)
feedback_text = models.TextField(max_length=10000, default="")
......
......@@ -999,6 +999,10 @@ def set_assessment_feedback(feedback_dict):
feedback_text = feedback_dict.get('feedback_text')
selected_options = feedback_dict.get('options', list())
if feedback_text and len(feedback_text) > AssessmentFeedback.MAXSIZE:
error_message = u"Assessment feedback too large."
raise PeerAssessmentRequestError(error_message)
try:
# Get or create the assessment model for this submission
# If we receive an integrity error, assume that someone else is trying to create
......
......@@ -160,6 +160,13 @@ class AssessmentPartSerializer(serializers.ModelSerializer):
class AssessmentSerializer(serializers.ModelSerializer):
"""Simplified serializer for :class:`Assessment` that's lighter on the DB."""
def validate_feedback(self, attrs, source):
    """Reject assessment feedback whose length exceeds Assessment.MAXSIZE."""
    feedback = attrs[source]
    if len(feedback) <= Assessment.MAXSIZE:
        return attrs
    raise serializers.ValidationError("Maximum feedback size exceeded.")
class Meta:
model = Assessment
fields = (
......
......@@ -100,6 +100,17 @@ ASSESSMENT_DICT_PASS = dict(
}
)
# Answers are against RUBRIC_DICT -- this is worth 12 points
# The feedback repeats a 4-character phrase MAXSIZE times, so its length
# (4 * Assessment.MAXSIZE) is guaranteed to exceed the size limit and the
# assessment should be rejected by size validation.
ASSESSMENT_DICT_PASS_HUGE = dict(
    feedback=u"这是中国" * Assessment.MAXSIZE,
    options_selected={
        "secret": "yes",
        u"ⓢⓐⓕⓔ": "yes",
        "giveup": "eager",
        "singing": "no",
    }
)
REQUIRED_GRADED = 5
REQUIRED_GRADED_BY = 3
......@@ -125,6 +136,18 @@ class TestPeerApi(TestCase):
self.assertEqual(assessment["points_possible"], 14)
self.assertEqual(assessment["feedback"], ASSESSMENT_DICT["feedback"])
def test_create_huge_assessment_fails(self):
    """An assessment with oversized feedback is rejected by the peer API."""
    self._create_student_and_submission("Tim", "Tim's answer")
    bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
    submission = peer_api.get_submission_to_assess(bob, 1)
    with self.assertRaises(peer_api.PeerAssessmentRequestError):
        peer_api.create_assessment(
            submission["uuid"],
            bob["student_id"],
            ASSESSMENT_DICT_PASS_HUGE,
            RUBRIC_DICT,
        )
@file_data('valid_assessments.json')
def test_get_assessments(self, assessment_dict):
self._create_student_and_submission("Tim", "Tim's answer")
......@@ -587,6 +610,17 @@ class TestPeerApi(TestCase):
}
)
@patch.object(AssessmentFeedback, 'save')
@raises(peer_api.PeerAssessmentRequestError)
def test_set_assessment_feedback_error_on_huge_save(self, mock_save):
    """Feedback text larger than AssessmentFeedback.MAXSIZE raises a request error.

    The mocked ``save()`` should never be reached: the size check in
    ``set_assessment_feedback`` must fire before any model write.
    """
    # NOTE: the injected mock argument was previously misnamed ``mock_filter``
    # (copy-pasted from the workflow test); it is the patched ``save``.
    tim_answer, tim = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
    peer_api.set_assessment_feedback(
        {
            'submission_uuid': tim_answer['uuid'],
            'feedback_text': 'Boo'*AssessmentFeedback.MAXSIZE,
        }
    )
@patch.object(PeerWorkflow.objects, 'filter')
@raises(peer_api.PeerAssessmentWorkflowError)
def test_failure_to_get_latest_workflow(self, mock_filter):
......
......@@ -3,6 +3,7 @@ import os.path
from ddt import ddt, file_data
from django.test import TestCase
from rest_framework.serializers import ValidationError
from openassessment.assessment.models import Criterion, CriterionOption, Rubric, AssessmentFeedback
from openassessment.assessment.serializers import (
......
......@@ -190,7 +190,13 @@
</li>
</ol>
</div>
<div class="submission__feeedback__actions">
<div class="submission__feedback__actions">
<div class="message message--inline message--error message--error-server">
<h3 class="message__title">We could not submit your feedback</h3>
<div class="message__content"></div>
</div>
<ul class="list list--actions submission__feeedback__actions">
<li class="list--actions__item">
<button type="submit" id="feedback__submit" class="action action--submit feedback__submit">Submit Feedback On Peer Evaluations</button>
......
......@@ -117,6 +117,7 @@
<div class="step__actions">
<div class="message message--inline message--error message--error-server">
<h3 class="message__title">We could not submit your assessment</h3>
<div class="message__content"></div>
</div>
<ul class="list list--actions">
......
......@@ -44,6 +44,7 @@
<div class="response__submission__actions">
<div class="message message--inline message--error message--error-server">
<h3 class="message__title">We could not save your progress</h3>
<div class="message__content"></div>
</div>
<ul class="list list--actions">
......@@ -65,6 +66,7 @@
<div class="step__actions">
<div class="message message--inline message--error message--error-server">
<h3 class="message__title">We could not submit your response</h3>
<div class="message__content"></div>
</div>
<ul class="list list--actions">
......
......@@ -191,6 +191,21 @@ describe("OpenAssessment.Server", function() {
expect(receivedErrorMsg).toEqual("Could not contact server.");
});
it("confirms that very long submissions fail with an error without ajax", function() {
    var receivedErrorCode = "";
    var receivedErrorMsg = "";
    // One byte over 1MB -- well past the client-side cap, so the promise
    // must be rejected without any AJAX request. Array.join builds the
    // string in one pass (the original loop both leaked a global `i` by
    // omitting `var` and concatenated one character at a time).
    var testString = new Array(1024 * 1024 + 2).join('x');
    server.submit(testString).fail(
        function(errorCode, errorMsg) {
            receivedErrorCode = errorCode;
            receivedErrorMsg = errorMsg;
        }
    );
    expect(receivedErrorCode).toEqual("submit");
    expect(receivedErrorMsg).toEqual("Text input too large.");
});
it("informs the caller of an server error when sending a submission", function() {
stubAjax(true, [false, "ENODATA", "Error occurred!"]);
......@@ -207,6 +222,16 @@ describe("OpenAssessment.Server", function() {
expect(receivedErrorMsg).toEqual("Error occurred!");
});
it("confirms that very long saves fail with an error without ajax", function() {
    var receivedErrorMsg = "";
    // 1MB + 1 byte exceeds the client-side limit; the failure must be
    // reported synchronously with no AJAX call. Array.join avoids the
    // original's quadratic concatenation and its implicit global `i`.
    var testString = new Array(1024 * 1024 + 2).join('x');
    server.save(testString).fail(
        function(errorMsg) { receivedErrorMsg = errorMsg; }
    );
    expect(receivedErrorMsg).toEqual("Text input too large.");
});
it("informs the caller of an AJAX error when sending a submission", function() {
stubAjax(false, null);
var receivedMsg = null;
......@@ -265,6 +290,19 @@ describe("OpenAssessment.Server", function() {
expect(receivedMsg).toEqual("Test error");
});
it("confirms that very long peer assessments fail with an error without ajax", function() {
    var options = {clarity: "Very clear", precision: "Somewhat precise"};
    var receivedErrorMsg = "";
    // Oversized feedback (1MB + 1 byte) must be rejected client-side,
    // before any AJAX request. Array.join replaces the original loop,
    // which leaked a global `i` and appended one character at a time.
    var testString = new Array(1024 * 1024 + 2).join('x');
    server.peerAssess("abc1234", options, testString).fail(
        function(errorMsg) {
            receivedErrorMsg = errorMsg;
        }
    );
    expect(receivedErrorMsg).toEqual("Text input too large.");
});
it("informs the caller of a server error when sending a peer assessment", function() {
stubAjax(true, {success:false, msg:'Test error!'});
......@@ -312,6 +350,19 @@ describe("OpenAssessment.Server", function() {
expect(receivedMsg).toEqual("Test error");
});
it("confirms that very long assessment feedback fails with an error without ajax", function() {
    var options = ["Option 1", "Option 2"];
    var receivedErrorMsg = "";
    // Feedback text one byte over 1MB must fail locally, with no AJAX
    // request issued. Array.join builds the fixture in one allocation
    // (the original loop used an undeclared, hence global, index `i`).
    var testString = new Array(1024 * 1024 + 2).join('x');
    server.submitFeedbackOnAssessment(testString, options).fail(
        function(errorMsg) {
            receivedErrorMsg = errorMsg;
        }
    );
    expect(receivedErrorMsg).toEqual("Text input too large.");
});
it("informs the caller of an AJAX error when sending feedback on submission", function() {
stubAjax(false, null);
......
......@@ -342,11 +342,10 @@ OpenAssessment.BaseUI.prototype = {
function(element, index) { return $(element).val(); }
);
ui.server.submitFeedbackOnAssessment(text, options).done(function() {
// When we have successfully sent the submission, textarea no longer editable
// TODO
console.log("Feedback to the assessments submitted, thanks!");
// When we have successfully sent the submission, textarea no longer editable
// console.log("Feedback to the assessments submitted, thanks!");
}).fail(function(errMsg) {
// TODO: display to the user
ui.toggleActionError('feedback_assess', errMsg);
});
},
......@@ -447,8 +446,9 @@ OpenAssessment.BaseUI.prototype = {
var container = null;
if (type == 'save') { container = '.response__submission__actions'; }
else if (type == 'submit') { container = '.step__actions'; }
else if (type == 'peer') { container = '.peer-assessment__actions'; }
else if (type == 'peer') { container = '.step__actions'; }
else if (type == 'self') { container = '.self-assessment__actions'; }
else if (type == 'feedback_assess') { container = '.submission__feedback__actions'; }
// If we don't have anywhere to put the message, just log it to the console
if (container === null) {
......@@ -458,7 +458,7 @@ OpenAssessment.BaseUI.prototype = {
else {
// Insert the error message
var msgHtml = (msg === null) ? "" : msg;
$(container + " .message__content").html('<p>' + msg + '</p>');
$(container + " .message__content").html('<p>' + msgHtml + '</p>');
// Toggle the error class
$(container).toggleClass('has--error', msg !== null);
......
......@@ -112,6 +112,11 @@ OpenAssessment.Server.prototype = {
**/
submit: function(submission) {
var url = this.url('submit');
if (submission.length > 1024*256) {
return $.Deferred(function(defer) {
defer.rejectWith(this, ["submit", "Text input too large."]);
}).promise();
}
return $.Deferred(function(defer) {
$.ajax({
type: "POST",
......@@ -147,6 +152,11 @@ OpenAssessment.Server.prototype = {
**/
save: function(submission) {
var url = this.url('save_submission');
if (submission.length > 1024*256) {
return $.Deferred(function(defer) {
defer.rejectWith(this, ["Text input too large."]);
}).promise();
}
return $.Deferred(function(defer) {
$.ajax({
type: "POST",
......@@ -182,6 +192,11 @@ OpenAssessment.Server.prototype = {
*/
submitFeedbackOnAssessment: function(text, options) {
var url = this.url('submit_feedback');
if (text.length > 1024*256) {
return $.Deferred(function(defer) {
defer.rejectWith(this, ["Text input too large."]);
}).promise();
}
var payload = JSON.stringify({
'feedback_text': text,
'feedback_options': options
......@@ -221,6 +236,11 @@ OpenAssessment.Server.prototype = {
**/
peerAssess: function(submissionId, optionsSelected, feedback) {
var url = this.url('peer_assess');
if (feedback.length > 1024*256) {
return $.Deferred(function(defer) {
defer.rejectWith(this, ["Text input too large."]);
}).promise();
}
var payload = JSON.stringify({
submission_uuid: submissionId,
options_selected: optionsSelected,
......
......@@ -14,6 +14,7 @@ from submissions.serializers import (
)
from submissions.models import Submission, StudentItem, Score, ScoreSummary
logger = logging.getLogger(__name__)
......
......@@ -70,6 +70,8 @@ class Submission(models.Model):
because it makes caching trivial.
"""
MAXSIZE = 1024*1024 # Used by validators to cap maximum answer size
uuid = UUIDField(version=1, db_index=True)
student_item = models.ForeignKey(StudentItem)
......
......@@ -65,6 +65,13 @@ class SubmissionSerializer(serializers.ModelSerializer):
answer = JsonField(source='raw_answer')
def validate_answer(self, attrs, source):
    """Reject answers whose length exceeds Submission.MAXSIZE."""
    answer = attrs[source]
    if len(answer) <= Submission.MAXSIZE:
        return attrs
    raise serializers.ValidationError("Maximum answer size exceeded.")
class Meta:
model = Submission
fields = (
......
......@@ -20,7 +20,7 @@ STUDENT_ITEM = dict(
)
SECOND_STUDENT_ITEM = dict(
student_id="Bob",
student_id="Alice",
course_id="Demo_Course",
item_id="item_one",
item_type="Peer_Submission",
......@@ -28,6 +28,7 @@ SECOND_STUDENT_ITEM = dict(
ANSWER_ONE = u"this is my answer!"
ANSWER_TWO = u"this is my other answer!"
# One character past the size limit, so validation must reject it.
# (u'c' * n is already unicode; the previous u'' + 'c' * n coercion
# trick was redundant.)
ANSWER_THREE = u'c' * (Submission.MAXSIZE + 1)
@ddt
......@@ -41,6 +42,9 @@ class TestSubmissionsApi(TestCase):
student_item = self._get_student_item(STUDENT_ITEM)
self._assert_submission(submission, ANSWER_ONE, student_item.pk, 1)
def test_create_huge_submission_fails(self):
    """Submitting an oversized answer must raise SubmissionRequestError."""
    oversized_answer = ANSWER_THREE  # length is Submission.MAXSIZE + 1
    with self.assertRaises(api.SubmissionRequestError):
        api.create_submission(STUDENT_ITEM, oversized_answer)
def test_get_submission_and_student(self):
submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment