Commit c3c120d2 by Joe Blaylock

Merge pull request #211 from edx/jrbl/tim-346-check-input-sizes

TIM-346: Server-and-client-side size validation
parents e59ec0b5 4023d756
......@@ -237,6 +237,8 @@ class Assessment(models.Model):
objects that map to each :class:`Criterion` in the :class:`Rubric` we're
assessing against.
"""
MAXSIZE = 1024 * 100 # 100KB
submission_uuid = models.CharField(max_length=128, db_index=True)
rubric = models.ForeignKey(Rubric)
......@@ -429,6 +431,8 @@ class AssessmentFeedback(models.Model):
as well as zero or more feedback options
("Please select the statements below that reflect what you think of this peer grading experience")
"""
MAXSIZE = 1024*100 # 100KB
submission_uuid = models.CharField(max_length=128, unique=True, db_index=True)
assessments = models.ManyToManyField(Assessment, related_name='assessment_feedback', default=None)
feedback_text = models.TextField(max_length=10000, default="")
......
......@@ -1000,6 +1000,10 @@ def set_assessment_feedback(feedback_dict):
feedback_text = feedback_dict.get('feedback_text')
selected_options = feedback_dict.get('options', list())
if feedback_text and len(feedback_text) > AssessmentFeedback.MAXSIZE:
error_message = u"Assessment feedback too large."
raise PeerAssessmentRequestError(error_message)
try:
# Get or create the assessment model for this submission
# If we receive an integrity error, assume that someone else is trying to create
......
......@@ -160,6 +160,13 @@ class AssessmentPartSerializer(serializers.ModelSerializer):
class AssessmentSerializer(serializers.ModelSerializer):
"""Simplified serializer for :class:`Assessment` that's lighter on the DB."""
def validate_feedback(self, attrs, source):
    """Reject serialized feedback whose length exceeds ``Assessment.MAXSIZE``."""
    feedback_text = attrs[source]
    if len(feedback_text) <= Assessment.MAXSIZE:
        return attrs
    raise serializers.ValidationError("Maximum feedback size exceeded.")
class Meta:
model = Assessment
fields = (
......
......@@ -100,6 +100,17 @@ ASSESSMENT_DICT_PASS = dict(
}
)
# Answers are against RUBRIC_DICT -- this is worth 12 points
ASSESSMENT_DICT_PASS_HUGE = dict(
feedback=u"这是中国" * Assessment.MAXSIZE,
options_selected={
"secret": "yes",
u"ⓢⓐⓕⓔ": "yes",
"giveup": "eager",
"singing": "no",
}
)
REQUIRED_GRADED = 5
REQUIRED_GRADED_BY = 3
......@@ -125,6 +136,18 @@ class TestPeerApi(TestCase):
self.assertEqual(assessment["points_possible"], 14)
self.assertEqual(assessment["feedback"], ASSESSMENT_DICT["feedback"])
def test_create_huge_assessment_fails(self):
    """An assessment with oversized feedback raises PeerAssessmentRequestError."""
    # Two student submissions so Bob has a peer submission to assess.
    self._create_student_and_submission("Tim", "Tim's answer")
    bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
    sub = peer_api.get_submission_to_assess(bob, 1)
    # ASSESSMENT_DICT_PASS_HUGE carries feedback repeated MAXSIZE times,
    # far over the Assessment.MAXSIZE cap, so the API must refuse it.
    with self.assertRaises(peer_api.PeerAssessmentRequestError):
        peer_api.create_assessment(
            sub["uuid"],
            bob["student_id"],
            ASSESSMENT_DICT_PASS_HUGE,
            RUBRIC_DICT,
        )
@file_data('valid_assessments.json')
def test_get_assessments(self, assessment_dict):
self._create_student_and_submission("Tim", "Tim's answer")
......@@ -587,6 +610,17 @@ class TestPeerApi(TestCase):
}
)
@patch.object(AssessmentFeedback, 'save')
@raises(peer_api.PeerAssessmentRequestError)
def test_set_assessment_feedback_error_on_huge_save(self, mock_save):
    """Oversized feedback_text makes set_assessment_feedback raise a request error.

    ``AssessmentFeedback.save`` is patched out (``mock_save`` -- the original
    parameter was misleadingly named ``mock_filter``) so nothing can be
    persisted; the size check should fire before any save is attempted.
    """
    tim_answer, _ = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
    peer_api.set_assessment_feedback(
        {
            'submission_uuid': tim_answer['uuid'],
            # 3 chars * MAXSIZE is several times the AssessmentFeedback.MAXSIZE cap.
            'feedback_text': 'Boo' * AssessmentFeedback.MAXSIZE,
        }
    )
@patch.object(PeerWorkflow.objects, 'filter')
@raises(peer_api.PeerAssessmentWorkflowError)
def test_failure_to_get_latest_workflow(self, mock_filter):
......
......@@ -190,7 +190,13 @@
</li>
</ol>
</div>
<div class="submission__feeedback__actions">
<div class="submission__feedback__actions">
<div class="message message--inline message--error message--error-server">
<h3 class="message__title">We could not submit your feedback</h3>
<div class="message__content"></div>
</div>
<ul class="list list--actions submission__feeedback__actions">
<li class="list--actions__item">
<button type="submit" id="feedback__submit" class="action action--submit feedback__submit">Submit Feedback On Peer Evaluations</button>
......
......@@ -117,6 +117,7 @@
<div class="step__actions">
<div class="message message--inline message--error message--error-server">
<h3 class="message__title">We could not submit your assessment</h3>
<div class="message__content"></div>
</div>
<ul class="list list--actions">
......
......@@ -44,6 +44,7 @@
<div class="response__submission__actions">
<div class="message message--inline message--error message--error-server">
<h3 class="message__title">We could not save your progress</h3>
<div class="message__content"></div>
</div>
<ul class="list list--actions">
......@@ -65,6 +66,7 @@
<div class="step__actions">
<div class="message message--inline message--error message--error-server">
<h3 class="message__title">We could not submit your response</h3>
<div class="message__content"></div>
</div>
<ul class="list list--actions">
......
......@@ -89,6 +89,7 @@
<div class="step__actions">
<div class="message message--inline message--error message--error-server">
<h3 class="message__title">We could not submit your assessment</h3>
<div class="message__content"></div>
</div>
<ul class="list list--actions">
......
This source diff could not be displayed because it is too large. You can view the blob instead.
......@@ -30,6 +30,19 @@ describe("OpenAssessment.Server", function() {
);
};
// Build a string one character longer than the client-side cap, so any
// size-validation path is guaranteed to reject it.
var getHugeTestString = function() {
    var testStringSize = server.maxInputSize + 1;
    // Fix: the original loop used an undeclared index `i`, leaking a global.
    // Array.join produces the same n-character 'x' string without a loop.
    var testString = new Array(testStringSize + 1).join('x');
    return testString;
};
var getHugeStringError = function() {
// return a string that can be used with .toContain()
// "Response text is too large. Please reduce the size of your response and try to submit again.";
return "text is too large"
}
beforeEach(function() {
// Create the server
// Since the runtime is a stub implementation that ignores the element passed to it,
......@@ -191,6 +204,20 @@ describe("OpenAssessment.Server", function() {
expect(receivedErrorMsg).toEqual("Could not contact server.");
});
it("confirms that very long submissions fail with an error without ajax", function() {
    // No AJAX stub is installed: the client-side size check in
    // server.submit() must reject the oversized payload by itself.
    var receivedErrorCode = "";
    var receivedErrorMsg = "";
    var testString = getHugeTestString();
    server.submit(testString).fail(
        function(errorCode, errorMsg) {
            receivedErrorCode = errorCode;
            receivedErrorMsg = errorMsg;
        }
    );
    // submit() rejects with ("submit", "Response text is too large...").
    expect(receivedErrorCode).toEqual("submit");
    expect(receivedErrorMsg).toContain(getHugeStringError());
});
it("informs the caller of an server error when sending a submission", function() {
stubAjax(true, [false, "ENODATA", "Error occurred!"]);
......@@ -207,6 +234,15 @@ describe("OpenAssessment.Server", function() {
expect(receivedErrorMsg).toEqual("Error occurred!");
});
it("confirms that very long saves fail with an error without ajax", function() {
    // server.save() rejects oversized input client-side, before any AJAX call.
    var receivedErrorMsg = "";
    var testString = getHugeTestString();
    server.save(testString).fail(
        function(errorMsg) { receivedErrorMsg = errorMsg; }
    );
    expect(receivedErrorMsg).toContain(getHugeStringError());
});
it("informs the caller of an AJAX error when sending a submission", function() {
stubAjax(false, null);
var receivedMsg = null;
......@@ -265,6 +301,18 @@ describe("OpenAssessment.Server", function() {
expect(receivedMsg).toEqual("Test error");
});
it("confirms that very long peer assessments fail with an error without ajax", function() {
    // Oversized free-form feedback is rejected by peerAssess() client-side;
    // the rubric option selections themselves are small and valid.
    var options = {clarity: "Very clear", precision: "Somewhat precise"};
    var receivedErrorMsg = "";
    var testString = getHugeTestString();
    server.peerAssess("abc1234", options, testString).fail(
        function(errorMsg) {
            receivedErrorMsg = errorMsg;
        }
    );
    expect(receivedErrorMsg).toContain(getHugeStringError());
});
it("informs the caller of a server error when sending a peer assessment", function() {
stubAjax(true, {success:false, msg:'Test error!'});
......@@ -312,6 +360,18 @@ describe("OpenAssessment.Server", function() {
expect(receivedMsg).toEqual("Test error");
});
it("confirms that very long assessment feedback fails with an error without ajax", function() {
    // submitFeedbackOnAssessment() rejects oversized feedback text
    // client-side; no AJAX stub is needed.
    var options = ["Option 1", "Option 2"];
    var receivedErrorMsg = "";
    var testString = getHugeTestString();
    server.submitFeedbackOnAssessment(testString, options).fail(
        function(errorMsg) {
            receivedErrorMsg = errorMsg;
        }
    );
    expect(receivedErrorMsg).toContain(getHugeStringError());
});
it("informs the caller of an AJAX error when sending feedback on submission", function() {
stubAjax(false, null);
......
......@@ -342,11 +342,10 @@ OpenAssessment.BaseUI.prototype = {
function(element, index) { return $(element).val(); }
);
ui.server.submitFeedbackOnAssessment(text, options).done(function() {
// When we have successfully sent the submission, textarea no longer editable
// TODO
console.log("Feedback to the assessments submitted, thanks!");
// When we have successfully sent the submission, textarea no longer editable
// console.log("Feedback to the assessments submitted, thanks!");
}).fail(function(errMsg) {
// TODO: display to the user
ui.toggleActionError('feedback_assess', errMsg);
});
},
......@@ -447,8 +446,9 @@ OpenAssessment.BaseUI.prototype = {
var container = null;
if (type == 'save') { container = '.response__submission__actions'; }
else if (type == 'submit') { container = '.step__actions'; }
else if (type == 'peer') { container = '.peer-assessment__actions'; }
else if (type == 'peer') { container = '.step__actions'; }
else if (type == 'self') { container = '.self-assessment__actions'; }
else if (type == 'feedback_assess') { container = '.submission__feedback__actions'; }
// If we don't have anywhere to put the message, just log it to the console
if (container === null) {
......@@ -458,7 +458,7 @@ OpenAssessment.BaseUI.prototype = {
else {
// Insert the error message
var msgHtml = (msg === null) ? "" : msg;
$(container + " .message__content").html('<p>' + msg + '</p>');
$(container + " .message__content").html('<p>' + msgHtml + '</p>');
// Toggle the error class
$(container).toggleClass('has--error', msg !== null);
......
......@@ -37,6 +37,11 @@ OpenAssessment.Server.prototype = {
return this.runtime.handlerUrl(this.element, handler);
},
/*
* Get maximum size of input
*/
maxInputSize: 1024 * 64, /* 64KB should be enough for anybody, right? ;^P */
/**
Render the XBlock.
......@@ -112,6 +117,11 @@ OpenAssessment.Server.prototype = {
**/
submit: function(submission) {
var url = this.url('submit');
if (submission.length > this.maxInputSize) {
return $.Deferred(function(defer) {
defer.rejectWith(this, ["submit", "Response text is too large. Please reduce the size of your response and try to submit again."]);
}).promise();
}
return $.Deferred(function(defer) {
$.ajax({
type: "POST",
......@@ -147,6 +157,11 @@ OpenAssessment.Server.prototype = {
**/
save: function(submission) {
var url = this.url('save_submission');
if (submission.length > this.maxInputSize) {
return $.Deferred(function(defer) {
defer.rejectWith(this, ["Response text is too large. Please reduce the size of your response and try to submit again."]);
}).promise();
}
return $.Deferred(function(defer) {
$.ajax({
type: "POST",
......@@ -182,6 +197,11 @@ OpenAssessment.Server.prototype = {
*/
submitFeedbackOnAssessment: function(text, options) {
var url = this.url('submit_feedback');
if (text.length > this.maxInputSize) {
return $.Deferred(function(defer) {
defer.rejectWith(this, ["Response text is too large. Please reduce the size of your response and try to submit again."]);
}).promise();
}
var payload = JSON.stringify({
'feedback_text': text,
'feedback_options': options
......@@ -221,6 +241,11 @@ OpenAssessment.Server.prototype = {
**/
peerAssess: function(submissionId, optionsSelected, feedback) {
var url = this.url('peer_assess');
if (feedback.length > this.maxInputSize) {
return $.Deferred(function(defer) {
defer.rejectWith(this, ["Response text is too large. Please reduce the size of your response and try to submit again."]);
}).promise();
}
var payload = JSON.stringify({
submission_uuid: submissionId,
options_selected: optionsSelected,
......
......@@ -119,19 +119,30 @@
border: none;
padding-bottom: 0;
color: $white-t;
text-align: center;
}
&.message--error {
background: tint($color-error, 15%);
.message__content {
color: tint($color-error, 90%);
}
}
&.message--warning {
background: tint($color-warning, 15%);
.message__content {
color: tint($color-warning, 90%);
}
}
&.message--confirm {
background: tint($color-warning, 15%);
background: tint($color-confirm, 15%);
.message__content {
color: tint($color-confirm, 90%);
}
}
}
......
......@@ -14,6 +14,7 @@ from submissions.serializers import (
)
from submissions.models import Submission, StudentItem, Score, ScoreSummary
logger = logging.getLogger(__name__)
......
......@@ -70,6 +70,8 @@ class Submission(models.Model):
because it makes caching trivial.
"""
MAXSIZE = 1024*100 # 100KB
uuid = UUIDField(version=1, db_index=True)
student_item = models.ForeignKey(StudentItem)
......
......@@ -65,6 +65,13 @@ class SubmissionSerializer(serializers.ModelSerializer):
answer = JsonField(source='raw_answer')
def validate_answer(self, attrs, source):
    """Reject a serialized answer whose length exceeds ``Submission.MAXSIZE``."""
    answer = attrs[source]
    if len(answer) <= Submission.MAXSIZE:
        return attrs
    raise serializers.ValidationError("Maximum answer size exceeded.")
class Meta:
model = Submission
fields = (
......
......@@ -20,7 +20,7 @@ STUDENT_ITEM = dict(
)
SECOND_STUDENT_ITEM = dict(
student_id="Bob",
student_id="Alice",
course_id="Demo_Course",
item_id="item_one",
item_type="Peer_Submission",
......@@ -28,6 +28,7 @@ SECOND_STUDENT_ITEM = dict(
ANSWER_ONE = u"this is my answer!"
ANSWER_TWO = u"this is my other answer!"
ANSWER_THREE = u'' + 'c' * (Submission.MAXSIZE + 1)
@ddt
......@@ -41,6 +42,9 @@ class TestSubmissionsApi(TestCase):
student_item = self._get_student_item(STUDENT_ITEM)
self._assert_submission(submission, ANSWER_ONE, student_item.pk, 1)
def test_create_huge_submission_fails(self):
    """Submissions larger than Submission.MAXSIZE raise SubmissionRequestError."""
    # ANSWER_THREE is 'c' * (Submission.MAXSIZE + 1) -- one character over the cap.
    with self.assertRaises(api.SubmissionRequestError):
        api.create_submission(STUDENT_ITEM, ANSWER_THREE)
def test_get_submission_and_student(self):
submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment