Commit 46d0e021 by Sylvia Pearce

Review error messages and UI text

Edit error messages in .py files
Correct a small typo in models.py
Revise UI and error message text in .js files
Updated "Could not contact server" error messages
parent 60ac0ee2
......@@ -138,7 +138,7 @@ class Rubric(models.Model):
# Validate: are options selected for each criterion in the rubric?
if len(options_selected) != len(rubric_criteria_dict):
msg = _("Incorrect number of options for this rubric ({actual} instead of {expected}").format(
msg = _("Incorrect number of options for this rubric ({actual} instead of {expected})").format(
actual=len(options_selected), expected=len(rubric_criteria_dict))
raise InvalidOptionSelection(msg)
......
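Note on the hunk above: the fix restores the unbalanced closing parenthesis inside a translated format string. A minimal runnable sketch of the translate-then-format pattern, with a no-op `_` standing in for Django's `ugettext`:

```python
# Minimal sketch of the translate-then-format pattern, assuming a no-op
# gettext stand-in; in the real code, _ is django.utils.translation.ugettext.
def _(text):
    return text  # a real translation lookup would happen here

class InvalidOptionSelection(Exception):
    pass

def validate_option_count(options_selected, rubric_criteria):
    # Keep {actual}/{expected} literal inside _() so translators see the
    # placeholders; .format() substitutes values after the lookup.
    if len(options_selected) != len(rubric_criteria):
        msg = _("Incorrect number of options for this rubric "
                "({actual} instead of {expected})").format(
            actual=len(options_selected), expected=len(rubric_criteria))
        raise InvalidOptionSelection(msg)

validate_option_count({"clarity": "good", "depth": "fair"},
                      {"clarity": [], "depth": []})  # passes silently
```

Formatting after translation also lets translators reorder the named placeholders without breaking the string.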
......@@ -227,8 +227,9 @@ def create_assessment(
scorer_workflow = _get_latest_workflow(scorer_item)
if not scorer_workflow:
raise PeerAssessmentWorkflowError(_(
"You must make a submission before assessing another student."))
raise PeerAssessmentWorkflowError(
_("You must submit a response before you can complete a peer assessment.")
)
# Close the active assessment
_close_active_assessment(scorer_workflow, submission_uuid, assessment, num_required_grades)
......
......@@ -49,15 +49,15 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s
"""
# Check that there are not any assessments for this submission
if Assessment.objects.filter(submission_uuid=submission_uuid, score_type=SELF_TYPE).exists():
raise SelfAssessmentRequestError(_("Self assessment already exists for this submission"))
raise SelfAssessmentRequestError(_("You've already completed your self assessment for this response."))
# Check that the student is allowed to assess this submission
try:
submission = get_submission_and_student(submission_uuid)
if submission['student_item']['student_id'] != user_id:
raise SelfAssessmentRequestError(_("Cannot self-assess this submission"))
raise SelfAssessmentRequestError(_("You can only complete a self assessment on your own response."))
except SubmissionNotFoundError:
raise SelfAssessmentRequestError(_("Could not retrieve the submission."))
raise SelfAssessmentRequestError(_("Could not retrieve the response."))
# Get or create the rubric
try:
......
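The self-assessment hunk above reduces to an ownership check before any rubric work. A hypothetical standalone version (the exception name and submission shape mirror the excerpt):

```python
# Hypothetical standalone version of the ownership guard above.
class SelfAssessmentRequestError(Exception):
    pass

def check_self_assess_allowed(submission, user_id):
    """Raise unless the scorer is assessing their own response."""
    if submission['student_item']['student_id'] != user_id:
        raise SelfAssessmentRequestError(
            "You can only complete a self assessment on your own response.")

check_self_assess_allowed({'student_item': {'student_id': 'bob'}}, 'bob')  # OK
```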
......@@ -5,7 +5,7 @@
<h2 class="openassessment__title problem__header">{{ title }}</h2>
<nav class="nav--contents">
<h2 class="title">Skip to a specific step:</h2>
<h2 class="title">Skip to a specific step</h2>
<ol class="list list--nav">
{% for assessment in rubric_assessments %}
......@@ -23,14 +23,14 @@
<!-- if the problem is unstarted or response hasn't been submitted -->
<div id="openassessment__message" class="openassessment__message message">
<div class="message__content">
<p>This problem requires that you work through multiple parts. <strong>You can begin by reading the question below and providing your response.</strong></p>
<p>This assignment has several steps. In the first step, you'll provide a response to the question. The other steps appear below the <strong>Your Response</strong> field.</p>
</div>
</div>
{% endblock %}
<div class="wrapper--openassessment__prompt">
<article class="openassessment__prompt ui-toggle-visibility">
<h2 class="openassessment__prompt__title">This Problem's Prompt/Question</h2>
<h2 class="openassessment__prompt__title">The prompt for this assignment</h2>
<div class="openassessment__prompt__copy ui-toggle-visibility__content">
{{ question|linebreaks }}
......
......@@ -25,7 +25,7 @@
<div class="step__message message message--incomplete">
<h3 class="message__title">The Due Date for This Step Has Passed</h3>
<div class="message__content">
<p>This step is now closed. You can no longer complete peer assessments or continue with this assignment, and your grade will be Incomplete.</p>
<p>This step is now closed. You can no longer complete peer assessments or continue with this assignment, and you'll receive a grade of Incomplete.</p>
</div>
</div>
</div>
......
......@@ -22,11 +22,11 @@
<div class="wrapper--step__content">
<div class="step__message message message--complete">
<h3 class="message__title">Congratulations!</h3>
<h3 class="message__title">Peer Assessments Complete</h3>
<div class="message__content">
<p>You have successfully completed all of the peer assessment that you have been asked to do for this step. If you would like to continue providing feedback to your peers you may do so here, but it will not influence your final grade.</p>
<p>You have successfully completed all of the required peer assessments for this assignment. You may assess additional peer responses if you want to. Completing additional assessments will not affect your final grade.</p>
<p><strong>Currently there are no responses for you to assess. This should change momentarily. Check back shortly to provide feedback on more of your peers' responses.</strong></p>
<p><strong>All submitted peer responses have been assessed. Check back later to see if more students have submitted responses.</strong></p>
</div>
</div>
</div>
......
......@@ -42,9 +42,9 @@
<div class="wrapper--step__content">
<div class="step__instruction">
<p>
Please provide your response below.
Enter your response to the question.
{% if submission_due %}
You can save your progress and return to complete your response at any time before the due date of <span class="step__deadline"><span class="date">{{ submission_due|utc|date:"l, N j, Y H:i e" }}</span></span>.
You can save your progress and return to complete your response at any time before the due date (<span class="step__deadline"><span class="date">{{ submission_due|utc|date:"l, N j, Y H:i e" }}</span></span>).
{% else %}
You can save your progress and return to complete your response at any time.
{% endif %}
......@@ -56,7 +56,7 @@
<form id="response__submission" class="response__submission">
<ol class="list list--fields response__submission__content">
<li class="field field--textarea submission__answer" id="submission__answer">
<label class="sr" for="submission__answer__value">Provide your response to the question.</label>
<label class="sr" for="submission__answer__value">Enter your response to the question.</label>
<textarea id="submission__answer__value" placeholder="">{{ saved_response }}</textarea>
<span class="tip">You may continue to work on your response until you submit it.</span>
</li>
......@@ -74,7 +74,7 @@
<div id="response__save_status" class="response__submission__status">
<h3 class="response__submission__status__title">
<span class="sr">Your Working Submission Status:</span>
<span class="sr">Your Submission Status:</span>
{{ save_status }}
</h3>
</div>
......
......@@ -21,7 +21,7 @@
<h3 class="message__title">The Due Date for This Step Has Passed</h3>
<div class="message__content">
<p>This step is now closed. You can no longer submit a response or continue with this problem, and your grade will be Incomplete. If you saved but did not submit a response, the response appears in the course records.</p>
<p>This step is now closed. You can no longer submit a response or continue with this problem, and you'll receive a grade of Incomplete. If you saved but did not submit a response, the response appears in the course records.</p>
</div>
</div>
</div>
......
......@@ -19,7 +19,7 @@
<div class="wrapper--step__content">
<div class="step__content">
<article class="submission__answer__display">
<h3 class="submission__answer__display__title">Your Submitted Response</h3>
<h3 class="submission__answer__display__title">Your Response</h3>
<div class="submission__answer__display__content">
{{ student_submission.answer.text|linebreaks }}
......
......@@ -12,7 +12,7 @@
<h2 class="step__title">
<span class="step__counter"></span>
<span class="wrapper--copy">
<span class="step__label">Assess Yourself</span>
<span class="step__label">Assess Your Response</span>
{% if self_start %}
<span class="step__deadline">available
<span class="date">
......@@ -47,7 +47,7 @@
<div class="step__content">
<article class="self-assessment__display" id="self-assessment">
<header class="self-assessment__display__header">
<h3 class="self-assessment__display__title">Your Submitted Response</h3>
<h3 class="self-assessment__display__title">Your Response</h3>
</header>
<div class="self-assessment__display__response">
......
......@@ -21,7 +21,7 @@
<div class="step__message message message--incomplete">
<h3 class="message__title">The Due Date for This Step Has Passed</h3>
<div class="message__content">
<p>This step is now closed. You can no longer complete a self assessment or continue with this assignment, and your grade will be Incomplete.</p>
<p>This step is now closed. You can no longer complete a self assessment or continue with this assignment, and you'll receive a grade of Incomplete.</p>
</div>
</div>
</div>
......
......@@ -9,7 +9,7 @@
<div class="staff-info__summary ui-staff__content__section">
<dl class="submissions--total">
<dt class="label">Total number of submissions:</dt>
<dt class="label">Response total:</dt>
<dd class="value">{{ num_submissions }}</dd>
</dl>
</div>
......@@ -17,12 +17,12 @@
<div class="staff-info__status ui-staff__content__section">
<table class="staff-info__status__table" summary="Where are your students currently in this problem">
<caption class="title">Student Progress/Step Status</caption>
<caption class="title">Student Progress</caption>
<thead>
<tr>
<th abbr="Step" scope="col">Problem Step</th>
<th abbr="# of Students" scope="col">Number of Students Actively in Step</th>
<th abbr="# of Students" scope="col">Active Students in Step</th>
</tr>
</thead>
......
......@@ -154,4 +154,4 @@ class GradeMixin(object):
'options': feedback_options,
}
)
return {'success': True, 'msg': _(u"Feedback saved!")}
return {'success': True, 'msg': _(u"Feedback saved.")}
......@@ -45,7 +45,7 @@ def _parse_date(value):
try:
return parse_date(value).replace(tzinfo=pytz.utc)
except ValueError:
raise InvalidDateFormat(_("Could not parse date '{date}'").format(date=value))
raise InvalidDateFormat(_("'{date}' is an invalid date format. Make sure the date is formatted as YYYY-MM-DDTHH:MM:SS.").format(date=value))
else:
raise InvalidDateFormat(_("'{date}' must be a date string or datetime").format(date=value))
......@@ -185,13 +185,13 @@ def resolve_dates(start, end, date_ranges):
step_end = _parse_date(step_end) if step_end is not None else prev_end
if step_start < prev_start:
msg = _(u"The start date '{start}' must be after the previous start date '{prev}'.").format(
msg = _(u"This step's start date '{start}' cannot be before the previous step's start date '{prev}'.").format(
start=step_start, prev=prev_start
)
raise DateValidationError(msg)
if step_end > prev_end:
msg = _(u"The due date '{due}' must be before the following due date '{prev}'.").format(
msg = _(u"This step's due date '{due}' cannot be after the next step's due date '{prev}'.").format(
due=step_end, prev=prev_end
)
raise DateValidationError(msg)
......@@ -207,7 +207,7 @@ def resolve_dates(start, end, date_ranges):
# Now that we have resolved both start and end dates, we can safely compare them
for resolved_start, resolved_end in resolved_ranges:
if resolved_start >= resolved_end:
msg = _(u"Start date '{start}' cannot be later than the due date '{due}'").format(
msg = _(u"The start date '{start}' cannot be later than the due date '{due}'").format(
start=resolved_start, due=resolved_end
)
raise DateValidationError(msg)
......
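The date messages above all point at the same ISO-8601 shape. A hypothetical standalone version of the `_parse_date` helper they come from, assuming `python-dateutil` and `pytz` are installed (the excerpt implies both):

```python
# Standalone sketch of the _parse_date helper shown above; assumes
# python-dateutil and pytz, as the excerpt implies.
from dateutil.parser import parse as parse_date
import pytz

class InvalidDateFormat(Exception):
    pass

def parse_utc_date(value):
    """Parse an ISO-8601 string (YYYY-MM-DDTHH:MM:SS) into an aware UTC datetime."""
    try:
        return parse_date(value).replace(tzinfo=pytz.utc)
    except ValueError:
        raise InvalidDateFormat(
            "'{date}' is an invalid date format. Make sure the date is "
            "formatted as YYYY-MM-DDTHH:MM:SS.".format(date=value))

print(parse_utc_date("2014-03-01T00:00:00"))  # 2014-03-01 00:00:00+00:00
try:
    parse_utc_date("not-a-date")
except InvalidDateFormat as err:
    print(err)
```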
......@@ -38,7 +38,7 @@
"template": "openassessmentblock/response/oa_response.html",
"context": {
"saved_response": "",
"save_status": "Unsaved draft",
"save_status": "This response has not been saved.",
"submit_enabled": false,
"submission_due": ""
},
......
......@@ -69,7 +69,7 @@ describe("OpenAssessment.GradeView", function() {
expect(server.feedbackText).toEqual('I disliked the feedback I received');
expect(server.feedbackOptions).toEqual([
'These assessments were not useful.',
'I disagree with the ways that my peers assessed me.'
'I disagree with one or more of the peer assessments of my response.'
]);
});
......
......@@ -63,21 +63,21 @@ describe("OpenAssessment.ResponseView", function() {
view.responseChanged();
expect(view.submitEnabled()).toBe(false);
expect(view.saveEnabled()).toBe(false);
expect(view.saveStatus()).toContain('Unsaved draft');
expect(view.saveStatus()).toContain('This response has not been saved.');
// Response is whitespace --> save/submit buttons disabled
view.response(' \n \n ');
view.responseChanged();
expect(view.submitEnabled()).toBe(false);
expect(view.saveEnabled()).toBe(false);
expect(view.saveStatus()).toContain('Unsaved draft');
expect(view.saveStatus()).toContain('This response has not been saved.');
// Response is not blank --> submit button enabled
view.response('Test response');
view.responseChanged();
expect(view.submitEnabled()).toBe(true);
expect(view.saveEnabled()).toBe(true);
expect(view.saveStatus()).toContain('Unsaved draft');
expect(view.saveStatus()).toContain('This response has not been saved.');
});
it("updates submit/save buttons and save status when the user saves a response", function() {
......@@ -86,14 +86,14 @@ describe("OpenAssessment.ResponseView", function() {
view.save();
expect(view.submitEnabled()).toBe(false);
expect(view.saveEnabled()).toBe(false);
expect(view.saveStatus()).toContain('Saved but not submitted');
expect(view.saveStatus()).toContain('saved but not submitted');
// Response is not blank --> submit button enabled
view.response('Test response');
view.save();
expect(view.submitEnabled()).toBe(true);
expect(view.saveEnabled()).toBe(false);
expect(view.saveStatus()).toContain('Saved but not submitted');
expect(view.saveStatus()).toContain('saved but not submitted');
});
it("shows unsaved draft only when response text has changed", function() {
......@@ -101,21 +101,21 @@ describe("OpenAssessment.ResponseView", function() {
view.response('Lorem ipsum');
view.save();
expect(view.saveEnabled()).toBe(false);
expect(view.saveStatus()).toContain('Saved but not submitted');
expect(view.saveStatus()).toContain('saved but not submitted');
// Keep the text the same, but trigger an update
// Should still be saved
view.response('Lorem ipsum');
view.responseChanged();
expect(view.saveEnabled()).toBe(false);
expect(view.saveStatus()).toContain('Saved but not submitted');
expect(view.saveStatus()).toContain('saved but not submitted');
// Change the text
// This should cause it to change to unsaved draft
view.response('changed ');
view.responseChanged();
expect(view.saveEnabled()).toBe(true);
expect(view.saveStatus()).toContain('Unsaved draft');
expect(view.saveStatus()).toContain('This response has not been saved.');
});
it("sends the saved submission to the server", function() {
......
......@@ -35,13 +35,7 @@ describe("OpenAssessment.Server", function() {
var testString = '';
for (i = 0; i < (testStringSize); i++) { testString += 'x'; }
return testString;
}
var getHugeStringError = function() {
// return a string that can be used with .toContain()
// "Response text is too large. Please reduce the size of your response and try to submit again.";
return "text is too large"
}
};
beforeEach(function() {
// Create the server
......@@ -185,7 +179,7 @@ describe("OpenAssessment.Server", function() {
receivedMsg = msg;
});
expect(receivedMsg).toEqual("Could not contact server.");
expect(receivedMsg).toContain("This section could not be loaded");
});
it("informs the caller of an Ajax error when sending a submission", function() {
......@@ -201,7 +195,7 @@ describe("OpenAssessment.Server", function() {
);
expect(receivedErrorCode).toEqual("AJAX");
expect(receivedErrorMsg).toEqual("Could not contact server.");
expect(receivedErrorMsg).toContain("This response could not be submitted");
});
it("confirms that very long submissions fail with an error without ajax", function() {
......@@ -215,7 +209,7 @@ describe("OpenAssessment.Server", function() {
}
);
expect(receivedErrorCode).toEqual("submit");
expect(receivedErrorMsg).toContain(getHugeStringError());
expect(receivedErrorMsg).toContain("This response is too long");
});
it("informs the caller of an server error when sending a submission", function() {
......@@ -240,21 +234,21 @@ describe("OpenAssessment.Server", function() {
server.save(testString).fail(
function(errorMsg) { receivedErrorMsg = errorMsg; }
);
expect(receivedErrorMsg).toContain(getHugeStringError());
expect(receivedErrorMsg).toContain("This response is too long");
});
it("informs the caller of an AJAX error when sending a submission", function() {
it("informs the caller of an AJAX error when saving a submission", function() {
stubAjax(false, null);
var receivedMsg = null;
server.save("Test").fail(function(errorMsg) { receivedMsg = errorMsg; });
expect(receivedMsg).toEqual('Could not contact server.');
expect(receivedMsg).toContain('This response could not be saved');
});
it("informs the caller of an AJAX error when sending a self assessment", function() {
stubAjax(false, null);
var receivedMsg = null;
server.selfAssess("Test").fail(function(errorMsg) { receivedMsg = errorMsg; });
expect(receivedMsg).toEqual('Could not contact server.');
expect(receivedMsg).toContain('This assessment could not be submitted');
});
it("informs the caller of a server error when sending a submission", function() {
......@@ -272,7 +266,7 @@ describe("OpenAssessment.Server", function() {
receivedMsg = msg;
});
expect(receivedMsg).toEqual("Could not contact server.");
expect(receivedMsg).toContain("This problem could not be loaded");
});
it("informs the caller of an Ajax error when updating XML", function() {
......@@ -283,7 +277,7 @@ describe("OpenAssessment.Server", function() {
receivedMsg = msg;
});
expect(receivedMsg).toEqual("Could not contact server.");
expect(receivedMsg).toContain("This problem could not be saved");
});
it("informs the caller of a server error when loading XML", function() {
......@@ -317,7 +311,7 @@ describe("OpenAssessment.Server", function() {
receivedErrorMsg = errorMsg;
}
);
expect(receivedErrorMsg).toContain(getHugeStringError());
expect(receivedErrorMsg).toContain("The comments on this assessment are too long");
});
it("informs the caller of a server error when sending a peer assessment", function() {
......@@ -341,7 +335,7 @@ describe("OpenAssessment.Server", function() {
receivedMsg = msg;
});
expect(receivedMsg).toEqual("Could not contact server.");
expect(receivedMsg).toContain("This assessment could not be submitted");
});
it("informs the caller of an AJAX error when checking whether the XBlock has been released", function() {
......@@ -352,7 +346,7 @@ describe("OpenAssessment.Server", function() {
receivedMsg = errMsg;
});
expect(receivedMsg).toEqual("Could not contact server.");
expect(receivedMsg).toContain("The server could not be contacted");
});
......@@ -376,7 +370,7 @@ describe("OpenAssessment.Server", function() {
receivedErrorMsg = errorMsg;
}
);
expect(receivedErrorMsg).toContain(getHugeStringError());
expect(receivedErrorMsg).toContain("This feedback is too long");
});
it("informs the caller of an AJAX error when sending feedback on submission", function() {
......@@ -387,7 +381,7 @@ describe("OpenAssessment.Server", function() {
server.submitFeedbackOnAssessment("test feedback", options).fail(
function(errMsg) { receivedMsg = errMsg; }
);
expect(receivedMsg).toEqual("Could not contact server.");
expect(receivedMsg).toContain("This feedback could not be submitted");
});
it("informs the caller of a server error when sending feedback on submission", function() {
......
......@@ -87,7 +87,7 @@ OpenAssessment.StudioView.prototype = {
executed if the user confirms the update.
**/
confirmPostReleaseUpdate: function(onConfirm) {
var msg = "This problem has already been released. Any changes will apply only to future assessments.";
var msg = "This problem has already been released. Any changes will apply only to future assessments.";
// TODO: classier confirm dialog
if (confirm(msg)) { onConfirm(); }
},
......
......@@ -141,7 +141,7 @@ OpenAssessment.ResponseView.prototype = {
} else {
// Setting the HTML will overwrite the screen reader tag,
// so prepend it to the message.
sel.html('<span class="sr">Your Working Submission Status:</span>\n' + msg);
sel.html('<span class="sr">Status of Your Response:</span>\n' + msg);
}
},
......@@ -177,7 +177,7 @@ OpenAssessment.ResponseView.prototype = {
// Update the save button and status only if the response has changed
if ($.trim(this.savedResponse) !== currentResponse) {
this.saveEnabled(isBlank);
this.saveStatus('Unsaved draft');
this.saveStatus('This response has not been saved.');
}
},
......@@ -201,7 +201,7 @@ OpenAssessment.ResponseView.prototype = {
view.submitEnabled(currentResponse !== '');
if (currentResponse == savedResponse) {
view.saveEnabled(false);
view.saveStatus("Saved but not submitted");
view.saveStatus("This response has been saved but not submitted.");
}
}).fail(function(errMsg) {
view.saveStatus('Error');
......
......@@ -69,7 +69,7 @@ OpenAssessment.Server.prototype = {
}).done(function(data) {
defer.resolveWith(this, [data]);
}).fail(function(data) {
defer.rejectWith(this, ['Could not contact server.']);
defer.rejectWith(this, ['This section could not be loaded.']);
});
}).promise();
},
......@@ -100,7 +100,7 @@ OpenAssessment.Server.prototype = {
}).done(function(data) {
defer.resolveWith(this, [data]);
}).fail(function(data) {
defer.rejectWith(this, ['Could not contact server.']);
defer.rejectWith(this, ['This section could not be loaded.']);
});
}).promise();
},
......@@ -119,7 +119,7 @@ OpenAssessment.Server.prototype = {
var url = this.url('submit');
if (submission.length > this.maxInputSize) {
return $.Deferred(function(defer) {
defer.rejectWith(this, ["submit", "Response text is too large. Please reduce the size of your response and try to submit again."]);
defer.rejectWith(this, ["submit", "This response is too long. Please shorten the response and try to submit it again."]);
}).promise();
}
return $.Deferred(function(defer) {
......@@ -140,7 +140,7 @@ OpenAssessment.Server.prototype = {
defer.rejectWith(this, [errorNum, errorMsg]);
}
}).fail(function(data) {
defer.rejectWith(this, ["AJAX", "Could not contact server."]);
defer.rejectWith(this, ["AJAX", "This response could not be submitted."]);
});
}).promise();
},
......@@ -159,7 +159,7 @@ OpenAssessment.Server.prototype = {
var url = this.url('save_submission');
if (submission.length > this.maxInputSize) {
return $.Deferred(function(defer) {
defer.rejectWith(this, ["Response text is too large. Please reduce the size of your response and try to submit again."]);
defer.rejectWith(this, ["This response is too long. Please shorten the response and try to save it again."]);
}).promise();
}
return $.Deferred(function(defer) {
......@@ -171,7 +171,7 @@ OpenAssessment.Server.prototype = {
if (data.success) { defer.resolve(); }
else { defer.rejectWith(this, [data.msg]); }
}).fail(function(data) {
defer.rejectWith(this, ["Could not contact server."]);
defer.rejectWith(this, ["This response could not be saved."]);
});
}).promise();
},
......@@ -199,7 +199,7 @@ OpenAssessment.Server.prototype = {
var url = this.url('submit_feedback');
if (text.length > this.maxInputSize) {
return $.Deferred(function(defer) {
defer.rejectWith(this, ["Response text is too large. Please reduce the size of your response and try to submit again."]);
defer.rejectWith(this, ["This feedback is too long. Please shorten your feedback and try to submit it again."]);
}).promise();
}
var payload = JSON.stringify({
......@@ -213,7 +213,7 @@ OpenAssessment.Server.prototype = {
else { defer.rejectWith(this, [data.msg]); }
}
).fail(function(data) {
defer.rejectWith(this, ['Could not contact server.']);
defer.rejectWith(this, ['This feedback could not be submitted.']);
});
}).promise();
},
......@@ -243,7 +243,7 @@ OpenAssessment.Server.prototype = {
var url = this.url('peer_assess');
if (feedback.length > this.maxInputSize) {
return $.Deferred(function(defer) {
defer.rejectWith(this, ["Response text is too large. Please reduce the size of your response and try to submit again."]);
defer.rejectWith(this, ["The comments on this assessment are too long. Please shorten your comments and try to submit them again."]);
}).promise();
}
var payload = JSON.stringify({
......@@ -262,7 +262,7 @@ OpenAssessment.Server.prototype = {
}
}
).fail(function(data) {
defer.rejectWith(this, ['Could not contact server.']);
defer.rejectWith(this, ['This assessment could not be submitted.']);
});
}).promise();
},
......@@ -304,7 +304,7 @@ OpenAssessment.Server.prototype = {
}
}
).fail(function(data) {
defer.rejectWith(this, ['Could not contact server.']);
defer.rejectWith(this, ['This assessment could not be submitted.']);
});
});
},
......@@ -332,7 +332,7 @@ OpenAssessment.Server.prototype = {
if (data.success) { defer.resolveWith(this, [data.xml]); }
else { defer.rejectWith(this, [data.msg]); }
}).fail(function(data) {
defer.rejectWith(this, ['Could not contact server.']);
defer.rejectWith(this, ['This problem could not be loaded.']);
});
}).promise();
},
......@@ -361,7 +361,7 @@ OpenAssessment.Server.prototype = {
if (data.success) { defer.resolve(); }
else { defer.rejectWith(this, [data.msg]); }
}).fail(function(data) {
defer.rejectWith(this, ['Could not contact server.']);
defer.rejectWith(this, ['This problem could not be saved.']);
});
}).promise();
},
......@@ -391,7 +391,7 @@ OpenAssessment.Server.prototype = {
if (data.success) { defer.resolveWith(this, [data.is_released]); }
else { defer.rejectWith(this, [data.msg]); }
}).fail(function(data) {
defer.rejectWith(this, ["Could not contact server."]);
defer.rejectWith(this, ["The server could not be contacted."]);
});
}).promise();
}
......
......@@ -74,7 +74,7 @@ class SubmissionMixin(object):
status_tag = 'EBADFORM'
status_text = unicode(err.field_errors)
except (api.SubmissionError, workflow_api.AssessmentWorkflowError):
logger.exception("Error occurred while submitting.")
logger.exception("An error occurred while submitting.")
status_tag = 'EUNKNOWN'
else:
status = True
......@@ -111,11 +111,11 @@ class SubmissionMixin(object):
{"saved_response": self.saved_response}
)
except:
return {'success': False, 'msg': _(u"Could not save response submission")}
return {'success': False, 'msg': _(u"This response could not be saved.")}
else:
return {'success': True, 'msg': u''}
else:
return {'success': False, 'msg': _(u"Missing required key 'submission'")}
return {'success': False, 'msg': _(u"This response was not submitted.")}
def create_submission(self, student_item_dict, student_sub):
......@@ -173,7 +173,7 @@ class SubmissionMixin(object):
Returns:
unicode
"""
return _(u'Saved but not submitted') if self.has_saved else _(u'Unsaved draft')
return _(u'This response has been saved but not submitted.') if self.has_saved else _(u'This response has not been saved.')
@XBlock.handler
def render_submission(self, data, suffix=''):
......
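The save handler above follows the block's JSON convention: handlers resolve to a `{'success': bool, 'msg': text}` dictionary rather than raising at the learner. An illustrative sketch of that contract (the function and storage dict are stand-ins, not the XBlock API):

```python
# Illustrative sketch of the {'success': ..., 'msg': ...} handler contract;
# save_response and the storage dict are hypothetical stand-ins.
def save_response(data, storage):
    if 'submission' not in data:
        return {'success': False, 'msg': u"This response was not submitted."}
    try:
        storage['saved_response'] = data['submission']
    except Exception:
        return {'success': False, 'msg': u"This response could not be saved."}
    return {'success': True, 'msg': u''}

print(save_response({'submission': 'Draft text'}, {}))  # {'success': True, 'msg': ''}
```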
......@@ -263,7 +263,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
peer_response = xblock.render_peer_assessment(request)
self.assertIsNotNone(peer_response)
self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)
self.assertIn("Congratulations".encode('utf-8'), peer_response.body)
self.assertIn("Peer Assessments Complete", peer_response.body)
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_peer_unavailable(self, xblock):
......
......@@ -14,7 +14,7 @@ class SaveResponseTest(XBlockHandlerTestCase):
def test_default_saved_response_blank(self, xblock):
resp = self.request(xblock, 'render_submission', json.dumps({}))
self.assertIn('<textarea id="submission__answer__value" placeholder=""></textarea>', resp)
self.assertIn('Unsaved draft', resp)
self.assertIn('response has not been saved', resp)
@ddt.file_data('data/save_responses.json')
@scenario('data/save_scenario.xml', user_id="Perleman")
......@@ -32,7 +32,7 @@ class SaveResponseTest(XBlockHandlerTestCase):
submitted=submission_text
)
self.assertIn(expected_html, resp.decode('utf-8'))
self.assertIn('Saved but not submitted', resp)
self.assertIn('saved but not submitted', resp.lower())
@scenario('data/save_scenario.xml', user_id="Valchek")
def test_overwrite_saved_response(self, xblock):
......@@ -57,4 +57,4 @@ class SaveResponseTest(XBlockHandlerTestCase):
def test_missing_submission_key(self, xblock):
resp = self.request(xblock, 'save_submission', json.dumps({}), response_format="json")
self.assertFalse(resp['success'])
self.assertIn('submission', resp['msg'])
self.assertIn('not submitted', resp['msg'])
......@@ -61,19 +61,19 @@ def validate_assessments(assessments, enforce_peer_then_self=False):
"""
if enforce_peer_then_self:
if len(assessments) != 2:
return (False, _("Problem must have exactly two assessments"))
return (False, _("This problem must have exactly two assessments."))
if assessments[0].get('name') != 'peer-assessment':
return (False, _("The first assessment must be a peer-assessment"))
return (False, _("The first assessment must be a peer assessment."))
if assessments[1].get('name') != 'self-assessment':
return (False, _("The second assessment must be a self-assessment"))
return (False, _("The second assessment must be a self assessment."))
if len(assessments) == 0:
return (False, _("Problem must include at least one assessment"))
return (False, _("This problem must include at least one assessment."))
for assessment_dict in assessments:
# Supported assessment
if not assessment_dict.get('name') in ['peer-assessment', 'self-assessment']:
return (False, _("Assessment type is not supported"))
return (False, _('The "name" value must be "peer-assessment" or "self-assessment".'))
# Number you need to grade is >= the number of people that need to grade you
if assessment_dict.get('name') == 'peer-assessment':
......@@ -81,13 +81,13 @@ def validate_assessments(assessments, enforce_peer_then_self=False):
must_be_graded_by = assessment_dict.get('must_be_graded_by')
if must_grade is None or must_grade < 1:
return (False, _('"must_grade" must be a positive integer'))
return (False, _('The "must_grade" value must be a positive integer.'))
if must_be_graded_by is None or must_be_graded_by < 1:
return (False, _('"must_be_graded_by" must be a positive integer'))
return (False, _('The "must_be_graded_by" value must be a positive integer.'))
if must_grade < must_be_graded_by:
return (False, _('"must_grade" should be greater than or equal to "must_be_graded_by"'))
return (False, _('The "must_grade" value must be greater than or equal to the "must_be_graded_by" value.'))
return (True, u'')
......@@ -109,7 +109,7 @@ def validate_rubric(rubric_dict, current_rubric, is_released):
try:
rubric_from_dict(rubric_dict)
except InvalidRubric:
return (False, u'Rubric definition is not valid')
return (False, u'This rubric definition is not valid.')
# No duplicate criteria names
duplicates = _duplicates([criterion['name'] for criterion in rubric_dict['criteria']])
......@@ -134,12 +134,12 @@ def validate_rubric(rubric_dict, current_rubric, is_released):
# Number of criteria must be the same
if len(rubric_dict['criteria']) != len(current_rubric['criteria']):
return (False, u'Number of criteria cannot be changed after a problem is released.')
return (False, u'The number of criteria cannot be changed after a problem is released.')
# Number of options for each criterion must be the same
for new_criterion, old_criterion in _match_by_order(rubric_dict['criteria'], current_rubric['criteria']):
if len(new_criterion['options']) != len(old_criterion['options']):
return (False, u'Number of options cannot be changed after a problem is released.')
return (False, u'The number of options cannot be changed after a problem is released.')
else:
for new_option, old_option in _match_by_order(new_criterion['options'], old_criterion['options']):
......
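The validators touched above share one convention: return an `(is_valid, message)` tuple instead of raising, so the Studio editor can surface the message directly. A condensed, illustrative sketch reproducing two of the checks:

```python
# Condensed sketch of the (is_valid, message) convention the validators
# above follow; only two of the real checks are reproduced.
def validate_assessments(assessments):
    if len(assessments) == 0:
        return (False, "This problem must include at least one assessment.")
    for assessment in assessments:
        if assessment.get('name') not in ('peer-assessment', 'self-assessment'):
            return (False, 'The "name" value must be "peer-assessment" or "self-assessment".')
    return (True, u'')

success, msg = validate_assessments([{'name': 'peer-assessment'}])
assert success and msg == u''
```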
......@@ -196,23 +196,23 @@ def _parse_options_xml(options_root):
try:
option_dict['points'] = int(option.get('points'))
except ValueError:
raise UpdateFromXmlError(_("XML option points must be an integer."))
raise UpdateFromXmlError(_('The value for "points" must be an integer.'))
else:
raise UpdateFromXmlError(_("XML option definition must contain a 'points' attribute."))
raise UpdateFromXmlError(_('Every "option" element must contain a "points" attribute.'))
# Option name
option_name = option.find('name')
if option_name is not None:
option_dict['name'] = _safe_get_text(option_name)
else:
raise UpdateFromXmlError(_("XML option definition must contain a 'name' element."))
raise UpdateFromXmlError(_('Every "option" element must contain a "name" element.'))
# Option explanation
option_explanation = option.find('explanation')
if option_explanation is not None:
option_dict['explanation'] = _safe_get_text(option_explanation)
else:
raise UpdateFromXmlError(_("XML option definition must contain an 'explanation' element."))
raise UpdateFromXmlError(_('Every "option" element must contain an "explanation" element.'))
# Add the options dictionary to the list
options_list.append(option_dict)
......@@ -248,14 +248,14 @@ def _parse_criteria_xml(criteria_root):
if criterion_name is not None:
criterion_dict['name'] = _safe_get_text(criterion_name)
else:
raise UpdateFromXmlError(_("XML criterion definition must contain a 'name' element."))
raise UpdateFromXmlError(_('Every "criterion" element must contain a "name" element.'))
# Criterion prompt
criterion_prompt = criterion.find('prompt')
if criterion_prompt is not None:
criterion_dict['prompt'] = _safe_get_text(criterion_prompt)
else:
raise UpdateFromXmlError(_("XML criterion definition must contain a 'prompt' element."))
raise UpdateFromXmlError(_('Every "criterion" element must contain a "prompt" element.'))
# Criterion options
criterion_dict['options'] = _parse_options_xml(criterion)
......@@ -290,7 +290,7 @@ def _parse_rubric_xml(rubric_root):
if prompt_el is not None:
rubric_dict['prompt'] = _safe_get_text(prompt_el)
else:
raise UpdateFromXmlError(_("XML rubric definition must contain a 'prompt' element."))
raise UpdateFromXmlError(_('Every "criterion" element must contain a "prompt" element.'))
# Criteria
rubric_dict['criteria'] = _parse_criteria_xml(rubric_root)
......@@ -323,7 +323,7 @@ def _parse_assessments_xml(assessments_root, start, due):
if 'name' in assessment.attrib:
assessment_dict['name'] = unicode(assessment.get('name'))
else:
raise UpdateFromXmlError(_('XML assessment definition must have a "name" attribute'))
raise UpdateFromXmlError(_('Every "assessment" element must contain a "name" attribute.'))
# Assessment start
if 'start' in assessment.attrib:
......@@ -331,7 +331,7 @@ def _parse_assessments_xml(assessments_root, start, due):
if parsed_start is not None:
assessment_dict['start'] = parsed_start
else:
raise UpdateFromXmlError(_("Could not parse 'start' attribute as a valid date time"))
raise UpdateFromXmlError(_('The date format in the "start" attribute is invalid. Make sure the date is formatted as YYYY-MM-DDTHH:MM:SS.'))
else:
assessment_dict['start'] = None
......@@ -341,7 +341,7 @@ def _parse_assessments_xml(assessments_root, start, due):
if parsed_start is not None:
assessment_dict['due'] = parsed_start
else:
raise UpdateFromXmlError(_("Could not parse 'due' attribute as a valid date time"))
raise UpdateFromXmlError(_('The date format in the "due" attribute is invalid. Make sure the date is formatted as YYYY-MM-DDTHH:MM:SS.'))
else:
assessment_dict['due'] = None
......@@ -350,14 +350,14 @@ def _parse_assessments_xml(assessments_root, start, due):
try:
assessment_dict['must_grade'] = int(assessment.get('must_grade'))
except ValueError:
raise UpdateFromXmlError(_('Assessment "must_grade" attribute must be an integer.'))
raise UpdateFromXmlError(_('The "must_grade" value must be a positive integer.'))
# Assessment must_be_graded_by
if 'must_be_graded_by' in assessment.attrib:
try:
assessment_dict['must_be_graded_by'] = int(assessment.get('must_be_graded_by'))
except ValueError:
raise UpdateFromXmlError(_('Assessment "must_be_graded_by" attribute must be an integer.'))
raise UpdateFromXmlError(_('The "must_be_graded_by" value must be a positive integer.'))
# Update the list of assessments
assessments_list.append(assessment_dict)
......@@ -466,7 +466,7 @@ def update_from_xml(oa_block, root, validator=DEFAULT_VALIDATOR):
# Check that the root has the correct tag
if root.tag != 'openassessment':
raise UpdateFromXmlError(_("XML content must contain an 'openassessment' root element."))
raise UpdateFromXmlError(_('XML content must contain an "openassessment" root element.'))
# Retrieve the start date for the submission
# Set it to None by default; we will update it to the latest start date later on
......@@ -474,7 +474,7 @@ def update_from_xml(oa_block, root, validator=DEFAULT_VALIDATOR):
if 'submission_start' in root.attrib:
submission_start = _parse_date(unicode(root.attrib['submission_start']))
if submission_start is None:
raise UpdateFromXmlError(_("Invalid date format for submission start date"))
raise UpdateFromXmlError(_('The format for the submission start date is invalid. Make sure the date is formatted as YYYY-MM-DDTHH:MM:SS.'))
# Retrieve the due date for the submission
# Set it to None by default; we will update it to the earliest deadline later on
......@@ -482,26 +482,26 @@ def update_from_xml(oa_block, root, validator=DEFAULT_VALIDATOR):
if 'submission_due' in root.attrib:
submission_due = _parse_date(unicode(root.attrib['submission_due']))
if submission_due is None:
raise UpdateFromXmlError(_("Invalid date format for submission due date"))
raise UpdateFromXmlError(_('The format for the submission due date is invalid. Make sure the date is formatted as YYYY-MM-DDTHH:MM:SS.'))
# Retrieve the title
title_el = root.find('title')
if title_el is None:
raise UpdateFromXmlError(_("XML content must contain a 'title' element."))
raise UpdateFromXmlError(_('Every assessment must contain a "title" element.'))
else:
title = _safe_get_text(title_el)
# Retrieve the rubric
rubric_el = root.find('rubric')
if rubric_el is None:
raise UpdateFromXmlError(_("XML content must contain a 'rubric' element."))
raise UpdateFromXmlError(_('Every assessment must contain a "rubric" element.'))
else:
rubric = _parse_rubric_xml(rubric_el)
# Retrieve the assessments
assessments_el = root.find('assessments')
if assessments_el is None:
raise UpdateFromXmlError(_("XML content must contain an 'assessments' element."))
raise UpdateFromXmlError(_('Every assessment must contain an "assessments" element.'))
else:
assessments = _parse_assessments_xml(assessments_el, oa_block.start, oa_block.due)
......
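The `update_from_xml` checks above all reduce to "find the required element, or raise with a quoted element name". A self-contained sketch using the standard library's ElementTree (`UpdateFromXmlError` is redefined here and the returned dict shape is illustrative):

```python
# Self-contained sketch of the required-element checks in update_from_xml;
# UpdateFromXmlError is redefined here for illustration.
import xml.etree.ElementTree as etree

class UpdateFromXmlError(Exception):
    pass

def parse_openassessment(xml_text):
    root = etree.fromstring(xml_text)
    if root.tag != 'openassessment':
        raise UpdateFromXmlError('XML content must contain an "openassessment" root element.')
    title_el = root.find('title')
    if title_el is None:
        raise UpdateFromXmlError('Every assessment must contain a "title" element.')
    return {'title': title_el.text or ''}

print(parse_openassessment('<openassessment><title>Census</title></openassessment>'))
# {'title': 'Census'}
```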