Commit 46d0e021 by Sylvia Pearce

Review error messages and UI text

Edited error messages in .py files
Corrected a small typo in models.py
Revised UI and error message text in .js files
Updated the "Could not contact server" error messages
parent 60ac0ee2
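For reference, the .py hunks below edit user-facing strings that are wrapped in Django's translation helper and filled in with .format(). The following sketch is illustrative only: the ugettext import path and the surrounding function are assumptions, while the message text and the InvalidOptionSelection exception mirror the models.py hunk. It shows the pattern, including the closing parenthesis that the typo fix restores inside the translatable string.

from django.utils.translation import ugettext as _


class InvalidOptionSelection(Exception):
    """Raised when the selected options do not match the rubric's criteria."""
    pass


def validate_options_selected(options_selected, rubric_criteria_dict):
    # Keep placeholders and punctuation inside the translatable string so
    # translators see the complete sentence; substitute values with .format().
    if len(options_selected) != len(rubric_criteria_dict):
        msg = _(
            "Incorrect number of options for this rubric "
            "({actual} instead of {expected})"
        ).format(
            actual=len(options_selected),
            expected=len(rubric_criteria_dict),
        )
        raise InvalidOptionSelection(msg)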
@@ -138,7 +138,7 @@ class Rubric(models.Model):
# Validate: are options selected for each criterion in the rubric?
if len(options_selected) != len(rubric_criteria_dict):
-msg = _("Incorrect number of options for this rubric ({actual} instead of {expected}").format(
+msg = _("Incorrect number of options for this rubric ({actual} instead of {expected})").format(
actual=len(options_selected), expected=len(rubric_criteria_dict))
raise InvalidOptionSelection(msg)
...
@@ -227,8 +227,9 @@ def create_assessment(
scorer_workflow = _get_latest_workflow(scorer_item)
if not scorer_workflow:
-raise PeerAssessmentWorkflowError(_(
-"You must make a submission before assessing another student."))
+raise PeerAssessmentWorkflowError(
+_("You must submit a response before you can complete a peer assessment.")
+)
# Close the active assessment
_close_active_assessment(scorer_workflow, submission_uuid, assessment, num_required_grades)
...
@@ -49,15 +49,15 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s
"""
# Check that there are not any assessments for this submission
if Assessment.objects.filter(submission_uuid=submission_uuid, score_type=SELF_TYPE).exists():
-raise SelfAssessmentRequestError(_("Self assessment already exists for this submission"))
+raise SelfAssessmentRequestError(_("You've already completed your self assessment for this response."))
# Check that the student is allowed to assess this submission
try:
submission = get_submission_and_student(submission_uuid)
if submission['student_item']['student_id'] != user_id:
-raise SelfAssessmentRequestError(_("Cannot self-assess this submission"))
+raise SelfAssessmentRequestError(_("You can only complete a self assessment on your own response."))
except SubmissionNotFoundError:
-raise SelfAssessmentRequestError(_("Could not retrieve the submission."))
+raise SelfAssessmentRequestError(_("Could not retrieve the response."))
# Get or create the rubric
try:
...
@@ -5,7 +5,7 @@
<h2 class="openassessment__title problem__header">{{ title }}</h2>
<nav class="nav--contents">
-<h2 class="title">Skip to a specific step:</h2>
+<h2 class="title">Skip to a specific step</h2>
<ol class="list list--nav">
{% for assessment in rubric_assessments %}
@@ -23,14 +23,14 @@
<!-- if the problem is unstarted or response hasn't been submitted -->
<div id="openassessment__message" class="openassessment__message message">
<div class="message__content">
-<p>This problem requires that you work through multiple parts. <strong>You can begin by reading the question below and providing your response.</strong></p>
+<p>This assignment has several steps. In the first step, you'll provide a response to the question. The other steps appear below the <strong>Your Response</strong> field.</p>
</div>
</div>
{% endblock %}
<div class="wrapper--openassessment__prompt">
<article class="openassessment__prompt ui-toggle-visibility">
-<h2 class="openassessment__prompt__title">This Problem's Prompt/Question</h2>
+<h2 class="openassessment__prompt__title">The prompt for this assignment</h2>
<div class="openassessment__prompt__copy ui-toggle-visibility__content">
{{ question|linebreaks }}
...
@@ -25,7 +25,7 @@
<div class="step__message message message--incomplete">
<h3 class="message__title">The Due Date for This Step Has Passed</h3>
<div class="message__content">
-<p>This step is now closed. You can no longer complete peer assessments or continue with this assignment, and your grade will be Incomplete.</p>
+<p>This step is now closed. You can no longer complete peer assessments or continue with this assignment, and you'll receive a grade of Incomplete.</p>
</div>
</div>
</div>
...
@@ -22,11 +22,11 @@
<div class="wrapper--step__content">
<div class="step__message message message--complete">
-<h3 class="message__title">Congratulations!</h3>
+<h3 class="message__title">Peer Assessments Complete</h3>
<div class="message__content">
-<p>You have successfully completed all of the peer assessment that you have been asked to do for this step. If you would like to continue providing feedback to your peers you may do so here, but it will not influence your final grade.</p>
+<p>You have successfully completed all of the required peer assessments for this assignment. You may assess additional peer responses if you want to. Completing additional assessments will not affect your final grade.</p>
-<p><strong>Currently there are no responses for you to assess. This should change momentarily. Check back shortly to provide feedback on more of your peers' responses.</strong></p>
+<p><strong>All submitted peer responses have been assessed. Check back later to see if more students have submitted responses.</strong></p>
</div>
</div>
</div>
...
@@ -42,9 +42,9 @@
<div class="wrapper--step__content">
<div class="step__instruction">
<p>
-Please provide your response below.
+Enter your response to the question.
{% if submission_due %}
-You can save your progress and return to complete your response at any time before the due date of <span class="step__deadline"><span class="date">{{ submission_due|utc|date:"l, N j, Y H:i e" }}</span></span>.
+You can save your progress and return to complete your response at any time before the due date (<span class="step__deadline"><span class="date">{{ submission_due|utc|date:"l, N j, Y H:i e" }}</span></span>).
{% else %}
You can save your progress and return to complete your response at any time.
{% endif %}
@@ -56,7 +56,7 @@
<form id="response__submission" class="response__submission">
<ol class="list list--fields response__submission__content">
<li class="field field--textarea submission__answer" id="submission__answer">
-<label class="sr" for="submission__answer__value">Provide your response to the question.</label>
+<label class="sr" for="submission__answer__value">Enter your response to the question.</label>
<textarea id="submission__answer__value" placeholder="">{{ saved_response }}</textarea>
<span class="tip">You may continue to work on your response until you submit it.</span>
</li>
@@ -74,7 +74,7 @@
<div id="response__save_status" class="response__submission__status">
<h3 class="response__submission__status__title">
-<span class="sr">Your Working Submission Status:</span>
+<span class="sr">Your Submission Status:</span>
{{ save_status }}
</h3>
</div>
...
@@ -21,7 +21,7 @@
<h3 class="message__title">The Due Date for This Step Has Passed</h3>
<div class="message__content">
-<p>This step is now closed. You can no longer submit a response or continue with this problem, and your grade will be Incomplete. If you saved but did not submit a response, the response appears in the course records.</p>
+<p>This step is now closed. You can no longer submit a response or continue with this problem, and you'll receive a grade of Incomplete. If you saved but did not submit a response, the response appears in the course records.</p>
</div>
</div>
</div>
...
@@ -19,7 +19,7 @@
<div class="wrapper--step__content">
<div class="step__content">
<article class="submission__answer__display">
-<h3 class="submission__answer__display__title">Your Submitted Response</h3>
+<h3 class="submission__answer__display__title">Your Response</h3>
<div class="submission__answer__display__content">
{{ student_submission.answer.text|linebreaks }}
...
@@ -12,7 +12,7 @@
<h2 class="step__title">
<span class="step__counter"></span>
<span class="wrapper--copy">
-<span class="step__label">Assess Yourself</span>
+<span class="step__label">Assess Your Response</span>
{% if self_start %}
<span class="step__deadline">available
<span class="date">
@@ -47,7 +47,7 @@
<div class="step__content">
<article class="self-assessment__display" id="self-assessment">
<header class="self-assessment__display__header">
-<h3 class="self-assessment__display__title">Your Submitted Response</h3>
+<h3 class="self-assessment__display__title">Your Response</h3>
</header>
<div class="self-assessment__display__response">
...
@@ -21,7 +21,7 @@
<div class="step__message message message--incomplete">
<h3 class="message__title">The Due Date for This Step Has Passed</h3>
<div class="message__content">
-<p>This step is now closed. You can no longer complete a self assessment or continue with this assignment, and your grade will be Incomplete.</p>
+<p>This step is now closed. You can no longer complete a self assessment or continue with this assignment, and you'll receive a grade of Incomplete.</p>
</div>
</div>
</div>
...
@@ -9,7 +9,7 @@
<div class="staff-info__summary ui-staff__content__section">
<dl class="submissions--total">
-<dt class="label">Total number of submissions:</dt>
+<dt class="label">Response total:</dt>
<dd class="value">{{ num_submissions }}</dd>
</dl>
</div>
@@ -17,12 +17,12 @@
<div class="staff-info__status ui-staff__content__section">
<table class="staff-info__status__table" summary="Where are your students currently in this problem">
-<caption class="title">Student Progress/Step Status</caption>
+<caption class="title">Student Progress</caption>
<thead>
<tr>
<th abbr="Step" scope="col">Problem Step</th>
-<th abbr="# of Students" scope="col">Number of Students Actively in Step</th>
+<th abbr="# of Students" scope="col">Active Students in Step</th>
</tr>
</thead>
...
@@ -154,4 +154,4 @@ class GradeMixin(object):
'options': feedback_options,
}
)
-return {'success': True, 'msg': _(u"Feedback saved!")}
+return {'success': True, 'msg': _(u"Feedback saved.")}
@@ -45,7 +45,7 @@ def _parse_date(value):
try:
return parse_date(value).replace(tzinfo=pytz.utc)
except ValueError:
-raise InvalidDateFormat(_("Could not parse date '{date}'").format(date=value))
+raise InvalidDateFormat(_("'{date}' is an invalid date format. Make sure the date is formatted as YYYY-MM-DDTHH:MM:SS.").format(date=value))
else:
raise InvalidDateFormat(_("'{date}' must be a date string or datetime").format(date=value))
@@ -185,13 +185,13 @@ def resolve_dates(start, end, date_ranges):
step_end = _parse_date(step_end) if step_end is not None else prev_end
if step_start < prev_start:
-msg = _(u"The start date '{start}' must be after the previous start date '{prev}'.").format(
+msg = _(u"This step's start date '{start}' cannot be before the previous step's start date '{prev}'.").format(
start=step_start, prev=prev_start
)
raise DateValidationError(msg)
if step_end > prev_end:
-msg = _(u"The due date '{due}' must be before the following due date '{prev}'.").format(
+msg = _(u"This step's due date '{due}' cannot be after the next step's due date '{prev}'.").format(
due=step_end, prev=prev_end
)
raise DateValidationError(msg)
@@ -207,7 +207,7 @@ def resolve_dates(start, end, date_ranges):
# Now that we have resolved both start and end dates, we can safely compare them
for resolved_start, resolved_end in resolved_ranges:
if resolved_start >= resolved_end:
-msg = _(u"Start date '{start}' cannot be later than the due date '{due}'").format(
+msg = _(u"The start date '{start}' cannot be later than the due date '{due}'").format(
start=resolved_start, due=resolved_end
)
raise DateValidationError(msg)
...
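As context for the reworded date errors above, here is a hedged sketch of what a helper like _parse_date might look like. The dateutil/pytz calls and the "date string or datetime" branch mirror the context lines in the hunk; the isinstance checks and module layout are assumptions, and the translation wrapper is omitted to keep the sketch self-contained.

import datetime

import pytz
from dateutil.parser import parse as parse_date


class InvalidDateFormat(Exception):
    """Raised when a value cannot be interpreted as a date."""
    pass


def _parse_date(value):
    # Accept either a datetime (normalized to UTC) or an ISO-style string.
    if isinstance(value, datetime.datetime):
        return value.replace(tzinfo=pytz.utc)
    elif isinstance(value, basestring):
        try:
            return parse_date(value).replace(tzinfo=pytz.utc)
        except ValueError:
            raise InvalidDateFormat(
                "'{date}' is an invalid date format. "
                "Make sure the date is formatted as YYYY-MM-DDTHH:MM:SS.".format(date=value)
            )
    else:
        raise InvalidDateFormat("'{date}' must be a date string or datetime".format(date=value))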
@@ -38,7 +38,7 @@
"template": "openassessmentblock/response/oa_response.html",
"context": {
"saved_response": "",
-"save_status": "Unsaved draft",
+"save_status": "This response has not been saved.",
"submit_enabled": false,
"submission_due": ""
},
...
@@ -69,7 +69,7 @@ describe("OpenAssessment.GradeView", function() {
expect(server.feedbackText).toEqual('I disliked the feedback I received');
expect(server.feedbackOptions).toEqual([
'These assessments were not useful.',
-'I disagree with the ways that my peers assessed me.'
+'I disagree with one or more of the peer assessments of my response.'
]);
});
...
@@ -63,21 +63,21 @@ describe("OpenAssessment.ResponseView", function() {
view.responseChanged();
expect(view.submitEnabled()).toBe(false);
expect(view.saveEnabled()).toBe(false);
-expect(view.saveStatus()).toContain('Unsaved draft');
+expect(view.saveStatus()).toContain('This response has not been saved.');
// Response is whitespace --> save/submit buttons disabled
view.response(' \n \n ');
view.responseChanged();
expect(view.submitEnabled()).toBe(false);
expect(view.saveEnabled()).toBe(false);
-expect(view.saveStatus()).toContain('Unsaved draft');
+expect(view.saveStatus()).toContain('This response has not been saved.');
// Response is not blank --> submit button enabled
view.response('Test response');
view.responseChanged();
expect(view.submitEnabled()).toBe(true);
expect(view.saveEnabled()).toBe(true);
-expect(view.saveStatus()).toContain('Unsaved draft');
+expect(view.saveStatus()).toContain('This response has not been saved.');
});
it("updates submit/save buttons and save status when the user saves a response", function() {
@@ -86,14 +86,14 @@ describe("OpenAssessment.ResponseView", function() {
view.save();
expect(view.submitEnabled()).toBe(false);
expect(view.saveEnabled()).toBe(false);
-expect(view.saveStatus()).toContain('Saved but not submitted');
+expect(view.saveStatus()).toContain('saved but not submitted');
// Response is not blank --> submit button enabled
view.response('Test response');
view.save();
expect(view.submitEnabled()).toBe(true);
expect(view.saveEnabled()).toBe(false);
-expect(view.saveStatus()).toContain('Saved but not submitted');
+expect(view.saveStatus()).toContain('saved but not submitted');
});
it("shows unsaved draft only when response text has changed", function() {
@@ -101,21 +101,21 @@ describe("OpenAssessment.ResponseView", function() {
view.response('Lorem ipsum');
view.save();
expect(view.saveEnabled()).toBe(false);
-expect(view.saveStatus()).toContain('Saved but not submitted');
+expect(view.saveStatus()).toContain('saved but not submitted');
// Keep the text the same, but trigger an update
// Should still be saved
view.response('Lorem ipsum');
view.responseChanged();
expect(view.saveEnabled()).toBe(false);
-expect(view.saveStatus()).toContain('Saved but not submitted');
+expect(view.saveStatus()).toContain('saved but not submitted');
// Change the text
// This should cause it to change to unsaved draft
view.response('changed ');
view.responseChanged();
expect(view.saveEnabled()).toBe(true);
-expect(view.saveStatus()).toContain('Unsaved draft');
+expect(view.saveStatus()).toContain('This response has not been saved.');
});
it("sends the saved submission to the server", function() {
...
@@ -35,13 +35,7 @@ describe("OpenAssessment.Server", function() {
var testString = '';
for (i = 0; i < (testStringSize); i++) { testString += 'x'; }
return testString;
-}
+};
-var getHugeStringError = function() {
-// return a string that can be used with .toContain()
-// "Response text is too large. Please reduce the size of your response and try to submit again.";
-return "text is too large"
-}
beforeEach(function() {
// Create the server
@@ -185,7 +179,7 @@ describe("OpenAssessment.Server", function() {
receivedMsg = msg;
});
-expect(receivedMsg).toEqual("Could not contact server.");
+expect(receivedMsg).toContain("This section could not be loaded");
});
it("informs the caller of an Ajax error when sending a submission", function() {
@@ -201,7 +195,7 @@ describe("OpenAssessment.Server", function() {
);
expect(receivedErrorCode).toEqual("AJAX");
-expect(receivedErrorMsg).toEqual("Could not contact server.");
+expect(receivedErrorMsg).toContain("This response could not be submitted");
});
it("confirms that very long submissions fail with an error without ajax", function() {
@@ -215,7 +209,7 @@ describe("OpenAssessment.Server", function() {
}
);
expect(receivedErrorCode).toEqual("submit");
-expect(receivedErrorMsg).toContain(getHugeStringError());
+expect(receivedErrorMsg).toContain("This response is too long");
});
it("informs the caller of an server error when sending a submission", function() {
@@ -240,21 +234,21 @@ describe("OpenAssessment.Server", function() {
server.save(testString).fail(
function(errorMsg) { receivedErrorMsg = errorMsg; }
);
-expect(receivedErrorMsg).toContain(getHugeStringError());
+expect(receivedErrorMsg).toContain("This response is too long");
});
-it("informs the caller of an AJAX error when sending a submission", function() {
+it("informs the caller of an AJAX error when saving a submission", function() {
stubAjax(false, null);
var receivedMsg = null;
server.save("Test").fail(function(errorMsg) { receivedMsg = errorMsg; });
-expect(receivedMsg).toEqual('Could not contact server.');
+expect(receivedMsg).toContain('This response could not be saved');
});
it("informs the caller of an AJAX error when sending a self assessment", function() {
stubAjax(false, null);
var receivedMsg = null;
server.selfAssess("Test").fail(function(errorMsg) { receivedMsg = errorMsg; });
-expect(receivedMsg).toEqual('Could not contact server.');
+expect(receivedMsg).toContain('This assessment could not be submitted');
});
it("informs the caller of a server error when sending a submission", function() {
@@ -272,7 +266,7 @@ describe("OpenAssessment.Server", function() {
receivedMsg = msg;
});
-expect(receivedMsg).toEqual("Could not contact server.");
+expect(receivedMsg).toContain("This problem could not be loaded");
});
it("informs the caller of an Ajax error when updating XML", function() {
@@ -283,7 +277,7 @@ describe("OpenAssessment.Server", function() {
receivedMsg = msg;
});
-expect(receivedMsg).toEqual("Could not contact server.");
+expect(receivedMsg).toContain("This problem could not be saved");
});
it("informs the caller of a server error when loading XML", function() {
@@ -317,7 +311,7 @@ describe("OpenAssessment.Server", function() {
receivedErrorMsg = errorMsg;
}
);
-expect(receivedErrorMsg).toContain(getHugeStringError());
+expect(receivedErrorMsg).toContain("The comments on this assessment are too long");
});
it("informs the caller of a server error when sending a peer assessment", function() {
@@ -341,7 +335,7 @@ describe("OpenAssessment.Server", function() {
receivedMsg = msg;
});
-expect(receivedMsg).toEqual("Could not contact server.");
+expect(receivedMsg).toContain("This assessment could not be submitted");
});
it("informs the caller of an AJAX error when checking whether the XBlock has been released", function() {
@@ -352,7 +346,7 @@ describe("OpenAssessment.Server", function() {
receivedMsg = errMsg;
});
-expect(receivedMsg).toEqual("Could not contact server.");
+expect(receivedMsg).toContain("The server could not be contacted");
});
@@ -376,7 +370,7 @@ describe("OpenAssessment.Server", function() {
receivedErrorMsg = errorMsg;
}
);
-expect(receivedErrorMsg).toContain(getHugeStringError());
+expect(receivedErrorMsg).toContain("This feedback is too long");
});
it("informs the caller of an AJAX error when sending feedback on submission", function() {
@@ -387,7 +381,7 @@ describe("OpenAssessment.Server", function() {
server.submitFeedbackOnAssessment("test feedback", options).fail(
function(errMsg) { receivedMsg = errMsg; }
);
-expect(receivedMsg).toEqual("Could not contact server.");
+expect(receivedMsg).toContain("This feedback could not be submitted");
});
it("informs the caller of a server error when sending feedback on submission", function() {
...
@@ -141,7 +141,7 @@ OpenAssessment.ResponseView.prototype = {
} else {
// Setting the HTML will overwrite the screen reader tag,
// so prepend it to the message.
-sel.html('<span class="sr">Your Working Submission Status:</span>\n' + msg);
+sel.html('<span class="sr">Status of Your Response:</span>\n' + msg);
}
},
@@ -177,7 +177,7 @@ OpenAssessment.ResponseView.prototype = {
// Update the save button and status only if the response has changed
if ($.trim(this.savedResponse) !== currentResponse) {
this.saveEnabled(isBlank);
-this.saveStatus('Unsaved draft');
+this.saveStatus('This response has not been saved.');
}
},
@@ -201,7 +201,7 @@ OpenAssessment.ResponseView.prototype = {
view.submitEnabled(currentResponse !== '');
if (currentResponse == savedResponse) {
view.saveEnabled(false);
-view.saveStatus("Saved but not submitted");
+view.saveStatus("This response has been saved but not submitted.");
}
}).fail(function(errMsg) {
view.saveStatus('Error');
...
@@ -69,7 +69,7 @@ OpenAssessment.Server.prototype = {
}).done(function(data) {
defer.resolveWith(this, [data]);
}).fail(function(data) {
-defer.rejectWith(this, ['Could not contact server.']);
+defer.rejectWith(this, ['This section could not be loaded.']);
});
}).promise();
},
@@ -100,7 +100,7 @@ OpenAssessment.Server.prototype = {
}).done(function(data) {
defer.resolveWith(this, [data]);
}).fail(function(data) {
-defer.rejectWith(this, ['Could not contact server.']);
+defer.rejectWith(this, ['This section could not be loaded.']);
});
}).promise();
},
@@ -119,7 +119,7 @@ OpenAssessment.Server.prototype = {
var url = this.url('submit');
if (submission.length > this.maxInputSize) {
return $.Deferred(function(defer) {
-defer.rejectWith(this, ["submit", "Response text is too large. Please reduce the size of your response and try to submit again."]);
+defer.rejectWith(this, ["submit", "This response is too long. Please shorten the response and try to submit it again."]);
}).promise();
}
return $.Deferred(function(defer) {
@@ -140,7 +140,7 @@ OpenAssessment.Server.prototype = {
defer.rejectWith(this, [errorNum, errorMsg]);
}
}).fail(function(data) {
-defer.rejectWith(this, ["AJAX", "Could not contact server."]);
+defer.rejectWith(this, ["AJAX", "This response could not be submitted."]);
});
}).promise();
},
@@ -159,7 +159,7 @@ OpenAssessment.Server.prototype = {
var url = this.url('save_submission');
if (submission.length > this.maxInputSize) {
return $.Deferred(function(defer) {
-defer.rejectWith(this, ["Response text is too large. Please reduce the size of your response and try to submit again."]);
+defer.rejectWith(this, ["This response is too long. Please shorten the response and try to save it again."]);
}).promise();
}
return $.Deferred(function(defer) {
@@ -171,7 +171,7 @@ OpenAssessment.Server.prototype = {
if (data.success) { defer.resolve(); }
else { defer.rejectWith(this, [data.msg]); }
}).fail(function(data) {
-defer.rejectWith(this, ["Could not contact server."]);
+defer.rejectWith(this, ["This response could not be saved."]);
});
}).promise();
},
@@ -199,7 +199,7 @@ OpenAssessment.Server.prototype = {
var url = this.url('submit_feedback');
if (text.length > this.maxInputSize) {
return $.Deferred(function(defer) {
-defer.rejectWith(this, ["Response text is too large. Please reduce the size of your response and try to submit again."]);
+defer.rejectWith(this, ["This feedback is too long. Please shorten your feedback and try to submit it again."]);
}).promise();
}
var payload = JSON.stringify({
@@ -213,7 +213,7 @@ OpenAssessment.Server.prototype = {
else { defer.rejectWith(this, [data.msg]); }
}
).fail(function(data) {
-defer.rejectWith(this, ['Could not contact server.']);
+defer.rejectWith(this, ['This feedback could not be submitted.']);
});
}).promise();
},
@@ -243,7 +243,7 @@ OpenAssessment.Server.prototype = {
var url = this.url('peer_assess');
if (feedback.length > this.maxInputSize) {
return $.Deferred(function(defer) {
-defer.rejectWith(this, ["Response text is too large. Please reduce the size of your response and try to submit again."]);
+defer.rejectWith(this, ["The comments on this assessment are too long. Please shorten your comments and try to submit them again."]);
}).promise();
}
var payload = JSON.stringify({
@@ -262,7 +262,7 @@ OpenAssessment.Server.prototype = {
}
}
).fail(function(data) {
-defer.rejectWith(this, ['Could not contact server.']);
+defer.rejectWith(this, ['This assessment could not be submitted.']);
});
}).promise();
},
@@ -304,7 +304,7 @@ OpenAssessment.Server.prototype = {
}
}
).fail(function(data) {
-defer.rejectWith(this, ['Could not contact server.']);
+defer.rejectWith(this, ['This assessment could not be submitted.']);
});
});
},
@@ -332,7 +332,7 @@ OpenAssessment.Server.prototype = {
if (data.success) { defer.resolveWith(this, [data.xml]); }
else { defer.rejectWith(this, [data.msg]); }
}).fail(function(data) {
-defer.rejectWith(this, ['Could not contact server.']);
+defer.rejectWith(this, ['This problem could not be loaded.']);
});
}).promise();
},
@@ -361,7 +361,7 @@ OpenAssessment.Server.prototype = {
if (data.success) { defer.resolve(); }
else { defer.rejectWith(this, [data.msg]); }
}).fail(function(data) {
-defer.rejectWith(this, ['Could not contact server.']);
+defer.rejectWith(this, ['This problem could not be saved.']);
});
}).promise();
},
@@ -391,7 +391,7 @@ OpenAssessment.Server.prototype = {
if (data.success) { defer.resolveWith(this, [data.is_released]); }
else { defer.rejectWith(this, [data.msg]); }
}).fail(function(data) {
-defer.rejectWith(this, ["Could not contact server."]);
+defer.rejectWith(this, ["The server could not be contacted."]);
});
}).promise();
}
...
@@ -74,7 +74,7 @@ class SubmissionMixin(object):
status_tag = 'EBADFORM'
status_text = unicode(err.field_errors)
except (api.SubmissionError, workflow_api.AssessmentWorkflowError):
-logger.exception("Error occurred while submitting.")
+logger.exception("An error occurred while submitting.")
status_tag = 'EUNKNOWN'
else:
status = True
@@ -111,11 +111,11 @@ class SubmissionMixin(object):
{"saved_response": self.saved_response}
)
except:
-return {'success': False, 'msg': _(u"Could not save response submission")}
+return {'success': False, 'msg': _(u"This response could not be saved.")}
else:
return {'success': True, 'msg': u''}
else:
-return {'success': False, 'msg': _(u"Missing required key 'submission'")}
+return {'success': False, 'msg': _(u"This response was not submitted.")}
def create_submission(self, student_item_dict, student_sub):
@@ -173,7 +173,7 @@ class SubmissionMixin(object):
Returns:
unicode
"""
-return _(u'Saved but not submitted') if self.has_saved else _(u'Unsaved draft')
+return _(u'This response has been saved but not submitted.') if self.has_saved else _(u'This response has not been saved.')
@XBlock.handler
def render_submission(self, data, suffix=''):
...
@@ -263,7 +263,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
peer_response = xblock.render_peer_assessment(request)
self.assertIsNotNone(peer_response)
self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)
-self.assertIn("Congratulations".encode('utf-8'), peer_response.body)
+self.assertIn("Peer Assessments Complete", peer_response.body)
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_peer_unavailable(self, xblock):
...
@@ -14,7 +14,7 @@ class SaveResponseTest(XBlockHandlerTestCase):
def test_default_saved_response_blank(self, xblock):
resp = self.request(xblock, 'render_submission', json.dumps({}))
self.assertIn('<textarea id="submission__answer__value" placeholder=""></textarea>', resp)
-self.assertIn('Unsaved draft', resp)
+self.assertIn('response has not been saved', resp)
@ddt.file_data('data/save_responses.json')
@scenario('data/save_scenario.xml', user_id="Perleman")
@@ -32,7 +32,7 @@ class SaveResponseTest(XBlockHandlerTestCase):
submitted=submission_text
)
self.assertIn(expected_html, resp.decode('utf-8'))
-self.assertIn('Saved but not submitted', resp)
+self.assertIn('saved but not submitted', resp.lower())
@scenario('data/save_scenario.xml', user_id="Valchek")
def test_overwrite_saved_response(self, xblock):
@@ -57,4 +57,4 @@ class SaveResponseTest(XBlockHandlerTestCase):
def test_missing_submission_key(self, xblock):
resp = self.request(xblock, 'save_submission', json.dumps({}), response_format="json")
self.assertFalse(resp['success'])
-self.assertIn('submission', resp['msg'])
+self.assertIn('not submitted', resp['msg'])
@@ -61,19 +61,19 @@ def validate_assessments(assessments, enforce_peer_then_self=False):
"""
if enforce_peer_then_self:
if len(assessments) != 2:
-return (False, _("Problem must have exactly two assessments"))
+return (False, _("This problem must have exactly two assessments."))
if assessments[0].get('name') != 'peer-assessment':
-return (False, _("The first assessment must be a peer-assessment"))
+return (False, _("The first assessment must be a peer assessment."))
if assessments[1].get('name') != 'self-assessment':
-return (False, _("The second assessment must be a self-assessment"))
+return (False, _("The second assessment must be a self assessment."))
if len(assessments) == 0:
-return (False, _("Problem must include at least one assessment"))
+return (False, _("This problem must include at least one assessment."))
for assessment_dict in assessments:
# Supported assessment
if not assessment_dict.get('name') in ['peer-assessment', 'self-assessment']:
-return (False, _("Assessment type is not supported"))
+return (False, _('The "name" value must be "peer-assessment" or "self-assessment".'))
# Number you need to grade is >= the number of people that need to grade you
if assessment_dict.get('name') == 'peer-assessment':
@@ -81,13 +81,13 @@ def validate_assessments(assessments, enforce_peer_then_self=False):
must_be_graded_by = assessment_dict.get('must_be_graded_by')
if must_grade is None or must_grade < 1:
-return (False, _('"must_grade" must be a positive integer'))
+return (False, _('The "must_grade" value must be a positive integer.'))
if must_be_graded_by is None or must_be_graded_by < 1:
-return (False, _('"must_be_graded_by" must be a positive integer'))
+return (False, _('The "must_be_graded_by" value must be a positive integer.'))
if must_grade < must_be_graded_by:
-return (False, _('"must_grade" should be greater than or equal to "must_be_graded_by"'))
+return (False, _('The "must_grade" value must be greater than or equal to the "must_be_graded_by" value.'))
return (True, u'')
@@ -109,7 +109,7 @@ def validate_rubric(rubric_dict, current_rubric, is_released):
try:
rubric_from_dict(rubric_dict)
except InvalidRubric:
-return (False, u'Rubric definition is not valid')
+return (False, u'This rubric definition is not valid.')
# No duplicate criteria names
duplicates = _duplicates([criterion['name'] for criterion in rubric_dict['criteria']])
@@ -134,12 +134,12 @@ def validate_rubric(rubric_dict, current_rubric, is_released):
# Number of criteria must be the same
if len(rubric_dict['criteria']) != len(current_rubric['criteria']):
-return (False, u'Number of criteria cannot be changed after a problem is released.')
+return (False, u'The number of criteria cannot be changed after a problem is released.')
# Number of options for each criterion must be the same
for new_criterion, old_criterion in _match_by_order(rubric_dict['criteria'], current_rubric['criteria']):
if len(new_criterion['options']) != len(old_criterion['options']):
-return (False, u'Number of options cannot be changed after a problem is released.')
+return (False, u'The number of options cannot be changed after a problem is released.')
else:
for new_option, old_option in _match_by_order(new_criterion['options'], old_criterion['options']):
...
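The validators above all follow a (success, message) tuple convention, and the reworded strings are what callers show to course authors. A minimal sketch of how a caller might pass those messages through (the update_assessments wrapper and its return shape are assumptions; validate_assessments and its signature come from the hunk above):

def update_assessments(xblock, assessments):
    # Run the validator and surface its user-facing message directly,
    # rather than replacing it with a generic failure string.
    success, msg = validate_assessments(assessments, enforce_peer_then_self=True)
    if not success:
        return {'success': False, 'msg': msg}
    xblock.rubric_assessments = assessments
    return {'success': True, 'msg': u''}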
@@ -196,23 +196,23 @@ def _parse_options_xml(options_root):
try:
option_dict['points'] = int(option.get('points'))
except ValueError:
-raise UpdateFromXmlError(_("XML option points must be an integer."))
+raise UpdateFromXmlError(_('The value for "points" must be an integer.'))
else:
-raise UpdateFromXmlError(_("XML option definition must contain a 'points' attribute."))
+raise UpdateFromXmlError(_('Every "option" element must contain a "points" attribute.'))
# Option name
option_name = option.find('name')
if option_name is not None:
option_dict['name'] = _safe_get_text(option_name)
else:
-raise UpdateFromXmlError(_("XML option definition must contain a 'name' element."))
+raise UpdateFromXmlError(_('Every "option" element must contain a "name" element.'))
# Option explanation
option_explanation = option.find('explanation')
if option_explanation is not None:
option_dict['explanation'] = _safe_get_text(option_explanation)
else:
-raise UpdateFromXmlError(_("XML option definition must contain an 'explanation' element."))
+raise UpdateFromXmlError(_('Every "option" element must contain an "explanation" element.'))
# Add the options dictionary to the list
options_list.append(option_dict)
@@ -248,14 +248,14 @@ def _parse_criteria_xml(criteria_root):
if criterion_name is not None:
criterion_dict['name'] = _safe_get_text(criterion_name)
else:
-raise UpdateFromXmlError(_("XML criterion definition must contain a 'name' element."))
+raise UpdateFromXmlError(_('Every "criterion" element must contain a "name" element.'))
# Criterion prompt
criterion_prompt = criterion.find('prompt')
if criterion_prompt is not None:
criterion_dict['prompt'] = _safe_get_text(criterion_prompt)
else:
-raise UpdateFromXmlError(_("XML criterion definition must contain a 'prompt' element."))
+raise UpdateFromXmlError(_('Every "criterion" element must contain a "prompt" element.'))
# Criterion options
criterion_dict['options'] = _parse_options_xml(criterion)
@@ -290,7 +290,7 @@ def _parse_rubric_xml(rubric_root):
if prompt_el is not None:
rubric_dict['prompt'] = _safe_get_text(prompt_el)
else:
-raise UpdateFromXmlError(_("XML rubric definition must contain a 'prompt' element."))
+raise UpdateFromXmlError(_('Every "criterion" element must contain a "prompt" element.'))
# Criteria
rubric_dict['criteria'] = _parse_criteria_xml(rubric_root)
@@ -323,7 +323,7 @@ def _parse_assessments_xml(assessments_root, start, due):
if 'name' in assessment.attrib:
assessment_dict['name'] = unicode(assessment.get('name'))
else:
-raise UpdateFromXmlError(_('XML assessment definition must have a "name" attribute'))
+raise UpdateFromXmlError(_('All "criterion" and "option" elements must contain a "name" element.'))
# Assessment start
if 'start' in assessment.attrib:
@@ -331,7 +331,7 @@ def _parse_assessments_xml(assessments_root, start, due):
if parsed_start is not None:
assessment_dict['start'] = parsed_start
else:
-raise UpdateFromXmlError(_("Could not parse 'start' attribute as a valid date time"))
+raise UpdateFromXmlError(_('The date format in the "start" attribute is invalid. Make sure the date is formatted as YYYY-MM-DDTHH:MM:SS.'))
else:
assessment_dict['start'] = None
@@ -341,7 +341,7 @@ def _parse_assessments_xml(assessments_root, start, due):
if parsed_start is not None:
assessment_dict['due'] = parsed_start
else:
-raise UpdateFromXmlError(_("Could not parse 'due' attribute as a valid date time"))
+raise UpdateFromXmlError(_('The date format in the "due" attribute is invalid. Make sure the date is formatted as YYYY-MM-DDTHH:MM:SS.'))
else:
assessment_dict['due'] = None
@@ -350,14 +350,14 @@ def _parse_assessments_xml(assessments_root, start, due):
try:
assessment_dict['must_grade'] = int(assessment.get('must_grade'))
except ValueError:
-raise UpdateFromXmlError(_('Assessment "must_grade" attribute must be an integer.'))
+raise UpdateFromXmlError(_('The "must_grade" value must be a positive integer.'))
# Assessment must_be_graded_by
if 'must_be_graded_by' in assessment.attrib:
try:
assessment_dict['must_be_graded_by'] = int(assessment.get('must_be_graded_by'))
except ValueError:
-raise UpdateFromXmlError(_('Assessment "must_be_graded_by" attribute must be an integer.'))
+raise UpdateFromXmlError(_('The "must_be_graded_by" value must be a positive integer.'))
# Update the list of assessments
assessments_list.append(assessment_dict)
@@ -466,7 +466,7 @@ def update_from_xml(oa_block, root, validator=DEFAULT_VALIDATOR):
# Check that the root has the correct tag
if root.tag != 'openassessment':
-raise UpdateFromXmlError(_("XML content must contain an 'openassessment' root element."))
+raise UpdateFromXmlError(_('XML content must contain an "openassessment" root element.'))
# Retrieve the start date for the submission
# Set it to None by default; we will update it to the latest start date later on
@@ -474,7 +474,7 @@ def update_from_xml(oa_block, root, validator=DEFAULT_VALIDATOR):
if 'submission_start' in root.attrib:
submission_start = _parse_date(unicode(root.attrib['submission_start']))
if submission_start is None:
-raise UpdateFromXmlError(_("Invalid date format for submission start date"))
+raise UpdateFromXmlError(_('Invalid date format for submission start date'))
# Retrieve the due date for the submission
# Set it to None by default; we will update it to the earliest deadline later on
@@ -482,26 +482,26 @@ def update_from_xml(oa_block, root, validator=DEFAULT_VALIDATOR):
if 'submission_due' in root.attrib:
submission_due = _parse_date(unicode(root.attrib['submission_due']))
if submission_due is None:
-raise UpdateFromXmlError(_("Invalid date format for submission due date"))
+raise UpdateFromXmlError(_('The format for the submission due date is invalid. Make sure the date is formatted as YYYY-MM-DDTHH:MM:SS.'))
# Retrieve the title
title_el = root.find('title')
if title_el is None:
-raise UpdateFromXmlError(_("XML content must contain a 'title' element."))
+raise UpdateFromXmlError(_('Every assessment must contain a "title" element.'))
else:
title = _safe_get_text(title_el)
# Retrieve the rubric
rubric_el = root.find('rubric')
if rubric_el is None:
-raise UpdateFromXmlError(_("XML content must contain a 'rubric' element."))
+raise UpdateFromXmlError(_('Every assessment must contain a "rubric" element.'))
else:
rubric = _parse_rubric_xml(rubric_el)
# Retrieve the assessments
assessments_el = root.find('assessments')
if assessments_el is None:
-raise UpdateFromXmlError(_("XML content must contain an 'assessments' element."))
+raise UpdateFromXmlError(_('Every assessment must contain an "assessments" element.'))
else:
assessments = _parse_assessments_xml(assessments_el, oa_block.start, oa_block.due)
...