Commit 7ab488fe by Will Daly

Merge pull request #408 from edx/will/i18n-strings-cleanup

i18n string cleanup
parents 5f1184ae f51f7288
......@@ -7,6 +7,7 @@ before_script:
- "pip install coveralls"
script:
- "./scripts/test.sh"
- "./scripts/i18n.sh"
- "python setup.py install"
after_success:
coveralls
......@@ -121,15 +121,7 @@ To extract strings and compile messages:
.. code:: bash
python manage.py makemessages -l en
python manage.py makemessages -d djangojs -l en
python manage.py compilemessages
Generate dummy strings for testing:
.. code:: bash
i18n_tool dummy
./scripts/i18n.sh
License
......
......@@ -6,7 +6,6 @@ the workflow for a given submission.
"""
import logging
from django.utils import timezone
from django.utils.translation import ugettext as _
from django.db import DatabaseError, IntegrityError
from dogapi import dog_stats_api
......@@ -162,18 +161,19 @@ def create_assessment(
# and raise an error if this is not the case
try:
option_ids = rubric.options_ids(options_selected)
except InvalidOptionSelection as ex:
msg = _("Selected options do not match the rubric: {error}").format(error=ex)
except InvalidOptionSelection:
msg = "Selected options do not match the rubric"
logger.warning(msg, exc_info=True)
raise PeerAssessmentRequestError(msg)
scorer_workflow = PeerWorkflow.objects.get(submission_uuid=scorer_submission_uuid)
peer_workflow_item = scorer_workflow.get_latest_open_workflow_item()
if peer_workflow_item is None:
message = _(
message = (
u"There are no open assessments associated with the scorer's "
u"submission UUID {}.".format(scorer_submission_uuid)
)
u"submission UUID {}."
).format(scorer_submission_uuid)
logger.warning(message)
raise PeerAssessmentWorkflowError(message)
......@@ -213,17 +213,16 @@ def create_assessment(
return assessment_dict
except DatabaseError:
error_message = _(
error_message = (
u"An error occurred while creating assessment {} by: {}"
.format(assessment_dict, scorer_id)
)
).format(assessment_dict, scorer_id)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
except PeerWorkflow.DoesNotExist:
message = _(
message = (
u"There is no Peer Workflow associated with the given "
u"submission UUID {}.".format(scorer_submission_uuid)
)
u"submission UUID {}."
).format(scorer_submission_uuid)
logger.error(message)
raise PeerAssessmentWorkflowError(message)
......@@ -259,10 +258,9 @@ def get_rubric_max_scores(submission_uuid):
for criterion in rubric_dict["criteria"]
}
except DatabaseError:
error_message = _(
u"Error getting rubric options max scores for submission uuid "
u"[{}]".format(submission_uuid)
)
error_message = (
u"Error getting rubric options max scores for submission uuid {uuid}"
).format(uuid=submission_uuid)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
......@@ -298,7 +296,9 @@ def get_assessment_median_scores(submission_uuid):
scores = Assessment.scores_by_criterion(assessments)
return Assessment.get_median_score_dict(scores)
except DatabaseError:
error_message = _(u"Error getting assessment median scores {}".format(submission_uuid))
error_message = (
u"Error getting assessment median scores for submission {uuid}"
).format(uuid=submission_uuid)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
......@@ -401,9 +401,9 @@ def get_assessments(submission_uuid, scored_only=True, limit=None):
)[:limit]
return serialize_assessments(assessments)
except DatabaseError:
error_message = _(
u"Error getting assessments for submission {}".format(submission_uuid)
)
error_message = (
u"Error getting assessments for submission {uuid}"
).format(uuid=submission_uuid)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
......@@ -471,10 +471,10 @@ def get_submitted_assessments(submission_uuid, scored_only=True, limit=None):
pk__in=[item.assessment.pk for item in items])[:limit]
return serialize_assessments(assessments)
except DatabaseError:
error_message = _(
u"Couldn't retrieve the assessments that the author of response {}"
u" completed".format(submission_uuid)
)
error_message = (
u"Couldn't retrieve the assessments completed by the "
" student with submission {uuid}"
).format(uuid=submission_uuid)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
......@@ -544,11 +544,10 @@ def get_submission_to_assess(submission_uuid, graded_by):
_log_workflow(peer_submission_uuid, workflow)
return submission_data
except sub_api.SubmissionNotFoundError:
error_message = _(
error_message = (
u"Could not find a submission with the uuid {} for student {} "
u"in the peer workflow."
.format(peer_submission_uuid, workflow.student_id)
)
).format(peer_submission_uuid, workflow.student_id)
logger.exception(error_message)
raise PeerAssessmentWorkflowError(error_message)
else:
......@@ -598,11 +597,10 @@ def create_peer_workflow(submission_uuid):
# created a workflow for this submission, so we don't need to do anything.
pass
except DatabaseError:
error_message = _(
error_message = (
u"An internal error occurred while creating a new peer "
u"workflow for submission {}"
.format(submission_uuid)
)
).format(submission_uuid)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
......
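The pattern running through these hunks: internal log and exception messages drop the `ugettext` wrapper (they are developer-facing and should never enter the translation catalogs), while strings shown to learners keep it. A minimal sketch of that convention, assuming illustrative function, exception, and message names rather than the module's actual ones:

```python
import logging

from django.utils.translation import ugettext as _

logger = logging.getLogger(__name__)


class AssessmentRequestError(Exception):
    """Illustrative stand-in for the API's request-error classes."""


def select_options(rubric, options_selected):
    """Validate a learner's selections; rubric.options_ids() mirrors the model API above."""
    try:
        return rubric.options_ids(options_selected)
    except KeyError:  # the real code catches InvalidOptionSelection
        # Developer-facing: a plain (untranslated) string; the traceback goes to the log.
        msg = "Selected options do not match the rubric"
        logger.warning(msg, exc_info=True)
        raise AssessmentRequestError(msg)


def confirmation_message():
    # Learner-facing: wrapped in ugettext so `makemessages` extracts it into the .po files.
    return _(u"Your assessment has been submitted.")
```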
......@@ -2,7 +2,6 @@
Public interface for self-assessment.
"""
import logging
from django.utils.translation import ugettext as _
from django.db import DatabaseError
from dogapi import dog_stats_api
......@@ -46,25 +45,45 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s
"""
# Check that there are not any assessments for this submission
if Assessment.objects.filter(submission_uuid=submission_uuid, score_type=SELF_TYPE).exists():
raise SelfAssessmentRequestError(_("You've already completed your self assessment for this response."))
msg = (
u"Cannot submit a self-assessment for the submission {uuid} "
"because another self-assessment already exists for that submission."
).format(uuid=submission_uuid)
raise SelfAssessmentRequestError(msg)
# Check that the student is allowed to assess this submission
try:
submission = get_submission_and_student(submission_uuid)
if submission['student_item']['student_id'] != user_id:
raise SelfAssessmentRequestError(_("You can only complete a self assessment on your own response."))
msg = (
u"Cannot submit a self-assessment for the submission {uuid} "
u"because it was created by another student "
u"(submission student ID {student_id} does not match your "
u"student id {other_id})"
).format(
uuid=submission_uuid,
student_id=submission['student_item']['student_id'],
other_id=user_id
)
raise SelfAssessmentRequestError(msg)
except SubmissionNotFoundError:
raise SelfAssessmentRequestError(_("Could not retrieve the response."))
msg = (
"Could not submit a self-assessment because no submission "
"exists with UUID {uuid}"
).format(uuid=submission_uuid)
raise SelfAssessmentRequestError(msg)
# Get or create the rubric
try:
rubric = rubric_from_dict(rubric_dict)
option_ids = rubric.options_ids(options_selected)
except InvalidRubric as ex:
msg = _("Invalid rubric definition: {errors}").format(errors=ex.errors)
except InvalidRubric:
msg = "Invalid rubric definition"
logger.warning(msg, exc_info=True)
raise SelfAssessmentRequestError(msg)
except InvalidOptionSelection:
msg = _("Selected options do not match the rubric")
msg = "Selected options do not match the rubric"
logger.warning(msg, exc_info=True)
raise SelfAssessmentRequestError(msg)
# Create the assessment
......@@ -84,7 +103,7 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s
# Serialize the assessment
serializer = AssessmentSerializer(data=self_assessment)
if not serializer.is_valid():
msg = _("Could not create self assessment: {errors}").format(errors=serializer.errors)
msg = "Could not create self assessment: {errors}".format(errors=serializer.errors)
raise SelfAssessmentRequestError(msg)
assessment = serializer.save()
......@@ -229,7 +248,9 @@ def get_assessment_scores_by_criteria(submission_uuid):
scores = Assessment.scores_by_criterion(assessments)
return Assessment.get_median_score_dict(scores)
except DatabaseError:
error_message = _(u"Error getting self assessment scores for {}").format(submission_uuid)
error_message = (
u"Error getting self assessment scores for submission {}"
).format(submission_uuid)
logger.exception(error_message)
raise SelfAssessmentInternalError(error_message)
......
......@@ -7,8 +7,8 @@ Public interface for student training:
"""
import logging
from django.db import DatabaseError
from django.utils.translation import ugettext as _
from django.db import DatabaseError
from submissions import api as sub_api
from openassessment.assessment.models import StudentTrainingWorkflow
from openassessment.assessment.serializers import (
......@@ -158,10 +158,9 @@ def validate_training_examples(rubric, examples):
]
for criterion in rubric['criteria']
}
except (ValueError, KeyError) as ex:
msg = _(u"Could not parse serialized rubric")
logger.warning("{}: {}".format(msg, ex))
return [msg]
except (ValueError, KeyError):
logger.warning("Could not parse serialized rubric", exc_info=True)
return [_(u"Could not parse serialized rubric")]
# Check each example
for order_num, example_dict in enumerate(examples, start=1):
......@@ -170,7 +169,9 @@ def validate_training_examples(rubric, examples):
is_format_valid, format_errors = validate_training_example_format(example_dict)
if not is_format_valid:
format_errors = [
_(u"Example {} has a validation error: {}").format(order_num, error)
_(u"Example {example_number} has a validation error: {error}").format(
example_number=order_num, error=error
)
for error in format_errors
]
errors.extend(format_errors)
......@@ -181,20 +182,33 @@ def validate_training_examples(rubric, examples):
if criterion_name in criteria_options:
valid_options = criteria_options[criterion_name]
if option_name not in valid_options:
msg = u"Example {} has an invalid option for \"{}\": \"{}\"".format(
order_num, criterion_name, option_name
msg = _(
u"Example {example_number} has an invalid option "
u"for \"{criterion_name}\": \"{option_name}\""
).format(
example_number=order_num,
criterion_name=criterion_name,
option_name=option_name
)
errors.append(msg)
else:
msg = _(u"Example {} has an extra option for \"{}\"").format(
order_num, criterion_name
msg = _(
u"Example {example_number} has an extra option "
u"for \"{criterion_name}\""
).format(
example_number=order_num,
criterion_name=criterion_name
)
errors.append(msg)
# Check for missing criteria
for missing_criterion in set(criteria_options.keys()) - set(options_selected.keys()):
msg = _(u"Example {} is missing an option for \"{}\"").format(
order_num, missing_criterion
msg = _(
u"Example {example_number} is missing an option "
u"for \"{criterion_name}\""
).format(
example_number=order_num,
criterion_name=missing_criterion
)
errors.append(msg)
......@@ -303,9 +317,9 @@ def get_training_example(submission_uuid, rubric, examples):
# Validate the training examples
errors = validate_training_examples(rubric, examples)
if len(errors) > 0:
msg = _(u"Training examples do not match the rubric: {errors}").format(
errors="\n".join(errors)
)
msg = (
u"Training examples do not match the rubric (submission UUID is {uuid}): {errors}"
).format(uuid=submission_uuid, errors="\n".join(errors))
raise StudentTrainingRequestError(msg)
# Get or create the workflow
......@@ -328,11 +342,11 @@ def get_training_example(submission_uuid, rubric, examples):
)
raise StudentTrainingRequestError(ex)
except sub_api.SubmissionNotFoundError as ex:
msg = _(u"Could not retrieve the submission with UUID {}").format(submission_uuid)
msg = u"Could not retrieve the submission with UUID {}".format(submission_uuid)
logger.exception(msg)
raise StudentTrainingRequestError(msg)
except DatabaseError:
msg = _(
msg = (
u"Could not retrieve a training example "
u"for the student with submission UUID {}"
).format(submission_uuid)
......
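Several of these hunks also fix how translated strings are built: the literal must be what gets passed to `ugettext`, and interpolation happens afterwards with named placeholders so translators can reorder them. A short sketch of the difference, using an illustrative message rather than one from the module:

```python
from django.utils.translation import ugettext as _

submission_uuid = "1234-abcd"  # illustrative value

# Broken for i18n: the string is formatted *before* ugettext sees it, so
# makemessages never extracts a stable literal and the catalog lookup misses.
bad = _(u"Error getting scores for {}".format(submission_uuid))

# Works: translate the literal, then interpolate. Named placeholders also let
# translators move {uuid} around for languages with different word order.
good = _(u"Error getting scores for submission {uuid}").format(uuid=submission_uuid)
```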
......@@ -20,7 +20,6 @@ import json
from django.core.cache import cache
from django.db import models
from django.utils.timezone import now
from django.utils.translation import ugettext as _
import math
import logging
......@@ -146,8 +145,13 @@ class Rubric(models.Model):
# Validate: are options selected for each criterion in the rubric?
if len(options_selected) != len(rubric_criteria_dict):
msg = _("Incorrect number of options for this rubric ({actual} instead of {expected})").format(
actual=len(options_selected), expected=len(rubric_criteria_dict))
msg = (
u"Incorrect number of options for this rubric "
u"({actual} instead of {expected})"
).format(
actual=len(options_selected),
expected=len(rubric_criteria_dict)
)
raise InvalidOptionSelection(msg)
# Look up each selected option
......@@ -159,9 +163,9 @@ class Rubric(models.Model):
option_id = rubric_criteria_dict[criterion_name][option_name]
option_id_set.add(option_id)
else:
msg = _("{criterion}: {option} not found in rubric").format(
criterion=criterion_name, option=option_name
)
msg = (
"{criterion}: {option} not found in rubric"
).format(criterion=criterion_name, option=option_name)
raise InvalidOptionSelection(msg)
return option_id_set
......
......@@ -12,7 +12,6 @@ from datetime import timedelta
from django.db import models, DatabaseError
from django.utils.timezone import now
from django.utils.translation import ugettext as _
from openassessment.assessment.models.base import Assessment
from openassessment.assessment.errors import PeerAssessmentWorkflowError, PeerAssessmentInternalError
......@@ -154,11 +153,10 @@ class PeerWorkflow(models.Model):
except cls.DoesNotExist:
return None
except DatabaseError:
error_message = _(
error_message = (
u"Error finding workflow for submission UUID {}. Workflow must be "
u"created for submission before beginning peer assessment."
.format(submission_uuid)
)
).format(submission_uuid)
logger.exception(error_message)
raise PeerAssessmentWorkflowError(error_message)
......@@ -196,10 +194,10 @@ class PeerWorkflow(models.Model):
item.save()
return item
except DatabaseError:
error_message = _(
error_message = (
u"An internal error occurred while creating a new peer workflow "
u"item for workflow {}".format(scorer_workflow)
)
u"item for workflow {}"
).format(scorer_workflow)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
......@@ -288,10 +286,10 @@ class PeerWorkflow(models.Model):
return peer_workflows[0].submission_uuid
except DatabaseError:
error_message = _(
error_message = (
u"An internal error occurred while retrieving a peer submission "
u"for student {}".format(self)
)
u"for student {}"
).format(self)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
......@@ -326,10 +324,10 @@ class PeerWorkflow(models.Model):
return random_workflow.submission_uuid
except DatabaseError:
error_message = _(
error_message = (
u"An internal error occurred while retrieving a peer submission "
u"for student {}".format(self)
)
u"for student {}"
).format(self)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
......@@ -366,10 +364,11 @@ class PeerWorkflow(models.Model):
item_query = self.graded.filter(submission_uuid=submission_uuid).order_by("-started_at", "-id") # pylint:disable=E1101
items = list(item_query[:1])
if not items:
raise PeerAssessmentWorkflowError(_(
msg = (
u"No open assessment was found for student {} while assessing "
u"submission UUID {}.".format(self.student_id, submission_uuid)
))
u"submission UUID {}."
).format(self.student_id, submission_uuid)
raise PeerAssessmentWorkflowError(msg)
item = items[0]
item.assessment = assessment
item.save()
......@@ -379,12 +378,11 @@ class PeerWorkflow(models.Model):
item.author.grading_completed_at = now()
item.author.save()
except (DatabaseError, PeerWorkflowItem.DoesNotExist):
error_message = _(
error_message = (
u"An internal error occurred while retrieving a workflow item for "
u"student {}. Workflow Items are created when submissions are "
u"pulled for assessment."
.format(self.student_id)
)
).format(self.student_id)
logger.exception(error_message)
raise PeerAssessmentWorkflowError(error_message)
......
......@@ -71,7 +71,6 @@ locales:
# The locales used for fake-accented English, for testing.
dummy_locales:
- eo
- fake2
# Directories we don't search for strings.
ignore_dirs:
......
......@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: 0.1a\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2014-06-05 06:31-0400\n"
"POT-Creation-Date: 2014-06-05 13:11-0400\n"
"PO-Revision-Date: 2014-06-04 15:41-0400\n"
"Last-Translator: \n"
"Language-Team: openedx-translation <openedx-translation@googlegroups.com>\n"
......@@ -35,22 +35,29 @@ msgid "Status of Your Response"
msgstr ""
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
#: apps/openassessment/xblock/static/js/src/oa_response.js:277
#: apps/openassessment/xblock/static/js/src/oa_response.js:200
msgid ""
"If you leave this page without saving or submitting your response, you'll "
"lose any work you've done on the response."
msgstr ""
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
#: apps/openassessment/xblock/static/js/src/oa_response.js:274
msgid "This response has not been saved."
msgstr ""
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
#: apps/openassessment/xblock/static/js/src/oa_response.js:295
#: apps/openassessment/xblock/static/js/src/oa_response.js:292
msgid "Saving..."
msgstr ""
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
#: apps/openassessment/xblock/static/js/src/oa_response.js:313
#: apps/openassessment/xblock/static/js/src/oa_response.js:310
msgid "This response has been saved but not submitted."
msgstr ""
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
#: apps/openassessment/xblock/static/js/src/oa_response.js:316
#: apps/openassessment/xblock/static/js/src/oa_response.js:313
msgid "Error"
msgstr ""
......
......@@ -2,19 +2,19 @@
# Copyright (C) 2014 EdX
# This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.
# EdX Team <info@edx.org>, 2014.
#
#
msgid ""
msgstr ""
"Project-Id-Version: 0.1a\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2014-06-05 06:31-0400\n"
"POT-Creation-Date: 2014-06-05 13:11-0400\n"
"PO-Revision-Date: 2014-06-04 15:41-0400\n"
"Last-Translator: \n"
"Language-Team: openedx-translation <openedx-translation@googlegroups.com>\n"
"Language: eo\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Language: en\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
......@@ -37,22 +37,32 @@ msgid "Status of Your Response"
msgstr "Stätüs öf Ýöür Réspönsé Ⱡ'σяє#"
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
#: apps/openassessment/xblock/static/js/src/oa_response.js:277
#: apps/openassessment/xblock/static/js/src/oa_response.js:200
msgid ""
"If you leave this page without saving or submitting your response, you'll "
"lose any work you've done on the response."
msgstr ""
"Ìf ýöü léävé thïs pägé wïthöüt sävïng ör süßmïttïng ýöür réspönsé, ýöü'll "
"lösé äný wörk ýöü'vé döné ön thé réspönsé. Ⱡ'σяєм ιρѕυм ∂σłσя ѕιт αмєт, "
"¢σηѕє¢т#"
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
#: apps/openassessment/xblock/static/js/src/oa_response.js:274
msgid "This response has not been saved."
msgstr "Thïs réspönsé häs nöt ßéén sävéd. Ⱡ'σяєм ι#"
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
#: apps/openassessment/xblock/static/js/src/oa_response.js:295
#: apps/openassessment/xblock/static/js/src/oa_response.js:292
msgid "Saving..."
msgstr "Sävïng... #"
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
#: apps/openassessment/xblock/static/js/src/oa_response.js:313
#: apps/openassessment/xblock/static/js/src/oa_response.js:310
msgid "This response has been saved but not submitted."
msgstr "Thïs réspönsé häs ßéén sävéd ßüt nöt süßmïttéd. Ⱡ'σяєм ιρѕυм #"
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
#: apps/openassessment/xblock/static/js/src/oa_response.js:316
#: apps/openassessment/xblock/static/js/src/oa_response.js:313
msgid "Error"
msgstr "Érrör Ⱡ'σяєм ι#"
......
# edX translation file.
# Copyright (C) 2014 EdX
# This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.
# EdX Team <info@edx.org>, 2014.
#
msgid ""
msgstr ""
"Project-Id-Version: 0.1a\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2014-06-05 06:32-0400\n"
"PO-Revision-Date: 2014-06-04 15:41-0400\n"
"Last-Translator: \n"
"Language-Team: openedx-translation <openedx-translation@googlegroups.com>\n"
"Language: fake2\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
#: apps/openassessment/xblock/static/js/src/oa_base.js:151
msgid "Unable to Load"
msgstr "Ʉnɐblǝ ʇø Łøɐd"
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
#: apps/openassessment/xblock/static/js/src/oa_edit.js:81
msgid ""
"This problem has already been released. Any changes will apply only to "
"future assessments."
msgstr ""
"Ŧɥᴉs dɹøblǝɯ ɥɐs ɐlɹǝɐdʎ bǝǝn ɹǝlǝɐsǝd. Ⱥnʎ ɔɥɐnƃǝs ʍᴉll ɐddlʎ ønlʎ ʇø "
"ɟnʇnɹǝ ɐssǝssɯǝnʇs."
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
#: apps/openassessment/xblock/static/js/src/oa_response.js:174
msgid "Status of Your Response"
msgstr "Sʇɐʇns øɟ Ɏønɹ Ɍǝsdønsǝ"
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
#: apps/openassessment/xblock/static/js/src/oa_response.js:277
msgid "This response has not been saved."
msgstr "Ŧɥᴉs ɹǝsdønsǝ ɥɐs nøʇ bǝǝn sɐʌǝd."
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
#: apps/openassessment/xblock/static/js/src/oa_response.js:295
msgid "Saving..."
msgstr "Sɐʌᴉnƃ..."
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
#: apps/openassessment/xblock/static/js/src/oa_response.js:313
msgid "This response has been saved but not submitted."
msgstr "Ŧɥᴉs ɹǝsdønsǝ ɥɐs bǝǝn sɐʌǝd bnʇ nøʇ snbɯᴉʇʇǝd."
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
#: apps/openassessment/xblock/static/js/src/oa_response.js:316
msgid "Error"
msgstr "Ɇɹɹøɹ"
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
#: apps/openassessment/xblock/static/js/src/oa_server.js:59
#: apps/openassessment/xblock/static/js/src/oa_server.js:90
#: apps/openassessment/xblock/static/js/src/oa_server.js:109
msgid "This section could not be loaded."
msgstr "Ŧɥᴉs sǝɔʇᴉøn ɔønld nøʇ bǝ løɐdǝd."
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
#: apps/openassessment/xblock/static/js/src/oa_server.js:144
msgid "This response could not be submitted."
msgstr "Ŧɥᴉs ɹǝsdønsǝ ɔønld nøʇ bǝ snbɯᴉʇʇǝd."
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
#: apps/openassessment/xblock/static/js/src/oa_server.js:170
msgid "This response could not be saved."
msgstr "Ŧɥᴉs ɹǝsdønsǝ ɔønld nøʇ bǝ sɐʌǝd."
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
#: apps/openassessment/xblock/static/js/src/oa_server.js:207
msgid "This feedback could not be submitted."
msgstr "Ŧɥᴉs ɟǝǝdbɐɔʞ ɔønld nøʇ bǝ snbɯᴉʇʇǝd."
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
#: apps/openassessment/xblock/static/js/src/oa_server.js:253
#: apps/openassessment/xblock/static/js/src/oa_server.js:293
#: apps/openassessment/xblock/static/js/src/oa_server.js:334
msgid "This assessment could not be submitted."
msgstr "Ŧɥᴉs ɐssǝssɯǝnʇ ɔønld nøʇ bǝ snbɯᴉʇʇǝd."
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
#: apps/openassessment/xblock/static/js/src/oa_server.js:362
msgid "This problem could not be loaded."
msgstr "Ŧɥᴉs dɹøblǝɯ ɔønld nøʇ bǝ løɐdǝd."
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
#: apps/openassessment/xblock/static/js/src/oa_server.js:391
msgid "This problem could not be saved."
msgstr "Ŧɥᴉs dɹøblǝɯ ɔønld nøʇ bǝ sɐʌǝd."
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
#: apps/openassessment/xblock/static/js/src/oa_server.js:421
msgid "The server could not be contacted."
msgstr "Ŧɥǝ sǝɹʌǝɹ ɔønld nøʇ bǝ ɔønʇɐɔʇǝd."
......@@ -7,11 +7,7 @@
<span class="step__label">{% trans "Your Grade" %}: </span>
<span class="grade__value">
<span class="grade__value__title">
{% blocktrans with points_earned=score.points_earned points_possible=score.points_possible%}
<span class="grade__value__earned">{{ points_earned }}</span>
out of
<span class="grade__value__potential">{{ points_possible }}</span>
{% endblocktrans %}
{% blocktrans with points_earned=score.points_earned points_possible=score.points_possible%}<span class="grade__value__earned">{{ points_earned }}</span> out of <span class="grade__value__potential">{{ points_possible }}</span>{% endblocktrans %}
</span>
</span>
</span>
......@@ -42,14 +38,12 @@
<span class="question__score">
<span class="label sr">{% trans "Overall Grade" %}</span>
{% blocktrans with score=criterion.median_score total=criterion.total_value%}
<span class="question__score__value">{{ score }}</span>
<span class="question__score__value">{{ score.points_earned }}</span>
<span class="label label--divider sr">out of</span>
<span class="question__score__potential">
{{ total }}
<span class="unit">Points</span>
{{ score.points_possible }}
<span class="unit">{% trans "Points" %}</span>
</span>
{% endblocktrans %}
</span>
</h4>
......@@ -64,9 +58,7 @@
<span class="answer__source">
<span class="answer__source__label sr">{% trans "Assessor" %}: </span>
<span class="answer__source__value">
{% blocktrans with peer_num=peer_num%}
Peer {{ peer_num }}
{% endblocktrans %}
{% blocktrans with peer_num=peer_num%}Peer {{ peer_num }}{% endblocktrans %}
</span>
</span>
<span class="answer__value">
......
......@@ -7,11 +7,9 @@
{% if waiting %}
{% trans "Your grade will be available when your peers have completed their assessments of your response." %}
{% else %}
{% blocktrans %}
Review <a data-behavior="ui-scroll" href="#openassessment__grade"> your grade and your assessment details</a>.
{% endblocktrans %}
<a data-behavior="ui-scroll" href="#openassessment__grade">{% trans "Review your grade and your assessment details." %}</a>
{% endif %}
</p>
</div>
</div>
{% endspaceless %}
\ No newline at end of file
{% endspaceless %}
......@@ -4,15 +4,11 @@
<div class="message__content">
<p>
{% if approaching %}
{% blocktrans %}
Assignment submissions will close soon. To receive a grade, first provide a response to the question, then complete the steps below the <strong>Your Response</strong> field.
{% endblocktrans %}
{% blocktrans %}Assignment submissions will close soon. To receive a grade, first provide a response to the question, then complete the steps below the <strong>Your Response</strong> field.{% endblocktrans %}
{% else %}
{% blocktrans %}
This assignment has several steps. In the first step, you'll provide a response to the question. The other steps appear below the <strong>Your Response</strong> field.
{% endblocktrans %}
{% blocktrans %}This assignment has several steps. In the first step, you'll provide a response to the question. The other steps appear below the <strong>Your Response</strong> field.{% endblocktrans %}
{% endif %}
</p>
</div>
</div>
{% endspaceless %}
\ No newline at end of file
{% endspaceless %}
......@@ -22,16 +22,12 @@
{% trans "All submitted peer responses have been assessed. Check back later to see if more students have submitted responses. " %}
{% endif %}
{% if has_self %}
{% blocktrans %}
You'll receive your grade after you complete the <a data-behavior="ui-scroll" href=#openassessment__peer-assessment">peer assessment</a> and <a data-behavior="ui-scroll" href="#openassessment__self-assessment">self assessment</a> steps, and after your peers have assessed your response.
{% endblocktrans %}
{% blocktrans %}You'll receive your grade after you complete the <a data-behavior="ui-scroll" href="#openassessment__peer-assessment">peer assessment</a> and <a data-behavior="ui-scroll" href="#openassessment__self-assessment">self assessment</a> steps, and after your peers have assessed your response.{% endblocktrans %}
{% else %}
{% blocktrans %}
You'll receive your grade after you complete the <a data-behavior="ui-scroll" href="#openassessment__peer-assessment">peer assessment</a> step.
{% endblocktrans %}
{% blocktrans %}You'll receive your grade after you complete the <a data-behavior="ui-scroll" href="#openassessment__peer-assessment">peer assessment</a> step.{% endblocktrans %}
{% endif %}
{% endif %}
</p>
</div>
</div>
{% endspaceless %}
\ No newline at end of file
{% endspaceless %}
......@@ -19,16 +19,12 @@
<strong> {% trans "Self evaluation of this assignment will close soon. " %} </strong>
{% endif %}
{% if has_peer %}
{% blocktrans %}
You'll receive your grade after the required number of your peers have assessed your response and you complete the <a data-behavior="ui-scroll" href="#openassessment__self-assessment">self assessment</a> step.
{% endblocktrans %}
{% blocktrans %}You'll receive your grade after the required number of your peers have assessed your response and you complete the <a data-behavior="ui-scroll" href="#openassessment__self-assessment">self assessment</a> step.{% endblocktrans %}
{% else %}
{% blocktrans %}
You'll receive your grade after you complete the <a data-behavior="ui-scroll" href="#openassessment__self-assessment">self assessment</a> step.
{% endblocktrans %}
{% blocktrans %}You'll receive your grade after you complete the <a data-behavior="ui-scroll" href="#openassessment__self-assessment">self assessment</a> step.{% endblocktrans %}
{% endif %}
{% endif %}
</p>
</div>
</div>
{% endspaceless %}
\ No newline at end of file
{% endspaceless %}
......@@ -11,18 +11,12 @@
<span class="wrapper--copy">
<span class="step__label">{% trans "Assess Peers" %}</span>
{% if peer_start %}
<span class="step__deadline">{% trans "available" %}
<span class="date">
{{ peer_start|utc|date:"N j, Y H:i e" }}
(in {{ peer_start|timeuntil }})
</span>
<span class="step__deadline">
{% blocktrans with start_date=peer_start|utc|date:"N j, Y H:i e" time_until=peer_start|timeuntil %}available <span class="date">{{ start_date }} (in {{ time_until }})</span>{% endblocktrans %}
</span>
{% elif peer_due %}
<span class="step__deadline">{% trans "due" %}
<span class="date">
{{ peer_due|utc|date:"N j, Y H:i e" }}
(in {{ peer_due|timeuntil }})
</span>
<span class="step__deadline">
{% blocktrans with due_date=peer_due|utc|date:"N j, Y H:i e" time_until=peer_due|timeuntil %}due <span class="date">{{ due_date }} (in {{ time_until }})</span>{% endblocktrans %}
</span>
{% endif %}
</span>
......@@ -33,9 +27,7 @@
<span class="step__status__label">{% trans "This step's status" %}:</span>
<span class="step__status__value">
<span class="copy">
{% trans "In Progress" %}
(<span class="step__status__value--completed">{{ graded }}</span> of
<span class="step__status__value--required">{{ must_grade }}</span>)
{% blocktrans with graded=graded must_grade=must_grade%}In Progress (<span class="step__status__value--completed">{{ graded }}</span> of <span class="step__status__value--required">{{ must_grade }}</span>){% endblocktrans %}
</span>
</span>
</span>
......@@ -55,10 +47,7 @@
<article class="peer-assessment" id="peer-assessment--001">
<div class="peer-assessment__display">
<header class="peer-assessment__display__header">
<h3 class="peer-assessment__display__title">{% trans "Assessment #" %}
<span class="peer-assessment__number--current">{{ review_num }}</span> of
<span class="peer-assessment__number--required">{{ must_grade }}</span>
</h3>
{% blocktrans with review_num=review_num must_grade=must_grade%}<h3 class="peer-assessment__display__title">Assessment # <span class="peer-assessment__number--current">{{ review_num }}</span> of <span class="peer-assessment__number--required">{{ must_grade }}</span></h3>{% endblocktrans %}
</header>
<div class="peer-assessment__display__response">
......
......@@ -11,9 +11,7 @@
<span class="step__status__value">
<span class="copy">
<i class="ico icon-warning-sign"></i>
{% trans "Incomplete" %}
(<span class="step__status__value--completed">{{ graded }}</span> of
<span class="step__status__value--required">{{ must_grade }}</span>)
{% blocktrans with graded=graded must_grade=must_grade %}Incomplete (<span class="step__status__value--completed">{{ graded }}</span> of <span class="step__status__value--required">{{ must_grade }}</span>){% endblocktrans %}
</span>
</span>
</span>
......
......@@ -11,9 +11,7 @@
<span class="step__status__value">
<i class="ico icon-ok"></i>
<span class="copy">
{% trans "Complete" %}
(<span class="step__status__value--completed">{{ graded }}</span> of
<span class="step__status__value--required">{{ must_grade }}</span>)
{% blocktrans with graded=graded must_grade=must_grade %}Complete (<span class="step__status__value--completed">{{ graded }}</span> of <span class="step__status__value--required">{{ must_grade }}</span>){% endblocktrans %}
</span>
</span>
</span>
......
......@@ -11,8 +11,7 @@
<span class="step__status__value">
<i class="ico icon-ok"></i>
<span class="copy">
{% trans "Complete" %}
(<span class="step__status__value--completed">{{ graded }}</span>)
{% blocktrans with graded=graded %}Complete (<span class="step__status__value--completed">{{ graded }}</span>){% endblocktrans %}
</span>
</span>
</span>
......
......@@ -11,8 +11,7 @@
<span class="step__status__value">
<i class="ico icon-ok"></i>
<span class="copy">
Complete
(<span class="step__status__value--completed">{{ graded }}</span>)
{% blocktrans with graded=graded %} Complete (<span class="step__status__value--completed">{{ graded }}</span>){% endblocktrans %}
</span>
</span>
</span>
......
......@@ -10,9 +10,7 @@
<span class="step__status__label">{% trans "This step's status" %}:</span>
<span class="step__status__value">
<span class="copy">
{% trans "In Progress" %}
(<span class="step__status__value--completed">{{ graded }}</span> of
<span class="step__status__value--required">{{ must_grade }}</span>)
{% blocktrans with graded=graded must_grade=must_grade %}In Progress (<span class="step__status__value--completed">{{ graded }}</span> of <span class="step__status__value--required">{{ must_grade }}</span>){% endblocktrans %}
</span>
</span>
</span>
......
......@@ -11,18 +11,12 @@
<span class="wrapper--copy">
<span class="step__label">{% trans "Your Response" %}</span>
{% if submission_start %}
<span class="step__deadline">available
<span class="date">
{{ submission_start|utc|date:"N j, Y H:i e" }}
(in {{ submission_start|timeuntil }})
</span>
<span class="step__deadline">
{% blocktrans with start_date=submission_start|utc|date:"N j, Y H:i e" time_until=submission_start|timeuntil %}available <span class="date">{{ start_date }} (in {{ time_until }})</span>{% endblocktrans %}
</span>
{% elif submission_due %}
<span class="step__deadline">due
<span class="date">
{{ submission_due|utc|date:"N j, Y H:i e" }}
(in {{ submission_due|timeuntil }})
</span>
<span class="step__deadline">
{% blocktrans with due_date=submission_due|utc|date:"N j, Y H:i e" time_until=submission_due|timeuntil %}due <span class="date"> {{ due_date }} (in {{ time_until }})</span>{% endblocktrans %}
</span>
{% endif %}
</span>
......@@ -75,7 +69,7 @@
<ul class="list list--actions">
<li class="list--actions__item">
<button type="submit" id="submission__save" class="action action--save submission__save is--disabled">Save Your Progress</button>
<button type="submit" id="submission__save" class="action action--save submission__save is--disabled">{% trans "Save Your Progress" %}</button>
<div id="response__save_status" class="response__submission__status">
<h3 class="response__submission__status__title">
......
......@@ -21,7 +21,7 @@
<div class="step__message message message--complete">
<h3 class="message__title">{% trans "Your Response Has Been Submitted" %}</h3>
<div class="message__content">{% trans "You'll receive your grade after some of your peers have assessed your response and you complete the <a data-behavior=\"ui-scroll\" href=\"#openassessment__peer-assessment\">peer assessment</a> and <a data-behavior=\"ui-scroll\" href=\"#openassessment__self-assessment\">self assessment</a> steps" %}.</div>
<div class="message__content">{% blocktrans %}You'll receive your grade after some of your peers have assessed your response and you complete the <a data-behavior=\"ui-scroll\" href=\"#openassessment__peer-assessment\">peer assessment</a> and <a data-behavior=\"ui-scroll\" href=\"#openassessment__self-assessment\">self assessment</a> steps{% endblocktrans %}.</div>
</div>
<div class="step__content">
......
{% extends "openassessmentblock/response/oa_response.html" %}
{% load i18n %}
{% block list_item %}
<li id="openassessment__response" class="openassessment__steps__step step--response is--empty is--unavailable is--collapsed">
......@@ -6,9 +7,9 @@
{% block title %}
<span class="step__status">
<span class="step__status__label">This step's status:</span>
<span class="step__status__label">{% trans "This step's status" %}:</span>
<span class="step__status__value">
<span class="copy">Not Available</span>
<span class="copy">{% trans "Not Available" %}</span>
</span>
</span>
{% endblock %}
......
......@@ -11,18 +11,12 @@
<span class="wrapper--copy">
<span class="step__label">{% trans "Assess Your Response" %}</span>
{% if self_start %}
<span class="step__deadline">{% trans "available" %}
<span class="date">
{{ self_start|utc|date:"N j, Y H:i e" }}
(in {{ self_start|timeuntil }})
</span>
<span class="step__deadline">
{% blocktrans with start_date=self_start|utc|date:"N j, Y H:i e" time_until=self_start|timeuntil %}available <span class="date">{{ start_date }} (in {{ time_until }})</span>{% endblocktrans %}
</span>
{% elif self_due %}
<span class="step__deadline">due
<span class="date">
{{ self_due|utc|date:"N j, Y H:i e" }}
(in {{ self_due|timeuntil }})
</span>
<span class="step__deadline">
{% blocktrans with due_date=self_due|utc|date:"N j, Y H:i e" time_until=self_due|timeuntil %}due <span class="date">{{ due_date }}</span> (in {{ time_until }}){% endblocktrans %}
</span>
{% endif %}
</span>
......
......@@ -11,17 +11,12 @@
<span class="wrapper--copy">
<span class="step__label">{% trans "Learn to Assess Responses" %}</span>
{% if training_start %}
<span class="step__deadline">{% trans "available" %}
<span class="date">
{{ training_start|utc|date:"N j, Y H:i e" }}
(in {{ training_start|timeuntil }})
<span class="step__deadline">
{% blocktrans with start_date=training_start|utc|date:"N j, Y H:i e" time_until=training_start|timeuntil %}available <span class="date"> {{ start_date }} (in {{ time_until }}) </span>{% endblocktrans %}
</span>
</span>
{% elif training_due %}
<span class="step__deadline">due
<span class="date">
{{ training_due|utc|date:"N j, Y H:i e" }}
(in {{ training_due|timeuntil }})
<span class="step__deadline">
{% blocktrans with due_date=training_due|utc|date:"N j, Y H:i e" time_until=training_due|timeuntil %}due <span class="date">{{ due_date }}</span> (in {{ time_until }}){% endblocktrans %}
</span>
</span>
{% endif %}
......@@ -64,9 +59,8 @@
<div class="step__content">
<article class="student-training__display" id="student-training">
<header class="student-training__display__header">
<h3 class="student-training__display__title">{% trans "Training Assessment #" %}
<span class="student-training__number--current">{{ training_num_current }}</span> of
<span class="student-training__number--required">{{ training_num_available }}</span>
<h3 class="student-training__display__title">
{% blocktrans with training_num_current=training_num_current training_num_available=training_num_available %}Training Assessment #<span class="student-training__number--current">{{ training_num_current }}</span> of <span class="student-training__number--required">{{ training_num_available }}</span>{% endblocktrans %}
</h3>
</header>
......
......@@ -179,17 +179,17 @@ class PeerAssessmentMixin(object):
context_dict["review_num"] = count + 1
if continue_grading:
context_dict["submit_button_text"] = (
context_dict["submit_button_text"] = _(
"Submit your assessment & review another response"
)
elif assessment["must_grade"] - count == 1:
context_dict["submit_button_text"] = (
context_dict["submit_button_text"] = _(
"Submit your assessment & move onto next step"
)
else:
context_dict["submit_button_text"] = (
"Submit your assessment & move to response #{}"
).format(count + 2)
context_dict["submit_button_text"] = _(
"Submit your assessment & move to response #{response_number}"
).format(response_number=(count + 2))
# Once a student has completed a problem, it stays complete,
# so this condition needs to be first.
......
......@@ -197,10 +197,7 @@ OpenAssessment.ResponseView.prototype = {
else {
if (enabled) {
window.onbeforeunload = function() {
return (
"If you leave this page without saving or submitting your response, " +
"you'll lose any work you've done on the response."
);
return gettext("If you leave this page without saving or submitting your response, you'll lose any work you've done on the response.");
};
}
else {
......
......@@ -159,10 +159,20 @@ class StudentTrainingMixin(object):
corrections = student_training.assess_training_example(
self.submission_uuid, data['options_selected']
)
except (student_training.StudentTrainingRequestError, student_training.StudentTrainingInternalError) as ex:
except student_training.StudentTrainingRequestError:
msg = (
u"Could not check student training scores for "
u"the student with submission UUID {uuid}"
).format(uuid=self.submission_uuid)
logger.warning(msg, exc_info=True)
return {
'success': False,
'msg': _(u"Your scores could not be checked: {error}.").format(error=ex)
'msg': _(u"Your scores could not be checked.")
}
except student_training.StudentTrainingInternalError:
return {
'success': False,
'msg': _(u"Your scores could not be checked.")
}
except:
return {
......@@ -173,9 +183,11 @@ class StudentTrainingMixin(object):
try:
self.update_workflow_status()
except workflow_api.AssessmentWorkflowError:
msg = _('Could not update workflow status.')
logger.exception(msg)
return {'success': False, 'msg': msg}
logger.exception(
u"Workflow error occurred when submitting peer assessment "
u"for submission {uuid}".format(uuid=self.submission_uuid)
)
return {'success': False, 'msg': _('Could not update workflow status.')}
return {
'success': True,
'msg': u'',
......
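The handler changes in this mixin follow the same split: the detailed, identifier-laden message goes to the log, and the learner gets a short translated message with no internals. A minimal sketch of that handler shape, with a generic callable standing in for the student-training API:

```python
import logging

from django.utils.translation import ugettext as _

logger = logging.getLogger(__name__)


def check_training_scores(submission_uuid, assess):
    """Illustrative handler: `assess` stands in for the student-training API call."""
    try:
        corrections = assess(submission_uuid)
    except Exception:  # the real handler catches the API's specific error classes
        # Internal detail (including the UUID) is logged with the traceback...
        logger.warning(
            u"Could not check student training scores for "
            u"the student with submission UUID {uuid}".format(uuid=submission_uuid),
            exc_info=True
        )
        # ...while the learner sees only a short, translated message.
        return {'success': False, 'msg': _(u"Your scores could not be checked.")}
    return {'success': True, 'msg': u'', 'corrections': corrections}
```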
#!/usr/bin/env bash
cd `dirname $BASH_SOURCE` && cd ..
python manage.py makemessages --all
python manage.py makemessages --all -d djangojs
i18n_tool dummy
python manage.py compilemessages