Commit dd3d9015 by Will Daly

Remove translation strings from assessment APIs

Updated logging in XBlock mixins
Added more verbose logging in assessment APIs
Cleaned up i18n strings in templates
parent 5f1184ae
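The convention this commit applies throughout the diffs below, sketched here for orientation (illustrative code, not from the diff; `DatabaseError` and `_` are local stand-ins for the Django imports, and `PeerAssessmentInternalError`/`save_score` are hypothetical names): internal log and exception text stays in untranslated English and carries identifying context such as the submission UUID, while only student-facing strings go through `ugettext`.

```python
import logging

logger = logging.getLogger(__name__)


class DatabaseError(Exception):
    """Local stand-in for django.db.DatabaseError."""


class PeerAssessmentInternalError(Exception):
    """Hypothetical stand-in for the API's internal error class."""


def _(text):
    """Local stand-in for django.utils.translation.ugettext."""
    return text


def save_score(submission_uuid):
    """Hypothetical API call showing the operator-facing side."""
    try:
        raise DatabaseError("simulated failure")
    except DatabaseError:
        # Untranslated, includes the UUID; logger.exception attaches
        # the active traceback to the log record.
        error_message = (
            u"Error saving score for submission {uuid}"
        ).format(uuid=submission_uuid)
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)


def student_facing_message():
    # Only text rendered to the student is translated.
    return _(u"Your scores could not be checked.")
```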
@@ -121,8 +121,8 @@ To extract strings and compile messages:

 .. code:: bash

-    python manage.py makemessages -l en
-    python manage.py makemessages -d djangojs -l en
+    python manage.py makemessages --all
+    python manage.py makemessages --all -d djangojs
     python manage.py compilemessages

 Generate dummy strings for testing:
...
@@ -6,7 +6,6 @@ the workflow for a given submission.
 """
 import logging

 from django.utils import timezone
-from django.utils.translation import ugettext as _
 from django.db import DatabaseError, IntegrityError
 from dogapi import dog_stats_api
@@ -162,18 +161,19 @@ def create_assessment(
         # and raise an error if this is not the case
         try:
             option_ids = rubric.options_ids(options_selected)
-        except InvalidOptionSelection as ex:
-            msg = _("Selected options do not match the rubric: {error}").format(error=ex)
+        except InvalidOptionSelection:
+            msg = "Selected options do not match the rubric"
+            logger.warning(msg, exc_info=True)
             raise PeerAssessmentRequestError(msg)

         scorer_workflow = PeerWorkflow.objects.get(submission_uuid=scorer_submission_uuid)
         peer_workflow_item = scorer_workflow.get_latest_open_workflow_item()
         if peer_workflow_item is None:
-            message = _(
+            message = (
                 u"There are no open assessments associated with the scorer's "
-                u"submission UUID {}.".format(scorer_submission_uuid)
-            )
+                u"submission UUID {}."
+            ).format(scorer_submission_uuid)
             logger.warning(message)
             raise PeerAssessmentWorkflowError(message)
@@ -213,17 +213,16 @@ def create_assessment(
         return assessment_dict
     except DatabaseError:
-        error_message = _(
+        error_message = (
             u"An error occurred while creating assessment {} by: {}"
-            .format(assessment_dict, scorer_id)
-        )
+        ).format(assessment_dict, scorer_id)
         logger.exception(error_message)
         raise PeerAssessmentInternalError(error_message)
     except PeerWorkflow.DoesNotExist:
-        message = _(
+        message = (
             u"There is no Peer Workflow associated with the given "
-            u"submission UUID {}.".format(scorer_submission_uuid)
-        )
+            u"submission UUID {}."
+        ).format(scorer_submission_uuid)
         logger.error(message)
         raise PeerAssessmentWorkflowError(message)
@@ -259,10 +258,9 @@ def get_rubric_max_scores(submission_uuid):
             for criterion in rubric_dict["criteria"]
         }
     except DatabaseError:
-        error_message = _(
-            u"Error getting rubric options max scores for submission uuid "
-            u"[{}]".format(submission_uuid)
-        )
+        error_message = (
+            u"Error getting rubric options max scores for submission uuid {uuid}"
+        ).format(uuid=submission_uuid)
         logger.exception(error_message)
         raise PeerAssessmentInternalError(error_message)
@@ -298,7 +296,9 @@ def get_assessment_median_scores(submission_uuid):
         scores = Assessment.scores_by_criterion(assessments)
         return Assessment.get_median_score_dict(scores)
     except DatabaseError:
-        error_message = _(u"Error getting assessment median scores {}".format(submission_uuid))
+        error_message = (
+            u"Error getting assessment median scores for submission {uuid}"
+        ).format(uuid=submission_uuid)
         logger.exception(error_message)
         raise PeerAssessmentInternalError(error_message)
@@ -401,9 +401,9 @@ def get_assessments(submission_uuid, scored_only=True, limit=None):
         )[:limit]
         return serialize_assessments(assessments)
     except DatabaseError:
-        error_message = _(
-            u"Error getting assessments for submission {}".format(submission_uuid)
-        )
+        error_message = (
+            u"Error getting assessments for submission {uuid}"
+        ).format(uuid=submission_uuid)
         logger.exception(error_message)
         raise PeerAssessmentInternalError(error_message)
@@ -471,10 +471,10 @@ def get_submitted_assessments(submission_uuid, scored_only=True, limit=None):
             pk__in=[item.assessment.pk for item in items])[:limit]
         return serialize_assessments(assessments)
     except DatabaseError:
-        error_message = _(
-            u"Couldn't retrieve the assessments that the author of response {}"
-            u" completed".format(submission_uuid)
-        )
+        error_message = (
+            u"Couldn't retrieve the assessments completed by the "
+            u"student with submission {uuid}"
+        ).format(uuid=submission_uuid)
         logger.exception(error_message)
         raise PeerAssessmentInternalError(error_message)
@@ -544,11 +544,10 @@ def get_submission_to_assess(submission_uuid, graded_by):
            _log_workflow(peer_submission_uuid, workflow)
            return submission_data
        except sub_api.SubmissionNotFoundError:
-           error_message = _(
+           error_message = (
                u"Could not find a submission with the uuid {} for student {} "
                u"in the peer workflow."
-               .format(peer_submission_uuid, workflow.student_id)
-           )
+           ).format(peer_submission_uuid, workflow.student_id)
            logger.exception(error_message)
            raise PeerAssessmentWorkflowError(error_message)
    else:
@@ -598,11 +597,10 @@ def create_peer_workflow(submission_uuid):
        # created a workflow for this submission, so we don't need to do anything.
        pass
    except DatabaseError:
-       error_message = _(
+       error_message = (
            u"An internal error occurred while creating a new peer "
            u"workflow for submission {}"
-           .format(submission_uuid)
-       )
+       ).format(submission_uuid)
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
...
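Several handlers above trade exception interpolation (`.format(error=ex)`) for `logger.warning(msg, exc_info=True)`. A minimal standalone sketch of that stdlib pattern (the exception class and `options_ids` are local stand-ins for the rubric code):

```python
import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)


class InvalidOptionSelection(Exception):
    """Local stand-in for the rubric validation error."""


def options_ids(options_selected):
    if not options_selected:
        raise InvalidOptionSelection("no options selected")


try:
    options_ids({})
except InvalidOptionSelection:
    # exc_info=True appends the full traceback to the log record, so the
    # message itself can stay short, stable, and grep-friendly.
    logger.warning("Selected options do not match the rubric", exc_info=True)
```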
@@ -2,7 +2,6 @@
 Public interface for self-assessment.
 """
 import logging

-from django.utils.translation import ugettext as _
 from django.db import DatabaseError
 from dogapi import dog_stats_api
@@ -46,25 +45,45 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s
     """
     # Check that there are not any assessments for this submission
     if Assessment.objects.filter(submission_uuid=submission_uuid, score_type=SELF_TYPE).exists():
-        raise SelfAssessmentRequestError(_("You've already completed your self assessment for this response."))
+        msg = (
+            u"Cannot submit a self-assessment for the submission {uuid} "
+            u"because another self-assessment already exists for that submission."
+        ).format(uuid=submission_uuid)
+        raise SelfAssessmentRequestError(msg)

     # Check that the student is allowed to assess this submission
     try:
         submission = get_submission_and_student(submission_uuid)
         if submission['student_item']['student_id'] != user_id:
-            raise SelfAssessmentRequestError(_("You can only complete a self assessment on your own response."))
+            msg = (
+                u"Cannot submit a self-assessment for the submission {uuid} "
+                u"because it was created by another student "
+                u"(submission student ID {student_id} does not match your "
+                u"student id {other_id})"
+            ).format(
+                uuid=submission_uuid,
+                student_id=submission['student_item']['student_id'],
+                other_id=user_id
+            )
+            raise SelfAssessmentRequestError(msg)
     except SubmissionNotFoundError:
-        raise SelfAssessmentRequestError(_("Could not retrieve the response."))
+        msg = (
+            u"Could not submit a self-assessment because no submission "
+            u"exists with UUID {uuid}"
+        ).format(uuid=submission_uuid)
+        raise SelfAssessmentRequestError(msg)

     # Get or create the rubric
     try:
         rubric = rubric_from_dict(rubric_dict)
         option_ids = rubric.options_ids(options_selected)
-    except InvalidRubric as ex:
-        msg = _("Invalid rubric definition: {errors}").format(errors=ex.errors)
+    except InvalidRubric:
+        msg = "Invalid rubric definition"
+        logger.warning(msg, exc_info=True)
         raise SelfAssessmentRequestError(msg)
     except InvalidOptionSelection:
-        msg = _("Selected options do not match the rubric")
+        msg = "Selected options do not match the rubric"
+        logger.warning(msg, exc_info=True)
         raise SelfAssessmentRequestError(msg)
@@ -84,7 +103,7 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s
     # Serialize the assessment
     serializer = AssessmentSerializer(data=self_assessment)
     if not serializer.is_valid():
-        msg = _("Could not create self assessment: {errors}").format(errors=serializer.errors)
+        msg = "Could not create self assessment: {errors}".format(errors=serializer.errors)
         raise SelfAssessmentRequestError(msg)

     assessment = serializer.save()
@@ -229,7 +248,9 @@ def get_assessment_scores_by_criteria(submission_uuid):
         scores = Assessment.scores_by_criterion(assessments)
         return Assessment.get_median_score_dict(scores)
     except DatabaseError:
-        error_message = _(u"Error getting self assessment scores for {}").format(submission_uuid)
+        error_message = (
+            u"Error getting self assessment scores for submission {}"
+        ).format(submission_uuid)
         logger.exception(error_message)
         raise SelfAssessmentInternalError(error_message)
...
@@ -7,8 +7,8 @@ Public interface for student training:
 """
 import logging

-from django.db import DatabaseError
 from django.utils.translation import ugettext as _
+from django.db import DatabaseError

 from submissions import api as sub_api
 from openassessment.assessment.models import StudentTrainingWorkflow
 from openassessment.assessment.serializers import (
@@ -158,10 +158,9 @@ def validate_training_examples(rubric, examples):
             ]
             for criterion in rubric['criteria']
         }
-    except (ValueError, KeyError) as ex:
-        msg = _(u"Could not parse serialized rubric")
-        logger.warning("{}: {}".format(msg, ex))
-        return [msg]
+    except (ValueError, KeyError):
+        logger.warning("Could not parse serialized rubric", exc_info=True)
+        return [_(u"Could not parse serialized rubric")]

     # Check each example
     for order_num, example_dict in enumerate(examples, start=1):
@@ -170,7 +169,9 @@ def validate_training_examples(rubric, examples):
         is_format_valid, format_errors = validate_training_example_format(example_dict)
         if not is_format_valid:
             format_errors = [
-                _(u"Example {} has a validation error: {}").format(order_num, error)
+                _(u"Example {example_number} has a validation error: {error}").format(
+                    example_number=order_num, error=error
+                )
                 for error in format_errors
             ]
             errors.extend(format_errors)
@@ -181,20 +182,33 @@ def validate_training_examples(rubric, examples):
            if criterion_name in criteria_options:
                valid_options = criteria_options[criterion_name]
                if option_name not in valid_options:
-                   msg = u"Example {} has an invalid option for \"{}\": \"{}\"".format(
-                       order_num, criterion_name, option_name
+                   msg = _(
+                       u"Example {example_number} has an invalid option "
+                       u"for \"{criterion_name}\": \"{option_name}\""
+                   ).format(
+                       example_number=order_num,
+                       criterion_name=criterion_name,
+                       option_name=option_name
                    )
                    errors.append(msg)
            else:
-               msg = _(u"Example {} has an extra option for \"{}\"").format(
-                   order_num, criterion_name
+               msg = _(
+                   u"Example {example_number} has an extra option "
+                   u"for \"{criterion_name}\""
+               ).format(
+                   example_number=order_num,
+                   criterion_name=criterion_name
                )
               errors.append(msg)

        # Check for missing criteria
        for missing_criterion in set(criteria_options.keys()) - set(options_selected.keys()):
-           msg = _(u"Example {} is missing an option for \"{}\"").format(
-               order_num, missing_criterion
+           msg = _(
+               u"Example {example_number} is missing an option "
+               u"for \"{criterion_name}\""
+           ).format(
+               example_number=order_num,
+               criterion_name=missing_criterion
            )
            errors.append(msg)
@@ -303,9 +317,9 @@ def get_training_example(submission_uuid, rubric, examples):
        # Validate the training examples
        errors = validate_training_examples(rubric, examples)
        if len(errors) > 0:
-           msg = _(u"Training examples do not match the rubric: {errors}").format(
-               errors="\n".join(errors)
-           )
+           msg = (
+               u"Training examples do not match the rubric (submission UUID is {uuid}): {errors}"
+           ).format(uuid=submission_uuid, errors="\n".join(errors))
            raise StudentTrainingRequestError(msg)

        # Get or create the workflow
@@ -328,11 +342,11 @@ def get_training_example(submission_uuid, rubric, examples):
        )
        raise StudentTrainingRequestError(ex)
    except sub_api.SubmissionNotFoundError as ex:
-       msg = _(u"Could not retrieve the submission with UUID {}").format(submission_uuid)
+       msg = u"Could not retrieve the submission with UUID {}".format(submission_uuid)
        logger.exception(msg)
        raise StudentTrainingRequestError(msg)
    except DatabaseError:
-       msg = _(
+       msg = (
            u"Could not retrieve a training example "
            u"for the student with submission UUID {}"
        ).format(submission_uuid)
...
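The training-example messages above also move from positional `{}` to named placeholders. Beyond readability, named fields let a translated template reorder its arguments; a small standalone sketch (the sample values and the reordered template are made up):

```python
# Named placeholders document each value at the call site and can be
# reordered freely in a translated version of the template.
template = (
    u"Example {example_number} has an invalid option "
    u"for \"{criterion_name}\": \"{option_name}\""
)
print(template.format(
    example_number=3,
    criterion_name=u"Clarity",
    option_name=u"Exemplary",
))

# A hypothetical translation might lead with the criterion instead; the
# same keyword arguments still work, which positional {} would not allow.
reordered = (
    u"For \"{criterion_name}\", option \"{option_name}\" "
    u"is invalid (example {example_number})"
)
print(reordered.format(
    example_number=3,
    criterion_name=u"Clarity",
    option_name=u"Exemplary",
))
```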
@@ -20,7 +20,6 @@ import json
 from django.core.cache import cache
 from django.db import models
 from django.utils.timezone import now
-from django.utils.translation import ugettext as _
 import math
 import logging
@@ -146,8 +145,13 @@ class Rubric(models.Model):
        # Validate: are options selected for each criterion in the rubric?
        if len(options_selected) != len(rubric_criteria_dict):
-           msg = _("Incorrect number of options for this rubric ({actual} instead of {expected})").format(
-               actual=len(options_selected), expected=len(rubric_criteria_dict))
+           msg = (
+               u"Incorrect number of options for this rubric "
+               u"({actual} instead of {expected})"
+           ).format(
+               actual=len(options_selected),
+               expected=len(rubric_criteria_dict)
+           )
            raise InvalidOptionSelection(msg)

        # Look up each selected option
@@ -159,9 +163,9 @@
                option_id = rubric_criteria_dict[criterion_name][option_name]
                option_id_set.add(option_id)
            else:
-               msg = _("{criterion}: {option} not found in rubric").format(
-                   criterion=criterion_name, option=option_name
-               )
+               msg = (
+                   "{criterion}: {option} not found in rubric"
+               ).format(criterion=criterion_name, option=option_name)
                raise InvalidOptionSelection(msg)

        return option_id_set
...
@@ -12,7 +12,6 @@ from datetime import timedelta
 from django.db import models, DatabaseError
 from django.utils.timezone import now
-from django.utils.translation import ugettext as _

 from openassessment.assessment.models.base import Assessment
 from openassessment.assessment.errors import PeerAssessmentWorkflowError, PeerAssessmentInternalError
@@ -154,11 +153,10 @@ class PeerWorkflow(models.Model):
        except cls.DoesNotExist:
            return None
        except DatabaseError:
-           error_message = _(
+           error_message = (
                u"Error finding workflow for submission UUID {}. Workflow must be "
                u"created for submission before beginning peer assessment."
-               .format(submission_uuid)
-           )
+           ).format(submission_uuid)
            logger.exception(error_message)
            raise PeerAssessmentWorkflowError(error_message)
@@ -196,10 +194,10 @@
            item.save()
            return item
        except DatabaseError:
-           error_message = _(
+           error_message = (
                u"An internal error occurred while creating a new peer workflow "
-               u"item for workflow {}".format(scorer_workflow)
-           )
+               u"item for workflow {}"
+           ).format(scorer_workflow)
            logger.exception(error_message)
            raise PeerAssessmentInternalError(error_message)
@@ -288,10 +286,10 @@
            return peer_workflows[0].submission_uuid
        except DatabaseError:
-           error_message = _(
+           error_message = (
                u"An internal error occurred while retrieving a peer submission "
-               u"for student {}".format(self)
-           )
+               u"for student {}"
+           ).format(self)
            logger.exception(error_message)
            raise PeerAssessmentInternalError(error_message)
@@ -326,10 +324,10 @@
            return random_workflow.submission_uuid
        except DatabaseError:
-           error_message = _(
+           error_message = (
                u"An internal error occurred while retrieving a peer submission "
-               u"for student {}".format(self)
-           )
+               u"for student {}"
+           ).format(self)
            logger.exception(error_message)
            raise PeerAssessmentInternalError(error_message)
@@ -366,10 +364,11 @@
        item_query = self.graded.filter(submission_uuid=submission_uuid).order_by("-started_at", "-id")  # pylint:disable=E1101
        items = list(item_query[:1])
        if not items:
-           raise PeerAssessmentWorkflowError(_(
+           msg = (
                u"No open assessment was found for student {} while assessing "
-               u"submission UUID {}.".format(self.student_id, submission_uuid)
-           ))
+               u"submission UUID {}."
+           ).format(self.student_id, submission_uuid)
+           raise PeerAssessmentWorkflowError(msg)
        item = items[0]
        item.assessment = assessment
        item.save()
@@ -379,12 +378,11 @@
            item.author.grading_completed_at = now()
            item.author.save()
        except (DatabaseError, PeerWorkflowItem.DoesNotExist):
-           error_message = _(
+           error_message = (
                u"An internal error occurred while retrieving a workflow item for "
                u"student {}. Workflow Items are created when submissions are "
                u"pulled for assessment."
-               .format(self.student_id)
-           )
+           ).format(self.student_id)
            logger.exception(error_message)
            raise PeerAssessmentWorkflowError(error_message)
...
@@ -2,7 +2,7 @@
 # Copyright (C) 2014 EdX
 # This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.
 # EdX Team <info@edx.org>, 2014.
 #
 msgid ""
 msgstr ""
 "Project-Id-Version: 0.1a\n"
@@ -11,10 +11,10 @@ msgstr ""
 "PO-Revision-Date: 2014-06-04 15:41-0400\n"
 "Last-Translator: \n"
 "Language-Team: openedx-translation <openedx-translation@googlegroups.com>\n"
+"Language: eo\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
-"Language: en\n"
 "Plural-Forms: nplurals=2; plural=(n != 1);\n"

 #: apps/openassessment/xblock/static/js/openassessment.min.js:1
...
@@ -2,19 +2,19 @@
 # Copyright (C) 2014 EdX
 # This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.
 # EdX Team <info@edx.org>, 2014.
 #
 msgid ""
 msgstr ""
 "Project-Id-Version: 0.1a\n"
 "Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2014-06-05 06:32-0400\n"
+"POT-Creation-Date: 2014-06-05 06:31-0400\n"
 "PO-Revision-Date: 2014-06-04 15:41-0400\n"
 "Last-Translator: \n"
 "Language-Team: openedx-translation <openedx-translation@googlegroups.com>\n"
+"Language: fake2\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
-"Language: en\n"
 "Plural-Forms: nplurals=2; plural=(n != 1);\n"

 #: apps/openassessment/xblock/static/js/openassessment.min.js:1
...
@@ -7,11 +7,7 @@
        <span class="step__label">{% trans "Your Grade" %}: </span>
        <span class="grade__value">
            <span class="grade__value__title">
-               {% blocktrans with points_earned=score.points_earned points_possible=score.points_possible%}
-               <span class="grade__value__earned">{{ points_earned }}</span>
-               out of
-               <span class="grade__value__potential">{{ points_possible }}</span>
-               {% endblocktrans %}
+               {% blocktrans with points_earned=score.points_earned points_possible=score.points_possible%}<span class="grade__value__earned">{{ points_earned }}</span> out of <span class="grade__value__potential">{{ points_possible }}</span>{% endblocktrans %}
            </span>
        </span>
    </span>
@@ -42,14 +38,12 @@
            <span class="question__score">
                <span class="label sr">{% trans "Overall Grade" %}</span>
-               {% blocktrans with score=criterion.median_score total=criterion.total_value%}
-               <span class="question__score__value">{{ score }}</span>
+               <span class="question__score__value">{{ score.points_earned }}</span>
                <span class="label label--divider sr">out of</span>
                <span class="question__score__potential">
-                   {{ total }}
-                   <span class="unit">Points</span>
+                   {{ score.points_possible }}
+                   <span class="unit">{% trans "Points" %}</span>
                </span>
-               {% endblocktrans %}
            </span>
        </h4>
@@ -64,9 +58,7 @@
        <span class="answer__source">
            <span class="answer__source__label sr">{% trans "Assessor" %}: </span>
            <span class="answer__source__value">
-               {% blocktrans with peer_num=peer_num%}
-               Peer {{ peer_num }}
-               {% endblocktrans %}
+               {% blocktrans with peer_num=peer_num%}Peer {{ peer_num }}{% endblocktrans %}
            </span>
        </span>
        <span class="answer__value">
...
@@ -7,11 +7,9 @@
            {% if waiting %}
                {% trans "Your grade will be available when your peers have completed their assessments of your response." %}
            {% else %}
-               {% blocktrans %}
-               Review <a data-behavior="ui-scroll" href="#openassessment__grade"> your grade and your assessment details</a>.
-               {% endblocktrans %}
+               <a data-behavior="ui-scroll" href="#openassessment__grade">{% trans "Review your grade and your assessment details." %}</a>
            {% endif %}
            </p>
        </div>
    </div>
{% endspaceless %}
\ No newline at end of file
@@ -4,15 +4,11 @@
    <div class="message__content">
        <p>
        {% if approaching %}
-           {% blocktrans %}
-           Assignment submissions will close soon. To receive a grade, first provide a response to the question, then complete the steps below the <strong>Your Response</strong> field.
-           {% endblocktrans %}
+           {% blocktrans %}Assignment submissions will close soon. To receive a grade, first provide a response to the question, then complete the steps below the <strong>Your Response</strong> field.{% endblocktrans %}
        {% else %}
-           {% blocktrans %}
-           This assignment has several steps. In the first step, you'll provide a response to the question. The other steps appear below the <strong>Your Response</strong> field.
-           {% endblocktrans %}
+           {% blocktrans %}This assignment has several steps. In the first step, you'll provide a response to the question. The other steps appear below the <strong>Your Response</strong> field.{% endblocktrans %}
        {% endif %}
        </p>
    </div>
</div>
{% endspaceless %}
\ No newline at end of file
@@ -22,16 +22,12 @@
            {% trans "All submitted peer responses have been assessed. Check back later to see if more students have submitted responses. " %}
        {% endif %}
        {% if has_self %}
-           {% blocktrans %}
-           You'll receive your grade after you complete the <a data-behavior="ui-scroll" href=#openassessment__peer-assessment">peer assessment</a> and <a data-behavior="ui-scroll" href="#openassessment__self-assessment">self assessment</a> steps, and after your peers have assessed your response.
-           {% endblocktrans %}
+           {% blocktrans %}You'll receive your grade after you complete the <a data-behavior="ui-scroll" href="#openassessment__peer-assessment">peer assessment</a> and <a data-behavior="ui-scroll" href="#openassessment__self-assessment">self assessment</a> steps, and after your peers have assessed your response.{% endblocktrans %}
        {% else %}
-           {% blocktrans %}
-           You'll receive your grade after you complete the <a data-behavior="ui-scroll" href="#openassessment__peer-assessment">peer assessment</a> step.
-           {% endblocktrans %}
+           {% blocktrans %}You'll receive your grade after you complete the <a data-behavior="ui-scroll" href="#openassessment__peer-assessment">peer assessment</a> step.{% endblocktrans %}
        {% endif %}
    {% endif %}
        </p>
    </div>
</div>
{% endspaceless %}
\ No newline at end of file
@@ -19,16 +19,12 @@
            <strong> {% trans "Self evaluation of this assignment will close soon. " %} </strong>
        {% endif %}
        {% if has_peer %}
-           {% blocktrans %}
-           You'll receive your grade after the required number of your peers have assessed your response and you complete the <a data-behavior="ui-scroll" href="#openassessment__self-assessment">self assessment</a> step.
-           {% endblocktrans %}
+           {% blocktrans %}You'll receive your grade after the required number of your peers have assessed your response and you complete the <a data-behavior="ui-scroll" href="#openassessment__self-assessment">self assessment</a> step.{% endblocktrans %}
        {% else %}
-           {% blocktrans %}
-           You'll receive your grade after you complete the <a data-behavior="ui-scroll" href="#openassessment__self-assessment">self assessment</a> step.
-           {% endblocktrans %}
+           {% blocktrans %}You'll receive your grade after you complete the <a data-behavior="ui-scroll" href="#openassessment__self-assessment">self assessment</a> step.{% endblocktrans %}
        {% endif %}
    {% endif %}
        </p>
    </div>
</div>
{% endspaceless %}
\ No newline at end of file
@@ -159,10 +159,20 @@ class StudentTrainingMixin(object):
            corrections = student_training.assess_training_example(
                self.submission_uuid, data['options_selected']
            )
-       except (student_training.StudentTrainingRequestError, student_training.StudentTrainingInternalError) as ex:
+       except student_training.StudentTrainingRequestError:
+           msg = (
+               u"Could not check student training scores for "
+               u"the student with submission UUID {uuid}"
+           ).format(uuid=self.submission_uuid)
+           logger.warning(msg, exc_info=True)
+           return {
+               'success': False,
+               'msg': _(u"Your scores could not be checked.")
+           }
+       except student_training.StudentTrainingInternalError:
            return {
                'success': False,
-               'msg': _(u"Your scores could not be checked: {error}.").format(error=ex)
+               'msg': _(u"Your scores could not be checked.")
            }
        except:
            return {
@@ -173,9 +183,11 @@
        try:
            self.update_workflow_status()
        except workflow_api.AssessmentWorkflowError:
-           msg = _('Could not update workflow status.')
-           logger.exception(msg)
-           return {'success': False, 'msg': msg}
+           logger.exception(
+               u"Workflow error occurred when submitting peer assessment "
+               u"for submission {uuid}".format(uuid=self.submission_uuid)
+           )
+           return {'success': False, 'msg': _('Could not update workflow status.')}

        return {
            'success': True,
            'msg': u'',
...
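The mixin change above splits one combined handler into two, so an expected request error gets a logged warning with traceback while the student always sees the same generic, translated message. A standalone sketch of that shape (the error classes and `check_training_scores` are stand-ins; the real code wraps the user-facing string in `ugettext`):

```python
import logging

logger = logging.getLogger(__name__)


class StudentTrainingRequestError(Exception):
    """Stand-in for the request-error class in the assessment API."""


class StudentTrainingInternalError(Exception):
    """Stand-in for the internal-error class."""


def check_training_scores(submission_uuid, assess):
    """Hypothetical handler mirroring the split except blocks above."""
    try:
        return {'success': True, 'msg': u'', 'corrections': assess()}
    except StudentTrainingRequestError:
        # Expected failure: log a warning with traceback and identifying
        # context, but return only a generic message for display.
        msg = (
            u"Could not check student training scores for "
            u"the student with submission UUID {uuid}"
        ).format(uuid=submission_uuid)
        logger.warning(msg, exc_info=True)
        return {'success': False, 'msg': u"Your scores could not be checked."}
    except StudentTrainingInternalError:
        # Unexpected failure: the student-facing message stays identical,
        # giving away no internal detail.
        return {'success': False, 'msg': u"Your scores could not be checked."}


def _failing_assess():
    raise StudentTrainingRequestError("example failure")


print(check_training_scores("0000-1111", _failing_assess))
```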