Commit dd3d9015 by Will Daly

Remove translation strings from assessment APIs

Updated logging in XBlock mixins.
Added more verbose logging in assessment APIs.
Cleaned up i18n strings in templates.
parent 5f1184ae
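The diff applies one convention throughout: messages bound for logs or exceptions stay in plain English (with `exc_info=True` carrying the traceback), while `ugettext` is reserved for strings a student actually sees. A minimal sketch of that pattern, assuming a configured Django environment; the names (`do_assessment`, `AssessmentRequestError`, `validate`) are illustrative stand-ins, not from ora2:

```python
import logging

from django.utils.translation import ugettext as _

logger = logging.getLogger(__name__)


class AssessmentRequestError(Exception):
    """Hypothetical stand-in for the assessment API's request errors."""


def validate(submission_uuid):
    """Hypothetical validation step; fails the way options_ids() can."""
    raise ValueError("selected options do not match the rubric")


def do_assessment(submission_uuid):
    """API layer: developer-facing messages, never wrapped in _()."""
    try:
        validate(submission_uuid)
    except ValueError:
        # Keep the message plain and let exc_info attach the traceback,
        # instead of formatting str(ex) into a translated string.
        msg = u"Selected options do not match the rubric (submission {uuid})".format(
            uuid=submission_uuid
        )
        logger.warning(msg, exc_info=True)
        raise AssessmentRequestError(msg)


def handle_assess(submission_uuid):
    """XBlock handler: only the user-visible message is translated."""
    try:
        do_assessment(submission_uuid)
    except AssessmentRequestError:
        return {'success': False, 'msg': _(u"Your scores could not be checked.")}
    return {'success': True, 'msg': u''}
```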
......@@ -121,8 +121,8 @@ To extract strings and compile messages:
.. code:: bash
python manage.py makemessages -l en
python manage.py makemessages -d djangojs -l en
python manage.py makemessages --all
python manage.py makemessages --all -d djangojs
python manage.py compilemessages
Generate dummy strings for testing:
......
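The dummy-strings command itself is truncated in this excerpt. In edX projects it normally comes from the separate i18n-tools package (an assumption here, since the command is not shown in this hunk), which writes fake catalogs like the `eo` and `fake2` files edited further down in this diff:

```bash
# Hypothetical completion of the truncated step above: edX's i18n-tools
# "dummy" command generates the fake test catalogs, then compile them.
i18n_tool dummy
python manage.py compilemessages
```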
......@@ -6,7 +6,6 @@ the workflow for a given submission.
"""
import logging
from django.utils import timezone
from django.utils.translation import ugettext as _
from django.db import DatabaseError, IntegrityError
from dogapi import dog_stats_api
......@@ -162,18 +161,19 @@ def create_assessment(
# and raise an error if this is not the case
try:
option_ids = rubric.options_ids(options_selected)
except InvalidOptionSelection as ex:
msg = _("Selected options do not match the rubric: {error}").format(error=ex)
except InvalidOptionSelection:
msg = "Selected options do not match the rubric"
logger.warning(msg, exc_info=True)
raise PeerAssessmentRequestError(msg)
scorer_workflow = PeerWorkflow.objects.get(submission_uuid=scorer_submission_uuid)
peer_workflow_item = scorer_workflow.get_latest_open_workflow_item()
if peer_workflow_item is None:
message = _(
message = (
u"There are no open assessments associated with the scorer's "
u"submission UUID {}.".format(scorer_submission_uuid)
)
u"submission UUID {}."
).format(scorer_submission_uuid)
logger.warning(message)
raise PeerAssessmentWorkflowError(message)
......@@ -213,17 +213,16 @@ def create_assessment(
return assessment_dict
except DatabaseError:
error_message = _(
error_message = (
u"An error occurred while creating assessment {} by: {}"
.format(assessment_dict, scorer_id)
)
).format(assessment_dict, scorer_id)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
except PeerWorkflow.DoesNotExist:
message = _(
message = (
u"There is no Peer Workflow associated with the given "
u"submission UUID {}.".format(scorer_submission_uuid)
)
u"submission UUID {}."
).format(scorer_submission_uuid)
logger.error(message)
raise PeerAssessmentWorkflowError(message)
......@@ -259,10 +258,9 @@ def get_rubric_max_scores(submission_uuid):
for criterion in rubric_dict["criteria"]
}
except DatabaseError:
error_message = _(
u"Error getting rubric options max scores for submission uuid "
u"[{}]".format(submission_uuid)
)
error_message = (
u"Error getting rubric options max scores for submission uuid {uuid}"
).format(uuid=submission_uuid)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
......@@ -298,7 +296,9 @@ def get_assessment_median_scores(submission_uuid):
scores = Assessment.scores_by_criterion(assessments)
return Assessment.get_median_score_dict(scores)
except DatabaseError:
error_message = _(u"Error getting assessment median scores {}".format(submission_uuid))
error_message = (
u"Error getting assessment median scores for submission {uuid}"
).format(uuid=submission_uuid)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
......@@ -401,9 +401,9 @@ def get_assessments(submission_uuid, scored_only=True, limit=None):
)[:limit]
return serialize_assessments(assessments)
except DatabaseError:
error_message = _(
u"Error getting assessments for submission {}".format(submission_uuid)
)
error_message = (
u"Error getting assessments for submission {uuid}"
).format(uuid=submission_uuid)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
......@@ -471,10 +471,10 @@ def get_submitted_assessments(submission_uuid, scored_only=True, limit=None):
pk__in=[item.assessment.pk for item in items])[:limit]
return serialize_assessments(assessments)
except DatabaseError:
error_message = _(
u"Couldn't retrieve the assessments that the author of response {}"
u" completed".format(submission_uuid)
)
error_message = (
u"Couldn't retrieve the assessments completed by the "
" student with submission {uuid}"
).format(uuid=submission_uuid)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
......@@ -544,11 +544,10 @@ def get_submission_to_assess(submission_uuid, graded_by):
_log_workflow(peer_submission_uuid, workflow)
return submission_data
except sub_api.SubmissionNotFoundError:
error_message = _(
error_message = (
u"Could not find a submission with the uuid {} for student {} "
u"in the peer workflow."
.format(peer_submission_uuid, workflow.student_id)
)
).format(peer_submission_uuid, workflow.student_id)
logger.exception(error_message)
raise PeerAssessmentWorkflowError(error_message)
else:
......@@ -598,11 +597,10 @@ def create_peer_workflow(submission_uuid):
# created a workflow for this submission, so we don't need to do anything.
pass
except DatabaseError:
error_message = _(
error_message = (
u"An internal error occurred while creating a new peer "
u"workflow for submission {}"
.format(submission_uuid)
)
).format(submission_uuid)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
......
......@@ -2,7 +2,6 @@
Public interface for self-assessment.
"""
import logging
from django.utils.translation import ugettext as _
from django.db import DatabaseError
from dogapi import dog_stats_api
......@@ -46,25 +45,45 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s
"""
# Check that there are not any assessments for this submission
if Assessment.objects.filter(submission_uuid=submission_uuid, score_type=SELF_TYPE).exists():
raise SelfAssessmentRequestError(_("You've already completed your self assessment for this response."))
msg = (
u"Cannot submit a self-assessment for the submission {uuid} "
"because another self-assessment already exists for that submission."
).format(uuid=submission_uuid)
raise SelfAssessmentRequestError(msg)
# Check that the student is allowed to assess this submission
try:
submission = get_submission_and_student(submission_uuid)
if submission['student_item']['student_id'] != user_id:
raise SelfAssessmentRequestError(_("You can only complete a self assessment on your own response."))
msg = (
u"Cannot submit a self-assessment for the submission {uuid} "
u"because it was created by another student "
u"(submission student ID {student_id} does not match your "
u"student id {other_id})"
).format(
uuid=submission_uuid,
student_id=submission['student_item']['student_id'],
other_id=user_id
)
raise SelfAssessmentRequestError(msg)
except SubmissionNotFoundError:
raise SelfAssessmentRequestError(_("Could not retrieve the response."))
msg = (
u"Could not submit a self-assessment because no submission "
u"exists with UUID {uuid}"
).format(uuid=submission_uuid)
raise SelfAssessmentRequestError(msg)
# Get or create the rubric
try:
rubric = rubric_from_dict(rubric_dict)
option_ids = rubric.options_ids(options_selected)
except InvalidRubric as ex:
msg = _("Invalid rubric definition: {errors}").format(errors=ex.errors)
except InvalidRubric:
msg = "Invalid rubric definition"
logger.warning(msg, exc_info=True)
raise SelfAssessmentRequestError(msg)
except InvalidOptionSelection:
msg = _("Selected options do not match the rubric")
msg = "Selected options do not match the rubric"
logger.warning(msg, exc_info=True)
raise SelfAssessmentRequestError(msg)
# Create the assessment
......@@ -84,7 +103,7 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s
# Serialize the assessment
serializer = AssessmentSerializer(data=self_assessment)
if not serializer.is_valid():
msg = _("Could not create self assessment: {errors}").format(errors=serializer.errors)
msg = "Could not create self assessment: {errors}".format(errors=serializer.errors)
raise SelfAssessmentRequestError(msg)
assessment = serializer.save()
......@@ -229,7 +248,9 @@ def get_assessment_scores_by_criteria(submission_uuid):
scores = Assessment.scores_by_criterion(assessments)
return Assessment.get_median_score_dict(scores)
except DatabaseError:
error_message = _(u"Error getting self assessment scores for {}").format(submission_uuid)
error_message = (
u"Error getting self assessment scores for submission {}"
).format(submission_uuid)
logger.exception(error_message)
raise SelfAssessmentInternalError(error_message)
......
......@@ -7,8 +7,8 @@ Public interface for student training:
"""
import logging
from django.db import DatabaseError
from django.utils.translation import ugettext as _
from submissions import api as sub_api
from openassessment.assessment.models import StudentTrainingWorkflow
from openassessment.assessment.serializers import (
......@@ -158,10 +158,9 @@ def validate_training_examples(rubric, examples):
]
for criterion in rubric['criteria']
}
except (ValueError, KeyError) as ex:
msg = _(u"Could not parse serialized rubric")
logger.warning("{}: {}".format(msg, ex))
return [msg]
except (ValueError, KeyError):
logger.warning("Could not parse serialized rubric", exc_info=True)
return [_(u"Could not parse serialized rubric")]
# Check each example
for order_num, example_dict in enumerate(examples, start=1):
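The parse-failure handler above is the clearest instance of the logging change: rather than interpolating the exception into the message, the message stays constant and `exc_info=True` attaches the traceback. A standalone sketch of the difference, using only the stdlib logging module:

```python
import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)

try:
    {"criteria": []}["prompt"]  # fails like a malformed serialized rubric
except KeyError as ex:
    # Old pattern: only str(ex) survives, and the traceback is lost.
    logger.warning("Could not parse serialized rubric: %s", ex)
    # New pattern: constant message, full traceback attached by logging.
    logger.warning("Could not parse serialized rubric", exc_info=True)
```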
......@@ -170,7 +169,9 @@ def validate_training_examples(rubric, examples):
is_format_valid, format_errors = validate_training_example_format(example_dict)
if not is_format_valid:
format_errors = [
_(u"Example {} has a validation error: {}").format(order_num, error)
_(u"Example {example_number} has a validation error: {error}").format(
example_number=order_num, error=error
)
for error in format_errors
]
errors.extend(format_errors)
......@@ -181,20 +182,33 @@ def validate_training_examples(rubric, examples):
if criterion_name in criteria_options:
valid_options = criteria_options[criterion_name]
if option_name not in valid_options:
msg = u"Example {} has an invalid option for \"{}\": \"{}\"".format(
order_num, criterion_name, option_name
msg = _(
u"Example {example_number} has an invalid option "
u"for \"{criterion_name}\": \"{option_name}\""
).format(
example_number=order_num,
criterion_name=criterion_name,
option_name=option_name
)
errors.append(msg)
else:
msg = _(u"Example {} has an extra option for \"{}\"").format(
order_num, criterion_name
msg = _(
u"Example {example_number} has an extra option "
u"for \"{criterion_name}\""
).format(
example_number=order_num,
criterion_name=criterion_name
)
errors.append(msg)
# Check for missing criteria
for missing_criterion in set(criteria_options.keys()) - set(options_selected.keys()):
msg = _(u"Example {} is missing an option for \"{}\"").format(
order_num, missing_criterion
msg = _(
u"Example {example_number} is missing an option "
u"for \"{criterion_name}\""
).format(
example_number=order_num,
criterion_name=missing_criterion
)
errors.append(msg)
......@@ -303,9 +317,9 @@ def get_training_example(submission_uuid, rubric, examples):
# Validate the training examples
errors = validate_training_examples(rubric, examples)
if len(errors) > 0:
msg = _(u"Training examples do not match the rubric: {errors}").format(
errors="\n".join(errors)
)
msg = (
u"Training examples do not match the rubric (submission UUID is {uuid}): {errors}"
).format(uuid=submission_uuid, errors="\n".join(errors))
raise StudentTrainingRequestError(msg)
# Get or create the workflow
......@@ -328,11 +342,11 @@ def get_training_example(submission_uuid, rubric, examples):
)
raise StudentTrainingRequestError(ex)
except sub_api.SubmissionNotFoundError as ex:
msg = _(u"Could not retrieve the submission with UUID {}").format(submission_uuid)
msg = u"Could not retrieve the submission with UUID {}".format(submission_uuid)
logger.exception(msg)
raise StudentTrainingRequestError(msg)
except DatabaseError:
msg = _(
msg = (
u"Could not retrieve a training example "
u"for the student with submission UUID {}"
).format(submission_uuid)
......
......@@ -20,7 +20,6 @@ import json
from django.core.cache import cache
from django.db import models
from django.utils.timezone import now
from django.utils.translation import ugettext as _
import math
import logging
......@@ -146,8 +145,13 @@ class Rubric(models.Model):
# Validate: are options selected for each criterion in the rubric?
if len(options_selected) != len(rubric_criteria_dict):
msg = _("Incorrect number of options for this rubric ({actual} instead of {expected})").format(
actual=len(options_selected), expected=len(rubric_criteria_dict))
msg = (
u"Incorrect number of options for this rubric "
u"({actual} instead of {expected})"
).format(
actual=len(options_selected),
expected=len(rubric_criteria_dict)
)
raise InvalidOptionSelection(msg)
# Look up each selected option
......@@ -159,9 +163,9 @@ class Rubric(models.Model):
option_id = rubric_criteria_dict[criterion_name][option_name]
option_id_set.add(option_id)
else:
msg = _("{criterion}: {option} not found in rubric").format(
criterion=criterion_name, option=option_name
)
msg = (
u"{criterion}: {option} not found in rubric"
).format(criterion=criterion_name, option=option_name)
raise InvalidOptionSelection(msg)
return option_id_set
......
......@@ -12,7 +12,6 @@ from datetime import timedelta
from django.db import models, DatabaseError
from django.utils.timezone import now
from django.utils.translation import ugettext as _
from openassessment.assessment.models.base import Assessment
from openassessment.assessment.errors import PeerAssessmentWorkflowError, PeerAssessmentInternalError
......@@ -154,11 +153,10 @@ class PeerWorkflow(models.Model):
except cls.DoesNotExist:
return None
except DatabaseError:
error_message = _(
error_message = (
u"Error finding workflow for submission UUID {}. Workflow must be "
u"created for submission before beginning peer assessment."
.format(submission_uuid)
)
).format(submission_uuid)
logger.exception(error_message)
raise PeerAssessmentWorkflowError(error_message)
......@@ -196,10 +194,10 @@ class PeerWorkflow(models.Model):
item.save()
return item
except DatabaseError:
error_message = _(
error_message = (
u"An internal error occurred while creating a new peer workflow "
u"item for workflow {}".format(scorer_workflow)
)
u"item for workflow {}"
).format(scorer_workflow)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
......@@ -288,10 +286,10 @@ class PeerWorkflow(models.Model):
return peer_workflows[0].submission_uuid
except DatabaseError:
error_message = _(
error_message = (
u"An internal error occurred while retrieving a peer submission "
u"for student {}".format(self)
)
u"for student {}"
).format(self)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
......@@ -326,10 +324,10 @@ class PeerWorkflow(models.Model):
return random_workflow.submission_uuid
except DatabaseError:
error_message = _(
error_message = (
u"An internal error occurred while retrieving a peer submission "
u"for student {}".format(self)
)
u"for student {}"
).format(self)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
......@@ -366,10 +364,11 @@ class PeerWorkflow(models.Model):
item_query = self.graded.filter(submission_uuid=submission_uuid).order_by("-started_at", "-id") # pylint:disable=E1101
items = list(item_query[:1])
if not items:
raise PeerAssessmentWorkflowError(_(
msg = (
u"No open assessment was found for student {} while assessing "
u"submission UUID {}.".format(self.student_id, submission_uuid)
))
u"submission UUID {}."
).format(self.student_id, submission_uuid)
raise PeerAssessmentWorkflowError(msg)
item = items[0]
item.assessment = assessment
item.save()
......@@ -379,12 +378,11 @@ class PeerWorkflow(models.Model):
item.author.grading_completed_at = now()
item.author.save()
except (DatabaseError, PeerWorkflowItem.DoesNotExist):
error_message = _(
error_message = (
u"An internal error occurred while retrieving a workflow item for "
u"student {}. Workflow Items are created when submissions are "
u"pulled for assessment."
.format(self.student_id)
)
).format(self.student_id)
logger.exception(error_message)
raise PeerAssessmentWorkflowError(error_message)
......
......@@ -2,7 +2,7 @@
# Copyright (C) 2014 EdX
# This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.
# EdX Team <info@edx.org>, 2014.
#
#
msgid ""
msgstr ""
"Project-Id-Version: 0.1a\n"
......@@ -11,10 +11,10 @@ msgstr ""
"PO-Revision-Date: 2014-06-04 15:41-0400\n"
"Last-Translator: \n"
"Language-Team: openedx-translation <openedx-translation@googlegroups.com>\n"
"Language: eo\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Language: en\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
......
......@@ -2,19 +2,19 @@
# Copyright (C) 2014 EdX
# This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.
# EdX Team <info@edx.org>, 2014.
#
#
msgid ""
msgstr ""
"Project-Id-Version: 0.1a\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2014-06-05 06:32-0400\n"
"POT-Creation-Date: 2014-06-05 06:31-0400\n"
"PO-Revision-Date: 2014-06-04 15:41-0400\n"
"Last-Translator: \n"
"Language-Team: openedx-translation <openedx-translation@googlegroups.com>\n"
"Language: fake2\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Language: en\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
#: apps/openassessment/xblock/static/js/openassessment.min.js:1
......
......@@ -7,11 +7,7 @@
<span class="step__label">{% trans "Your Grade" %}: </span>
<span class="grade__value">
<span class="grade__value__title">
{% blocktrans with points_earned=score.points_earned points_possible=score.points_possible%}
<span class="grade__value__earned">{{ points_earned }}</span>
out of
<span class="grade__value__potential">{{ points_possible }}</span>
{% endblocktrans %}
{% blocktrans with points_earned=score.points_earned points_possible=score.points_possible%}<span class="grade__value__earned">{{ points_earned }}</span> out of <span class="grade__value__potential">{{ points_possible }}</span>{% endblocktrans %}
</span>
</span>
</span>
......@@ -42,14 +38,12 @@
<span class="question__score">
<span class="label sr">{% trans "Overall Grade" %}</span>
{% blocktrans with score=criterion.median_score total=criterion.total_value%}
<span class="question__score__value">{{ score }}</span>
<span class="question__score__value">{{ score.points_earned }}</span>
<span class="label label--divider sr">out of</span>
<span class="question__score__potential">
{{ total }}
<span class="unit">Points</span>
{{ score.points_possible }}
<span class="unit">{% trans "Points" %}</span>
</span>
{% endblocktrans %}
</span>
</h4>
......@@ -64,9 +58,7 @@
<span class="answer__source">
<span class="answer__source__label sr">{% trans "Assessor" %}: </span>
<span class="answer__source__value">
{% blocktrans with peer_num=peer_num%}
Peer {{ peer_num }}
{% endblocktrans %}
{% blocktrans with peer_num=peer_num%}Peer {{ peer_num }}{% endblocktrans %}
</span>
</span>
<span class="answer__value">
......
......@@ -7,11 +7,9 @@
{% if waiting %}
{% trans "Your grade will be available when your peers have completed their assessments of your response." %}
{% else %}
{% blocktrans %}
Review <a data-behavior="ui-scroll" href="#openassessment__grade"> your grade and your assessment details</a>.
{% endblocktrans %}
<a data-behavior="ui-scroll" href="#openassessment__grade">{% trans "Review your grade and your assessment details." %}</a>
{% endif %}
</p>
</div>
</div>
{% endspaceless %}
\ No newline at end of file
{% endspaceless %}
......@@ -4,15 +4,11 @@
<div class="message__content">
<p>
{% if approaching %}
{% blocktrans %}
Assignment submissions will close soon. To receive a grade, first provide a response to the question, then complete the steps below the <strong>Your Response</strong> field.
{% endblocktrans %}
{% blocktrans %}Assignment submissions will close soon. To receive a grade, first provide a response to the question, then complete the steps below the <strong>Your Response</strong> field.{% endblocktrans %}
{% else %}
{% blocktrans %}
This assignment has several steps. In the first step, you'll provide a response to the question. The other steps appear below the <strong>Your Response</strong> field.
{% endblocktrans %}
{% blocktrans %}This assignment has several steps. In the first step, you'll provide a response to the question. The other steps appear below the <strong>Your Response</strong> field.{% endblocktrans %}
{% endif %}
</p>
</div>
</div>
{% endspaceless %}
\ No newline at end of file
{% endspaceless %}
......@@ -22,16 +22,12 @@
{% trans "All submitted peer responses have been assessed. Check back later to see if more students have submitted responses. " %}
{% endif %}
{% if has_self %}
{% blocktrans %}
You'll receive your grade after you complete the <a data-behavior="ui-scroll" href=#openassessment__peer-assessment">peer assessment</a> and <a data-behavior="ui-scroll" href="#openassessment__self-assessment">self assessment</a> steps, and after your peers have assessed your response.
{% endblocktrans %}
{% blocktrans %}You'll receive your grade after you complete the <a data-behavior="ui-scroll" href="#openassessment__peer-assessment">peer assessment</a> and <a data-behavior="ui-scroll" href="#openassessment__self-assessment">self assessment</a> steps, and after your peers have assessed your response.{% endblocktrans %}
{% else %}
{% blocktrans %}
You'll receive your grade after you complete the <a data-behavior="ui-scroll" href="#openassessment__peer-assessment">peer assessment</a> step.
{% endblocktrans %}
{% blocktrans %}You'll receive your grade after you complete the <a data-behavior="ui-scroll" href="#openassessment__peer-assessment">peer assessment</a> step.{% endblocktrans %}
{% endif %}
{% endif %}
</p>
</div>
</div>
{% endspaceless %}
\ No newline at end of file
{% endspaceless %}
......@@ -19,16 +19,12 @@
<strong> {% trans "Self evaluation of this assignment will close soon. " %} </strong>
{% endif %}
{% if has_peer %}
{% blocktrans %}
You'll receive your grade after the required number of your peers have assessed your response and you complete the <a data-behavior="ui-scroll" href="#openassessment__self-assessment">self assessment</a> step.
{% endblocktrans %}
{% blocktrans %}You'll receive your grade after the required number of your peers have assessed your response and you complete the <a data-behavior="ui-scroll" href="#openassessment__self-assessment">self assessment</a> step.{% endblocktrans %}
{% else %}
{% blocktrans %}
You'll receive your grade after you complete the <a data-behavior="ui-scroll" href="#openassessment__self-assessment">self assessment</a> step.
{% endblocktrans %}
{% blocktrans %}You'll receive your grade after you complete the <a data-behavior="ui-scroll" href="#openassessment__self-assessment">self assessment</a> step.{% endblocktrans %}
{% endif %}
{% endif %}
</p>
</div>
</div>
{% endspaceless %}
\ No newline at end of file
{% endspaceless %}
......@@ -159,10 +159,20 @@ class StudentTrainingMixin(object):
corrections = student_training.assess_training_example(
self.submission_uuid, data['options_selected']
)
except (student_training.StudentTrainingRequestError, student_training.StudentTrainingInternalError) as ex:
except student_training.StudentTrainingRequestError:
msg = (
u"Could not check student training scores for "
u"the student with submission UUID {uuid}"
).format(uuid=self.submission_uuid)
logger.warning(msg, exc_info=True)
return {
'success': False,
'msg': _(u"Your scores could not be checked: {error}.").format(error=ex)
'msg': _(u"Your scores could not be checked.")
}
except student_training.StudentTrainingInternalError:
msg = (
u"An internal error occurred while checking student training scores "
u"for the student with submission UUID {uuid}"
).format(uuid=self.submission_uuid)
logger.exception(msg)
return {
'success': False,
'msg': _(u"Your scores could not be checked.")
}
except:
return {
......@@ -173,9 +183,11 @@ class StudentTrainingMixin(object):
try:
self.update_workflow_status()
except workflow_api.AssessmentWorkflowError:
msg = _('Could not update workflow status.')
logger.exception(msg)
return {'success': False, 'msg': msg}
logger.exception(
u"Workflow error occurred when submitting peer assessment "
u"for submission {uuid}".format(uuid=self.submission_uuid)
)
return {'success': False, 'msg': _('Could not update workflow status.')}
return {
'success': True,
'msg': u'',
......