Commit 195dc62e by gradyward

Merge branch 'authoring' of https://github.com/edx/edx-ora2 into grady/assessment-reorder

parents 9d60aef7 0c026056
......@@ -31,6 +31,8 @@ pip-log.txt
nosetests.xml
htmlcov
coverage.xml
test_ora2db
test_ora2db-journal
# Mr Developer
.mr.developer.cfg
......
......@@ -8,5 +8,11 @@ if __name__ == "__main__":
if os.environ.get('DJANGO_SETTINGS_MODULE') is None:
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings.dev'
# When using an on-disk database for the test suite,
# Django asks us if we want to delete the database.
# We do.
if 'test' in sys.argv[0:3]:
sys.argv.append('--noinput')
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
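# Illustrative sketch (not part of this commit): with the change above,
# `python manage.py test` effectively runs with --noinput appended, so the
# on-disk test database is dropped without an interactive prompt.
argv = ['manage.py', 'test']          # hypothetical command line
if 'test' in argv[0:3]:
    argv.append('--noinput')
assert argv == ['manage.py', 'test', '--noinput']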
......@@ -91,8 +91,8 @@ class AssessmentAdmin(admin.ModelAdmin):
u"{}/{} - {}: {} - {}".format(
part.points_earned,
part.points_possible,
part.option.criterion.name,
part.option.name,
part.criterion.name,
part.option.name if part.option else "None",
part.feedback,
)
)
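# Illustrative sketch (hypothetical values, not from this commit): the admin
# label rendered above now tolerates parts whose criterion has no option.
part_earned, part_possible = 0, 0
criterion_name, option, feedback = u"feedback only", None, u"Nice work"
label = u"{}/{} - {}: {} - {}".format(
    part_earned, part_possible, criterion_name,
    option if option else "None", feedback,
)
assert label == u"0/0 - feedback only: None - Nice work"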
......
......@@ -14,7 +14,7 @@ from openassessment.assessment.errors import (
)
from openassessment.assessment.models import (
Assessment, AITrainingWorkflow, AIGradingWorkflow,
InvalidOptionSelection, NoTrainingExamples,
InvalidRubricSelection, NoTrainingExamples,
AI_ASSESSMENT_TYPE, AIClassifierSet
)
from openassessment.assessment.worker import training as training_tasks
......@@ -268,7 +268,7 @@ def train_classifiers(rubric_dict, examples, course_id, item_id, algorithm_id):
# Get or create the rubric and training examples
try:
examples = deserialize_training_examples(examples, rubric_dict)
except (InvalidRubric, InvalidTrainingExample, InvalidOptionSelection) as ex:
except (InvalidRubric, InvalidTrainingExample, InvalidRubricSelection) as ex:
msg = u"Could not parse rubric and/or training examples: {ex}".format(ex=ex)
raise AITrainingRequestError(msg)
......
......@@ -8,7 +8,8 @@ from dogapi import dog_stats_api
from openassessment.assessment.models import (
AITrainingWorkflow, AIGradingWorkflow,
ClassifierUploadError, ClassifierSerializeError,
IncompleteClassifierSet, NoTrainingExamples
IncompleteClassifierSet, NoTrainingExamples,
InvalidRubricSelection
)
from openassessment.assessment.errors import (
AITrainingRequestError, AITrainingInternalError,
......@@ -274,7 +275,7 @@ def create_classifiers(training_workflow_uuid, classifier_set):
except NoTrainingExamples as ex:
logger.exception(ex)
raise AITrainingInternalError(ex)
except IncompleteClassifierSet as ex:
except (IncompleteClassifierSet, InvalidRubricSelection) as ex:
msg = (
u"An error occurred while creating the classifier set "
u"for the training workflow with UUID {uuid}: {ex}"
......
......@@ -11,11 +11,12 @@ from dogapi import dog_stats_api
from openassessment.assessment.models import (
Assessment, AssessmentFeedback, AssessmentPart,
InvalidOptionSelection, PeerWorkflow, PeerWorkflowItem,
InvalidRubricSelection, PeerWorkflow, PeerWorkflowItem,
)
from openassessment.assessment.serializers import (
AssessmentSerializer, AssessmentFeedbackSerializer, RubricSerializer,
AssessmentFeedbackSerializer, RubricSerializer,
full_assessment_dict, rubric_from_dict, serialize_assessments,
InvalidRubric
)
from openassessment.assessment.errors import (
PeerAssessmentRequestError, PeerAssessmentWorkflowError, PeerAssessmentInternalError
......@@ -81,7 +82,16 @@ def assessment_is_finished(submission_uuid, requirements):
"""
if requirements is None:
return False
return bool(get_score(submission_uuid, requirements))
workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
if workflow is None:
return False
scored_items = workflow.graded_by.filter(
assessment__submission_uuid=submission_uuid,
assessment__score_type=PEER_TYPE
)
return scored_items.count() >= requirements["must_be_graded_by"]
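# Illustrative sketch (hypothetical values, not from this commit): the
# `requirements` dict consumed above carries the peer-grading thresholds.
requirements = {"must_grade": 5, "must_be_graded_by": 3}
# A submission with only two scored peer assessments is not yet finished:
assert not (2 >= requirements["must_be_graded_by"])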
def on_start(submission_uuid):
......@@ -151,12 +161,12 @@ def get_score(submission_uuid, requirements):
if workflow is None:
return None
# This query will use the ordering defined by the assessment model
# (descending scored_at, then descending id)
# Retrieve the assessments in ascending order by score date,
# because we want to use the *first* one(s) for the score.
items = workflow.graded_by.filter(
assessment__submission_uuid=submission_uuid,
assessment__score_type=PEER_TYPE
).order_by('assessment')
).order_by('-assessment')
submission_finished = items.count() >= requirements["must_be_graded_by"]
if not submission_finished:
......@@ -190,7 +200,8 @@ def create_assessment(
overall_feedback,
rubric_dict,
num_required_grades,
scored_at=None):
scored_at=None
):
"""Creates an assessment on the given submission.
Assessments are created based on feedback associated with a particular
......@@ -235,24 +246,9 @@ def create_assessment(
>>> feedback = "Your submission was thrilling."
>>> create_assessment("1", "Tim", options_selected, criterion_feedback, feedback, rubric_dict)
"""
# Ensure that this variable is declared so that if an error occurs
# we don't get a second error when trying to log it!
assessment_dict = None
try:
rubric = rubric_from_dict(rubric_dict)
# Validate that the selected options matched the rubric
# and raise an error if this is not the case
try:
option_ids = rubric.options_ids(options_selected)
except InvalidOptionSelection:
msg = "Selected options do not match the rubric"
logger.warning(msg, exc_info=True)
raise PeerAssessmentRequestError(msg)
# Retrieve workflow information
scorer_workflow = PeerWorkflow.objects.get(submission_uuid=scorer_submission_uuid)
peer_workflow_item = scorer_workflow.get_latest_open_workflow_item()
if peer_workflow_item is None:
message = (
......@@ -261,55 +257,50 @@ def create_assessment(
).format(scorer_submission_uuid)
logger.warning(message)
raise PeerAssessmentWorkflowError(message)
peer_submission_uuid = peer_workflow_item.author.submission_uuid
peer_assessment = {
"rubric": rubric.id,
"scorer_id": scorer_id,
"submission_uuid": peer_submission_uuid,
"score_type": PEER_TYPE,
"feedback": overall_feedback[0:Assessment.MAXSIZE],
}
if scored_at is not None:
peer_assessment["scored_at"] = scored_at
peer_serializer = AssessmentSerializer(data=peer_assessment)
if not peer_serializer.is_valid():
msg = (
u"An error occurred while serializing "
u"the peer assessment associated with "
u"the scorer's submission UUID {}."
).format(scorer_submission_uuid)
raise PeerAssessmentRequestError(msg)
# Get or create the rubric
rubric = rubric_from_dict(rubric_dict)
assessment = peer_serializer.save()
# Create the peer assessment
assessment = Assessment.create(
rubric,
scorer_id,
peer_submission_uuid,
PEER_TYPE,
scored_at=scored_at,
feedback=overall_feedback
)
# We do this to work around django-rest-framework serializer
# validation, which would otherwise require two DB queries per
# option. We already validated these options above.
AssessmentPart.add_to_assessment(assessment, option_ids, criterion_feedback=criterion_feedback)
# Create assessment parts for each criterion in the rubric
# This will raise an `InvalidRubricSelection` if the selected options do not match the rubric.
AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)
# Close the active assessment
scorer_workflow.close_active_assessment(peer_submission_uuid, assessment, num_required_grades)
assessment_dict = full_assessment_dict(assessment)
_log_assessment(assessment, scorer_workflow)
return assessment_dict
except DatabaseError:
error_message = (
u"An error occurred while creating assessment {} by: {}"
).format(assessment_dict, scorer_id)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
return full_assessment_dict(assessment)
except PeerWorkflow.DoesNotExist:
message = (
u"There is no Peer Workflow associated with the given "
u"submission UUID {}."
).format(scorer_submission_uuid)
logger.error(message)
logger.exception(message)
raise PeerAssessmentWorkflowError(message)
except InvalidRubric:
msg = u"Rubric definition was not valid"
logger.exception(msg)
raise PeerAssessmentRequestError(msg)
except InvalidRubricSelection:
msg = u"Invalid options selected in the rubric"
logger.warning(msg, exc_info=True)
raise PeerAssessmentRequestError(msg)
except DatabaseError:
error_message = (
u"An error occurred while retrieving the peer workflow item by scorer with ID: {}"
).format(scorer_id)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
def get_rubric_max_scores(submission_uuid):
......
......@@ -7,11 +7,10 @@ from dogapi import dog_stats_api
from submissions.api import get_submission_and_student, SubmissionNotFoundError
from openassessment.assessment.serializers import (
AssessmentSerializer, InvalidRubric,
full_assessment_dict, rubric_from_dict, serialize_assessments
InvalidRubric, full_assessment_dict, rubric_from_dict, serialize_assessments
)
from openassessment.assessment.models import (
Assessment, AssessmentPart, InvalidOptionSelection
Assessment, AssessmentPart, InvalidRubricSelection
)
from openassessment.assessment.errors import (
SelfAssessmentRequestError, SelfAssessmentInternalError
......@@ -139,50 +138,25 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s
).format(uuid=submission_uuid)
raise SelfAssessmentRequestError()
# Get or create the rubric
try:
# Get or create the rubric
rubric = rubric_from_dict(rubric_dict)
option_ids = rubric.options_ids(options_selected)
# Create the self assessment
assessment = Assessment.create(rubric, user_id, submission_uuid, SELF_TYPE, scored_at=scored_at)
AssessmentPart.create_from_option_names(assessment, options_selected)
_log_assessment(assessment, submission)
except InvalidRubric:
msg = "Invalid rubric definition"
logger.warning(msg, exc_info=True)
raise SelfAssessmentRequestError(msg)
except InvalidOptionSelection:
except InvalidRubricSelection:
msg = "Selected options do not match the rubric"
logger.warning(msg, exc_info=True)
raise SelfAssessmentRequestError(msg)
# Create the assessment
# Since we have already retrieved the submission, we can assume that
# the user who created the submission exists.
self_assessment = {
"rubric": rubric.id,
"scorer_id": user_id,
"submission_uuid": submission_uuid,
"score_type": SELF_TYPE,
"feedback": u"",
}
if scored_at is not None:
self_assessment['scored_at'] = scored_at
# Serialize the assessment
serializer = AssessmentSerializer(data=self_assessment)
if not serializer.is_valid():
msg = "Could not create self assessment: {errors}".format(errors=serializer.errors)
raise SelfAssessmentRequestError(msg)
assessment = serializer.save()
# We do this to work around django-rest-framework serializer
# validation, which would otherwise require two DB queries per
# option. We already validated these options above.
AssessmentPart.add_to_assessment(assessment, option_ids)
assessment_dict = full_assessment_dict(assessment)
_log_assessment(assessment, submission)
# Return the serialized assessment
return assessment_dict
return full_assessment_dict(assessment)
def get_assessment(submission_uuid):
......
......@@ -10,7 +10,7 @@ import logging
from django.utils.translation import ugettext as _
from django.db import DatabaseError
from submissions import api as sub_api
from openassessment.assessment.models import StudentTrainingWorkflow
from openassessment.assessment.models import StudentTrainingWorkflow, InvalidRubricSelection
from openassessment.assessment.serializers import (
deserialize_training_examples, serialize_training_example,
validate_training_example_format,
......@@ -179,6 +179,21 @@ def validate_training_examples(rubric, examples):
logger.warning("Could not parse serialized rubric", exc_info=True)
return [_(u"Could not parse serialized rubric")]
# Check that at least one criterion in the rubric has options
# If this is not the case (that is, if all rubric criteria are written feedback only),
# then it doesn't make sense to do student training.
criteria_without_options = [
criterion_name
for criterion_name, criterion_option_list in criteria_options.iteritems()
if len(criterion_option_list) == 0
]
if len(set(criteria_options) - set(criteria_without_options)) == 0:
return [_(
u"When you include a student training assessment, "
u"the rubric for the assessment must contain at least one criterion, "
u"and each criterion must contain at least two options."
)]
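# Illustrative sketch (hypothetical rubric, not from this commit): the check
# above is plain set arithmetic over criterion names.
criteria_options = {u"vocabulary": [u"poor", u"good"], u"feedback only": []}
criteria_without_options = [
    name for name, option_list in criteria_options.items()
    if len(option_list) == 0
]
# One criterion still has options, so training remains possible:
assert len(set(criteria_options) - set(criteria_without_options)) == 1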
# Check each example
for order_num, example_dict in enumerate(examples, start=1):
......@@ -219,7 +234,9 @@ def validate_training_examples(rubric, examples):
errors.append(msg)
# Check for missing criteria
for missing_criterion in set(criteria_options.keys()) - set(options_selected.keys()):
# Ignore feedback-only criteria (criteria with no options)
all_example_criteria = set(options_selected.keys() + criteria_without_options)
for missing_criterion in set(criteria_options.keys()) - all_example_criteria:
msg = _(
u"Example {example_number} is missing an option "
u"for \"{criterion_name}\""
......@@ -353,7 +370,7 @@ def get_training_example(submission_uuid, rubric, examples):
# If the student already started a training example, then return that instead.
next_example = workflow.next_training_example(examples)
return None if next_example is None else serialize_training_example(next_example)
except (InvalidRubric, InvalidTrainingExample) as ex:
except (InvalidRubric, InvalidRubricSelection, InvalidTrainingExample) as ex:
logger.exception(
"Could not deserialize training examples for submission UUID {}".format(submission_uuid)
)
......
# -*- coding: utf-8 -*-
import datetime
from contextlib import contextmanager
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
@contextmanager
def lock_table(self, table_name):
""" Context manager for locking a table (MySQL only) """
# Lock tables only under MySQL (table locking isn't supported by SQLite)
is_mysql = (db.backend_name == 'mysql')
# Before the block executes, lock the specified table
if is_mysql:
db.execute("LOCK TABLE {table} WRITE".format(table=table_name))
# Execute the block
yield
# Add a deferred statement to unlock tables
# This will ensure that tables stay locked until
# all deferred SQL executes
# (for example, creating foreign key constraints and adding indices)
if is_mysql:
db.add_deferred_sql("UNLOCK TABLES")
def forwards(self, orm):
# Adding field 'AssessmentPart.criterion'
# We need to lock the table to avoid a potential deadlock with the application queries.
# We need to provide a default value of NULL so that the INSERT queries don't
# raise an exception when they don't specify the new field.
with self.lock_table('assessment_assessmentpart'):
db.add_column('assessment_assessmentpart', 'criterion',
self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', null=True, to=orm['assessment.Criterion']),
self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', null=True, default=None, to=orm['assessment.Criterion']),
keep_default=False)
def backwards(self, orm):
# Deleting field 'AssessmentPart.criterion'
with self.lock_table('assessment_assessmentpart'):
db.delete_column('assessment_assessmentpart', 'criterion_id')
......
......@@ -12,7 +12,6 @@ from django.utils.timezone import now
from django_extensions.db.fields import UUIDField
from dogapi import dog_stats_api
from submissions import api as sub_api
from openassessment.assessment.serializers import rubric_from_dict
from .base import Rubric, Criterion, Assessment, AssessmentPart
from .training import TrainingExample
......@@ -45,16 +44,14 @@ class IncompleteClassifierSet(Exception):
"""
The classifier set is missing a classifier for a criterion in the rubric.
"""
def __init__(self, expected_criteria, actual_criteria):
def __init__(self, missing_criteria):
"""
Construct an error message that explains which criteria were missing.
Args:
expected_criteria (iterable of unicode): The criteria in the rubric.
actual_criteria (iterable of unicode): The criteria specified by the classifier set.
missing_criteria (list): The list of criteria names that were missing.
"""
missing_criteria = set(expected_criteria) - set(actual_criteria)
msg = (
u"Missing classifiers for the following "
u"criteria: {missing}"
......@@ -136,6 +133,7 @@ class AIClassifierSet(models.Model):
Raises:
ClassifierSerializeError
ClassifierUploadError
InvalidRubricSelection
DatabaseError
"""
......@@ -146,12 +144,8 @@ class AIClassifierSet(models.Model):
# Retrieve the criteria for this rubric,
# then organize them by criterion name
try:
criteria = {
criterion.name: criterion
for criterion in Criterion.objects.filter(rubric=rubric)
}
rubric_index = rubric.index
except DatabaseError as ex:
msg = (
u"An unexpected error occurred while retrieving rubric criteria with the"
......@@ -161,15 +155,22 @@ class AIClassifierSet(models.Model):
raise
# Check that we have classifiers for all criteria in the rubric
if set(criteria.keys()) != set(classifiers_dict.keys()):
raise IncompleteClassifierSet(criteria.keys(), classifiers_dict.keys())
# Ignore criteria that have no options: since these have only written feedback,
# we can't assign them a score.
all_criteria = set(classifiers_dict.keys())
all_criteria |= set(
criterion.name for criterion in
rubric_index.find_criteria_without_options()
)
missing_criteria = rubric_index.find_missing_criteria(all_criteria)
if missing_criteria:
raise IncompleteClassifierSet(missing_criteria)
# Create classifiers for each criterion
for criterion_name, classifier_data in classifiers_dict.iteritems():
criterion = criteria.get(criterion_name)
classifier = AIClassifier.objects.create(
classifier_set=classifier_set,
criterion=criterion
criterion=rubric_index.find_criterion(criterion_name)
)
# Serialize the classifier data and upload
......@@ -279,7 +280,6 @@ class AIClassifierSet(models.Model):
Returns:
dict: keys are criteria names, values are JSON-serializable classifier data
If there are no classifiers in the set, returns None
Raises:
ValueError
......@@ -328,7 +328,7 @@ class AIClassifierSet(models.Model):
).format(key=cache_key)
logger.info(msg)
return classifiers_dict if classifiers_dict else None
return classifiers_dict
@property
def valid_scores_by_criterion(self):
......@@ -698,6 +698,7 @@ class AITrainingWorkflow(AIWorkflow):
IncompleteClassifierSet
ClassifierSerializeError
ClassifierUploadError
InvalidRubricSelection
DatabaseError
"""
self.classifier_set = AIClassifierSet.create_classifier_set(
......@@ -788,6 +789,7 @@ class AIGradingWorkflow(AIWorkflow):
submission = sub_api.get_submission_and_student(submission_uuid)
# Get or create the rubric
from openassessment.assessment.serializers import rubric_from_dict
rubric = rubric_from_dict(rubric_dict)
# Retrieve the submission text
......@@ -828,18 +830,12 @@ class AIGradingWorkflow(AIWorkflow):
criterion_scores (dict): Dictionary mapping criteria names to integer scores.
Raises:
InvalidRubricSelection
DatabaseError
"""
assessment = Assessment.objects.create(
submission_uuid=self.submission_uuid,
rubric=self.rubric,
scorer_id=self.algorithm_id,
score_type=AI_ASSESSMENT_TYPE
self.assessment = Assessment.create(
self.rubric, self.algorithm_id, self.submission_uuid, AI_ASSESSMENT_TYPE
)
option_ids = self.rubric.options_ids_for_points(criterion_scores)
AssessmentPart.add_to_assessment(assessment, option_ids)
self.assessment = assessment
AssessmentPart.create_from_option_points(self.assessment, criterion_scores)
self.mark_complete_and_save()
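# Illustrative sketch (hypothetical scores, not from this commit): the
# `criterion_scores` argument consumed above maps criterion names to the
# integer point values assigned by the AI algorithm; feedback-only criteria
# are simply absent from the mapping.
criterion_scores = {u"vocabulary": 2, u"grammar": 1}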
......@@ -39,6 +39,9 @@ class TrainingExample(models.Model):
Returns:
TrainingExample
Raises:
InvalidRubricSelection
"""
content_hash = cls.calculate_hash(answer, options_selected, rubric)
example = TrainingExample.objects.create(
......@@ -46,11 +49,12 @@ class TrainingExample(models.Model):
raw_answer=json.dumps(answer),
rubric=rubric
)
options_ids = rubric.options_ids(options_selected)
for option in CriterionOption.objects.filter(pk__in=list(options_ids)):
# This will raise `InvalidRubricSelection` if the selected options
# do not match the rubric.
for criterion_name, option_name in options_selected.iteritems():
option = rubric.index.find_option(criterion_name, option_name)
example.options_selected.add(option)
return example
@property
......
......@@ -75,15 +75,6 @@ class CriterionSerializer(NestedModelSerializer):
model = Criterion
fields = ('order_num', 'name', 'prompt', 'options', 'points_possible')
def validate_options(self, attrs, source):
"""Make sure we have at least one CriterionOption in a Criterion."""
options = attrs[source]
if not options:
raise serializers.ValidationError(
"Criterion must have at least one option."
)
return attrs
class RubricSerializer(NestedModelSerializer):
"""Serializer for :class:`Rubric`."""
......@@ -150,7 +141,7 @@ class AssessmentPartSerializer(serializers.ModelSerializer):
class Meta:
model = AssessmentPart
fields = ('option', 'feedback')
fields = ('option', 'criterion', 'feedback')
class AssessmentSerializer(serializers.ModelSerializer):
......@@ -219,12 +210,15 @@ def full_assessment_dict(assessment, rubric_dict=None):
# `CriterionOption` again, we simply index into the places we expect them to
# be from the big, saved `Rubric` serialization.
parts = []
for part in assessment.parts.all().select_related("option__criterion"):
criterion_dict = rubric_dict["criteria"][part.option.criterion.order_num]
for part in assessment.parts.all().select_related("criterion", "option"):
criterion_dict = rubric_dict["criteria"][part.criterion.order_num]
options_dict = None
if part.option is not None:
options_dict = criterion_dict["options"][part.option.order_num]
options_dict["criterion"] = criterion_dict
parts.append({
"option": options_dict,
"criterion": criterion_dict,
"feedback": part.feedback
})
......@@ -232,7 +226,9 @@ def full_assessment_dict(assessment, rubric_dict=None):
# `Assessment` so we can again avoid DB calls.
assessment_dict["parts"] = parts
assessment_dict["points_earned"] = sum(
part_dict["option"]["points"] for part_dict in parts
part_dict["option"]["points"]
if part_dict["option"] is not None else 0
for part_dict in parts
)
assessment_dict["points_possible"] = rubric_dict["points_possible"]
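# Illustrative sketch (hypothetical parts, not from this commit): with a
# feedback-only part present, the new sum above counts it as 0 points.
parts = [
    {"option": {"points": 2}},
    {"option": {"points": 1}},
    {"option": None},  # feedback-only criterion
]
points_earned = sum(
    part_dict["option"]["points"]
    if part_dict["option"] is not None else 0
    for part_dict in parts
)
assert points_earned == 3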
......
......@@ -80,6 +80,7 @@ def deserialize_training_examples(examples, rubric_dict):
Raises:
InvalidRubric
InvalidRubricSelection
InvalidTrainingExample
Example usage:
......
......@@ -474,5 +474,110 @@
"Example 3 has a validation error: Training example must contain an \"answer\" field.",
"Example 3 has a validation error: Training example must contain an \"options_selected\" field."
]
},
"feedback_only_criterion": {
"rubric": {
"prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽",
"criteria": [
{
"order_num": 0,
"name": "vøȼȺƀᵾłȺɍɏ",
"prompt": "Ħøw vȺɍɨɇđ ɨs ŧħɇ vøȼȺƀᵾłȺɍɏ?",
"options": [
{
"order_num": 0,
"name": "𝒑𝒐𝒐𝒓",
"explanation": "𝕻𝖔𝖔𝖗 𝖏𝖔𝖇!",
"points": 0
},
{
"order_num": 1,
"name": "𝓰𝓸𝓸𝓭",
"explanation": "ﻭѻѻɗ ﻝѻ๒!",
"points": 1
}
]
},
{
"order_num": 1,
"name": "feedback only",
"prompt": "feedback only",
"options": []
}
]
},
"examples": [
{
"answer": "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
"options_selected": { "vøȼȺƀᵾłȺɍɏ": "𝓰𝓸𝓸𝓭" }
}
],
"errors": []
},
"feedback_only_criterion_extra_score": {
"rubric": {
"prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽",
"criteria": [
{
"order_num": 0,
"name": "vøȼȺƀᵾłȺɍɏ",
"prompt": "Ħøw vȺɍɨɇđ ɨs ŧħɇ vøȼȺƀᵾłȺɍɏ?",
"options": [
{
"order_num": 0,
"name": "𝒑𝒐𝒐𝒓",
"explanation": "𝕻𝖔𝖔𝖗 𝖏𝖔𝖇!",
"points": 0
},
{
"order_num": 1,
"name": "𝓰𝓸𝓸𝓭",
"explanation": "ﻭѻѻɗ ﻝѻ๒!",
"points": 1
}
]
},
{
"order_num": 1,
"name": "feedback only",
"prompt": "feedback only",
"options": []
}
]
},
"examples": [
{
"answer": "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
"options_selected": {
"vøȼȺƀᵾłȺɍɏ": "𝓰𝓸𝓸𝓭",
"feedback only": "𝓰𝓸𝓸𝓭"
}
}
],
"errors": ["Example 1 has an invalid option for \"feedback only\": \"𝓰𝓸𝓸𝓭\""]
},
"feedback_only_all_criteria": {
"rubric": {
"prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽",
"criteria": [
{
"order_num": 1,
"name": "feedback only",
"prompt": "feedback only",
"options": []
}
]
},
"examples": [
{
"answer": "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
"options_selected": {}
}
],
"errors": ["When you include a student training assessment, the rubric for the assessment must contain at least one criterion, and each criterion must contain at least two options."]
}
}
# coding=utf-8
"""
Tests for the assessment Django models.
"""
import copy
from openassessment.test_utils import CacheResetTest
from openassessment.assessment.serializers import rubric_from_dict
from openassessment.assessment.models import Assessment, AssessmentPart, InvalidRubricSelection
from .constants import RUBRIC
class AssessmentTest(CacheResetTest):
"""
Tests for the `Assessment` and `AssessmentPart` models.
"""
def test_create_with_feedback_only_criterion(self):
rubric = self._rubric_with_one_feedback_only_criterion()
assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")
# Create assessment parts
# We can't select an option for the last criterion, but we do
# provide written feedback.
selected = {
u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
}
feedback = {
u"feedback": u"𝕿𝖍𝖎𝖘 𝖎𝖘 𝖘𝖔𝖒𝖊 𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐."
}
AssessmentPart.create_from_option_names(
assessment, selected, feedback=feedback
)
# Check the score (the feedback-only criterion should count for 0 points)
self.assertEqual(assessment.points_earned, 3)
self.assertEqual(assessment.points_possible, 4)
# Check the feedback text
feedback_only = AssessmentPart.objects.get(criterion__name="feedback")
self.assertEqual(feedback_only.feedback, u"𝕿𝖍𝖎𝖘 𝖎𝖘 𝖘𝖔𝖒𝖊 𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐.")
def test_create_with_all_feedback_only_criteria(self):
rubric = self._rubric_with_all_feedback_only_criteria()
assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")
# Create assessment parts, each of which is feedback-only (no points)
selected = {}
feedback = {
u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
}
AssessmentPart.create_from_option_names(
assessment, selected, feedback=feedback
)
# Check the score (should be 0, since we haven't selected any points)
self.assertEqual(assessment.points_earned, 0)
self.assertEqual(assessment.points_possible, 0)
def test_create_from_option_points_feedback_only_criterion(self):
rubric = self._rubric_with_one_feedback_only_criterion()
assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")
# Create assessment parts by providing scores for options
# but NO feedback. This simulates how an example-based AI
# assessment is created.
selected = {
u"vøȼȺƀᵾłȺɍɏ": 2,
u"ﻭɼค๓๓คɼ": 1,
}
AssessmentPart.create_from_option_points(assessment, selected)
# Check the score (the feedback-only criterion should count for 0 points)
self.assertEqual(assessment.points_earned, 3)
self.assertEqual(assessment.points_possible, 4)
# Check the feedback text (should default to an empty string)
feedback_only = AssessmentPart.objects.get(criterion__name="feedback")
self.assertEqual(feedback_only.feedback, u"")
def test_create_from_option_points_all_feedback_only_criteria(self):
rubric = self._rubric_with_all_feedback_only_criteria()
assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")
# Since there are no criteria with options, and we're not
# providing written feedback, pass in an empty selection.
selected = {}
AssessmentPart.create_from_option_points(assessment, selected)
# Score should be zero, since none of the criteria have options
self.assertEqual(assessment.points_earned, 0)
self.assertEqual(assessment.points_possible, 0)
def test_default_feedback_for_feedback_only_criterion(self):
rubric = self._rubric_with_one_feedback_only_criterion()
assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")
# Create assessment parts, but do NOT provide any feedback
# This simulates how non-peer assessments are created
# Note that this is different from providing an empty feedback dict;
# here, we're not providing the `feedback` kwarg at all.
selected = {
u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
}
AssessmentPart.create_from_option_names(assessment, selected)
# Check the score (the feedback-only criterion should count for 0 points)
self.assertEqual(assessment.points_earned, 3)
self.assertEqual(assessment.points_possible, 4)
# Check the feedback text, which should default to an empty string
feedback_only = AssessmentPart.objects.get(criterion__name="feedback")
self.assertEqual(feedback_only.feedback, u"")
def test_no_feedback_provided_for_feedback_only_criterion(self):
rubric = self._rubric_with_one_feedback_only_criterion()
assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")
# Create assessment parts
# Do NOT provide feedback for the feedback-only criterion
selected = {
u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
}
feedback = {}
# Expect an error when we try to create the assessment parts
with self.assertRaises(InvalidRubricSelection):
AssessmentPart.create_from_option_names(assessment, selected, feedback=feedback)
def _rubric_with_one_feedback_only_criterion(self):
"""Create a rubric with one feedback-only criterion."""
rubric_dict = copy.deepcopy(RUBRIC)
rubric_dict['criteria'].append({
"order_num": 2,
"name": u"feedback",
"prompt": u"only feedback, no points",
"options": []
})
return rubric_from_dict(rubric_dict)
def _rubric_with_all_feedback_only_criteria(self):
"""Create a rubric with all feedback-only criteria."""
rubric_dict = copy.deepcopy(RUBRIC)
for criterion in rubric_dict['criteria']:
criterion['options'] = []
return rubric_from_dict(rubric_dict)
# -*- coding: utf-8 -*-
"""
Tests for assessment models.
"""
from openassessment.test_utils import CacheResetTest
from submissions import api as sub_api
from openassessment.assessment.models import (
Rubric, Criterion, CriterionOption, InvalidOptionSelection,
AssessmentFeedback, AssessmentFeedbackOption,
PeerWorkflow, PeerWorkflowItem
)
class TestRubricOptionIds(CacheResetTest):
"""
Test selection of options from a rubric.
"""
NUM_CRITERIA = 4
NUM_OPTIONS = 3
def setUp(self):
"""
Create a rubric in the database.
"""
self.rubric = Rubric.objects.create()
self.criteria = [
Criterion.objects.create(
rubric=self.rubric,
name="test criterion {num}".format(num=num),
order_num=num,
) for num in range(self.NUM_CRITERIA)
]
self.options = dict()
for criterion in self.criteria:
self.options[criterion.name] = [
CriterionOption.objects.create(
criterion=criterion,
name="test option {num}".format(num=num),
order_num=num,
points=num
) for num in range(self.NUM_OPTIONS)
]
def test_option_ids(self):
options_ids = self.rubric.options_ids({
"test criterion 0": "test option 0",
"test criterion 1": "test option 1",
"test criterion 2": "test option 2",
"test criterion 3": "test option 0",
})
self.assertEqual(options_ids, set([
self.options['test criterion 0'][0].id,
self.options['test criterion 1'][1].id,
self.options['test criterion 2'][2].id,
self.options['test criterion 3'][0].id
]))
def test_option_ids_different_order(self):
options_ids = self.rubric.options_ids({
"test criterion 0": "test option 0",
"test criterion 1": "test option 1",
"test criterion 2": "test option 2",
"test criterion 3": "test option 0",
})
self.assertEqual(options_ids, set([
self.options['test criterion 0'][0].id,
self.options['test criterion 1'][1].id,
self.options['test criterion 2'][2].id,
self.options['test criterion 3'][0].id
]))
def test_option_ids_missing_criteria(self):
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids({
"test criterion 0": "test option 0",
"test criterion 1": "test option 1",
"test criterion 3": "test option 2",
})
def test_option_ids_extra_criteria(self):
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids({
"test criterion 0": "test option 0",
"test criterion 1": "test option 1",
"test criterion 2": "test option 2",
"test criterion 3": "test option 1",
"extra criterion": "test",
})
def test_option_ids_mutated_criterion_name(self):
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids({
"test mutated criterion": "test option 1",
"test criterion 1": "test option 1",
"test criterion 2": "test option 2",
"test criterion 3": "test option 1",
})
def test_option_ids_mutated_option_name(self):
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids({
"test criterion 0": "test option 1",
"test criterion 1": "test mutated option",
"test criterion 2": "test option 2",
"test criterion 3": "test option 1",
})
class AssessmentFeedbackTest(CacheResetTest):
"""
Tests for assessment feedback.
This is feedback that students give in response to the peer assessments they receive.
"""
def setUp(self):
self.feedback = AssessmentFeedback.objects.create(
submission_uuid='test_submission',
feedback_text='test feedback',
)
def test_default_options(self):
self.assertEqual(self.feedback.options.count(), 0)
def test_add_options_all_new(self):
# We haven't created any feedback options yet, so these should be created.
self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
# Check the feedback options
options = self.feedback.options.all()
self.assertEqual(len(options), 2)
self.assertEqual(options[0].text, 'I liked my assessment')
self.assertEqual(options[1].text, 'I thought my assessment was unfair')
def test_add_options_some_new(self):
# Create one feedback option in the database
AssessmentFeedbackOption.objects.create(text='I liked my assessment')
# Add feedback options. The one that's new should be created.
self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
# Check the feedback options
options = self.feedback.options.all()
self.assertEqual(len(options), 2)
self.assertEqual(options[0].text, 'I liked my assessment')
self.assertEqual(options[1].text, 'I thought my assessment was unfair')
def test_add_options_empty(self):
# No options
self.feedback.add_options([])
self.assertEqual(len(self.feedback.options.all()), 0)
# Add an option
self.feedback.add_options(['test'])
self.assertEqual(len(self.feedback.options.all()), 1)
# Add an empty list of options
self.feedback.add_options([])
self.assertEqual(len(self.feedback.options.all()), 1)
def test_add_options_duplicates(self):
# Add some options, which will be created
self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
# Add some more options, one of which is a duplicate
self.feedback.add_options(['I liked my assessment', 'I disliked my assessment'])
# There should be three options
options = self.feedback.options.all()
self.assertEqual(len(options), 3)
self.assertEqual(options[0].text, 'I liked my assessment')
self.assertEqual(options[1].text, 'I thought my assessment was unfair')
self.assertEqual(options[2].text, 'I disliked my assessment')
# There should be only three options in the database
self.assertEqual(AssessmentFeedbackOption.objects.count(), 3)
def test_add_options_all_old(self):
# Add some options, which will be created
self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
# Add some more options, all of which are duplicates
self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
# There should be two options
options = self.feedback.options.all()
self.assertEqual(len(options), 2)
self.assertEqual(options[0].text, 'I liked my assessment')
self.assertEqual(options[1].text, 'I thought my assessment was unfair')
# There should be two options in the database
self.assertEqual(AssessmentFeedbackOption.objects.count(), 2)
def test_unicode(self):
# Create options with unicode
self.feedback.add_options([u'𝓘 𝓵𝓲𝓴𝓮𝓭 𝓶𝔂 𝓪𝓼𝓼𝓮𝓼𝓼𝓶𝓮𝓷𝓽', u'ノ イんougんイ ᄊリ ム丂丂乇丂丂ᄊ乇刀イ wム丂 u刀キムノ尺'])
# There should be two options in the database
self.assertEqual(AssessmentFeedbackOption.objects.count(), 2)
class PeerWorkflowTest(CacheResetTest):
"""
Tests for the peer workflow model.
"""
STUDENT_ITEM = {
'student_id': 'test_student',
'course_id': 'test_course',
'item_type': 'openassessment',
'item_id': 'test_item'
}
OTHER_STUDENT = {
'student_id': 'test_student_2',
'course_id': 'test_course',
'item_type': 'openassessment',
'item_id': 'test_item'
}
def test_create_item_multiple_available(self):
# Bugfix TIM-572
submitter_sub = sub_api.create_submission(self.STUDENT_ITEM, 'test answer')
submitter_workflow = PeerWorkflow.objects.create(
student_id=self.STUDENT_ITEM['student_id'],
item_id=self.STUDENT_ITEM['item_id'],
course_id=self.STUDENT_ITEM['course_id'],
submission_uuid=submitter_sub['uuid']
)
scorer_sub = sub_api.create_submission(self.OTHER_STUDENT, 'test answer 2')
scorer_workflow = PeerWorkflow.objects.create(
student_id=self.OTHER_STUDENT['student_id'],
item_id=self.OTHER_STUDENT['item_id'],
course_id=self.OTHER_STUDENT['course_id'],
submission_uuid=scorer_sub['uuid']
)
for _ in range(2):
PeerWorkflowItem.objects.create(
scorer=scorer_workflow,
author=submitter_workflow,
submission_uuid=submitter_sub['uuid']
)
# This used to cause an error when `get_or_create` returned multiple workflow items
PeerWorkflow.create_item(scorer_workflow, submitter_sub['uuid'])
......@@ -6,12 +6,12 @@ Tests for assessment models.
import copy
from openassessment.test_utils import CacheResetTest
from openassessment.assessment.models import (
Rubric, Criterion, CriterionOption, InvalidOptionSelection
Rubric, Criterion, CriterionOption, InvalidRubricSelection
)
from openassessment.assessment.test.constants import RUBRIC
class TestRubricOptionIds(CacheResetTest):
class RubricIndexTest(CacheResetTest):
"""
Test selection of options from a rubric.
"""
......@@ -23,6 +23,8 @@ class TestRubricOptionIds(CacheResetTest):
"""
Create a rubric in the database.
"""
super(RubricIndexTest, self).setUp()
self.rubric = Rubric.objects.create()
self.criteria = [
Criterion.objects.create(
......@@ -43,104 +45,73 @@ class TestRubricOptionIds(CacheResetTest):
) for num in range(self.NUM_OPTIONS)
]
def test_option_ids(self):
options_ids = self.rubric.options_ids({
"test criterion 0": "test option 0",
"test criterion 1": "test option 1",
"test criterion 2": "test option 2",
"test criterion 3": "test option 0",
})
self.assertEqual(options_ids, set([
self.options['test criterion 0'][0].id,
self.options['test criterion 1'][1].id,
self.options['test criterion 2'][2].id,
self.options['test criterion 3'][0].id
]))
def test_option_ids_different_order(self):
options_ids = self.rubric.options_ids({
"test criterion 0": "test option 0",
"test criterion 1": "test option 1",
"test criterion 2": "test option 2",
"test criterion 3": "test option 0",
})
self.assertEqual(options_ids, set([
self.options['test criterion 0'][0].id,
self.options['test criterion 1'][1].id,
self.options['test criterion 2'][2].id,
self.options['test criterion 3'][0].id
]))
def test_option_ids_missing_criteria(self):
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids({
"test criterion 0": "test option 0",
"test criterion 1": "test option 1",
"test criterion 3": "test option 2",
})
def test_option_ids_extra_criteria(self):
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids({
"test criterion 0": "test option 0",
"test criterion 1": "test option 1",
"test criterion 2": "test option 2",
"test criterion 3": "test option 1",
"extra criterion": "test",
})
def test_option_ids_mutated_criterion_name(self):
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids({
"test mutated criterion": "test option 1",
"test criterion 1": "test option 1",
"test criterion 2": "test option 2",
"test criterion 3": "test option 1",
})
def test_option_ids_mutated_option_name(self):
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids({
"test criterion 0": "test option 1",
"test criterion 1": "test mutated option",
"test criterion 2": "test option 2",
"test criterion 3": "test option 1",
})
def test_options_ids_points(self):
options_ids = self.rubric.options_ids_for_points({
'test criterion 0': 0,
'test criterion 1': 1,
'test criterion 2': 2,
'test criterion 3': 1
})
self.assertEqual(options_ids, set([
self.options['test criterion 0'][0].id,
self.options['test criterion 1'][1].id,
self.options['test criterion 2'][2].id,
self.options['test criterion 3'][1].id
]))
def test_options_ids_points_caching(self):
# First call: the dict is not cached
with self.assertNumQueries(1):
self.rubric.options_ids_for_points({
'test criterion 0': 0,
'test criterion 1': 1,
'test criterion 2': 2,
'test criterion 3': 1
})
# Second call: the dict is cached, so no queries are needed
with self.assertNumQueries(0):
self.rubric.options_ids_for_points({
'test criterion 0': 1,
'test criterion 1': 2,
'test criterion 2': 1,
'test criterion 3': 0
})
def test_options_ids_first_of_duplicate_points(self):
def test_find_option(self):
self.assertEqual(
self.rubric.index.find_option("test criterion 0", "test option 0"),
self.options["test criterion 0"][0]
)
self.assertEqual(
self.rubric.index.find_option("test criterion 1", "test option 1"),
self.options["test criterion 1"][1]
)
self.assertEqual(
self.rubric.index.find_option("test criterion 2", "test option 2"),
self.options["test criterion 2"][2]
)
self.assertEqual(
self.rubric.index.find_option("test criterion 3", "test option 0"),
self.options["test criterion 3"][0]
)
def test_find_missing_criteria(self):
missing = self.rubric.index.find_missing_criteria([
'test criterion 0', 'test criterion 1', 'test criterion 3'
])
expected_missing = set(['test criterion 2'])
self.assertEqual(missing, expected_missing)
def test_invalid_option(self):
with self.assertRaises(InvalidRubricSelection):
self.rubric.index.find_option("test criterion 0", "invalid")
def test_valid_option_wrong_criterion(self):
# Add another option to the first criterion
new_option = CriterionOption.objects.create(
criterion=self.criteria[0],
name="extra option",
order_num=(self.NUM_OPTIONS + 1),
points=4
)
# We should be able to find it in the first criterion
self.assertEqual(
new_option,
self.rubric.index.find_option("test criterion 0", "extra option")
)
# ... but not from another criterion
with self.assertRaises(InvalidRubricSelection):
self.rubric.index.find_option("test criterion 1", "extra option")
def test_find_option_for_points(self):
self.assertEqual(
self.rubric.index.find_option_for_points("test criterion 0", 0),
self.options["test criterion 0"][0]
)
self.assertEqual(
self.rubric.index.find_option_for_points("test criterion 1", 1),
self.options["test criterion 1"][1]
)
self.assertEqual(
self.rubric.index.find_option_for_points("test criterion 2", 2),
self.options["test criterion 2"][2]
)
self.assertEqual(
self.rubric.index.find_option_for_points("test criterion 3", 1),
self.options["test criterion 3"][1]
)
def test_find_option_for_points_first_of_duplicate_points(self):
# Change the first criterion's options so that the second and third
# options have the same point value
self.options['test criterion 0'][1].points = 5
......@@ -149,23 +120,42 @@ class TestRubricOptionIds(CacheResetTest):
self.options['test criterion 0'][2].save()
# Should get the first option back
options_ids = self.rubric.options_ids_for_points({
'test criterion 0': 5,
'test criterion 1': 1,
'test criterion 2': 2,
'test criterion 3': 1
})
self.assertIn(self.options['test criterion 0'][1].id, options_ids)
def test_options_ids_points_invalid_selection(self):
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids_for_points({
'test criterion 0': self.NUM_OPTIONS + 1,
'test criterion 1': 2,
'test criterion 2': 1,
'test criterion 3': 0
})
option = self.rubric.index.find_option_for_points("test criterion 0", 5)
self.assertEqual(option, self.options['test criterion 0'][1])
def test_find_option_for_points_invalid_selection(self):
# No such point value
with self.assertRaises(InvalidRubricSelection):
self.rubric.index.find_option_for_points("test criterion 0", 10)
# No such criterion
with self.assertRaises(InvalidRubricSelection):
self.rubric.index.find_option_for_points("no such criterion", 0)
def test_valid_points_wrong_criterion(self):
# Add another option to the first criterion
new_option = CriterionOption.objects.create(
criterion=self.criteria[0],
name="extra option",
order_num=(self.NUM_OPTIONS + 1),
points=10
)
# We should be able to find it in the first criterion
self.assertEqual(
new_option,
self.rubric.index.find_option_for_points("test criterion 0", 10)
)
# ... but not from another criterion
with self.assertRaises(InvalidRubricSelection):
self.rubric.index.find_option_for_points("test criterion 1", 10)
class RubricHashTest(CacheResetTest):
"""
Tests of the rubric content and structure hash.
"""
def test_structure_hash_identical(self):
first_hash = Rubric.structure_hash_from_dict(RUBRIC)
......
......@@ -91,7 +91,7 @@ class TestSelfApi(CacheResetTest):
create_assessment(
'invalid_submission_uuid', u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
self.OPTIONS_SELECTED, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1)
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
def test_create_assessment_wrong_user(self):
......@@ -103,7 +103,7 @@ class TestSelfApi(CacheResetTest):
create_assessment(
'invalid_submission_uuid', u'another user',
self.OPTIONS_SELECTED, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1)
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
def test_create_assessment_invalid_criterion(self):
......@@ -119,7 +119,7 @@ class TestSelfApi(CacheResetTest):
create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
options, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1)
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
def test_create_assessment_invalid_option(self):
......@@ -135,7 +135,7 @@ class TestSelfApi(CacheResetTest):
create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
options, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1)
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
def test_create_assessment_missing_criterion(self):
......@@ -151,7 +151,7 @@ class TestSelfApi(CacheResetTest):
create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
options, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1)
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
def test_create_assessment_timestamp(self):
......@@ -200,3 +200,51 @@ class TestSelfApi(CacheResetTest):
def test_is_complete_no_submission(self):
# This submission uuid does not exist
self.assertFalse(submitter_is_finished('abc1234', {}))
def test_create_assessment_criterion_with_zero_options(self):
# Create a submission to self-assess
submission = create_submission(self.STUDENT_ITEM, "Test answer")
# Modify the rubric to include a criterion with no options (only written feedback)
rubric = copy.deepcopy(self.RUBRIC)
rubric['criteria'].append({
"name": "feedback only",
"prompt": "feedback only",
"options": []
})
# Create a self-assessment for the submission
assessment = create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
self.OPTIONS_SELECTED, rubric,
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
# The self-assessment should have set the feedback for
# the criterion with no options to an empty string
self.assertEqual(assessment["parts"][2]["option"], None)
self.assertEqual(assessment["parts"][2]["feedback"], u"")
def test_create_assessment_all_criteria_have_zero_options(self):
# Create a submission to self-assess
submission = create_submission(self.STUDENT_ITEM, "Test answer")
# Use a rubric with only criteria with no options (only written feedback)
rubric = copy.deepcopy(self.RUBRIC)
for criterion in rubric["criteria"]:
criterion["options"] = []
# Create a self-assessment for the submission
# We don't select any options, since none of the criteria have options
options_selected = {}
assessment = create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
options_selected, rubric,
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
# The self-assessment should have set the feedback for
# all criteria to an empty string.
for part in assessment["parts"]:
self.assertEqual(part["option"], None)
self.assertEqual(part["feedback"], u"")
# coding=utf-8
"""
Tests for assessment serializers.
"""
import json
import os.path
import copy
from openassessment.test_utils import CacheResetTest
from openassessment.assessment.models import Criterion, CriterionOption, Rubric, AssessmentFeedback
from openassessment.assessment.models import (
Assessment, AssessmentPart, AssessmentFeedback
)
from openassessment.assessment.serializers import (
InvalidRubric, RubricSerializer, rubric_from_dict,
AssessmentFeedbackSerializer
rubric_from_dict, full_assessment_dict,
AssessmentFeedbackSerializer, InvalidRubric
)
from .constants import RUBRIC
def json_data(filename):
curr_dir = os.path.dirname(__file__)
......@@ -14,7 +24,7 @@ def json_data(filename):
return json.load(json_file)
class TestRubricDeserialization(CacheResetTest):
class RubricDeserializationTest(CacheResetTest):
def test_rubric_only_created_once(self):
# Make sure sending the same Rubric data twice only creates one Rubric,
......@@ -35,7 +45,7 @@ class TestRubricDeserialization(CacheResetTest):
rubric_from_dict(json_data('data/rubric/no_points.json'))
class TestCriterionDeserialization(CacheResetTest):
class CriterionDeserializationTest(CacheResetTest):
def test_empty_criteria(self):
with self.assertRaises(InvalidRubric) as cm:
......@@ -54,20 +64,11 @@ class TestCriterionDeserialization(CacheResetTest):
)
class TestCriterionOptionDeserialization(CacheResetTest):
class CriterionOptionDeserializationTest(CacheResetTest):
def test_empty_options(self):
with self.assertRaises(InvalidRubric) as cm:
rubric_from_dict(json_data('data/rubric/empty_options.json'))
self.assertEqual(
cm.exception.errors,
{
'criteria': [
{}, # There are no errors in the first criterion
{'options': [u'Criterion must have at least one option.']}
]
}
)
rubric = rubric_from_dict(json_data('data/rubric/empty_options.json'))
self.assertEqual(rubric.criteria.count(), 2)
def test_missing_options(self):
with self.assertRaises(InvalidRubric) as cm:
......@@ -83,7 +84,7 @@ class TestCriterionOptionDeserialization(CacheResetTest):
)
class TestAssessmentFeedbackSerializer(CacheResetTest):
class AssessmentFeedbackSerializerTest(CacheResetTest):
def test_serialize(self):
feedback = AssessmentFeedback.objects.create(
......@@ -114,3 +115,41 @@ class TestAssessmentFeedbackSerializer(CacheResetTest):
'options': [],
'assessments': [],
})
class AssessmentSerializerTest(CacheResetTest):
def test_full_assessment_dict_criteria_no_options(self):
# Create a rubric with a criterion that has no options (just feedback)
rubric_dict = copy.deepcopy(RUBRIC)
rubric_dict['criteria'].append({
'order_num': 2,
'name': 'feedback only',
'prompt': 'feedback only',
'options': []
})
rubric = rubric_from_dict(rubric_dict)
# Create an assessment for the rubric
assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")
selected = {
u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
}
feedback = {
u"feedback only": u"enjoy the feedback!"
}
AssessmentPart.create_from_option_names(assessment, selected, feedback=feedback)
# Serialize the assessment
serialized = full_assessment_dict(assessment)
# Verify that the assessment dict correctly serialized the criterion with options.
self.assertEqual(serialized['parts'][0]['criterion']['name'], u"vøȼȺƀᵾłȺɍɏ")
self.assertEqual(serialized['parts'][0]['option']['name'], u"𝓰𝓸𝓸𝓭")
self.assertEqual(serialized['parts'][1]['criterion']['name'], u"ﻭɼค๓๓คɼ")
self.assertEqual(serialized['parts'][1]['option']['name'], u"єχ¢єℓℓєηт")
# Verify that the assessment dict correctly serialized the criterion with no options.
self.assertIs(serialized['parts'][2]['option'], None)
self.assertEqual(serialized['parts'][2]['criterion']['name'], u"feedback only")
......@@ -3,6 +3,7 @@ Aggregate data for openassessment.
"""
import csv
import json
from django.conf import settings
from submissions import api as sub_api
from openassessment.workflow.models import AssessmentWorkflow
from openassessment.assessment.models import AssessmentPart, AssessmentFeedback
......@@ -110,14 +111,18 @@ class CsvWriter(object):
# Django 1.4 doesn't follow reverse relations when using select_related,
# so we select AssessmentPart and follow the foreign key to the Assessment.
parts = AssessmentPart.objects.select_related(
'assessment', 'option', 'option__criterion'
).filter(assessment__submission_uuid=submission_uuid).order_by('assessment__pk')
parts = self._use_read_replica(
AssessmentPart.objects.select_related('assessment', 'option', 'option__criterion')
.filter(assessment__submission_uuid=submission_uuid)
.order_by('assessment__pk')
)
self._write_assessment_to_csv(parts, rubric_points_cache)
feedback_query = AssessmentFeedback.objects.filter(
submission_uuid=submission_uuid
).prefetch_related('options')
feedback_query = self._use_read_replica(
AssessmentFeedback.objects
.filter(submission_uuid=submission_uuid)
.prefetch_related('options')
)
for assessment_feedback in feedback_query:
self._write_assessment_feedback_to_csv(assessment_feedback)
feedback_option_set.update(set(
......@@ -146,8 +151,8 @@ class CsvWriter(object):
"""
num_results = 0
start = 0
total_results = AssessmentWorkflow.objects.filter(
course_id=course_id
total_results = self._use_read_replica(
AssessmentWorkflow.objects.filter(course_id=course_id)
).count()
while num_results < total_results:
......@@ -156,9 +161,11 @@ class CsvWriter(object):
# so if we counted N at the start of the loop,
# there should be >= N for us to process.
end = start + self.QUERY_INTERVAL
query = AssessmentWorkflow.objects.filter(
course_id=course_id
).order_by('created').values('submission_uuid')[start:end]
query = self._use_read_replica(
AssessmentWorkflow.objects
.filter(course_id=course_id)
.order_by('created')
).values('submission_uuid')[start:end]
for workflow_dict in query:
num_results += 1
......@@ -184,7 +191,7 @@ class CsvWriter(object):
None
"""
submission = sub_api.get_submission_and_student(submission_uuid)
submission = sub_api.get_submission_and_student(submission_uuid, read_replica=True)
self._write_unicode('submission', [
submission['uuid'],
submission['student_item']['student_id'],
......@@ -194,7 +201,7 @@ class CsvWriter(object):
json.dumps(submission['answer'])
])
score = sub_api.get_latest_score_for_submission(submission_uuid)
score = sub_api.get_latest_score_for_submission(submission_uuid, read_replica=True)
if score is not None:
self._write_unicode('score', [
score['submission_uuid'],
......@@ -221,9 +228,9 @@ class CsvWriter(object):
for part in assessment_parts:
self._write_unicode('assessment_part', [
part.assessment.id,
part.option.points,
part.option.criterion.name,
part.option.name,
part.points_earned,
part.criterion.name,
part.option.name if part.option is not None else u"",
part.feedback
])
......@@ -307,3 +314,20 @@ class CsvWriter(object):
if writer is not None:
encoded_row = [unicode(field).encode('utf-8') for field in row]
writer.writerow(encoded_row)
def _use_read_replica(self, queryset):
"""
Use the read replica if it's available.
Args:
queryset (QuerySet)
Returns:
QuerySet
"""
return (
queryset.using("read_replica")
if "read_replica" in settings.DATABASES
else queryset
)
\ No newline at end of file
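# Illustrative sketch (hypothetical settings, not from this commit):
# `_use_read_replica` only takes effect when a "read_replica" alias exists
# in Django's DATABASES setting, e.g.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.mysql",
        "NAME": "edxapp",
    },
    "read_replica": {
        "ENGINE": "django.db.backends.mysql",
        "NAME": "edxapp",
        "HOST": "replica.example.com",  # hypothetical replica host
    },
}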
"""
The File Upload application is designed to allow the management of files
associated with submissions. This can be used to upload new files and provide
URLs to the new location.
"""
import boto
import logging
from django.conf import settings
logger = logging.getLogger("openassessment.fileupload.api")
class FileUploadError(Exception):
"""An error related to uploading files.
This is the generic error raised when a file could not be uploaded.
"""
pass
class FileUploadInternalError(FileUploadError):
"""An error internal to the File Upload API.
This is an error raised when file upload failed due to internal problems in
the File Upload API, beyond the intervention of the requester.
"""
pass
class FileUploadRequestError(FileUploadError):
"""This error is raised when the request has invalid parameters for upload.
This error will be raised if the file being uploaded is somehow invalid,
based on type restrictions, size restrictions, upload limits, etc.
"""
pass
# The setting used to find the name of the AWS Bucket used for uploading
# content.
BUCKET_SETTING = "FILE_UPLOAD_STORAGE_BUCKET_NAME"
# The setting used to prefix uploaded files using this service.
FILE_STORAGE_SETTING = "FILE_UPLOAD_STORAGE_PREFIX"
# The default file storage prefix.
FILE_STORAGE = "submissions_attachments"
def get_upload_url(key, content_type):
"""Request a one-time upload URL to upload files.
Requests a URL for a one-time file upload.
Args:
key (str): A unique identifier used to construct the upload location and
later, can be used to retrieve the same information. This service
must be able to identify data for both upload and download using
this key.
content_type (str): The content type for the file.
Returns:
A URL (str) to use for a one-time upload.
Raises:
FileUploadInternalError: Raised when an internal error occurs while
retrieving a one-time URL.
        FileUploadRequestError: Raised when the request fails due to
            request restrictions.
"""
bucket_name, key_name = _retrieve_parameters(key)
try:
conn = _connect_to_s3()
        upload_url = conn.generate_url(
            3600,  # the presigned URL expires in one hour
            'PUT',
            bucket_name,
            key_name,
            # 5242880 bytes = 5 MB, matching the client-side
            # "File size must be 5MB or less." limit
            headers={'Content-Length': '5242880', 'Content-Type': content_type}
        )
return upload_url
except Exception as ex:
logger.exception(
u"An internal exception occurred while generating an upload URL."
)
raise FileUploadInternalError(ex)
def get_download_url(key):
"""Requests a URL to download the related file from.
Requests a URL for the given student_item.
Args:
key (str): A unique identifier used to identify the data requested for
download. This service must be able to identify data for both
upload and download using this key.
Returns:
A URL (str) to use for downloading related files. If no file is found,
returns an empty string.
"""
bucket_name, key_name = _retrieve_parameters(key)
try:
conn = _connect_to_s3()
bucket = conn.get_bucket(bucket_name)
s3_key = bucket.get_key(key_name)
        # The presigned GET URL is valid for 1000 seconds (about 17 minutes).
        return s3_key.generate_url(expires_in=1000) if s3_key else ""
except Exception as ex:
logger.exception(
u"An internal exception occurred while generating a download URL."
)
raise FileUploadInternalError(ex)
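For illustration, a client-side round trip through both functions, assuming the requests library and placeholder key and file names (none of these appear in this commit).

import requests

# Request a one-time PUT URL, then upload the file body to it.
upload_url = get_upload_url("submission-123", "image/jpeg")
with open("photo.jpg", "rb") as attachment:
    requests.put(
        upload_url,
        data=attachment,
        headers={"Content-Type": "image/jpeg"},
    )

# Later, request a short-lived GET URL for the same key and fetch the file.
download_url = get_download_url("submission-123")
contents = requests.get(download_url).content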
def _connect_to_s3():
"""Connect to s3
Creates a connection to s3 for file URLs.
"""
# Try to get the AWS credentials from settings if they are available
# If not, these will default to `None`, and boto will try to use
# environment vars or configuration files instead.
aws_access_key_id = getattr(settings, 'AWS_ACCESS_KEY_ID', None)
aws_secret_access_key = getattr(settings, 'AWS_SECRET_ACCESS_KEY', None)
return boto.connect_s3(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key
)
def _retrieve_parameters(key):
"""
Simple utility function to validate settings and arguments before compiling
bucket names and key names.
Args:
key (str): Custom key passed in with the request.
Returns:
A tuple of the bucket name and the complete key.
Raises:
FileUploadRequestError
FileUploadInternalError
"""
if not key:
raise FileUploadRequestError("Key required for URL request")
bucket_name = getattr(settings, BUCKET_SETTING, None)
if not bucket_name:
raise FileUploadInternalError("No bucket name configured for FileUpload Service.")
return bucket_name, _get_key_name(key)
def _get_key_name(key):
"""Construct a key name with the given string and configured prefix.
Constructs a unique key with the specified path and the service-specific
configured prefix.
Args:
key (str): Key to identify data for both upload and download.
Returns:
A key name (str) to use constructing URLs.
"""
    # The storage prefix specified here must be publicly readable,
    # or the uploaded images will not be viewable.
prefix = getattr(settings, FILE_STORAGE_SETTING, FILE_STORAGE)
return u"{prefix}/{key}".format(
prefix=prefix,
key=key
)
\ No newline at end of file
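A quick sketch of how the key name is composed (assumed settings values, not from this commit).

# With no FILE_UPLOAD_STORAGE_PREFIX configured, the default prefix applies:
assert _get_key_name("foo") == u"submissions_attachments/foo"

# With a hypothetical FILE_UPLOAD_STORAGE_PREFIX = "my_prefix":
assert _get_key_name("foo") == u"my_prefix/foo"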
import boto
from boto.s3.key import Key
import ddt
from django.test import TestCase
from django.test.utils import override_settings
from moto import mock_s3
from mock import patch
from nose.tools import raises
from openassessment.fileupload import api
@ddt.ddt
class TestFileUploadService(TestCase):
@mock_s3
@override_settings(
AWS_ACCESS_KEY_ID='foobar',
AWS_SECRET_ACCESS_KEY='bizbaz',
FILE_UPLOAD_STORAGE_BUCKET_NAME="mybucket"
)
def test_get_upload_url(self):
conn = boto.connect_s3()
conn.create_bucket('mybucket')
        upload_url = api.get_upload_url("foo", "bar")
        self.assertIn("https://mybucket.s3.amazonaws.com/submissions_attachments/foo", upload_url)
@mock_s3
@override_settings(
AWS_ACCESS_KEY_ID='foobar',
AWS_SECRET_ACCESS_KEY='bizbaz',
FILE_UPLOAD_STORAGE_BUCKET_NAME="mybucket"
)
def test_get_download_url(self):
conn = boto.connect_s3()
bucket = conn.create_bucket('mybucket')
key = Key(bucket)
key.key = "submissions_attachments/foo"
key.set_contents_from_string("How d'ya do?")
        download_url = api.get_download_url("foo")
        self.assertIn("https://mybucket.s3.amazonaws.com/submissions_attachments/foo", download_url)
@raises(api.FileUploadInternalError)
def test_get_upload_url_no_bucket(self):
api.get_upload_url("foo", "bar")
@raises(api.FileUploadRequestError)
def test_get_upload_url_no_key(self):
api.get_upload_url("", "bar")
@mock_s3
@override_settings(
AWS_ACCESS_KEY_ID='foobar',
AWS_SECRET_ACCESS_KEY='bizbaz',
FILE_UPLOAD_STORAGE_BUCKET_NAME="mybucket"
)
@patch.object(boto, 'connect_s3')
@raises(api.FileUploadInternalError)
def test_get_upload_url_error(self, mock_s3):
mock_s3.side_effect = Exception("Oh noes")
api.get_upload_url("foo", "bar")
@mock_s3
@override_settings(
AWS_ACCESS_KEY_ID='foobar',
AWS_SECRET_ACCESS_KEY='bizbaz',
FILE_UPLOAD_STORAGE_BUCKET_NAME="mybucket"
)
@patch.object(boto, 'connect_s3')
    @raises(api.FileUploadInternalError)
def test_get_download_url_error(self, mock_s3):
mock_s3.side_effect = Exception("Oh noes")
api.get_download_url("foo")
......@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: edx-platform\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2014-06-24 09:21-0400\n"
"POT-Creation-Date: 2014-07-11 09:17-0400\n"
"PO-Revision-Date: 2014-06-11 13:04+0000\n"
"Last-Translator: \n"
"Language-Team: Amharic (http://www.transifex.com/projects/p/edx-platform/"
......@@ -19,7 +19,7 @@ msgstr ""
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_base.js:151
#: openassessment/xblock/static/js/src/oa_base.js:146
msgid "Unable to Load"
msgstr ""
......@@ -31,38 +31,48 @@ msgid ""
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:174
#: openassessment/xblock/static/js/src/oa_response.js:191
msgid "Status of Your Response"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:200
#: openassessment/xblock/static/js/src/oa_response.js:217
msgid ""
"If you leave this page without saving or submitting your response, you'll "
"lose any work you've done on the response."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:274
#: openassessment/xblock/static/js/src/oa_response.js:290
msgid "This response has not been saved."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:292
#: openassessment/xblock/static/js/src/oa_response.js:308
msgid "Saving..."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:310
#: openassessment/xblock/static/js/src/oa_response.js:326
msgid "This response has been saved but not submitted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:313
#: openassessment/xblock/static/js/src/oa_response.js:329
msgid "Error"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:430
msgid "File size must be 5MB or less."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:434
msgid "File must be an image."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:59
#: openassessment/xblock/static/js/src/oa_server.js:90
#: openassessment/xblock/static/js/src/oa_server.js:109
......@@ -111,3 +121,13 @@ msgstr ""
#: openassessment/xblock/static/js/src/oa_server.js:479
msgid "The server could not be contacted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:505
msgid "Could not retrieve upload url."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:526
msgid "Could not retrieve download url."
msgstr ""
......@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: edx-platform\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2014-06-24 09:21-0400\n"
"POT-Creation-Date: 2014-07-11 09:17-0400\n"
"PO-Revision-Date: 2014-06-23 07:51+0000\n"
"Last-Translator: Nabeel El-Dughailib <nabeel@qordoba.com>\n"
"Language-Team: Arabic (http://www.transifex.com/projects/p/edx-platform/"
......@@ -21,7 +21,7 @@ msgstr ""
"&& n%100<=10 ? 3 : n%100>=11 && n%100<=99 ? 4 : 5;\n"
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_base.js:151
#: openassessment/xblock/static/js/src/oa_base.js:146
msgid "Unable to Load"
msgstr "لا يمكن إجراء عملية التحميل"
......@@ -35,12 +35,12 @@ msgstr ""
"فقط.."
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:174
#: openassessment/xblock/static/js/src/oa_response.js:191
msgid "Status of Your Response"
msgstr "حالة الرد الخاص بك."
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:200
#: openassessment/xblock/static/js/src/oa_response.js:217
msgid ""
"If you leave this page without saving or submitting your response, you'll "
"lose any work you've done on the response."
......@@ -49,26 +49,36 @@ msgstr ""
"في ما يتعلّق بالرد."
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:274
#: openassessment/xblock/static/js/src/oa_response.js:290
msgid "This response has not been saved."
msgstr "لم يتمّ حفظ هذا الردّ."
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:292
#: openassessment/xblock/static/js/src/oa_response.js:308
msgid "Saving..."
msgstr "جاري الحفظ..."
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:310
#: openassessment/xblock/static/js/src/oa_response.js:326
msgid "This response has been saved but not submitted."
msgstr "تمّ حفظ هذا الردّ لكن لم يتم تقديمه."
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:313
#: openassessment/xblock/static/js/src/oa_response.js:329
msgid "Error"
msgstr "خطأ"
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:430
msgid "File size must be 5MB or less."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:434
msgid "File must be an image."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:59
#: openassessment/xblock/static/js/src/oa_server.js:90
#: openassessment/xblock/static/js/src/oa_server.js:109
......@@ -117,3 +127,13 @@ msgstr "لا يمكن حفظ هذه المسألة."
#: openassessment/xblock/static/js/src/oa_server.js:479
msgid "The server could not be contacted."
msgstr "لا يمكن الاتصال بالمخدّم."
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:505
msgid "Could not retrieve upload url."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:526
msgid "Could not retrieve download url."
msgstr ""
......@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: edx-platform\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2014-06-24 09:22-0400\n"
"POT-Creation-Date: 2014-07-11 09:18-0400\n"
"PO-Revision-Date: 2014-06-11 13:04+0000\n"
"Last-Translator: \n"
"Language-Team: Azerbaijani (http://www.transifex.com/projects/p/edx-platform/"
......@@ -19,7 +19,7 @@ msgstr ""
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_base.js:151
#: openassessment/xblock/static/js/src/oa_base.js:146
msgid "Unable to Load"
msgstr ""
......@@ -31,38 +31,48 @@ msgid ""
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:174
#: openassessment/xblock/static/js/src/oa_response.js:191
msgid "Status of Your Response"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:200
#: openassessment/xblock/static/js/src/oa_response.js:217
msgid ""
"If you leave this page without saving or submitting your response, you'll "
"lose any work you've done on the response."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:274
#: openassessment/xblock/static/js/src/oa_response.js:290
msgid "This response has not been saved."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:292
#: openassessment/xblock/static/js/src/oa_response.js:308
msgid "Saving..."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:310
#: openassessment/xblock/static/js/src/oa_response.js:326
msgid "This response has been saved but not submitted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:313
#: openassessment/xblock/static/js/src/oa_response.js:329
msgid "Error"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:430
msgid "File size must be 5MB or less."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:434
msgid "File must be an image."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:59
#: openassessment/xblock/static/js/src/oa_server.js:90
#: openassessment/xblock/static/js/src/oa_server.js:109
......@@ -111,3 +121,13 @@ msgstr ""
#: openassessment/xblock/static/js/src/oa_server.js:479
msgid "The server could not be contacted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:505
msgid "Could not retrieve upload url."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:526
msgid "Could not retrieve download url."
msgstr ""
......@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: edx-platform\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2014-06-24 09:22-0400\n"
"POT-Creation-Date: 2014-07-11 09:18-0400\n"
"PO-Revision-Date: 2014-06-11 13:04+0000\n"
"Last-Translator: \n"
"Language-Team: Bulgarian (Bulgaria) (http://www.transifex.com/projects/p/edx-"
......@@ -19,7 +19,7 @@ msgstr ""
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_base.js:151
#: openassessment/xblock/static/js/src/oa_base.js:146
msgid "Unable to Load"
msgstr ""
......@@ -31,38 +31,48 @@ msgid ""
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:174
#: openassessment/xblock/static/js/src/oa_response.js:191
msgid "Status of Your Response"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:200
#: openassessment/xblock/static/js/src/oa_response.js:217
msgid ""
"If you leave this page without saving or submitting your response, you'll "
"lose any work you've done on the response."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:274
#: openassessment/xblock/static/js/src/oa_response.js:290
msgid "This response has not been saved."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:292
#: openassessment/xblock/static/js/src/oa_response.js:308
msgid "Saving..."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:310
#: openassessment/xblock/static/js/src/oa_response.js:326
msgid "This response has been saved but not submitted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:313
#: openassessment/xblock/static/js/src/oa_response.js:329
msgid "Error"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:430
msgid "File size must be 5MB or less."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:434
msgid "File must be an image."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:59
#: openassessment/xblock/static/js/src/oa_server.js:90
#: openassessment/xblock/static/js/src/oa_server.js:109
......@@ -111,3 +121,13 @@ msgstr ""
#: openassessment/xblock/static/js/src/oa_server.js:479
msgid "The server could not be contacted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:505
msgid "Could not retrieve upload url."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:526
msgid "Could not retrieve download url."
msgstr ""
......@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: edx-platform\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2014-06-24 09:23-0400\n"
"POT-Creation-Date: 2014-07-11 09:19-0400\n"
"PO-Revision-Date: 2014-06-11 13:04+0000\n"
"Last-Translator: \n"
"Language-Team: Bengali (Bangladesh) (http://www.transifex.com/projects/p/edx-"
......@@ -19,7 +19,7 @@ msgstr ""
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_base.js:151
#: openassessment/xblock/static/js/src/oa_base.js:146
msgid "Unable to Load"
msgstr ""
......@@ -31,38 +31,48 @@ msgid ""
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:174
#: openassessment/xblock/static/js/src/oa_response.js:191
msgid "Status of Your Response"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:200
#: openassessment/xblock/static/js/src/oa_response.js:217
msgid ""
"If you leave this page without saving or submitting your response, you'll "
"lose any work you've done on the response."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:274
#: openassessment/xblock/static/js/src/oa_response.js:290
msgid "This response has not been saved."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:292
#: openassessment/xblock/static/js/src/oa_response.js:308
msgid "Saving..."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:310
#: openassessment/xblock/static/js/src/oa_response.js:326
msgid "This response has been saved but not submitted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:313
#: openassessment/xblock/static/js/src/oa_response.js:329
msgid "Error"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:430
msgid "File size must be 5MB or less."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:434
msgid "File must be an image."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:59
#: openassessment/xblock/static/js/src/oa_server.js:90
#: openassessment/xblock/static/js/src/oa_server.js:109
......@@ -111,3 +121,13 @@ msgstr ""
#: openassessment/xblock/static/js/src/oa_server.js:479
msgid "The server could not be contacted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:505
msgid "Could not retrieve upload url."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:526
msgid "Could not retrieve download url."
msgstr ""
......@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: edx-platform\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2014-06-24 09:23-0400\n"
"POT-Creation-Date: 2014-07-11 09:19-0400\n"
"PO-Revision-Date: 2014-06-11 13:04+0000\n"
"Last-Translator: \n"
"Language-Team: Bengali (India) (http://www.transifex.com/projects/p/edx-"
......@@ -19,7 +19,7 @@ msgstr ""
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_base.js:151
#: openassessment/xblock/static/js/src/oa_base.js:146
msgid "Unable to Load"
msgstr ""
......@@ -31,38 +31,48 @@ msgid ""
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:174
#: openassessment/xblock/static/js/src/oa_response.js:191
msgid "Status of Your Response"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:200
#: openassessment/xblock/static/js/src/oa_response.js:217
msgid ""
"If you leave this page without saving or submitting your response, you'll "
"lose any work you've done on the response."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:274
#: openassessment/xblock/static/js/src/oa_response.js:290
msgid "This response has not been saved."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:292
#: openassessment/xblock/static/js/src/oa_response.js:308
msgid "Saving..."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:310
#: openassessment/xblock/static/js/src/oa_response.js:326
msgid "This response has been saved but not submitted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:313
#: openassessment/xblock/static/js/src/oa_response.js:329
msgid "Error"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:430
msgid "File size must be 5MB or less."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:434
msgid "File must be an image."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:59
#: openassessment/xblock/static/js/src/oa_server.js:90
#: openassessment/xblock/static/js/src/oa_server.js:109
......@@ -111,3 +121,13 @@ msgstr ""
#: openassessment/xblock/static/js/src/oa_server.js:479
msgid "The server could not be contacted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:505
msgid "Could not retrieve upload url."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:526
msgid "Could not retrieve download url."
msgstr ""
......@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: edx-platform\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2014-06-24 09:24-0400\n"
"POT-Creation-Date: 2014-07-11 09:20-0400\n"
"PO-Revision-Date: 2014-06-11 13:04+0000\n"
"Last-Translator: \n"
"Language-Team: Bosnian (http://www.transifex.com/projects/p/edx-platform/"
......@@ -20,7 +20,7 @@ msgstr ""
"%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n"
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_base.js:151
#: openassessment/xblock/static/js/src/oa_base.js:146
msgid "Unable to Load"
msgstr ""
......@@ -32,38 +32,48 @@ msgid ""
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:174
#: openassessment/xblock/static/js/src/oa_response.js:191
msgid "Status of Your Response"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:200
#: openassessment/xblock/static/js/src/oa_response.js:217
msgid ""
"If you leave this page without saving or submitting your response, you'll "
"lose any work you've done on the response."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:274
#: openassessment/xblock/static/js/src/oa_response.js:290
msgid "This response has not been saved."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:292
#: openassessment/xblock/static/js/src/oa_response.js:308
msgid "Saving..."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:310
#: openassessment/xblock/static/js/src/oa_response.js:326
msgid "This response has been saved but not submitted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:313
#: openassessment/xblock/static/js/src/oa_response.js:329
msgid "Error"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:430
msgid "File size must be 5MB or less."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:434
msgid "File must be an image."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:59
#: openassessment/xblock/static/js/src/oa_server.js:90
#: openassessment/xblock/static/js/src/oa_server.js:109
......@@ -112,3 +122,13 @@ msgstr ""
#: openassessment/xblock/static/js/src/oa_server.js:479
msgid "The server could not be contacted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:505
msgid "Could not retrieve upload url."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:526
msgid "Could not retrieve download url."
msgstr ""
......@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: edx-platform\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2014-06-24 09:24-0400\n"
"POT-Creation-Date: 2014-07-11 09:20-0400\n"
"PO-Revision-Date: 2014-06-11 13:04+0000\n"
"Last-Translator: \n"
"Language-Team: Catalan (http://www.transifex.com/projects/p/edx-platform/"
......@@ -19,7 +19,7 @@ msgstr ""
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_base.js:151
#: openassessment/xblock/static/js/src/oa_base.js:146
msgid "Unable to Load"
msgstr ""
......@@ -31,38 +31,48 @@ msgid ""
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:174
#: openassessment/xblock/static/js/src/oa_response.js:191
msgid "Status of Your Response"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:200
#: openassessment/xblock/static/js/src/oa_response.js:217
msgid ""
"If you leave this page without saving or submitting your response, you'll "
"lose any work you've done on the response."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:274
#: openassessment/xblock/static/js/src/oa_response.js:290
msgid "This response has not been saved."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:292
#: openassessment/xblock/static/js/src/oa_response.js:308
msgid "Saving..."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:310
#: openassessment/xblock/static/js/src/oa_response.js:326
msgid "This response has been saved but not submitted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:313
#: openassessment/xblock/static/js/src/oa_response.js:329
msgid "Error"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:430
msgid "File size must be 5MB or less."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:434
msgid "File must be an image."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:59
#: openassessment/xblock/static/js/src/oa_server.js:90
#: openassessment/xblock/static/js/src/oa_server.js:109
......@@ -111,3 +121,13 @@ msgstr ""
#: openassessment/xblock/static/js/src/oa_server.js:479
msgid "The server could not be contacted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:505
msgid "Could not retrieve upload url."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:526
msgid "Could not retrieve download url."
msgstr ""
......@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: edx-platform\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2014-06-24 09:25-0400\n"
"POT-Creation-Date: 2014-07-11 09:21-0400\n"
"PO-Revision-Date: 2014-06-11 13:04+0000\n"
"Last-Translator: \n"
"Language-Team: Catalan (Valencian) (http://www.transifex.com/projects/p/edx-"
......@@ -19,7 +19,7 @@ msgstr ""
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_base.js:151
#: openassessment/xblock/static/js/src/oa_base.js:146
msgid "Unable to Load"
msgstr ""
......@@ -31,38 +31,48 @@ msgid ""
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:174
#: openassessment/xblock/static/js/src/oa_response.js:191
msgid "Status of Your Response"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:200
#: openassessment/xblock/static/js/src/oa_response.js:217
msgid ""
"If you leave this page without saving or submitting your response, you'll "
"lose any work you've done on the response."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:274
#: openassessment/xblock/static/js/src/oa_response.js:290
msgid "This response has not been saved."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:292
#: openassessment/xblock/static/js/src/oa_response.js:308
msgid "Saving..."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:310
#: openassessment/xblock/static/js/src/oa_response.js:326
msgid "This response has been saved but not submitted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:313
#: openassessment/xblock/static/js/src/oa_response.js:329
msgid "Error"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:430
msgid "File size must be 5MB or less."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:434
msgid "File must be an image."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:59
#: openassessment/xblock/static/js/src/oa_server.js:90
#: openassessment/xblock/static/js/src/oa_server.js:109
......@@ -111,3 +121,13 @@ msgstr ""
#: openassessment/xblock/static/js/src/oa_server.js:479
msgid "The server could not be contacted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:505
msgid "Could not retrieve upload url."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:526
msgid "Could not retrieve download url."
msgstr ""
......@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: edx-platform\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2014-06-24 09:25-0400\n"
"POT-Creation-Date: 2014-07-11 09:21-0400\n"
"PO-Revision-Date: 2014-06-11 13:04+0000\n"
"Last-Translator: \n"
"Language-Team: Czech (http://www.transifex.com/projects/p/edx-platform/"
......@@ -19,7 +19,7 @@ msgstr ""
"Plural-Forms: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;\n"
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_base.js:151
#: openassessment/xblock/static/js/src/oa_base.js:146
msgid "Unable to Load"
msgstr ""
......@@ -31,38 +31,48 @@ msgid ""
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:174
#: openassessment/xblock/static/js/src/oa_response.js:191
msgid "Status of Your Response"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:200
#: openassessment/xblock/static/js/src/oa_response.js:217
msgid ""
"If you leave this page without saving or submitting your response, you'll "
"lose any work you've done on the response."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:274
#: openassessment/xblock/static/js/src/oa_response.js:290
msgid "This response has not been saved."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:292
#: openassessment/xblock/static/js/src/oa_response.js:308
msgid "Saving..."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:310
#: openassessment/xblock/static/js/src/oa_response.js:326
msgid "This response has been saved but not submitted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:313
#: openassessment/xblock/static/js/src/oa_response.js:329
msgid "Error"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:430
msgid "File size must be 5MB or less."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:434
msgid "File must be an image."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:59
#: openassessment/xblock/static/js/src/oa_server.js:90
#: openassessment/xblock/static/js/src/oa_server.js:109
......@@ -111,3 +121,13 @@ msgstr ""
#: openassessment/xblock/static/js/src/oa_server.js:479
msgid "The server could not be contacted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:505
msgid "Could not retrieve upload url."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:526
msgid "Could not retrieve download url."
msgstr ""
......@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: edx-platform\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2014-06-24 09:26-0400\n"
"POT-Creation-Date: 2014-07-11 09:22-0400\n"
"PO-Revision-Date: 2014-06-11 13:04+0000\n"
"Last-Translator: \n"
"Language-Team: Welsh (http://www.transifex.com/projects/p/edx-platform/"
......@@ -20,7 +20,7 @@ msgstr ""
"11) ? 2 : 3;\n"
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_base.js:151
#: openassessment/xblock/static/js/src/oa_base.js:146
msgid "Unable to Load"
msgstr ""
......@@ -32,38 +32,48 @@ msgid ""
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:174
#: openassessment/xblock/static/js/src/oa_response.js:191
msgid "Status of Your Response"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:200
#: openassessment/xblock/static/js/src/oa_response.js:217
msgid ""
"If you leave this page without saving or submitting your response, you'll "
"lose any work you've done on the response."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:274
#: openassessment/xblock/static/js/src/oa_response.js:290
msgid "This response has not been saved."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:292
#: openassessment/xblock/static/js/src/oa_response.js:308
msgid "Saving..."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:310
#: openassessment/xblock/static/js/src/oa_response.js:326
msgid "This response has been saved but not submitted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:313
#: openassessment/xblock/static/js/src/oa_response.js:329
msgid "Error"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:430
msgid "File size must be 5MB or less."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:434
msgid "File must be an image."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:59
#: openassessment/xblock/static/js/src/oa_server.js:90
#: openassessment/xblock/static/js/src/oa_server.js:109
......@@ -112,3 +122,13 @@ msgstr ""
#: openassessment/xblock/static/js/src/oa_server.js:479
msgid "The server could not be contacted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:505
msgid "Could not retrieve upload url."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:526
msgid "Could not retrieve download url."
msgstr ""
......@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: edx-platform\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2014-06-24 09:26-0400\n"
"POT-Creation-Date: 2014-07-11 09:22-0400\n"
"PO-Revision-Date: 2014-06-11 13:04+0000\n"
"Last-Translator: \n"
"Language-Team: Danish (http://www.transifex.com/projects/p/edx-platform/"
......@@ -19,7 +19,7 @@ msgstr ""
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_base.js:151
#: openassessment/xblock/static/js/src/oa_base.js:146
msgid "Unable to Load"
msgstr ""
......@@ -31,38 +31,48 @@ msgid ""
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:174
#: openassessment/xblock/static/js/src/oa_response.js:191
msgid "Status of Your Response"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:200
#: openassessment/xblock/static/js/src/oa_response.js:217
msgid ""
"If you leave this page without saving or submitting your response, you'll "
"lose any work you've done on the response."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:274
#: openassessment/xblock/static/js/src/oa_response.js:290
msgid "This response has not been saved."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:292
#: openassessment/xblock/static/js/src/oa_response.js:308
msgid "Saving..."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:310
#: openassessment/xblock/static/js/src/oa_response.js:326
msgid "This response has been saved but not submitted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:313
#: openassessment/xblock/static/js/src/oa_response.js:329
msgid "Error"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:430
msgid "File size must be 5MB or less."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:434
msgid "File must be an image."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:59
#: openassessment/xblock/static/js/src/oa_server.js:90
#: openassessment/xblock/static/js/src/oa_server.js:109
......@@ -111,3 +121,13 @@ msgstr ""
#: openassessment/xblock/static/js/src/oa_server.js:479
msgid "The server could not be contacted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:505
msgid "Could not retrieve upload url."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:526
msgid "Could not retrieve download url."
msgstr ""
......@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: edx-platform\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2014-06-24 09:27-0400\n"
"POT-Creation-Date: 2014-07-11 09:23-0400\n"
"PO-Revision-Date: 2014-06-11 13:04+0000\n"
"Last-Translator: \n"
"Language-Team: German (Germany) (http://www.transifex.com/projects/p/edx-"
......@@ -19,7 +19,7 @@ msgstr ""
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_base.js:151
#: openassessment/xblock/static/js/src/oa_base.js:146
msgid "Unable to Load"
msgstr ""
......@@ -31,38 +31,48 @@ msgid ""
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:174
#: openassessment/xblock/static/js/src/oa_response.js:191
msgid "Status of Your Response"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:200
#: openassessment/xblock/static/js/src/oa_response.js:217
msgid ""
"If you leave this page without saving or submitting your response, you'll "
"lose any work you've done on the response."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:274
#: openassessment/xblock/static/js/src/oa_response.js:290
msgid "This response has not been saved."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:292
#: openassessment/xblock/static/js/src/oa_response.js:308
msgid "Saving..."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:310
#: openassessment/xblock/static/js/src/oa_response.js:326
msgid "This response has been saved but not submitted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:313
#: openassessment/xblock/static/js/src/oa_response.js:329
msgid "Error"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:430
msgid "File size must be 5MB or less."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:434
msgid "File must be an image."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:59
#: openassessment/xblock/static/js/src/oa_server.js:90
#: openassessment/xblock/static/js/src/oa_server.js:109
......@@ -111,3 +121,13 @@ msgstr ""
#: openassessment/xblock/static/js/src/oa_server.js:479
msgid "The server could not be contacted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:505
msgid "Could not retrieve upload url."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:526
msgid "Could not retrieve download url."
msgstr ""
......@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: edx-platform\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2014-06-24 09:27-0400\n"
"POT-Creation-Date: 2014-07-11 09:23-0400\n"
"PO-Revision-Date: 2014-06-24 10:07+0000\n"
"Last-Translator: STERGIOU IOANNIS <stergiou_john@yahoo.gr>\n"
"Language-Team: Greek (http://www.transifex.com/projects/p/edx-platform/"
......@@ -20,7 +20,7 @@ msgstr ""
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_base.js:151
#: openassessment/xblock/static/js/src/oa_base.js:146
msgid "Unable to Load"
msgstr ""
......@@ -32,38 +32,48 @@ msgid ""
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:174
#: openassessment/xblock/static/js/src/oa_response.js:191
msgid "Status of Your Response"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:200
#: openassessment/xblock/static/js/src/oa_response.js:217
msgid ""
"If you leave this page without saving or submitting your response, you'll "
"lose any work you've done on the response."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:274
#: openassessment/xblock/static/js/src/oa_response.js:290
msgid "This response has not been saved."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:292
#: openassessment/xblock/static/js/src/oa_response.js:308
msgid "Saving..."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:310
#: openassessment/xblock/static/js/src/oa_response.js:326
msgid "This response has been saved but not submitted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:313
#: openassessment/xblock/static/js/src/oa_response.js:329
msgid "Error"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:430
msgid "File size must be 5MB or less."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:434
msgid "File must be an image."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:59
#: openassessment/xblock/static/js/src/oa_server.js:90
#: openassessment/xblock/static/js/src/oa_server.js:109
......@@ -112,3 +122,13 @@ msgstr ""
#: openassessment/xblock/static/js/src/oa_server.js:479
msgid "The server could not be contacted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:505
msgid "Could not retrieve upload url."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:526
msgid "Could not retrieve download url."
msgstr ""
......@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: 0.1a\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2014-06-24 09:28-0400\n"
"POT-Creation-Date: 2014-07-11 09:24-0400\n"
"PO-Revision-Date: 2014-06-04 15:41-0400\n"
"Last-Translator: \n"
"Language-Team: openedx-translation <openedx-translation@googlegroups.com>\n"
......@@ -18,7 +18,7 @@ msgstr ""
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_base.js:151
#: openassessment/xblock/static/js/src/oa_base.js:146
msgid "Unable to Load"
msgstr ""
......@@ -30,38 +30,48 @@ msgid ""
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:174
#: openassessment/xblock/static/js/src/oa_response.js:191
msgid "Status of Your Response"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:200
#: openassessment/xblock/static/js/src/oa_response.js:217
msgid ""
"If you leave this page without saving or submitting your response, you'll "
"lose any work you've done on the response."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:274
#: openassessment/xblock/static/js/src/oa_response.js:290
msgid "This response has not been saved."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:292
#: openassessment/xblock/static/js/src/oa_response.js:308
msgid "Saving..."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:310
#: openassessment/xblock/static/js/src/oa_response.js:326
msgid "This response has been saved but not submitted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:313
#: openassessment/xblock/static/js/src/oa_response.js:329
msgid "Error"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:430
msgid "File size must be 5MB or less."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:434
msgid "File must be an image."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:59
#: openassessment/xblock/static/js/src/oa_server.js:90
#: openassessment/xblock/static/js/src/oa_server.js:109
......@@ -110,3 +120,13 @@ msgstr ""
#: openassessment/xblock/static/js/src/oa_server.js:479
msgid "The server could not be contacted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:505
msgid "Could not retrieve upload url."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:526
msgid "Could not retrieve download url."
msgstr ""
......@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: edx-platform\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2014-06-24 09:28-0400\n"
"POT-Creation-Date: 2014-07-11 09:24-0400\n"
"PO-Revision-Date: 2014-06-11 13:04+0000\n"
"Last-Translator: \n"
"Language-Team: LOLCAT English (http://www.transifex.com/projects/p/edx-"
......@@ -19,7 +19,7 @@ msgstr ""
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_base.js:151
#: openassessment/xblock/static/js/src/oa_base.js:146
msgid "Unable to Load"
msgstr ""
......@@ -31,38 +31,48 @@ msgid ""
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:174
#: openassessment/xblock/static/js/src/oa_response.js:191
msgid "Status of Your Response"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:200
#: openassessment/xblock/static/js/src/oa_response.js:217
msgid ""
"If you leave this page without saving or submitting your response, you'll "
"lose any work you've done on the response."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:274
#: openassessment/xblock/static/js/src/oa_response.js:290
msgid "This response has not been saved."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:292
#: openassessment/xblock/static/js/src/oa_response.js:308
msgid "Saving..."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:310
#: openassessment/xblock/static/js/src/oa_response.js:326
msgid "This response has been saved but not submitted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:313
#: openassessment/xblock/static/js/src/oa_response.js:329
msgid "Error"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:430
msgid "File size must be 5MB or less."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:434
msgid "File must be an image."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:59
#: openassessment/xblock/static/js/src/oa_server.js:90
#: openassessment/xblock/static/js/src/oa_server.js:109
......@@ -111,3 +121,13 @@ msgstr ""
#: openassessment/xblock/static/js/src/oa_server.js:479
msgid "The server could not be contacted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:505
msgid "Could not retrieve upload url."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:526
msgid "Could not retrieve download url."
msgstr ""
......@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: edx-platform\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2014-06-24 09:29-0400\n"
"POT-Creation-Date: 2014-07-11 09:25-0400\n"
"PO-Revision-Date: 2014-06-11 13:04+0000\n"
"Last-Translator: \n"
"Language-Team: Pirate English (http://www.transifex.com/projects/p/edx-"
......@@ -19,7 +19,7 @@ msgstr ""
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_base.js:151
#: openassessment/xblock/static/js/src/oa_base.js:146
msgid "Unable to Load"
msgstr ""
......@@ -31,38 +31,48 @@ msgid ""
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:174
#: openassessment/xblock/static/js/src/oa_response.js:191
msgid "Status of Your Response"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:200
#: openassessment/xblock/static/js/src/oa_response.js:217
msgid ""
"If you leave this page without saving or submitting your response, you'll "
"lose any work you've done on the response."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:274
#: openassessment/xblock/static/js/src/oa_response.js:290
msgid "This response has not been saved."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:292
#: openassessment/xblock/static/js/src/oa_response.js:308
msgid "Saving..."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:310
#: openassessment/xblock/static/js/src/oa_response.js:326
msgid "This response has been saved but not submitted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:313
#: openassessment/xblock/static/js/src/oa_response.js:329
msgid "Error"
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:430
msgid "File size must be 5MB or less."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_response.js:434
msgid "File must be an image."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:59
#: openassessment/xblock/static/js/src/oa_server.js:90
#: openassessment/xblock/static/js/src/oa_server.js:109
......@@ -111,3 +121,13 @@ msgstr ""
#: openassessment/xblock/static/js/src/oa_server.js:479
msgid "The server could not be contacted."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:505
msgid "Could not retrieve upload url."
msgstr ""
#: openassessment/xblock/static/js/openassessment.min.js:1
#: openassessment/xblock/static/js/src/oa_server.js:526
msgid "Could not retrieve download url."
msgstr ""