Commit 8b54300f by Will Daly

Allow criteria with no options, just written feedback.

parent 14bc13f3
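For context, here is a rough sketch of a serialized rubric containing a feedback-only criterion, the case this commit enables. The keys mirror the Criterion/CriterionOption fields frozen in the migrations below; the specific criteria and values are hypothetical.

# Illustrative rubric dict (not part of this commit). The "sources" criterion
# has an empty options list, which is now allowed: assessors can only leave
# written feedback for it, and it contributes no points.
rubric_dict = {
    "criteria": [
        {
            "order_num": 0,
            "name": "clarity",
            "prompt": "How clear is the submission?",
            "options": [
                {"order_num": 0, "points": 0, "name": "poor", "explanation": "Hard to follow"},
                {"order_num": 1, "points": 2, "name": "good", "explanation": "Easy to follow"},
            ],
        },
        {
            "order_num": 1,
            "name": "sources",
            "prompt": "Comment on the submission's use of sources.",
            "options": [],  # written feedback only
        },
    ]
}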
......@@ -91,8 +91,8 @@ class AssessmentAdmin(admin.ModelAdmin):
u"{}/{} - {}: {} - {}".format(
part.points_earned,
part.points_possible,
part.option.criterion.name,
part.option.name,
part.criterion.name,
part.option.name if part.option else "None",
part.feedback,
)
)
......
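For a part whose criterion has no selected option, the admin label built above falls back to "None" for the option name. A minimal illustration with hypothetical values:

# Hypothetical feedback-only part: no option selected, so 0/0 points and "None".
label = u"{}/{} - {}: {} - {}".format(0, 0, u"Sources", "None", u"Cite at least two primary sources.")
# label == u'0/0 - Sources: None - Cite at least two primary sources.'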
......@@ -14,7 +14,7 @@ from openassessment.assessment.errors import (
)
from openassessment.assessment.models import (
Assessment, AITrainingWorkflow, AIGradingWorkflow,
InvalidOptionSelection, NoTrainingExamples,
InvalidRubricSelection, NoTrainingExamples,
AI_ASSESSMENT_TYPE, AIClassifierSet
)
from openassessment.assessment.worker import training as training_tasks
......@@ -268,7 +268,7 @@ def train_classifiers(rubric_dict, examples, course_id, item_id, algorithm_id):
# Get or create the rubric and training examples
try:
examples = deserialize_training_examples(examples, rubric_dict)
except (InvalidRubric, InvalidTrainingExample, InvalidOptionSelection) as ex:
except (InvalidRubric, InvalidTrainingExample, InvalidRubricSelection) as ex:
msg = u"Could not parse rubric and/or training examples: {ex}".format(ex=ex)
raise AITrainingRequestError(msg)
......
......@@ -8,7 +8,8 @@ from dogapi import dog_stats_api
from openassessment.assessment.models import (
AITrainingWorkflow, AIGradingWorkflow,
ClassifierUploadError, ClassifierSerializeError,
IncompleteClassifierSet, NoTrainingExamples
IncompleteClassifierSet, NoTrainingExamples,
InvalidRubricSelection
)
from openassessment.assessment.errors import (
AITrainingRequestError, AITrainingInternalError,
......@@ -274,7 +275,7 @@ def create_classifiers(training_workflow_uuid, classifier_set):
except NoTrainingExamples as ex:
logger.exception(ex)
raise AITrainingInternalError(ex)
except IncompleteClassifierSet as ex:
except (IncompleteClassifierSet, InvalidRubricSelection) as ex:
msg = (
u"An error occurred while creating the classifier set "
u"for the training workflow with UUID {uuid}: {ex}"
......
......@@ -11,11 +11,12 @@ from dogapi import dog_stats_api
from openassessment.assessment.models import (
Assessment, AssessmentFeedback, AssessmentPart,
InvalidOptionSelection, PeerWorkflow, PeerWorkflowItem,
InvalidRubricSelection, PeerWorkflow, PeerWorkflowItem,
)
from openassessment.assessment.serializers import (
AssessmentSerializer, AssessmentFeedbackSerializer, RubricSerializer,
AssessmentFeedbackSerializer, RubricSerializer,
full_assessment_dict, rubric_from_dict, serialize_assessments,
InvalidRubric
)
from openassessment.assessment.errors import (
PeerAssessmentRequestError, PeerAssessmentWorkflowError, PeerAssessmentInternalError
......@@ -199,7 +200,8 @@ def create_assessment(
overall_feedback,
rubric_dict,
num_required_grades,
scored_at=None):
scored_at=None
):
"""Creates an assessment on the given submission.
Assessments are created based on feedback associated with a particular
......@@ -244,24 +246,9 @@ def create_assessment(
>>> feedback = "Your submission was thrilling."
>>> create_assessment("1", "Tim", options_selected, criterion_feedback, feedback, rubric_dict)
"""
# Ensure that this variable is declared so that if an error occurs
# we don't get an error when trying to log it!
assessment_dict = None
try:
rubric = rubric_from_dict(rubric_dict)
# Validate that the selected options matched the rubric
# and raise an error if this is not the case
try:
option_ids = rubric.options_ids(options_selected)
except InvalidOptionSelection:
msg = "Selected options do not match the rubric"
logger.warning(msg, exc_info=True)
raise PeerAssessmentRequestError(msg)
# Retrieve workflow information
scorer_workflow = PeerWorkflow.objects.get(submission_uuid=scorer_submission_uuid)
peer_workflow_item = scorer_workflow.get_latest_open_workflow_item()
if peer_workflow_item is None:
message = (
......@@ -270,55 +257,50 @@ def create_assessment(
).format(scorer_submission_uuid)
logger.warning(message)
raise PeerAssessmentWorkflowError(message)
peer_submission_uuid = peer_workflow_item.author.submission_uuid
peer_assessment = {
"rubric": rubric.id,
"scorer_id": scorer_id,
"submission_uuid": peer_submission_uuid,
"score_type": PEER_TYPE,
"feedback": overall_feedback[0:Assessment.MAXSIZE],
}
if scored_at is not None:
peer_assessment["scored_at"] = scored_at
peer_serializer = AssessmentSerializer(data=peer_assessment)
if not peer_serializer.is_valid():
msg = (
u"An error occurred while serializing "
u"the peer assessment associated with "
u"the scorer's submission UUID {}."
).format(scorer_submission_uuid)
raise PeerAssessmentRequestError(msg)
# Get or create the rubric
rubric = rubric_from_dict(rubric_dict)
assessment = peer_serializer.save()
# Create the peer assessment
assessment = Assessment.create(
rubric,
scorer_id,
peer_submission_uuid,
PEER_TYPE,
scored_at=scored_at,
feedback=overall_feedback
)
# We do this to do an end run around django-rest-framework serializer
# validation, which would otherwise require two DB queries per
# option. We already validated these options above.
AssessmentPart.add_to_assessment(assessment, option_ids, criterion_feedback=criterion_feedback)
# Create assessment parts for each criterion in the rubric
# This will raise an `InvalidRubricSelection` if the selected options do not match the rubric.
AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)
# Close the active assessment
scorer_workflow.close_active_assessment(peer_submission_uuid, assessment, num_required_grades)
assessment_dict = full_assessment_dict(assessment)
_log_assessment(assessment, scorer_workflow)
return assessment_dict
except DatabaseError:
error_message = (
u"An error occurred while creating assessment {} by: {}"
).format(assessment_dict, scorer_id)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
return full_assessment_dict(assessment)
except PeerWorkflow.DoesNotExist:
message = (
u"There is no Peer Workflow associated with the given "
u"submission UUID {}."
).format(scorer_submission_uuid)
logger.error(message)
logger.exception(message)
raise PeerAssessmentWorkflowError(message)
except InvalidRubric:
msg = u"Rubric definition was not valid"
logger.exception(msg)
raise PeerAssessmentRequestError(msg)
except InvalidRubricSelection:
msg = u"Invalid options selected in the rubric"
logger.warning(msg, exc_info=True)
raise PeerAssessmentRequestError(msg)
except DatabaseError:
error_message = (
u"An error occurred while retrieving the peer workflow item by scorer with ID: {}"
).format(scorer_id)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
def get_rubric_max_scores(submission_uuid):
......
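A rough usage sketch of the reworked peer create_assessment call when the rubric contains a feedback-only criterion. The argument order matches the signature above; the import path, UUIDs, and criterion names are assumptions.

# Assumed import path for the peer assessment API shown above.
from openassessment.assessment.api import peer as peer_api

options_selected = {"clarity": "good"}  # no entry for the feedback-only criterion
criterion_feedback = {"sources": "Cite at least two primary sources."}  # written feedback only
assessment = peer_api.create_assessment(
    "scorer-submission-uuid",   # hypothetical submission UUID for the scorer
    "Bob",                      # scorer_id
    options_selected,
    criterion_feedback,
    "Solid work overall.",      # overall_feedback
    rubric_dict,                # must include the feedback-only criterion
    num_required_grades=3,
)
# Selections that name a criterion or option missing from the rubric now
# surface as PeerAssessmentRequestError via InvalidRubricSelection.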
......@@ -7,11 +7,10 @@ from dogapi import dog_stats_api
from submissions.api import get_submission_and_student, SubmissionNotFoundError
from openassessment.assessment.serializers import (
AssessmentSerializer, InvalidRubric,
full_assessment_dict, rubric_from_dict, serialize_assessments
InvalidRubric, full_assessment_dict, rubric_from_dict, serialize_assessments
)
from openassessment.assessment.models import (
Assessment, AssessmentPart, InvalidOptionSelection
Assessment, AssessmentPart, InvalidRubricSelection
)
from openassessment.assessment.errors import (
SelfAssessmentRequestError, SelfAssessmentInternalError
......@@ -139,50 +138,25 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s
).format(uuid=submission_uuid)
raise SelfAssessmentRequestError()
# Get or create the rubric
try:
# Get or create the rubric
rubric = rubric_from_dict(rubric_dict)
option_ids = rubric.options_ids(options_selected)
# Create the self assessment
assessment = Assessment.create(rubric, user_id, submission_uuid, SELF_TYPE, scored_at=scored_at)
AssessmentPart.create_from_option_names(assessment, options_selected)
_log_assessment(assessment, submission)
except InvalidRubric:
msg = "Invalid rubric definition"
logger.warning(msg, exc_info=True)
raise SelfAssessmentRequestError(msg)
except InvalidOptionSelection:
except InvalidRubricSelection:
msg = "Selected options do not match the rubric"
logger.warning(msg, exc_info=True)
raise SelfAssessmentRequestError(msg)
# Create the assessment
# Since we have already retrieved the submission, we can assume that
# the user who created the submission exists.
self_assessment = {
"rubric": rubric.id,
"scorer_id": user_id,
"submission_uuid": submission_uuid,
"score_type": SELF_TYPE,
"feedback": u"",
}
if scored_at is not None:
self_assessment['scored_at'] = scored_at
# Serialize the assessment
serializer = AssessmentSerializer(data=self_assessment)
if not serializer.is_valid():
msg = "Could not create self assessment: {errors}".format(errors=serializer.errors)
raise SelfAssessmentRequestError(msg)
assessment = serializer.save()
# We do this to do an end run around django-rest-framework serializer
# validation, which would otherwise require two DB queries per
# option. We already validated these options above.
AssessmentPart.add_to_assessment(assessment, option_ids)
assessment_dict = full_assessment_dict(assessment)
_log_assessment(assessment, submission)
# Return the serialized assessment
return assessment_dict
return full_assessment_dict(assessment)
def get_assessment(submission_uuid):
......
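At the model layer, the same flow can be sketched with the helpers this commit relies on (Assessment.create and AssessmentPart.create_from_option_names, called exactly as in the hunk above). The rubric_dict, user ID, submission UUID, and the "SE" score type value are assumptions.

from openassessment.assessment.models import Assessment, AssessmentPart
from openassessment.assessment.serializers import rubric_from_dict

rubric = rubric_from_dict(rubric_dict)
assessment = Assessment.create(rubric, "Bob", "submission-uuid", "SE")  # "SE" assumed to be SELF_TYPE
# Feedback-only criteria are simply omitted from options_selected; a selection
# that does not match the rubric raises InvalidRubricSelection.
AssessmentPart.create_from_option_names(assessment, {"clarity": "good"})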
......@@ -10,7 +10,7 @@ import logging
from django.utils.translation import ugettext as _
from django.db import DatabaseError
from submissions import api as sub_api
from openassessment.assessment.models import StudentTrainingWorkflow
from openassessment.assessment.models import StudentTrainingWorkflow, InvalidRubricSelection
from openassessment.assessment.serializers import (
deserialize_training_examples, serialize_training_example,
validate_training_example_format,
......@@ -179,6 +179,21 @@ def validate_training_examples(rubric, examples):
logger.warning("Could not parse serialized rubric", exc_info=True)
return [_(u"Could not parse serialized rubric")]
# Check that at least one criterion in the rubric has options
# If this is not the case (that is, if all rubric criteria are written feedback only),
# then it doesn't make sense to do student training.
criteria_without_options = [
criterion_name
for criterion_name, criterion_option_list in criteria_options.iteritems()
if len(criterion_option_list) == 0
]
if len(set(criteria_options) - set(criteria_without_options)) == 0:
return [_(
u"When you include a student training assessment, "
u"the rubric for the assessment must contain at least one criterion, "
u"and each criterion must contain at least two options."
)]
# Check each example
for order_num, example_dict in enumerate(examples, start=1):
......@@ -219,7 +234,9 @@ def validate_training_examples(rubric, examples):
errors.append(msg)
# Check for missing criteria
for missing_criterion in set(criteria_options.keys()) - set(options_selected.keys()):
# Feedback-only criteria (no options) never have a selected option in the examples,
# so don't count them as missing.
all_example_criteria = set(options_selected.keys() + criteria_without_options)
for missing_criterion in set(criteria_options.keys()) - all_example_criteria:
msg = _(
u"Example {example_number} is missing an option "
u"for \"{criterion_name}\""
......@@ -353,7 +370,7 @@ def get_training_example(submission_uuid, rubric, examples):
# If the student already started a training example, then return that instead.
next_example = workflow.next_training_example(examples)
return None if next_example is None else serialize_training_example(next_example)
except (InvalidRubric, InvalidTrainingExample) as ex:
except (InvalidRubric, InvalidRubricSelection, InvalidTrainingExample) as ex:
logger.exception(
"Could not deserialize training examples for submission UUID {}".format(submission_uuid)
)
......
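A self-contained sketch of the new check above: given the parsed criteria_options mapping, collect the feedback-only criteria and reject training when no criterion has options. Criterion names are hypothetical.

# criteria_options maps criterion name -> list of option dicts, as in the code above.
criteria_options = {
    "clarity": [{"name": "poor", "points": 0}, {"name": "good", "points": 2}],
    "sources": [],  # written feedback only
}

criteria_without_options = [
    name for name, option_list in criteria_options.items() if len(option_list) == 0
]

# Training only makes sense if at least one criterion still has options to choose from.
if len(set(criteria_options) - set(criteria_without_options)) == 0:
    print("Every criterion is feedback-only; student training requires at least one criterion with options.")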
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"""
Fill in the criteria field in the `AssessmentPart` model.
"""
for criterion in orm.Criterion.objects.select_related().all():
orm.AssessmentPart.objects.filter(
criterion__isnull=True,
option__in=criterion.options.all()
).update(criterion=criterion)
def backwards(self, orm):
"""
No backwards migration -- if we re-run the migration later,
any criteria values will be rewritten.
"""
pass
models = {
'assessment.aiclassifier': {
'Meta': {'object_name': 'AIClassifier'},
'classifier_data': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'classifier_set': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'classifiers'", 'to': "orm['assessment.AIClassifierSet']"}),
'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['assessment.Criterion']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'assessment.aiclassifierset': {
'Meta': {'ordering': "['-created_at', '-id']", 'object_name': 'AIClassifierSet'},
'algorithm_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['assessment.Rubric']"})
},
'assessment.aigradingworkflow': {
'Meta': {'object_name': 'AIGradingWorkflow'},
'algorithm_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'+'", 'null': 'True', 'to': "orm['assessment.Assessment']"}),
'classifier_set': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'+'", 'null': 'True', 'to': "orm['assessment.AIClassifierSet']"}),
'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'essay_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['assessment.Rubric']"}),
'scheduled_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '36', 'blank': 'True'})
},
'assessment.aitrainingworkflow': {
'Meta': {'object_name': 'AITrainingWorkflow'},
'algorithm_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'classifier_set': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'+'", 'null': 'True', 'to': "orm['assessment.AIClassifierSet']"}),
'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'scheduled_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'training_examples': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'+'", 'symmetrical': 'False', 'to': "orm['assessment.TrainingExample']"}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '36', 'blank': 'True'})
},
'assessment.assessment': {
'Meta': {'ordering': "['-scored_at', '-id']", 'object_name': 'Assessment'},
'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"}),
'score_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'scored_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'scorer_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'assessment.assessmentfeedback': {
'Meta': {'object_name': 'AssessmentFeedback'},
'assessments': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.Assessment']"}),
'feedback_text': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.AssessmentFeedbackOption']"}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'assessment.assessmentfeedbackoption': {
'Meta': {'object_name': 'AssessmentFeedbackOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'assessment.assessmentpart': {
'Meta': {'object_name': 'AssessmentPart'},
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parts'", 'to': "orm['assessment.Assessment']"}),
'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['assessment.Criterion']"}),
'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['assessment.CriterionOption']"})
},
'assessment.criterion': {
'Meta': {'ordering': "['rubric', 'order_num']", 'object_name': 'Criterion'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'prompt': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'criteria'", 'to': "orm['assessment.Rubric']"})
},
'assessment.criterionoption': {
'Meta': {'ordering': "['criterion', 'order_num']", 'object_name': 'CriterionOption'},
'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['assessment.Criterion']"}),
'explanation': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'points': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'assessment.peerworkflow': {
'Meta': {'ordering': "['created_at', 'id']", 'object_name': 'PeerWorkflow'},
'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'grading_completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'assessment.peerworkflowitem': {
'Meta': {'ordering': "['started_at', 'id']", 'object_name': 'PeerWorkflowItem'},
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Assessment']", 'null': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded_by'", 'to': "orm['assessment.PeerWorkflow']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scored': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scorer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded'", 'to': "orm['assessment.PeerWorkflow']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'assessment.rubric': {
'Meta': {'object_name': 'Rubric'},
'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'structure_hash': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'})
},
'assessment.studenttrainingworkflow': {
'Meta': {'object_name': 'StudentTrainingWorkflow'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'assessment.studenttrainingworkflowitem': {
'Meta': {'ordering': "['workflow', 'order_num']", 'unique_together': "(('workflow', 'order_num'),)", 'object_name': 'StudentTrainingWorkflowItem'},
'completed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'training_example': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.TrainingExample']"}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['assessment.StudentTrainingWorkflow']"})
},
'assessment.trainingexample': {
'Meta': {'object_name': 'TrainingExample'},
'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'options_selected': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['assessment.CriterionOption']", 'symmetrical': 'False'}),
'raw_answer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"})
}
}
complete_apps = ['assessment']
symmetrical = True
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Make the AssessmentPart.criterion field NOT nullable
db.alter_column('assessment_assessmentpart', 'criterion_id',
self.gf('django.db.models.fields.related.ForeignKey')(
related_name='+', null=False, to=orm['assessment.Criterion']
)
)
def backwards(self, orm):
# Make the AssessmentPart.criterion field nullable
db.alter_column('assessment_assessmentpart', 'criterion_id',
self.gf('django.db.models.fields.related.ForeignKey')(
related_name='+', null=True, to=orm['assessment.Criterion']
)
)
models = {
'assessment.aiclassifier': {
'Meta': {'object_name': 'AIClassifier'},
'classifier_data': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'classifier_set': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'classifiers'", 'to': "orm['assessment.AIClassifierSet']"}),
'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['assessment.Criterion']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'assessment.aiclassifierset': {
'Meta': {'ordering': "['-created_at', '-id']", 'object_name': 'AIClassifierSet'},
'algorithm_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['assessment.Rubric']"})
},
'assessment.aigradingworkflow': {
'Meta': {'object_name': 'AIGradingWorkflow'},
'algorithm_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'+'", 'null': 'True', 'to': "orm['assessment.Assessment']"}),
'classifier_set': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'+'", 'null': 'True', 'to': "orm['assessment.AIClassifierSet']"}),
'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'essay_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['assessment.Rubric']"}),
'scheduled_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '36', 'blank': 'True'})
},
'assessment.aitrainingworkflow': {
'Meta': {'object_name': 'AITrainingWorkflow'},
'algorithm_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'classifier_set': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'+'", 'null': 'True', 'to': "orm['assessment.AIClassifierSet']"}),
'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'scheduled_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'training_examples': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'+'", 'symmetrical': 'False', 'to': "orm['assessment.TrainingExample']"}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '36', 'blank': 'True'})
},
'assessment.assessment': {
'Meta': {'ordering': "['-scored_at', '-id']", 'object_name': 'Assessment'},
'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"}),
'score_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'scored_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'scorer_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'assessment.assessmentfeedback': {
'Meta': {'object_name': 'AssessmentFeedback'},
'assessments': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.Assessment']"}),
'feedback_text': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.AssessmentFeedbackOption']"}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'assessment.assessmentfeedbackoption': {
'Meta': {'object_name': 'AssessmentFeedbackOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'assessment.assessmentpart': {
'Meta': {'object_name': 'AssessmentPart'},
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parts'", 'to': "orm['assessment.Assessment']"}),
'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['assessment.Criterion']"}),
'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['assessment.CriterionOption']"})
},
'assessment.criterion': {
'Meta': {'ordering': "['rubric', 'order_num']", 'object_name': 'Criterion'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'prompt': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'criteria'", 'to': "orm['assessment.Rubric']"})
},
'assessment.criterionoption': {
'Meta': {'ordering': "['criterion', 'order_num']", 'object_name': 'CriterionOption'},
'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['assessment.Criterion']"}),
'explanation': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'points': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'assessment.peerworkflow': {
'Meta': {'ordering': "['created_at', 'id']", 'object_name': 'PeerWorkflow'},
'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'grading_completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'assessment.peerworkflowitem': {
'Meta': {'ordering': "['started_at', 'id']", 'object_name': 'PeerWorkflowItem'},
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Assessment']", 'null': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded_by'", 'to': "orm['assessment.PeerWorkflow']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scored': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scorer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded'", 'to': "orm['assessment.PeerWorkflow']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'assessment.rubric': {
'Meta': {'object_name': 'Rubric'},
'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'structure_hash': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'})
},
'assessment.studenttrainingworkflow': {
'Meta': {'object_name': 'StudentTrainingWorkflow'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'assessment.studenttrainingworkflowitem': {
'Meta': {'ordering': "['workflow', 'order_num']", 'unique_together': "(('workflow', 'order_num'),)", 'object_name': 'StudentTrainingWorkflowItem'},
'completed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'training_example': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.TrainingExample']"}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['assessment.StudentTrainingWorkflow']"})
},
'assessment.trainingexample': {
'Meta': {'object_name': 'TrainingExample'},
'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'options_selected': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['assessment.CriterionOption']", 'symmetrical': 'False'}),
'raw_answer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"})
}
}
complete_apps = ['assessment']
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Make the AssessmentPart.option field nullable
db.alter_column('assessment_assessmentpart', 'option_id',
self.gf('django.db.models.fields.related.ForeignKey')(
related_name='+', null=True, to=orm['assessment.CriterionOption']
)
)
def backwards(self, orm):
# Some records may contain null values now, so we can't re-introduce the constraint.
pass
models = {
'assessment.aiclassifier': {
'Meta': {'object_name': 'AIClassifier'},
'classifier_data': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'classifier_set': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'classifiers'", 'to': "orm['assessment.AIClassifierSet']"}),
'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['assessment.Criterion']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'assessment.aiclassifierset': {
'Meta': {'ordering': "['-created_at', '-id']", 'object_name': 'AIClassifierSet'},
'algorithm_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['assessment.Rubric']"})
},
'assessment.aigradingworkflow': {
'Meta': {'object_name': 'AIGradingWorkflow'},
'algorithm_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'+'", 'null': 'True', 'to': "orm['assessment.Assessment']"}),
'classifier_set': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'+'", 'null': 'True', 'to': "orm['assessment.AIClassifierSet']"}),
'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'essay_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['assessment.Rubric']"}),
'scheduled_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '36', 'blank': 'True'})
},
'assessment.aitrainingworkflow': {
'Meta': {'object_name': 'AITrainingWorkflow'},
'algorithm_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'classifier_set': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'+'", 'null': 'True', 'to': "orm['assessment.AIClassifierSet']"}),
'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'scheduled_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'training_examples': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'+'", 'symmetrical': 'False', 'to': "orm['assessment.TrainingExample']"}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '36', 'blank': 'True'})
},
'assessment.assessment': {
'Meta': {'ordering': "['-scored_at', '-id']", 'object_name': 'Assessment'},
'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"}),
'score_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'scored_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'scorer_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'assessment.assessmentfeedback': {
'Meta': {'object_name': 'AssessmentFeedback'},
'assessments': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.Assessment']"}),
'feedback_text': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.AssessmentFeedbackOption']"}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'assessment.assessmentfeedbackoption': {
'Meta': {'object_name': 'AssessmentFeedbackOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'assessment.assessmentpart': {
'Meta': {'object_name': 'AssessmentPart'},
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parts'", 'to': "orm['assessment.Assessment']"}),
'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['assessment.Criterion']"}),
'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['assessment.CriterionOption']"})
},
'assessment.criterion': {
'Meta': {'ordering': "['rubric', 'order_num']", 'object_name': 'Criterion'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'prompt': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'criteria'", 'to': "orm['assessment.Rubric']"})
},
'assessment.criterionoption': {
'Meta': {'ordering': "['criterion', 'order_num']", 'object_name': 'CriterionOption'},
'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['assessment.Criterion']"}),
'explanation': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'points': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'assessment.peerworkflow': {
'Meta': {'ordering': "['created_at', 'id']", 'object_name': 'PeerWorkflow'},
'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'grading_completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'assessment.peerworkflowitem': {
'Meta': {'ordering': "['started_at', 'id']", 'object_name': 'PeerWorkflowItem'},
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Assessment']", 'null': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded_by'", 'to': "orm['assessment.PeerWorkflow']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scored': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scorer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded'", 'to': "orm['assessment.PeerWorkflow']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'assessment.rubric': {
'Meta': {'object_name': 'Rubric'},
'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'structure_hash': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'})
},
'assessment.studenttrainingworkflow': {
'Meta': {'object_name': 'StudentTrainingWorkflow'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'assessment.studenttrainingworkflowitem': {
'Meta': {'ordering': "['workflow', 'order_num']", 'unique_together': "(('workflow', 'order_num'),)", 'object_name': 'StudentTrainingWorkflowItem'},
'completed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'training_example': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.TrainingExample']"}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['assessment.StudentTrainingWorkflow']"})
},
'assessment.trainingexample': {
'Meta': {'object_name': 'TrainingExample'},
'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'options_selected': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['assessment.CriterionOption']", 'symmetrical': 'False'}),
'raw_answer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"})
}
}
complete_apps = ['assessment']
......@@ -12,7 +12,6 @@ from django.utils.timezone import now
from django_extensions.db.fields import UUIDField
from dogapi import dog_stats_api
from submissions import api as sub_api
from openassessment.assessment.serializers import rubric_from_dict
from .base import Rubric, Criterion, Assessment, AssessmentPart
from .training import TrainingExample
......@@ -45,16 +44,14 @@ class IncompleteClassifierSet(Exception):
"""
The classifier set is missing a classifier for a criterion in the rubric.
"""
def __init__(self, expected_criteria, actual_criteria):
def __init__(self, missing_criteria):
"""
Construct an error message that explains which criteria were missing.
Args:
expected_criteria (iterable of unicode): The criteria in the rubric.
actual_criteria (iterable of unicode): The criteria specified by the classifier set.
missing_criteria (list): The list of criteria names that were missing.
"""
missing_criteria = set(expected_criteria) - set(actual_criteria)
msg = (
u"Missing classifiers for the following "
u"criteria: {missing}"
......@@ -136,6 +133,7 @@ class AIClassifierSet(models.Model):
Raises:
ClassifierSerializeError
ClassifierUploadError
InvalidRubricSelection
DatabaseError
"""
......@@ -146,12 +144,8 @@ class AIClassifierSet(models.Model):
# Retrieve the criteria for this rubric,
# then organize them by criterion name
try:
criteria = {
criterion.name: criterion
for criterion in Criterion.objects.filter(rubric=rubric)
}
rubric_index = rubric.index
except DatabaseError as ex:
msg = (
u"An unexpected error occurred while retrieving rubric criteria with the"
......@@ -161,15 +155,22 @@ class AIClassifierSet(models.Model):
raise
# Check that we have classifiers for all criteria in the rubric
if set(criteria.keys()) != set(classifiers_dict.keys()):
raise IncompleteClassifierSet(criteria.keys(), classifiers_dict.keys())
# Ignore criteria that have no options: since these have only written feedback,
# we can't assign them a score.
all_criteria = set(classifiers_dict.keys())
all_criteria |= set(
criterion.name for criterion in
rubric_index.find_criteria_without_options()
)
missing_criteria = rubric_index.find_missing_criteria(all_criteria)
if missing_criteria:
raise IncompleteClassifierSet(missing_criteria)
# Create classifiers for each criterion
for criterion_name, classifier_data in classifiers_dict.iteritems():
criterion = criteria.get(criterion_name)
classifier = AIClassifier.objects.create(
classifier_set=classifier_set,
criterion=criterion
criterion=rubric_index.find_criterion(criterion_name)
)
# Serialize the classifier data and upload
......@@ -279,7 +280,6 @@ class AIClassifierSet(models.Model):
Returns:
dict: keys are criteria names, values are JSON-serializable classifier data
If there are no classifiers in the set, returns None
Raises:
ValueError
......@@ -328,7 +328,7 @@ class AIClassifierSet(models.Model):
).format(key=cache_key)
logger.info(msg)
return classifiers_dict if classifiers_dict else None
return classifiers_dict
@property
def valid_scores_by_criterion(self):
......@@ -698,6 +698,7 @@ class AITrainingWorkflow(AIWorkflow):
IncompleteClassifierSet
ClassifierSerializeError
ClassifierUploadError
InvalidRubricSelection
DatabaseError
"""
self.classifier_set = AIClassifierSet.create_classifier_set(
......@@ -788,6 +789,7 @@ class AIGradingWorkflow(AIWorkflow):
submission = sub_api.get_submission_and_student(submission_uuid)
# Get or create the rubric
from openassessment.assessment.serializers import rubric_from_dict
rubric = rubric_from_dict(rubric_dict)
# Retrieve the submission text
......@@ -828,18 +830,12 @@ class AIGradingWorkflow(AIWorkflow):
criterion_scores (dict): Dictionary mapping criteria names to integer scores.
Raises:
InvalidRubricSelection
DatabaseError
"""
assessment = Assessment.objects.create(
submission_uuid=self.submission_uuid,
rubric=self.rubric,
scorer_id=self.algorithm_id,
score_type=AI_ASSESSMENT_TYPE
self.assessment = Assessment.create(
self.rubric, self.algorithm_id, self.submission_uuid, AI_ASSESSMENT_TYPE
)
option_ids = self.rubric.options_ids_for_points(criterion_scores)
AssessmentPart.add_to_assessment(assessment, option_ids)
self.assessment = assessment
AssessmentPart.create_from_option_points(self.assessment, criterion_scores)
self.mark_complete_and_save()
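For reference, a hedged sketch of the criterion_scores input consumed by AssessmentPart.create_from_option_points above. Per the rubric index introduced later in this diff, when several options share a point value the one with the lowest order number is chosen, and feedback-only criteria receive no score. Criterion names and point values are hypothetical.

from openassessment.assessment.models import AssessmentPart

criterion_scores = {
    "clarity": 2,    # selects the first "clarity" option worth 2 points
    "accuracy": 1,
}
# `assessment` would be the Assessment.create(...) result from the code above.
AssessmentPart.create_from_option_points(assessment, criterion_scores)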
......@@ -12,6 +12,7 @@ need to then generate a matching migration for it using:
./manage.py schemamigration openassessment.assessment --auto
"""
import math
from collections import defaultdict
from copy import deepcopy
from hashlib import sha1
......@@ -20,15 +21,15 @@ import json
from django.core.cache import cache
from django.db import models
from django.utils.timezone import now
import math
from lazy import lazy
import logging
logger = logging.getLogger("openassessment.assessment.models")
class InvalidOptionSelection(Exception):
class InvalidRubricSelection(Exception):
"""
The user selected options that do not match the rubric.
The specified criterion or option does not exist in the rubric.
"""
pass
......@@ -71,6 +72,18 @@ class Rubric(models.Model):
criteria_points = [crit.points_possible for crit in self.criteria.all()]
return sum(criteria_points) if criteria_points else 0
@lazy
def index(self):
"""
Load the rubric's data and return an index that allows
the user to query for specific criteria/options.
Returns:
RubricIndex
"""
return RubricIndex(self)
@staticmethod
def content_hash_from_dict(rubric_dict):
"""Given a dict of rubric information, return a unique hash.
......@@ -125,135 +138,6 @@ class Rubric(models.Model):
canonical_form = json.dumps(structure, sort_keys=True)
return sha1(canonical_form).hexdigest()
def options_ids(self, options_selected):
"""Given a mapping of selected options, return the option IDs.
We use this to map user selection during assessment to the
:class:`CriterionOption` IDs that are in our database. These IDs are
never shown to the user.
Args:
options_selected (dict): Mapping of criteria names to the names of
the option that was selected for that criterion.
Returns:
set of option ids
Examples:
>>> options_selected = {"secret": "yes", "safe": "no"}
>>> rubric.options_ids(options_selected)
[10, 12]
Raises:
InvalidOptionSelection: the selected options do not match the rubric.
"""
# Cache based on the content_hash, not the id. It's slightly safer, and
# we don't have to worry about invalidation of the cache while running
# tests.
rubric_criteria_dict_cache_key = (
"assessment.rubric_criteria_dict.{}".format(self.content_hash)
)
# Create a dict of dicts that maps:
# criterion names --> option names --> option ids
#
# If we've already generated one of these for this rubric, grab it from
# the cache instead of hitting the database again.
rubric_criteria_dict = cache.get(rubric_criteria_dict_cache_key)
if not rubric_criteria_dict:
rubric_criteria_dict = defaultdict(dict)
# Select all criteria and options for this rubric
# We use `select_related()` to minimize the number of database queries
rubric_options = CriterionOption.objects.filter(
criterion__rubric=self
).select_related()
# Construct dictionaries for each option in the rubric
for option in rubric_options:
rubric_criteria_dict[option.criterion.name][option.name] = option.id
# Save it in our cache
cache.set(rubric_criteria_dict_cache_key, rubric_criteria_dict)
# Validate: are options selected for each criterion in the rubric?
if len(options_selected) != len(rubric_criteria_dict):
msg = (
u"Incorrect number of options for this rubric "
u"({actual} instead of {expected})"
).format(
actual=len(options_selected),
expected=len(rubric_criteria_dict)
)
raise InvalidOptionSelection(msg)
# Look up each selected option
option_id_set = set()
for criterion_name, option_name in options_selected.iteritems():
if (criterion_name in rubric_criteria_dict and
option_name in rubric_criteria_dict[criterion_name]
):
option_id = rubric_criteria_dict[criterion_name][option_name]
option_id_set.add(option_id)
else:
msg = (
"{criterion}: {option} not found in rubric"
).format(criterion=criterion_name, option=option_name)
raise InvalidOptionSelection(msg)
return option_id_set
def options_ids_for_points(self, criterion_points):
"""
Given a mapping of selected point values, return the option IDs.
If there are multiple options with the same point value,
this will return the first one (lower order number).
Args:
criterion_points (dict): Mapping of criteria names to point values.
Returns:
list of option IDs
Raises:
InvalidOptionSelection
"""
# Retrieve the mapping of criterion names/points to option IDs
# from the cache, if it's available
cache_key = "assessment.rubric_points_dict.{}".format(self.content_hash)
rubric_points_dict = cache.get(cache_key)
# Otherwise, create the dict by querying the database
if not rubric_points_dict:
rubric_options = CriterionOption.objects.filter(
criterion__rubric=self
).select_related()
rubric_points_dict = defaultdict(dict)
for option in rubric_options:
if option.points not in rubric_points_dict[option.criterion.name]:
rubric_points_dict[option.criterion.name][option.points] = option.id
# Store the dict in the cache
cache.set(cache_key, rubric_points_dict)
# Find the IDs for the options matching the specified point value
option_id_set = set()
for criterion_name, option_points in criterion_points.iteritems():
if (criterion_name in rubric_points_dict and option_points in rubric_points_dict[criterion_name]):
option_id = rubric_points_dict[criterion_name][option_points]
option_id_set.add(option_id)
else:
msg = u"{criterion} option with point value {points} not found in rubric".format(
criterion=criterion_name, points=option_points
)
raise InvalidOptionSelection(msg)
return option_id_set
class Criterion(models.Model):
"""A single aspect of a submission that needs assessment.
......@@ -280,7 +164,9 @@ class Criterion(models.Model):
@property
def points_possible(self):
"""The total number of points that could be earned in this Criterion."""
return max(option.points for option in self.options.all())
# By convention, criteria with 0 options (only feedback) have 0 points possible
option_points = [option.points for option in self.options.all()]
return max(option_points) if option_points else 0
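# For example (sketch): a criterion with options worth 0, 1, and 2 points has
# points_possible == 2, while a feedback-only criterion (no options) has
# points_possible == 0.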
class CriterionOption(models.Model):
......@@ -327,6 +213,178 @@ class CriterionOption(models.Model):
return repr(self)
class RubricIndex(object):
"""
Loads a rubric's criteria and options into memory so that they
can be repeatedly queried without hitting the database.
"""
def __init__(self, rubric):
"""
Load the rubric's data.
Args:
rubric (Rubric): The Rubric model to load.
Returns:
RubricIndex
"""
self.rubric = rubric
# Load the rubric's criteria and options from the database
criteria = Criterion.objects.select_related().filter(rubric=rubric)
options = CriterionOption.objects.select_related().filter(
criterion__rubric=rubric
).order_by("-order_num")
# Create dictionaries indexing the criteria/options
self._criteria_index = {
criterion.name: criterion
for criterion in criteria
}
self._option_index = {
(option.criterion.name, option.name): option
for option in options
}
# By convention, if multiple options in the same criterion have the
# same point value, we return the *first* option.
# Since the options are in descending order by order number,
# the option with the lowest order number takes precedence.
self._option_points_index = {
(option.criterion.name, option.points): option
for option in options
}
def find_criterion(self, criterion_name):
"""
Find a criterion by its name.
Args:
criterion_name (unicode): The name of the criterion to retrieve.
Returns:
Criterion
Raises:
InvalidRubricSelection
"""
if criterion_name not in self._criteria_index:
msg = (
u"Could not find criterion named \"{criterion}\" "
u"in the rubric with content hash \"{rubric_hash}\""
).format(
criterion=criterion_name,
rubric_hash=self.rubric.content_hash
)
raise InvalidRubricSelection(msg)
else:
return self._criteria_index[criterion_name]
def find_option(self, criterion_name, option_name):
"""
Find a rubric option by criterion name and option name.
Args:
criterion_name (unicode): The name of the criterion containing the option.
option_name (unicode): The name of the option to retrieve.
Returns:
CriterionOption
Raises:
InvalidRubricSelection
"""
key = (criterion_name, option_name)
if key not in self._option_index:
msg = (
u"Option \"{option}\" not found in rubric "
u"with hash {rubric_hash} for criterion \"{criterion}\""
).format(
option=option_name,
criterion=criterion_name,
rubric_hash=self.rubric.content_hash
)
raise InvalidRubricSelection(msg)
else:
return self._option_index[key]
def find_option_for_points(self, criterion_name, option_points):
"""
Find a rubric option by criterion name and option point value.
If multiple options in a criterion have the same point value,
return the first one (based on order number).
Args:
criterion_name (unicode): The name of the criterion containing the option.
option_points (int): The point value of the option.
Returns:
CriterionOption
Raises:
InvalidRubricSelection
"""
key = (criterion_name, option_points)
if key not in self._option_points_index:
msg = (
u"Option with points {option_points} not found in rubric "
u"with hash {rubric_hash} for criterion {criterion}"
).format(
option_points=option_points,
criterion=criterion_name,
rubric_hash=self.rubric.content_hash
)
raise InvalidRubricSelection(msg)
else:
# The index was built so that options with lower
# order numbers take precedence when point values collide.
return self._option_points_index[key]
@property
def criteria_names(self):
"""
Return a set of all criteria names in the rubric.
Returns:
set of unicode
"""
return set(self._criteria_index.keys())
def find_missing_criteria(self, criteria_names):
"""
Return a set of criteria names in the rubric that
are not in the provided list.
Args:
criteria_names (list of unicode): The criteria names to check.
Returns:
set of unicode: The missing criteria
"""
return set(self.criteria_names) - set(criteria_names)
def find_criteria_without_options(self):
"""
Return a set of `Criterion` models that have no options
(only written feedback).
Returns:
set of `Criterion`
"""
return set(
criterion for criterion in self._criteria_index.values()
if criterion.options.count() == 0
)
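# A minimal usage sketch (assuming a saved rubric with an option-bearing
# criterion named u"vocabulary" and a feedback-only criterion named u"notes"):
#
#     index = rubric.index
#     index.find_criterion(u"vocabulary")             # -> Criterion
#     index.find_option(u"vocabulary", u"good")       # -> CriterionOption
#     index.find_option_for_points(u"vocabulary", 1)  # -> CriterionOption
#     index.find_missing_criteria([u"vocabulary"])    # -> set([u"notes"])
#     index.find_criteria_without_options()           # -> set with the "notes" criterion
#     index.find_option(u"vocabulary", u"bogus")      # raises InvalidRubricSelection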
class Assessment(models.Model):
"""An evaluation made against a particular Submission and Rubric.
......@@ -335,7 +393,7 @@ class Assessment(models.Model):
objects that map to each :class:`Criterion` in the :class:`Rubric` we're
assessing against.
"""
MAXSIZE = 1024 * 100 # 100KB
MAX_FEEDBACK_SIZE = 1024 * 100
submission_uuid = models.CharField(max_length=128, db_index=True)
rubric = models.ForeignKey(Rubric)
......@@ -376,6 +434,41 @@ class Assessment(models.Model):
return u"Assessment {}".format(self.id)
@classmethod
def create(cls, rubric, scorer_id, submission_uuid, score_type, feedback=None, scored_at=None):
"""
Create a new assessment.
Args:
rubric (Rubric): The rubric associated with this assessment.
scorer_id (unicode): The ID of the scorer.
submission_uuid (str): The UUID of the submission being assessed.
score_type (unicode): The type of assessment (e.g. peer, self, or AI)
Kwargs:
feedback (unicode): Overall feedback on the submission.
scored_at (datetime): The time the assessment was created. Defaults to the current time.
Returns:
Assessment
"""
assessment_params = {
'rubric': rubric,
'scorer_id': scorer_id,
'submission_uuid': submission_uuid,
'score_type': score_type,
}
if scored_at is not None:
assessment_params['scored_at'] = scored_at
# Truncate the feedback if it exceeds the maximum size
if feedback is not None:
assessment_params['feedback'] = feedback[0:cls.MAX_FEEDBACK_SIZE]
return cls.objects.create(**assessment_params)
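# Sketch of the truncation behavior (hypothetical values): feedback longer
# than MAX_FEEDBACK_SIZE (100 KB) is silently cut, so
#     Assessment.create(rubric, u"Bob", u"uuid", u"PE", feedback=u"x" * 200000)
# stores only the first 102400 characters.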
@classmethod
def get_median_score_dict(cls, scores_dict):
"""Determine the median score in a dictionary of lists of scores
......@@ -477,9 +570,9 @@ class Assessment(models.Model):
scores = defaultdict(list)
for assessment in assessments:
for part in assessment.parts.all().select_related("option__criterion"):
criterion_name = part.option.criterion.name
scores[criterion_name].append(part.option.points)
for part in assessment.parts.all().select_related():
criterion_name = part.criterion.name
scores[criterion_name].append(part.points_earned)
cache.set(cache_key, scores)
return scores
......@@ -500,8 +593,15 @@ class AssessmentPart(models.Model):
MAX_FEEDBACK_SIZE = 1024 * 100
assessment = models.ForeignKey(Assessment, related_name='parts')
criterion = models.ForeignKey(Criterion, null=True, related_name="+")
option = models.ForeignKey(CriterionOption, related_name="+")
# Assessment parts are usually associated with an option
# (representing the point value selected for a particular criterion).
# It's possible, however, for an assessment part to contain
# only written feedback, with no point value.
# In this case, the assessment part is associated with a criterion,
# but not with any option (the `option` field is set to null).
criterion = models.ForeignKey(Criterion, related_name="+")
option = models.ForeignKey(CriterionOption, null=True, related_name="+")
# Free-form text feedback for the specific criterion
# Note that the `Assessment` model also has a feedback field,
......@@ -513,41 +613,163 @@ class AssessmentPart(models.Model):
@property
def points_earned(self):
return self.option.points
# By convention, an assessment part with no option (only feedback) earns 0 points.
return self.option.points if self.option is not None else 0
@property
def points_possible(self):
return self.option.criterion.points_possible
return self.criterion.points_possible
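# Sketch of the convention above: a part whose option is worth 2 points reports
# points_earned == 2, while a feedback-only part (option is None) reports
# points_earned == 0 and, because its criterion has no options,
# points_possible == 0.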
@classmethod
def add_to_assessment(cls, assessment, option_ids, criterion_feedback=None):
def create_from_option_names(cls, assessment, selected, feedback=None):
"""
Creates AssessmentParts and adds them to `assessment`.
Create new assessment parts and add them to an assessment.
Args:
assessment (Assessment): The assessment model we're adding parts to.
option_ids (list of int): List of primary keys for options the user selected.
assessment (Assessment): The assessment we're adding parts to.
selected (dict): A dictionary mapping criterion names to option names.
Kwargs:
criterion_feedback (dict): Dictionary mapping criterion names
to free-form text feedback on the criterion.
You don't need to include all the rubric criteria,
and keys that don't match any criterion will be ignored.
feedback (dict): A dictionary mapping criterion names to written
feedback for the criterion.
Returns:
None
list of `AssessmentPart`s
Raises:
InvalidRubricSelection
DatabaseError
"""
options = CriterionOption.objects.select_related().filter(pk__in=option_ids)
# Use the rubric index so we can retrieve options/criteria
# without repeatedly hitting the database.
# This will also validate our selections against the rubric.
rubric_index = assessment.rubric.index
# If the assessment type doesn't explicitly provide feedback,
# then fill in feedback-only criteria with an empty string for feedback.
if feedback is None:
feedback = {
criterion.name: u""
for criterion in rubric_index.find_criteria_without_options()
}
cls.objects.bulk_create([
cls(assessment=assessment, option_id=option.pk, criterion_id=option.criterion.pk)
for option in options
# Validate that we have selections for all criteria
# This will raise an exception if we're missing any criteria
cls._check_has_all_criteria(rubric_index, set(selected.keys() + feedback.keys()))
# Retrieve the criteria/option/feedback for criteria that have options.
# Since we're using the rubric's index, we'll get an `InvalidRubricSelection` error
# if we select an invalid criterion/option.
assessment_parts = [
{
'criterion': rubric_index.find_criterion(criterion_name),
'option': rubric_index.find_option(criterion_name, option_name),
'feedback': feedback.get(criterion_name, u"")[0:cls.MAX_FEEDBACK_SIZE],
}
for criterion_name, option_name in selected.iteritems()
]
# Some criteria may have no options at all, only written feedback.
# For these, we set `option` to None, indicating that the assessment part
# is not associated with any option, only a criterion.
for criterion_name, feedback_text in feedback.iteritems():
if criterion_name not in selected:
assessment_parts.append({
'criterion': rubric_index.find_criterion(criterion_name),
'option': None,
'feedback': feedback_text[0:cls.MAX_FEEDBACK_SIZE]
})
# Create assessment parts for each criterion and associate them with the assessment
# We use the dictionary we created earlier, which may have null options
# for feedback-only assessment parts.
return cls.objects.bulk_create([
cls(
assessment=assessment,
criterion=assessment_part['criterion'],
option=assessment_part['option'],
feedback=assessment_part['feedback']
)
for assessment_part in assessment_parts
])
@classmethod
def create_from_option_points(cls, assessment, selected):
"""
Create new assessment parts and add them to an assessment.
Args:
assessment (Assessment): The assessment we're adding parts to.
selected (dict): A dictionary mapping criterion names to option point values.
Returns:
list of `AssessmentPart`s
Raises:
InvalidRubricSelection
DatabaseError
"""
rubric_index = assessment.rubric.index
# Retrieve the criteria/option/feedback for criteria that have options.
# Since we're using the rubric's index, we'll get an `InvalidRubricSelection` error
# if we select an invalid criterion/option.
assessment_parts = [
{
'criterion': rubric_index.find_criterion(criterion_name),
'option': rubric_index.find_option_for_points(criterion_name, option_points),
}
for criterion_name, option_points in selected.iteritems()
]
# Add in feedback-only criteria
# (criteria that have 0 options)
for criterion in rubric_index.find_criteria_without_options():
assessment_parts.append({
'criterion': criterion,
'option': None
})
# Validate that we have selections for all criteria
# This will raise an exception if we're missing any criteria
cls._check_has_all_criteria(rubric_index, set(
part['criterion'].name for part in assessment_parts
))
# Create assessment parts for each criterion and associate them with the assessment
# Since we're not accepting written feedback, set all feedback to an empty string.
return cls.objects.bulk_create([
cls(
assessment=assessment,
criterion=assessment_part['criterion'],
option=assessment_part['option'],
feedback=u""
)
for assessment_part in assessment_parts
])
if criterion_feedback is not None:
for criterion_name, feedback in criterion_feedback.iteritems():
feedback = feedback[0:cls.MAX_FEEDBACK_SIZE]
assessment.parts.filter(
option__criterion__name=criterion_name
).update(feedback=feedback)
@classmethod
def _check_has_all_criteria(cls, rubric_index, selected_criteria):
"""
Verify that a selection (option or written feedback) was provided for every criterion in the rubric.
Args:
rubric_index (RubricIndex): The index of the rubric's data.
selected_criteria (list): list of criterion names
Returns:
None
Raises:
InvalidRubricSelection
"""
missing_criteria = rubric_index.find_missing_criteria(selected_criteria)
if len(missing_criteria) > 0:
msg = u"Missing selections for criteria: {missing}".format(missing=missing_criteria)
raise InvalidRubricSelection(msg)
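# A minimal usage sketch (assuming a rubric with an option-bearing criterion
# named u"clarity" and a feedback-only criterion named u"notes"):
#
#     assessment = Assessment.create(rubric, u"scorer", u"submission-uuid", u"PE")
#     AssessmentPart.create_from_option_names(
#         assessment,
#         {u"clarity": u"good"},
#         feedback={u"notes": u"Nice structure."},
#     )
#     # Or, when only point values are known (e.g. example-based grading):
#     AssessmentPart.create_from_option_points(assessment, {u"clarity": 1})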
......@@ -39,6 +39,9 @@ class TrainingExample(models.Model):
Returns:
TrainingExample
Raises:
InvalidRubricSelection
"""
content_hash = cls.calculate_hash(answer, options_selected, rubric)
example = TrainingExample.objects.create(
......@@ -46,11 +49,12 @@ class TrainingExample(models.Model):
raw_answer=json.dumps(answer),
rubric=rubric
)
options_ids = rubric.options_ids(options_selected)
for option in CriterionOption.objects.filter(pk__in=list(options_ids)):
# This will raise `InvalidRubricSelection` if the selected options
# do not match the rubric.
for criterion_name, option_name in options_selected.iteritems():
option = rubric.index.find_option(criterion_name, option_name)
example.options_selected.add(option)
return example
@property
......
......@@ -75,15 +75,6 @@ class CriterionSerializer(NestedModelSerializer):
model = Criterion
fields = ('order_num', 'name', 'prompt', 'options', 'points_possible')
def validate_options(self, attrs, source):
"""Make sure we have at least one CriterionOption in a Criterion."""
options = attrs[source]
if not options:
raise serializers.ValidationError(
"Criterion must have at least one option."
)
return attrs
class RubricSerializer(NestedModelSerializer):
"""Serializer for :class:`Rubric`."""
......@@ -150,7 +141,7 @@ class AssessmentPartSerializer(serializers.ModelSerializer):
class Meta:
model = AssessmentPart
fields = ('option', 'feedback')
fields = ('option', 'criterion', 'feedback')
class AssessmentSerializer(serializers.ModelSerializer):
......@@ -219,12 +210,15 @@ def full_assessment_dict(assessment, rubric_dict=None):
# `CriterionOption` again, we simply index into the places we expect them to
# be from the big, saved `Rubric` serialization.
parts = []
for part in assessment.parts.all().select_related("option__criterion"):
criterion_dict = rubric_dict["criteria"][part.option.criterion.order_num]
for part in assessment.parts.all().select_related("criterion", "option"):
criterion_dict = rubric_dict["criteria"][part.criterion.order_num]
options_dict = None
if part.option is not None:
options_dict = criterion_dict["options"][part.option.order_num]
options_dict["criterion"] = criterion_dict
parts.append({
"option": options_dict,
"criterion": criterion_dict,
"feedback": part.feedback
})
......@@ -232,7 +226,9 @@ def full_assessment_dict(assessment, rubric_dict=None):
# `Assessment` so we can again avoid DB calls.
assessment_dict["parts"] = parts
assessment_dict["points_earned"] = sum(
part_dict["option"]["points"] for part_dict in parts
part_dict["option"]["points"]
if part_dict["option"] is not None else 0
for part_dict in parts
)
assessment_dict["points_possible"] = rubric_dict["points_possible"]
......
......@@ -80,6 +80,7 @@ def deserialize_training_examples(examples, rubric_dict):
Raises:
InvalidRubric
InvalidRubricSelection
InvalidTrainingExample
Example usage:
......
......@@ -474,5 +474,110 @@
"Example 3 has a validation error: Training example must contain an \"answer\" field.",
"Example 3 has a validation error: Training example must contain an \"options_selected\" field."
]
},
"feedback_only_criterion": {
"rubric": {
"prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽",
"criteria": [
{
"order_num": 0,
"name": "vøȼȺƀᵾłȺɍɏ",
"prompt": "Ħøw vȺɍɨɇđ ɨs ŧħɇ vøȼȺƀᵾłȺɍɏ?",
"options": [
{
"order_num": 0,
"name": "𝒑𝒐𝒐𝒓",
"explanation": "𝕻𝖔𝖔𝖗 𝖏𝖔𝖇!",
"points": 0
},
{
"order_num": 1,
"name": "𝓰𝓸𝓸𝓭",
"explanation": "ﻭѻѻɗ ﻝѻ๒!",
"points": 1
}
]
},
{
"order_num": 1,
"name": "feedback only",
"prompt": "feedback only",
"options": []
}
]
},
"examples": [
{
"answer": "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
"options_selected": { "vøȼȺƀᵾłȺɍɏ": "𝓰𝓸𝓸𝓭" }
}
],
"errors": []
},
"feedback_only_criterion_extra_score": {
"rubric": {
"prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽",
"criteria": [
{
"order_num": 0,
"name": "vøȼȺƀᵾłȺɍɏ",
"prompt": "Ħøw vȺɍɨɇđ ɨs ŧħɇ vøȼȺƀᵾłȺɍɏ?",
"options": [
{
"order_num": 0,
"name": "𝒑𝒐𝒐𝒓",
"explanation": "𝕻𝖔𝖔𝖗 𝖏𝖔𝖇!",
"points": 0
},
{
"order_num": 1,
"name": "𝓰𝓸𝓸𝓭",
"explanation": "ﻭѻѻɗ ﻝѻ๒!",
"points": 1
}
]
},
{
"order_num": 1,
"name": "feedback only",
"prompt": "feedback only",
"options": []
}
]
},
"examples": [
{
"answer": "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
"options_selected": {
"vøȼȺƀᵾłȺɍɏ": "𝓰𝓸𝓸𝓭",
"feedback only": "𝓰𝓸𝓸𝓭"
}
}
],
"errors": ["Example 1 has an invalid option for \"feedback only\": \"𝓰𝓸𝓸𝓭\""]
},
"feedback_only_all_criteria": {
"rubric": {
"prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽",
"criteria": [
{
"order_num": 1,
"name": "feedback only",
"prompt": "feedback only",
"options": []
}
]
},
"examples": [
{
"answer": "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
"options_selected": {}
}
],
"errors": ["When you include a student training assessment, the rubric for the assessment must contain at least one criterion, and each criterion must contain at least two options."]
}
}
......@@ -14,8 +14,7 @@ from openassessment.assessment.api import ai as ai_api
from openassessment.assessment.models import (
AITrainingWorkflow, AIGradingWorkflow, AIClassifierSet, Assessment
)
from openassessment.assessment.models import AITrainingWorkflow, AIGradingWorkflow, AIClassifierSet
from openassessment.assessment.worker.algorithm import AIAlgorithm, AIAlgorithmError
from openassessment.assessment.worker.algorithm import AIAlgorithm
from openassessment.assessment.serializers import rubric_from_dict
from openassessment.assessment.errors import (
AITrainingRequestError, AITrainingInternalError, AIGradingRequestError,
......@@ -133,6 +132,50 @@ class AITrainingTest(CacheResetTest):
self.assertEqual(received_example.score, expected_score)
@override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
def test_train_classifiers_feedback_only_criterion(self):
# Modify the rubric to include a feedback-only criterion
# (a criterion with no options, just written feedback)
rubric = copy.deepcopy(RUBRIC)
rubric['criteria'].append({
'name': 'feedback only',
'prompt': 'feedback',
'options': []
})
# Schedule a training task
# (we use training examples that do NOT include the feedback-only criterion)
workflow_uuid = ai_api.train_classifiers(rubric, EXAMPLES, COURSE_ID, ITEM_ID, ALGORITHM_ID)
# Verify that no classifier was created for the feedback-only criterion
# Since there are no points associated with that criterion,
# there's no way for the AI algorithm to score it anyway.
workflow = AITrainingWorkflow.objects.get(uuid=workflow_uuid)
classifier_data = workflow.classifier_set.classifier_data_by_criterion
self.assertNotIn('feedback only', classifier_data)
@override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
def test_train_classifiers_all_feedback_only_criteria(self):
# Modify the rubric to include only feedback-only criteria
# (a criterion with no options, just written feedback)
rubric = copy.deepcopy(RUBRIC)
for criterion in rubric['criteria']:
criterion['options'] = []
# Modify the training examples to provide no scores
examples = copy.deepcopy(EXAMPLES)
for example in examples:
example['options_selected'] = {}
# Schedule a training task
# Our training examples have no options
workflow_uuid = ai_api.train_classifiers(rubric, examples, COURSE_ID, ITEM_ID, ALGORITHM_ID)
# Verify that no classifier was created for the feedback-only criteria
workflow = AITrainingWorkflow.objects.get(uuid=workflow_uuid)
classifier_data = workflow.classifier_set.classifier_data_by_criterion
self.assertEqual(classifier_data, {})
@override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
def test_train_classifiers_invalid_examples(self):
# Mutate an example so it does not match the rubric
mutated_examples = copy.deepcopy(EXAMPLES)
......@@ -202,6 +245,69 @@ class AIGradingTest(CacheResetTest):
self.assertEquals(score["points_earned"], 3)
@override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
def test_grade_essay_feedback_only_criterion(self):
# Modify the rubric to include a feedback-only criterion
# (a criterion with no options, just written feedback)
rubric = copy.deepcopy(RUBRIC)
rubric['criteria'].append({
'name': 'feedback only',
'prompt': 'feedback',
'options': []
})
# Train classifiers for the rubric
train_classifiers(rubric, self.CLASSIFIER_SCORE_OVERRIDES)
# Schedule a grading task and retrieve the assessment
ai_api.on_init(self.submission_uuid, rubric=rubric, algorithm_id=ALGORITHM_ID)
assessment = ai_api.get_latest_assessment(self.submission_uuid)
# Verify that the criteria with options were given scores
# (from the score override used by our fake classifiers)
self.assertEqual(assessment['parts'][0]['criterion']['name'], u"vøȼȺƀᵾłȺɍɏ")
self.assertEqual(assessment['parts'][0]['option']['points'], 1)
self.assertEqual(assessment['parts'][1]['criterion']['name'], u"ﻭɼค๓๓คɼ")
self.assertEqual(assessment['parts'][1]['option']['points'], 2)
# Verify that the criterion with no options (only feedback)
# has no score and empty feedback
self.assertEqual(assessment['parts'][2]['criterion']['name'], u"feedback only")
self.assertIs(assessment['parts'][2]['option'], None)
self.assertEqual(assessment['parts'][2]['feedback'], u"")
# Check the scores by criterion dict
score_dict = ai_api.get_assessment_scores_by_criteria(self.submission_uuid)
self.assertEqual(score_dict[u"vøȼȺƀᵾłȺɍɏ"], 1)
self.assertEqual(score_dict[u"ﻭɼค๓๓คɼ"], 2)
self.assertEqual(score_dict['feedback only'], 0)
@override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
def test_grade_essay_all_feedback_only_criteria(self):
# Modify the rubric to include only feedback-only criteria
rubric = copy.deepcopy(RUBRIC)
for criterion in rubric['criteria']:
criterion['options'] = []
# Train classifiers for the rubric
train_classifiers(rubric, {})
# Schedule a grading task and retrieve the assessment
ai_api.on_init(self.submission_uuid, rubric=rubric, algorithm_id=ALGORITHM_ID)
assessment = ai_api.get_latest_assessment(self.submission_uuid)
# Verify that all assessment parts have feedback set to an empty string
for part in assessment['parts']:
self.assertEqual(part['feedback'], u"")
# Check the scores by criterion dict
# Since none of the criteria had options, the scores should all default to 0
score_dict = ai_api.get_assessment_scores_by_criteria(self.submission_uuid)
self.assertItemsEqual(score_dict, {
u"vøȼȺƀᵾłȺɍɏ": 0,
u"ﻭɼค๓๓คɼ": 0,
})
@override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
def test_get_assessment_scores_by_criteria(self):
ai_api.on_init(self.submission_uuid, rubric=RUBRIC, algorithm_id=ALGORITHM_ID)
......@@ -331,7 +437,7 @@ class AIReschedulingTest(CacheResetTest):
training_done (bool): whether the user expects all training workflows to be finished
grading_done (bool): whether the user expects all grading workflows to be finished
"""
incomplete_training_workflows = AITrainingWorkflow.get_incomplete_workflows(course_id=COURSE_ID,item_id=ITEM_ID)
incomplete_training_workflows = AITrainingWorkflow.get_incomplete_workflows(course_id=COURSE_ID, item_id=ITEM_ID)
incomplete_grading_workflows = AIGradingWorkflow.get_incomplete_workflows(course_id=COURSE_ID, item_id=ITEM_ID)
if training_done is not None:
self.assertEqual(self._is_empty_generator(incomplete_training_workflows), training_done)
......@@ -371,7 +477,7 @@ class AIReschedulingTest(CacheResetTest):
"""
try:
ai_api.reschedule_unfinished_tasks(course_id=COURSE_ID, item_id=ITEM_ID, task_type=task_type)
except Exception as ex:
except Exception: # pylint: disable=W0703
# This exception is being raised because of a timeout.
pass
......@@ -392,7 +498,7 @@ class AIReschedulingTest(CacheResetTest):
self._assert_complete(grading_done=True, training_done=True)
@override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
def test_reschedule_training_success(self):
def test_reschedule_training_and_grading_success(self):
# Reschedule everything, expect all successes
ai_api.reschedule_unfinished_tasks(course_id=COURSE_ID, item_id=ITEM_ID, task_type=None)
......@@ -411,7 +517,7 @@ class AIReschedulingTest(CacheResetTest):
holds up for querysets with 125+ entries
"""
# Creates 125 more grades (for a total of 135)
for i in range(0, 125):
for _ in range(0, 125):
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
self.submission_uuid = submission['uuid']
ai_api.on_init(self.submission_uuid, rubric=RUBRIC, algorithm_id=ALGORITHM_ID)
......@@ -430,27 +536,27 @@ class AIReschedulingTest(CacheResetTest):
with mock.patch(patched_method) as mock_grade:
mock_grade.side_effect = NotConfigured
with self.assertRaises(AIGradingInternalError):
ai_api.reschedule_unfinished_tasks(course_id=COURSE_ID,item_id=ITEM_ID)
ai_api.reschedule_unfinished_tasks(course_id=COURSE_ID, item_id=ITEM_ID)
def test_reschedule_train_celery_error(self):
patched_method = 'openassessment.assessment.api.ai.training_tasks.reschedule_training_tasks.apply_async'
with mock.patch(patched_method) as mock_train:
mock_train.side_effect = NotConfigured
with self.assertRaises(AITrainingInternalError):
ai_api.reschedule_unfinished_tasks(course_id=COURSE_ID,item_id=ITEM_ID, task_type=None)
ai_api.reschedule_unfinished_tasks(course_id=COURSE_ID, item_id=ITEM_ID, task_type=None)
@mock.patch.object(AIGradingWorkflow, 'get_incomplete_workflows')
def test_get_incomplete_workflows_error_grading(self, mock_incomplete):
mock_incomplete.side_effect = DatabaseError
with self.assertRaises(AIReschedulingInternalError):
ai_api.reschedule_unfinished_tasks(course_id=COURSE_ID,item_id=ITEM_ID)
ai_api.reschedule_unfinished_tasks(course_id=COURSE_ID, item_id=ITEM_ID)
def test_get_incomplete_workflows_error_training(self):
patched_method = 'openassessment.assessment.models.ai.AIWorkflow.get_incomplete_workflows'
with mock.patch(patched_method) as mock_incomplete:
mock_incomplete.side_effect = DatabaseError
with self.assertRaises(Exception):
ai_api.reschedule_unfinished_tasks(course_id=COURSE_ID,item_id=ITEM_ID, task_type=u"train")
ai_api.reschedule_unfinished_tasks(course_id=COURSE_ID, item_id=ITEM_ID, task_type=u"train")
def test_reschedule_train_internal_celery_error(self):
patched_method = 'openassessment.assessment.worker.training.train_classifiers.apply_async'
......@@ -458,7 +564,7 @@ class AIReschedulingTest(CacheResetTest):
mock_train.side_effect = NotConfigured("NotConfigured")
with mock.patch('openassessment.assessment.worker.training.logger.exception') as mock_logger:
with self.assertRaises(Exception):
ai_api.reschedule_unfinished_tasks(course_id=COURSE_ID,item_id=ITEM_ID, task_type=u"train")
ai_api.reschedule_unfinished_tasks(course_id=COURSE_ID, item_id=ITEM_ID, task_type=u"train")
last_call = mock_logger.call_args[0][0]
self.assertTrue(u"NotConfigured" in last_call)
......
# coding=utf-8
"""
Tests for the assessment Django models.
"""
import copy
from openassessment.test_utils import CacheResetTest
from openassessment.assessment.serializers import rubric_from_dict
from openassessment.assessment.models import Assessment, AssessmentPart, InvalidRubricSelection
from .constants import RUBRIC
class AssessmentTest(CacheResetTest):
"""
Tests for the `Assessment` and `AssessmentPart` models.
"""
def test_create_with_feedback_only_criterion(self):
rubric = self._rubric_with_one_feedback_only_criterion()
assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")
# Create assessment parts
# We can't select an option for the last criterion, but we do
# provide written feedback.
selected = {
u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
}
feedback = {
u"feedback": u"𝕿𝖍𝖎𝖘 𝖎𝖘 𝖘𝖔𝖒𝖊 𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐."
}
AssessmentPart.create_from_option_names(
assessment, selected, feedback=feedback
)
# Check the score (the feedback-only assessment should count for 0 points)
self.assertEqual(assessment.points_earned, 3)
self.assertEqual(assessment.points_possible, 4)
# Check the feedback text
feedback_only = AssessmentPart.objects.get(criterion__name="feedback")
self.assertEqual(feedback_only.feedback, u"𝕿𝖍𝖎𝖘 𝖎𝖘 𝖘𝖔𝖒𝖊 𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐.")
def test_create_with_all_feedback_only_criteria(self):
rubric = self._rubric_with_all_feedback_only_criteria()
assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")
# Create assessment parts, each of which are feedback-only (no points)
selected = {}
feedback = {
u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
}
AssessmentPart.create_from_option_names(
assessment, selected, feedback=feedback
)
# Check the score (should be 0, since we haven't selected any points)
self.assertEqual(assessment.points_earned, 0)
self.assertEqual(assessment.points_possible, 0)
def test_create_from_option_points_feedback_only_criterion(self):
rubric = self._rubric_with_one_feedback_only_criterion()
assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")
# Create assessment parts by providing scores for options
# but NO feedback. This simulates how an example-based AI
# assessment is created.
selected = {
u"vøȼȺƀᵾłȺɍɏ": 2,
u"ﻭɼค๓๓คɼ": 1,
}
AssessmentPart.create_from_option_points(assessment, selected)
# Check the score (the feedback-only assessment should count for 0 points)
self.assertEqual(assessment.points_earned, 3)
self.assertEqual(assessment.points_possible, 4)
# Check the feedback text (should default to an empty string)
feedback_only = AssessmentPart.objects.get(criterion__name="feedback")
self.assertEqual(feedback_only.feedback, u"")
def test_create_from_option_points_all_feedback_only_criteria(self):
rubric = self._rubric_with_all_feedback_only_criteria()
assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")
# Since there are no criteria with options, and we're not
# providing written feedback, pass in an empty selection.
selected = {}
AssessmentPart.create_from_option_points(assessment, selected)
# Score should be zero, since none of the criteria have options
self.assertEqual(assessment.points_earned, 0)
self.assertEqual(assessment.points_possible, 0)
def test_default_feedback_for_feedback_only_criterion(self):
rubric = self._rubric_with_one_feedback_only_criterion()
assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")
# Create assessment parts, but do NOT provide any feedback
# This simulates how non-peer assessments are created
# Note that this is different from providing an empty feedback dict;
# here, we're not providing the `feedback` kwarg at all.
selected = {
u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
}
AssessmentPart.create_from_option_names(assessment, selected)
# Check the score (the feedback-only assessment should count for 0 points)
self.assertEqual(assessment.points_earned, 3)
self.assertEqual(assessment.points_possible, 4)
# Check the feedback text, which should default to an empty string
feedback_only = AssessmentPart.objects.get(criterion__name="feedback")
self.assertEqual(feedback_only.feedback, u"")
def test_no_feedback_provided_for_feedback_only_criterion(self):
rubric = self._rubric_with_one_feedback_only_criterion()
assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")
# Create assessment parts
# Do NOT provide feedback for the feedback-only criterion
selected = {
u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
}
feedback = {}
# Expect an error when we try to create the assessment parts
with self.assertRaises(InvalidRubricSelection):
AssessmentPart.create_from_option_names(assessment, selected, feedback=feedback)
def _rubric_with_one_feedback_only_criterion(self):
"""Create a rubric with one feedback-only criterion."""
rubric_dict = copy.deepcopy(RUBRIC)
rubric_dict['criteria'].append({
"order_num": 2,
"name": u"feedback",
"prompt": u"only feedback, no points",
"options": []
})
return rubric_from_dict(rubric_dict)
def _rubric_with_all_feedback_only_criteria(self):
"""Create a rubric with all feedback-only criteria."""
rubric_dict = copy.deepcopy(RUBRIC)
for criterion in rubric_dict['criteria']:
criterion['options'] = []
return rubric_from_dict(rubric_dict)
# -*- coding: utf-8 -*-
"""
Tests for assessment models.
"""
from openassessment.test_utils import CacheResetTest
from submissions import api as sub_api
from openassessment.assessment.models import (
Rubric, Criterion, CriterionOption, InvalidOptionSelection,
AssessmentFeedback, AssessmentFeedbackOption,
PeerWorkflow, PeerWorkflowItem
)
class TestRubricOptionIds(CacheResetTest):
"""
Test selection of options from a rubric.
"""
NUM_CRITERIA = 4
NUM_OPTIONS = 3
def setUp(self):
"""
Create a rubric in the database.
"""
self.rubric = Rubric.objects.create()
self.criteria = [
Criterion.objects.create(
rubric=self.rubric,
name="test criterion {num}".format(num=num),
order_num=num,
) for num in range(self.NUM_CRITERIA)
]
self.options = dict()
for criterion in self.criteria:
self.options[criterion.name] = [
CriterionOption.objects.create(
criterion=criterion,
name="test option {num}".format(num=num),
order_num=num,
points=num
) for num in range(self.NUM_OPTIONS)
]
def test_option_ids(self):
options_ids = self.rubric.options_ids({
"test criterion 0": "test option 0",
"test criterion 1": "test option 1",
"test criterion 2": "test option 2",
"test criterion 3": "test option 0",
})
self.assertEqual(options_ids, set([
self.options['test criterion 0'][0].id,
self.options['test criterion 1'][1].id,
self.options['test criterion 2'][2].id,
self.options['test criterion 3'][0].id
]))
def test_option_ids_different_order(self):
options_ids = self.rubric.options_ids({
"test criterion 0": "test option 0",
"test criterion 1": "test option 1",
"test criterion 2": "test option 2",
"test criterion 3": "test option 0",
})
self.assertEqual(options_ids, set([
self.options['test criterion 0'][0].id,
self.options['test criterion 1'][1].id,
self.options['test criterion 2'][2].id,
self.options['test criterion 3'][0].id
]))
def test_option_ids_missing_criteria(self):
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids({
"test criterion 0": "test option 0",
"test criterion 1": "test option 1",
"test criterion 3": "test option 2",
})
def test_option_ids_extra_criteria(self):
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids({
"test criterion 0": "test option 0",
"test criterion 1": "test option 1",
"test criterion 2": "test option 2",
"test criterion 3": "test option 1",
"extra criterion": "test",
})
def test_option_ids_mutated_criterion_name(self):
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids({
"test mutated criterion": "test option 1",
"test criterion 1": "test option 1",
"test criterion 2": "test option 2",
"test criterion 3": "test option 1",
})
def test_option_ids_mutated_option_name(self):
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids({
"test criterion 0": "test option 1",
"test criterion 1": "test mutated option",
"test criterion 2": "test option 2",
"test criterion 3": "test option 1",
})
class AssessmentFeedbackTest(CacheResetTest):
"""
Tests for assessment feedback.
This is feedback that students give in response to the peer assessments they receive.
"""
def setUp(self):
self.feedback = AssessmentFeedback.objects.create(
submission_uuid='test_submission',
feedback_text='test feedback',
)
def test_default_options(self):
self.assertEqual(self.feedback.options.count(), 0)
def test_add_options_all_new(self):
# We haven't created any feedback options yet, so these should be created.
self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
# Check the feedback options
options = self.feedback.options.all()
self.assertEqual(len(options), 2)
self.assertEqual(options[0].text, 'I liked my assessment')
self.assertEqual(options[1].text, 'I thought my assessment was unfair')
def test_add_options_some_new(self):
# Create one feedback option in the database
AssessmentFeedbackOption.objects.create(text='I liked my assessment')
# Add feedback options. The one that's new should be created.
self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
# Check the feedback options
options = self.feedback.options.all()
self.assertEqual(len(options), 2)
self.assertEqual(options[0].text, 'I liked my assessment')
self.assertEqual(options[1].text, 'I thought my assessment was unfair')
def test_add_options_empty(self):
# No options
self.feedback.add_options([])
self.assertEqual(len(self.feedback.options.all()), 0)
# Add an option
self.feedback.add_options(['test'])
self.assertEqual(len(self.feedback.options.all()), 1)
# Add an empty list of options
self.feedback.add_options([])
self.assertEqual(len(self.feedback.options.all()), 1)
def test_add_options_duplicates(self):
# Add some options, which will be created
self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
# Add some more options, one of which is a duplicate
self.feedback.add_options(['I liked my assessment', 'I disliked my assessment'])
# There should be three options
options = self.feedback.options.all()
self.assertEqual(len(options), 3)
self.assertEqual(options[0].text, 'I liked my assessment')
self.assertEqual(options[1].text, 'I thought my assessment was unfair')
self.assertEqual(options[2].text, 'I disliked my assessment')
# There should be only three options in the database
self.assertEqual(AssessmentFeedbackOption.objects.count(), 3)
def test_add_options_all_old(self):
# Add some options, which will be created
self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
# Add some more options, all of which are duplicates
self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
# There should be two options
options = self.feedback.options.all()
self.assertEqual(len(options), 2)
self.assertEqual(options[0].text, 'I liked my assessment')
self.assertEqual(options[1].text, 'I thought my assessment was unfair')
# There should be two options in the database
self.assertEqual(AssessmentFeedbackOption.objects.count(), 2)
def test_unicode(self):
# Create options with unicode
self.feedback.add_options([u'𝓘 𝓵𝓲𝓴𝓮𝓭 𝓶𝔂 𝓪𝓼𝓼𝓮𝓼𝓼𝓶𝓮𝓷𝓽', u'ノ イんougんイ ᄊリ ム丂丂乇丂丂ᄊ乇刀イ wム丂 u刀キムノ尺'])
# There should be two options in the database
self.assertEqual(AssessmentFeedbackOption.objects.count(), 2)
class PeerWorkflowTest(CacheResetTest):
"""
Tests for the peer workflow model.
"""
STUDENT_ITEM = {
'student_id': 'test_student',
'course_id': 'test_course',
'item_type': 'openassessment',
'item_id': 'test_item'
}
OTHER_STUDENT = {
'student_id': 'test_student_2',
'course_id': 'test_course',
'item_type': 'openassessment',
'item_id': 'test_item'
}
def test_create_item_multiple_available(self):
# Bugfix TIM-572
submitter_sub = sub_api.create_submission(self.STUDENT_ITEM, 'test answer')
submitter_workflow = PeerWorkflow.objects.create(
student_id=self.STUDENT_ITEM['student_id'],
item_id=self.STUDENT_ITEM['item_id'],
course_id=self.STUDENT_ITEM['course_id'],
submission_uuid=submitter_sub['uuid']
)
scorer_sub = sub_api.create_submission(self.OTHER_STUDENT, 'test answer 2')
scorer_workflow = PeerWorkflow.objects.create(
student_id=self.OTHER_STUDENT['student_id'],
item_id=self.OTHER_STUDENT['item_id'],
course_id=self.OTHER_STUDENT['course_id'],
submission_uuid=scorer_sub['uuid']
)
for _ in range(2):
PeerWorkflowItem.objects.create(
scorer=scorer_workflow,
author=submitter_workflow,
submission_uuid=submitter_sub['uuid']
)
# This used to cause an error when `get_or_create` returned multiple workflow items
PeerWorkflow.create_item(scorer_workflow, submitter_sub['uuid'])
# coding=utf-8
import datetime
import pytz
import copy
from django.db import DatabaseError, IntegrityError
from django.utils import timezone
......@@ -117,7 +118,7 @@ ASSESSMENT_DICT_PASS = {
# Answers are against RUBRIC_DICT -- this is worth 12 points
# Feedback text is one character over the limit.
LONG_FEEDBACK_TEXT = u"是" * Assessment.MAXSIZE + "."
LONG_FEEDBACK_TEXT = u"是" * Assessment.MAX_FEEDBACK_SIZE + "."
ASSESSMENT_DICT_HUGE = {
'overall_feedback': LONG_FEEDBACK_TEXT,
'criterion_feedback': {
......@@ -150,7 +151,7 @@ class TestPeerApi(CacheResetTest):
Tests for the peer assessment API functions.
"""
CREATE_ASSESSMENT_NUM_QUERIES = 61
CREATE_ASSESSMENT_NUM_QUERIES = 59
def test_create_assessment_points(self):
self._create_student_and_submission("Tim", "Tim's answer")
......@@ -173,9 +174,7 @@ class TestPeerApi(CacheResetTest):
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
# Creating feedback per criterion should need one additional query to update
# for each criterion that has feedback.
with self.assertNumQueries(self.CREATE_ASSESSMENT_NUM_QUERIES + 1):
with self.assertNumQueries(self.CREATE_ASSESSMENT_NUM_QUERIES):
assessment = peer_api.create_assessment(
bob_sub["uuid"],
bob["student_id"],
......@@ -196,6 +195,43 @@ class TestPeerApi(CacheResetTest):
expected_feedback = ASSESSMENT_DICT['criterion_feedback'].get(criterion_name, "")
self.assertEqual(part['feedback'], expected_feedback)
def test_create_assessment_criterion_with_zero_options(self):
self._create_student_and_submission("Tim", "Tim's answer")
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
# Modify the rubric to include a criterion with no options,
# only written feedback.
rubric = copy.deepcopy(RUBRIC_DICT)
rubric["criteria"].append({
"name": "feedback only",
"prompt": "feedback only",
"options": []
})
# Provide written feedback for the feedback-only criterion
feedback = {
"feedback only": u"This is some feedback"
}
assessment = peer_api.create_assessment(
bob_sub["uuid"],
bob["student_id"],
ASSESSMENT_DICT['options_selected'],
feedback, "",
rubric,
REQUIRED_GRADED_BY,
)
# Verify that the point values are the same
# (the feedback-only criterion isn't worth any points)
self.assertEqual(assessment["points_earned"], 6)
self.assertEqual(assessment["points_possible"], 14)
# Verify the feedback-only criterion assessment part
self.assertEqual(assessment["parts"][4]["criterion"]["name"], "feedback only")
self.assertIs(assessment["parts"][4]["option"], None)
self.assertEqual(assessment["parts"][4]["feedback"], u"This is some feedback")
def test_create_assessment_unknown_criterion_feedback(self):
self._create_student_and_submission("Tim", "Tim's answer")
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
......@@ -203,7 +239,10 @@ class TestPeerApi(CacheResetTest):
# Create an assessment where the criterion feedback uses
# a criterion name that isn't in the rubric.
assessment = peer_api.create_assessment(
# An exception should be raised, since this will be interpreted
# as adding an extra criterion with no options, just feedback.
with self.assertRaises(peer_api.PeerAssessmentRequestError):
peer_api.create_assessment(
bob_sub["uuid"],
bob["student_id"],
ASSESSMENT_DICT['options_selected'],
......@@ -213,10 +252,6 @@ class TestPeerApi(CacheResetTest):
REQUIRED_GRADED_BY,
)
# The criterion feedback should be ignored
for part_num in range(3):
self.assertEqual(assessment["parts"][part_num]["feedback"], "")
def test_create_huge_overall_feedback_error(self):
self._create_student_and_submission("Tim", "Tim's answer")
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
......@@ -234,12 +269,12 @@ class TestPeerApi(CacheResetTest):
)
# The assessment feedback text should be truncated
self.assertEqual(len(assessment_dict['feedback']), Assessment.MAXSIZE)
self.assertEqual(len(assessment_dict['feedback']), Assessment.MAX_FEEDBACK_SIZE)
# The length of the feedback text in the database should
# equal what we got from the API.
assessment = Assessment.objects.get()
self.assertEqual(len(assessment.feedback), Assessment.MAXSIZE)
self.assertEqual(len(assessment.feedback), Assessment.MAX_FEEDBACK_SIZE)
def test_create_huge_per_criterion_feedback_error(self):
self._create_student_and_submission("Tim", "Tim's answer")
......@@ -259,11 +294,11 @@ class TestPeerApi(CacheResetTest):
# Verify that the feedback has been truncated
for part in assessment['parts']:
self.assertEqual(len(part['feedback']), Assessment.MAXSIZE)
self.assertEqual(len(part['feedback']), Assessment.MAX_FEEDBACK_SIZE)
# Verify that the feedback in the database matches what we got back from the API
for part in AssessmentPart.objects.all():
self.assertEqual(len(part.feedback), Assessment.MAXSIZE)
self.assertEqual(len(part.feedback), Assessment.MAX_FEEDBACK_SIZE)
@file_data('data/valid_assessments.json')
def test_get_assessments(self, assessment_dict):
......@@ -1022,7 +1057,7 @@ class TestPeerApi(CacheResetTest):
peer_api.set_assessment_feedback(
{
'submission_uuid': tim_answer['uuid'],
'feedback_text': 'Boo'*AssessmentFeedback.MAXSIZE,
'feedback_text': 'Boo' * AssessmentFeedback.MAXSIZE,
}
)
......@@ -1265,6 +1300,39 @@ class TestPeerApi(CacheResetTest):
self.assertEqual(len(scored_assessments), 1)
self.assertEqual(scored_assessments[0]['scorer_id'], tim['student_id'])
@raises(peer_api.PeerAssessmentInternalError)
def test_create_assessment_database_error(self):
self._create_student_and_submission("Bob", "Bob's answer")
submission, student = self._create_student_and_submission("Jim", "Jim's answer")
peer_api.get_submission_to_assess(submission['uuid'], 1)
with patch.object(PeerWorkflow.objects, 'get') as mock_call:
mock_call.side_effect = DatabaseError("Kaboom!")
peer_api.create_assessment(
submission['uuid'],
student['student_id'],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY
)
@raises(peer_api.PeerAssessmentRequestError)
def test_create_assessment_invalid_rubric_error(self):
self._create_student_and_submission("Bob", "Bob's answer")
submission, student = self._create_student_and_submission("Jim", "Jim's answer")
peer_api.get_submission_to_assess(submission['uuid'], 1)
peer_api.create_assessment(
submission['uuid'],
student['student_id'],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
{"invalid_rubric!": "is invalid"},
REQUIRED_GRADED_BY
)
@staticmethod
def _create_student_and_submission(student, answer, date=None):
new_student_item = STUDENT_ITEM.copy()
......
......@@ -6,12 +6,12 @@ Tests for assessment models.
import copy
from openassessment.test_utils import CacheResetTest
from openassessment.assessment.models import (
Rubric, Criterion, CriterionOption, InvalidOptionSelection
Rubric, Criterion, CriterionOption, InvalidRubricSelection
)
from openassessment.assessment.test.constants import RUBRIC
class TestRubricOptionIds(CacheResetTest):
class RubricIndexTest(CacheResetTest):
"""
Test selection of options from a rubric.
"""
......@@ -23,6 +23,8 @@ class TestRubricOptionIds(CacheResetTest):
"""
Create a rubric in the database.
"""
super(RubricIndexTest, self).setUp()
self.rubric = Rubric.objects.create()
self.criteria = [
Criterion.objects.create(
......@@ -43,104 +45,73 @@ class TestRubricOptionIds(CacheResetTest):
) for num in range(self.NUM_OPTIONS)
]
def test_option_ids(self):
options_ids = self.rubric.options_ids({
"test criterion 0": "test option 0",
"test criterion 1": "test option 1",
"test criterion 2": "test option 2",
"test criterion 3": "test option 0",
})
self.assertEqual(options_ids, set([
self.options['test criterion 0'][0].id,
self.options['test criterion 1'][1].id,
self.options['test criterion 2'][2].id,
self.options['test criterion 3'][0].id
]))
def test_option_ids_different_order(self):
options_ids = self.rubric.options_ids({
"test criterion 0": "test option 0",
"test criterion 1": "test option 1",
"test criterion 2": "test option 2",
"test criterion 3": "test option 0",
})
self.assertEqual(options_ids, set([
self.options['test criterion 0'][0].id,
self.options['test criterion 1'][1].id,
self.options['test criterion 2'][2].id,
self.options['test criterion 3'][0].id
]))
def test_option_ids_missing_criteria(self):
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids({
"test criterion 0": "test option 0",
"test criterion 1": "test option 1",
"test criterion 3": "test option 2",
})
def test_option_ids_extra_criteria(self):
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids({
"test criterion 0": "test option 0",
"test criterion 1": "test option 1",
"test criterion 2": "test option 2",
"test criterion 3": "test option 1",
"extra criterion": "test",
})
def test_option_ids_mutated_criterion_name(self):
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids({
"test mutated criterion": "test option 1",
"test criterion 1": "test option 1",
"test criterion 2": "test option 2",
"test criterion 3": "test option 1",
})
def test_option_ids_mutated_option_name(self):
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids({
"test criterion 0": "test option 1",
"test criterion 1": "test mutated option",
"test criterion 2": "test option 2",
"test criterion 3": "test option 1",
})
def test_options_ids_points(self):
options_ids = self.rubric.options_ids_for_points({
'test criterion 0': 0,
'test criterion 1': 1,
'test criterion 2': 2,
'test criterion 3': 1
})
self.assertEqual(options_ids, set([
self.options['test criterion 0'][0].id,
self.options['test criterion 1'][1].id,
self.options['test criterion 2'][2].id,
self.options['test criterion 3'][1].id
]))
def test_options_ids_points_caching(self):
# First call: the dict is not cached
with self.assertNumQueries(1):
self.rubric.options_ids_for_points({
'test criterion 0': 0,
'test criterion 1': 1,
'test criterion 2': 2,
'test criterion 3': 1
})
# Second call: the dict is cached, so no additional queries are needed
with self.assertNumQueries(0):
self.rubric.options_ids_for_points({
'test criterion 0': 1,
'test criterion 1': 2,
'test criterion 2': 1,
'test criterion 3': 0
})
def test_options_ids_first_of_duplicate_points(self):
def test_find_option(self):
self.assertEqual(
self.rubric.index.find_option("test criterion 0", "test option 0"),
self.options["test criterion 0"][0]
)
self.assertEqual(
self.rubric.index.find_option("test criterion 1", "test option 1"),
self.options["test criterion 1"][1]
)
self.assertEqual(
self.rubric.index.find_option("test criterion 2", "test option 2"),
self.options["test criterion 2"][2]
)
self.assertEqual(
self.rubric.index.find_option("test criterion 3", "test option 0"),
self.options["test criterion 3"][0]
)
def test_find_missing_criteria(self):
missing = self.rubric.index.find_missing_criteria([
'test criterion 0', 'test criterion 1', 'test criterion 3'
])
expected_missing = set(['test criterion 2'])
self.assertEqual(missing, expected_missing)
def test_invalid_option(self):
with self.assertRaises(InvalidRubricSelection):
self.rubric.index.find_option("test criterion 0", "invalid")
def test_valid_option_wrong_criterion(self):
# Add another option to the first criterion
new_option = CriterionOption.objects.create(
criterion=self.criteria[0],
name="extra option",
order_num=(self.NUM_OPTIONS + 1),
points=4
)
# We should be able to find it in the first criterion
self.assertEqual(
new_option,
self.rubric.index.find_option("test criterion 0", "extra option")
)
# ... but not from another criterion
with self.assertRaises(InvalidRubricSelection):
self.rubric.index.find_option("test criterion 1", "extra option")
def test_find_option_for_points(self):
self.assertEqual(
self.rubric.index.find_option_for_points("test criterion 0", 0),
self.options["test criterion 0"][0]
)
self.assertEqual(
self.rubric.index.find_option_for_points("test criterion 1", 1),
self.options["test criterion 1"][1]
)
self.assertEqual(
self.rubric.index.find_option_for_points("test criterion 2", 2),
self.options["test criterion 2"][2]
)
self.assertEqual(
self.rubric.index.find_option_for_points("test criterion 3", 1),
self.options["test criterion 3"][1]
)
def test_find_option_for_points_first_of_duplicate_points(self):
# Change the first criterion options so that the second and third
# option have the same point value
self.options['test criterion 0'][1].points = 5
......@@ -149,23 +120,42 @@ class TestRubricOptionIds(CacheResetTest):
self.options['test criterion 0'][2].save()
# Should get the first option back
options_ids = self.rubric.options_ids_for_points({
'test criterion 0': 5,
'test criterion 1': 1,
'test criterion 2': 2,
'test criterion 3': 1
})
self.assertIn(self.options['test criterion 0'][1].id, options_ids)
def test_options_ids_points_invalid_selection(self):
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids_for_points({
'test criterion 0': self.NUM_OPTIONS + 1,
'test criterion 1': 2,
'test criterion 2': 1,
'test criterion 3': 0
})
option = self.rubric.index.find_option_for_points("test criterion 0", 5)
self.assertEqual(option, self.options['test criterion 0'][1])
def test_find_option_for_points_invalid_selection(self):
# No such point value
with self.assertRaises(InvalidRubricSelection):
self.rubric.index.find_option_for_points("test criterion 0", 10)
# No such criterion
with self.assertRaises(InvalidRubricSelection):
self.rubric.index.find_option_for_points("no such criterion", 0)
def test_valid_points_wrong_criterion(self):
# Add another option to the first criterion
new_option = CriterionOption.objects.create(
criterion=self.criteria[0],
name="extra option",
order_num=(self.NUM_OPTIONS + 1),
points=10
)
# We should be able to find it in the first criterion
self.assertEqual(
new_option,
self.rubric.index.find_option_for_points("test criterion 0", 10)
)
# ... but not from another criterion
with self.assertRaises(InvalidRubricSelection):
self.rubric.index.find_option_for_points("test criterion 1", 10)
class RubricHashTest(CacheResetTest):
"""
Tests of the rubric content and structure hash.
"""
def test_structure_hash_identical(self):
first_hash = Rubric.structure_hash_from_dict(RUBRIC)
......
......@@ -91,7 +91,7 @@ class TestSelfApi(CacheResetTest):
create_assessment(
'invalid_submission_uuid', u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
self.OPTIONS_SELECTED, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1)
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
def test_create_assessment_wrong_user(self):
......@@ -103,7 +103,7 @@ class TestSelfApi(CacheResetTest):
create_assessment(
'invalid_submission_uuid', u'another user',
self.OPTIONS_SELECTED, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1)
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
def test_create_assessment_invalid_criterion(self):
......@@ -119,7 +119,7 @@ class TestSelfApi(CacheResetTest):
create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
options, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1)
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
def test_create_assessment_invalid_option(self):
......@@ -135,7 +135,7 @@ class TestSelfApi(CacheResetTest):
create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
options, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1)
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
def test_create_assessment_missing_criterion(self):
......@@ -151,7 +151,7 @@ class TestSelfApi(CacheResetTest):
create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
options, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1)
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
def test_create_assessment_timestamp(self):
......@@ -200,3 +200,51 @@ class TestSelfApi(CacheResetTest):
def test_is_complete_no_submission(self):
# This submission uuid does not exist
self.assertFalse(submitter_is_finished('abc1234', {}))
def test_create_assessment_criterion_with_zero_options(self):
# Create a submission to self-assess
submission = create_submission(self.STUDENT_ITEM, "Test answer")
# Modify the rubric to include a criterion with no options (only written feedback)
rubric = copy.deepcopy(self.RUBRIC)
rubric['criteria'].append({
"name": "feedback only",
"prompt": "feedback only",
"options": []
})
# Create a self-assessment for the submission
assessment = create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
self.OPTIONS_SELECTED, rubric,
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
# The self-assessment should have set the feedback for
# the criterion with no options to an empty string
self.assertEqual(assessment["parts"][2]["option"], None)
self.assertEqual(assessment["parts"][2]["feedback"], u"")
def test_create_assessment_all_criteria_have_zero_options(self):
# Create a submission to self-assess
submission = create_submission(self.STUDENT_ITEM, "Test answer")
# Use a rubric in which every criterion has no options (written feedback only)
rubric = copy.deepcopy(self.RUBRIC)
for criterion in rubric["criteria"]:
criterion["options"] = []
# Create a self-assessment for the submission
# We don't select any options, since none of the criteria have options
options_selected = {}
assessment = create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
options_selected, rubric,
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
# The self-assessment should have set the feedback for
# all criteria to an empty string.
for part in assessment["parts"]:
self.assertEqual(part["option"], None)
self.assertEqual(part["feedback"], u"")
# coding=utf-8
"""
Tests for assessment serializers.
"""
import json
import os.path
import copy
from openassessment.test_utils import CacheResetTest
from openassessment.assessment.models import Criterion, CriterionOption, Rubric, AssessmentFeedback
from openassessment.assessment.models import (
Assessment, AssessmentPart, AssessmentFeedback
)
from openassessment.assessment.serializers import (
InvalidRubric, RubricSerializer, rubric_from_dict,
AssessmentFeedbackSerializer
rubric_from_dict, full_assessment_dict,
AssessmentFeedbackSerializer, InvalidRubric
)
from .constants import RUBRIC
def json_data(filename):
curr_dir = os.path.dirname(__file__)
......@@ -14,7 +24,7 @@ def json_data(filename):
return json.load(json_file)
class TestRubricDeserialization(CacheResetTest):
class RubricDeserializationTest(CacheResetTest):
def test_rubric_only_created_once(self):
# Make sure sending the same Rubric data twice only creates one Rubric,
......@@ -35,7 +45,7 @@ class TestRubricDeserialization(CacheResetTest):
rubric_from_dict(json_data('data/rubric/no_points.json'))
class TestCriterionDeserialization(CacheResetTest):
class CriterionDeserializationTest(CacheResetTest):
def test_empty_criteria(self):
with self.assertRaises(InvalidRubric) as cm:
......@@ -54,20 +64,11 @@ class TestCriterionDeserialization(CacheResetTest):
)
class TestCriterionOptionDeserialization(CacheResetTest):
class CriterionOptionDeserializationTest(CacheResetTest):
def test_empty_options(self):
with self.assertRaises(InvalidRubric) as cm:
rubric_from_dict(json_data('data/rubric/empty_options.json'))
self.assertEqual(
cm.exception.errors,
{
'criteria': [
{}, # There are no errors in the first criterion
{'options': [u'Criterion must have at least one option.']}
]
}
)
rubric = rubric_from_dict(json_data('data/rubric/empty_options.json'))
self.assertEqual(rubric.criteria.count(), 2)
def test_missing_options(self):
with self.assertRaises(InvalidRubric) as cm:
......@@ -83,7 +84,7 @@ class TestCriterionOptionDeserialization(CacheResetTest):
)
class TestAssessmentFeedbackSerializer(CacheResetTest):
class AssessmentFeedbackSerializerTest(CacheResetTest):
def test_serialize(self):
feedback = AssessmentFeedback.objects.create(
......@@ -114,3 +115,41 @@ class TestAssessmentFeedbackSerializer(CacheResetTest):
'options': [],
'assessments': [],
})
class AssessmentSerializerTest(CacheResetTest):
def test_full_assessment_dict_criteria_no_options(self):
# Create a rubric with a criterion that has no options (just feedback)
rubric_dict = copy.deepcopy(RUBRIC)
rubric_dict['criteria'].append({
'order_num': 2,
'name': 'feedback only',
'prompt': 'feedback only',
'options': []
})
rubric = rubric_from_dict(rubric_dict)
# Create an assessment for the rubric
assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")
selected = {
u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
}
feedback = {
u"feedback only": u"enjoy the feedback!"
}
AssessmentPart.create_from_option_names(assessment, selected, feedback=feedback)
# Serialize the assessment
serialized = full_assessment_dict(assessment)
# Verify that the assessment dict correctly serialized the criteria with options.
self.assertEqual(serialized['parts'][0]['criterion']['name'], u"vøȼȺƀᵾłȺɍɏ")
self.assertEqual(serialized['parts'][0]['option']['name'], u"𝓰𝓸𝓸𝓭")
self.assertEqual(serialized['parts'][1]['criterion']['name'], u"ﻭɼค๓๓คɼ")
self.assertEqual(serialized['parts'][1]['option']['name'], u"єχ¢єℓℓєηт")
# Verify that the assessment dict correctly serialized the criterion with no options.
self.assertIs(serialized['parts'][2]['option'], None)
self.assertEqual(serialized['parts'][2]['criterion']['name'], u"feedback only")
......@@ -221,9 +221,9 @@ class CsvWriter(object):
for part in assessment_parts:
self._write_unicode('assessment_part', [
part.assessment.id,
part.option.points,
part.option.criterion.name,
part.option.name,
part.points_earned,
part.criterion.name,
part.option.name if part.option is not None else u"",
part.feedback
])
......
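For reference, the 'assessment_part' output now records points_earned directly and tolerates parts without an option; a sketch of the rows produced (values mirror the expected-CSV fixture later in this commit, and the empty option name marks a feedback-only criterion):
# Illustrative 'assessment_part' rows: header, a part scored with an option,
# and a feedback-only part (points_earned 0, empty option_name).
example_rows = [
    ["assessment_id", "points_earned", "criterion_name", "option_name", "feedback"],
    ["1", "3", "clear-headed", "Isaac Asimov", ""],
    ["1", "0", "feedback only", "", "Feedback!"],
]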
......@@ -4,12 +4,16 @@
<header class="step__header ui-toggle-visibility__control">
<h2 class="step__title">
<span class="wrapper--copy">
{% if score %}
<span class="step__label">{% trans "Your Grade" %}: </span>
<span class="grade__value">
<span class="grade__value__title">
{% blocktrans with points_earned=score.points_earned points_possible=score.points_possible%}<span class="grade__value__earned">{{ points_earned }}</span> out of <span class="grade__value__potential">{{ points_possible }}</span>{% endblocktrans %}
</span>
</span>
{% else %}
<span class="step__label">{% trans "Your Grade" %}</span>
{% endif %}
</span>
</h2>
</header>
......@@ -133,13 +137,15 @@
{% endfor %}
{% if criterion.feedback %}
<li class="answer--feedback ui-toggle-visibility is--collapsed">
<li class="answer--feedback ui-toggle-visibility {% if criterion.options %}is--collapsed{% endif %}">
{% if criterion.options %}
<h5 class="answer--feedback__title ui-toggle-visibility__control">
<i class="ico icon-caret-right"></i>
<span class="answer--feedback__title__copy">{% trans "Additional Comments" %} ({{ criterion.feedback|length }})</span>
</h5>
{% endif %}
<ul class="answer--feedback__content ui-toggle-visibility__content">
<ul class="answer--feedback__content {% if criterion.options %}ui-toggle-visibility__content{% endif %}">
{% for feedback in criterion.feedback %}
<li class="feedback feedback--{{ forloop.counter }}">
<h6 class="feedback__source">
......
......@@ -60,7 +60,10 @@
<ol class="list list--fields assessment__rubric">
{% for criterion in rubric_criteria %}
<li class="field field--radio is--required assessment__rubric__question ui-toggle-visibility" id="assessment__rubric__question--{{ criterion.order_num }}">
<li
class="field field--radio is--required assessment__rubric__question ui-toggle-visibility {% if criterion.options %}has--options{% endif %}"
id="assessment__rubric__question--{{ criterion.order_num }}"
>
<h4 class="question__title ui-toggle-visibility__control">
<i class="ico icon-caret-right"></i>
<span class="ui-toggle-visibility__control__copy question__title__copy">{{ criterion.prompt }}</span>
......@@ -88,7 +91,7 @@
</li>
{% endfor %}
{% if criterion.feedback == 'optional' %}
{% if criterion.feedback == 'optional' or criterion.feedback == 'required' %}
<li class="answer--feedback">
<div class="wrapper--input">
<label for="assessment__rubric__question--{{ criterion.order_num }}__feedback" class="answer__label">{% trans "Comments" %}</label>
......@@ -98,6 +101,7 @@
value="{{ criterion.name }}"
name="{{ criterion.name }}"
maxlength="300"
{% if criterion.feedback == 'required' %}required{% endif %}
>
</textarea>
</div>
......
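These rubric templates key off criterion.options and criterion.feedback; a sketch of a context entry that exercises the new 'required' feedback path (it matches the feedback-only criterion fixture added later in this commit):
# Illustrative rubric_criteria entry: no options, required written feedback.
feedback_only_criterion = {
    "name": "feedback_only",
    "prompt": "Feedback only, no options!",
    "order_num": 2,
    "feedback": "required",  # renders a required comments textarea
    "options": [],           # no radio options, so the 'has--options' class is omitted
}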
......@@ -52,7 +52,10 @@
<ol class="list list--fields assessment__rubric">
{% for criterion in rubric_criteria %}
<li class="field field--radio is--required assessment__rubric__question ui-toggle-visibility" id="assessment__rubric__question--{{ criterion.order_num }}">
<li
class="field field--radio is--required assessment__rubric__question ui-toggle-visibility {% if criterion.options %}has--options{% endif %}"
id="assessment__rubric__question--{{ criterion.order_num }}"
>
<h4 class="question__title ui-toggle-visibility__control">
<i class="ico icon-caret-right"></i>
<span class="ui-toggle-visibility__control__copy question__title__copy">{{ criterion.prompt }}</span>
......@@ -80,7 +83,7 @@
</li>
{% endfor %}
{% if criterion.feedback == 'optional' %}
{% if criterion.feedback == 'optional' or criterion.feedback == 'required' %}
<li class="answer--feedback">
<div class="wrapper--input">
<label for="assessment__rubric__question--{{ criterion.order_num }}__feedback" class="answer__label">{% trans "Comments" %}</label>
......@@ -90,6 +93,7 @@
value="{{ criterion.name }}"
name="{{ criterion.name }}"
maxlength="300"
{% if criterion.feedback == 'required' %}required{% endif %}
>
</textarea>
</div>
......
......@@ -50,7 +50,11 @@
<fieldset class="assessment__fields">
<ol class="list list--fields assessment__rubric">
{% for criterion in rubric_criteria %}
<li class="field field--radio is--required assessment__rubric__question ui-toggle-visibility" id="assessment__rubric__question--{{ criterion.order_num }}">
{% if criterion.options %}
<li
class="field field--radio is--required assessment__rubric__question ui-toggle-visibility has--options"
id="assessment__rubric__question--{{ criterion.order_num }}"
>
<h4 class="question__title ui-toggle-visibility__control">
<i class="ico icon-caret-right"></i>
<span class="question__title__copy">{{ criterion.prompt }}</span>
......@@ -79,6 +83,7 @@
</ol>
</div>
</li>
{% endif %}
{% endfor %}
</ol>
</fieldset>
......
......@@ -73,7 +73,11 @@
<fieldset class="assessment__fields">
<ol class="list list--fields assessment__rubric">
{% for criterion in training_rubric.criteria %}
<li class="field field--radio is--required assessment__rubric__question ui-toggle-visibility" id="assessment__rubric__question--{{ criterion.order_num }}">
{% if criterion.options %}
<li
class="field field--radio is--required assessment__rubric__question ui-toggle-visibility has--options"
id="assessment__rubric__question--{{ criterion.order_num }}"
>
<h4 class="question__title ui-toggle-visibility__control">
<i class="ico icon-caret-right"></i>
<span class="question__title__copy">{{ criterion.prompt }}</span>
......@@ -115,6 +119,7 @@
</ol>
</div>
</li>
{% endif %}
{% endfor %}
</ol>
</fieldset>
......
......@@ -506,6 +506,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 1,
"criterion": 5,
"option": 25,
"feedback": ""
}
......@@ -515,6 +516,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 1,
"criterion": 4,
"option": 19,
"feedback": "Elit nonumy m\u00eal ut, nam \u00e9sse fabul\u00e1s n\u00f3"
}
......@@ -524,6 +526,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 1,
"criterion": 6,
"option": 30,
"feedback": "Per in\u00e2n\u00ed dol\u00f3re an, \u00fat s\u00e9a t\u00f4ta qu\u00e0eque d\u00edssenti\u00fant"
}
......@@ -533,6 +536,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 2,
"criterion": 5,
"option": 25,
"feedback": ""
}
......@@ -542,6 +546,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 2,
"criterion": 4,
"option": 20,
"feedback": ""
}
......@@ -551,6 +556,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 2,
"criterion": 6,
"option": 30,
"feedback": ""
}
......@@ -560,6 +566,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 3,
"criterion": 5,
"option": 24,
"feedback": ""
}
......@@ -569,6 +576,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 3,
"criterion": 4,
"option": 19,
"feedback": ""
}
......@@ -578,6 +586,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 3,
"criterion": 6,
"option": 31,
"feedback": ""
}
......@@ -587,6 +596,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 4,
"criterion": 4,
"option": 18,
"feedback": ""
}
......@@ -596,6 +606,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 4,
"criterion": 6,
"option": 30,
"feedback": ""
}
......@@ -605,6 +616,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 4,
"criterion": 5,
"option": 23,
"feedback": ""
}
......
[
{
"pk": 2,
"model": "workflow.assessmentworkflow",
"fields": {
"status": "done",
"uuid": "3884f3cc-d0ae-11e3-9820-14109fd8dc43",
"created": "2014-04-30T21:27:24.236Z",
"submission_uuid": "28cebeca-d0ab-11e3-a6ab-14109fd8dc43",
"modified": "2014-04-30T21:28:41.814Z",
"item_id": "openassessmentblock-poverty-rubric.openassessment.d0.u0",
"course_id": "edX/Enchantment_101/April_1",
"status_changed": "2014-04-30T21:28:41.814Z"
}
},
{
"pk": 1,
"model": "workflow.assessmentworkflow",
"fields": {
"status": "done",
"uuid": "178beedc-d0ae-11e3-afc3-14109fd8dc43",
"created": "2014-04-30T21:26:28.917Z",
"submission_uuid": "cf5190b8-d0aa-11e3-a734-14109fd8dc43",
"modified": "2014-04-30T21:28:49.284Z",
"item_id": "openassessmentblock-poverty-rubric.openassessment.d0.u0",
"course_id": "edX/Enchantment_101/April_1",
"status_changed": "2014-04-30T21:28:49.284Z"
}
},
{
"pk": 4,
"model": "assessment.rubric",
"fields": {
"content_hash": "1641a7cd3ab1cca196ba04db334641478b636199"
}
},
{
"pk": 1,
"model": "assessment.rubric",
"fields": {
"content_hash": "7405a513d9f99b62dd561816f20cdb90b09b8060"
}
},
{
"pk": 2,
"model": "assessment.rubric",
"fields": {
"content_hash": "bfc465aed851e45c8c4c7635d11f3114aa21f865"
}
},
{
"pk": 3,
"model": "assessment.rubric",
"fields": {
"content_hash": "d722b38507cda59c113983bc2c6014b848a2ae65"
}
},
{
"pk": 1,
"model": "assessment.criterion",
"fields": {
"order_num": 0,
"prompt": "How concise is it?",
"rubric": 1,
"name": "concise"
}
},
{
"pk": 2,
"model": "assessment.criterion",
"fields": {
"order_num": 1,
"prompt": "How clear is the thinking?",
"rubric": 1,
"name": "clear-headed"
}
},
{
"pk": 3,
"model": "assessment.criterion",
"fields": {
"order_num": 2,
"prompt": "Lastly, how is its form? Punctuation, grammar, and spelling all count.",
"rubric": 1,
"name": "form"
}
},
{
"pk": 4,
"model": "assessment.criterion",
"fields": {
"order_num": 0,
"prompt": "How concise is it?",
"rubric": 2,
"name": "concise"
}
},
{
"pk": 5,
"model": "assessment.criterion",
"fields": {
"order_num": 1,
"prompt": "How clear is the thinking?",
"rubric": 2,
"name": "clear-headed"
}
},
{
"pk": 6,
"model": "assessment.criterion",
"fields": {
"order_num": 2,
"prompt": "Lastly, how is its form? Punctuation, grammar, and spelling all count.",
"rubric": 2,
"name": "form"
}
},
{
"pk": 7,
"model": "assessment.criterion",
"fields": {
"order_num": 0,
"prompt": "How concise is it?",
"rubric": 3,
"name": "concise"
}
},
{
"pk": 8,
"model": "assessment.criterion",
"fields": {
"order_num": 0,
"prompt": "How concise is it?",
"rubric": 4,
"name": "concise"
}
},
{
"pk": 9,
"model": "assessment.criterion",
"fields": {
"order_num": 1,
"prompt": "How clear is the thinking?",
"rubric": 4,
"name": "clear-headed"
}
},
{
"pk": 10,
"model": "assessment.criterion",
"fields": {
"order_num": 2,
"prompt": "Lastly, how is its form? Punctuation, grammar, and spelling all count.",
"rubric": 4,
"name": "form"
}
},
{
"pk": 11,
"model": "assessment.criterion",
"fields": {
"order_num": 3,
"prompt": "Feedback only, no options",
"rubric": 4,
"name": "feedback only"
}
},
{
"pk": 1,
"model": "assessment.criterionoption",
"fields": {
"order_num": 0,
"explanation": "\n In \"Cryptonomicon\", Stephenson spent multiple pages talking about breakfast cereal.\n While hilarious, in recent years his work has been anything but 'concise'.\n ",
"points": 0,
"criterion": 1,
"name": "Neal Stephenson (late)"
}
},
{
"pk": 2,
"model": "assessment.criterionoption",
"fields": {
"order_num": 1,
"explanation": "\n If the author wrote something cyclopean that staggers the mind, score it thus.\n ",
"points": 1,
"criterion": 1,
"name": "HP Lovecraft"
}
},
{
"pk": 3,
"model": "assessment.criterionoption",
"fields": {
"order_num": 2,
"explanation": "\n Tight prose that conveys a wealth of information about the world in relatively\n few words. Example, \"The door irised open and he stepped inside.\"\n ",
"points": 3,
"criterion": 1,
"name": "Robert Heinlein"
}
},
{
"pk": 4,
"model": "assessment.criterionoption",
"fields": {
"order_num": 3,
"explanation": "\n When Stephenson still had an editor, his prose was dense, with anecdotes about\n nitrox abuse implying main characters' whole life stories.\n ",
"points": 4,
"criterion": 1,
"name": "Neal Stephenson (early)"
}
},
{
"pk": 5,
"model": "assessment.criterionoption",
"fields": {
"order_num": 4,
"explanation": "\n Score the work this way if it makes you weep, and the removal of a single\n word would make you sneer.\n ",
"points": 5,
"criterion": 1,
"name": "Earnest Hemingway"
}
},
{
"pk": 6,
"model": "assessment.criterionoption",
"fields": {
"order_num": 0,
"explanation": "",
"points": 0,
"criterion": 2,
"name": "Yogi Berra"
}
},
{
"pk": 7,
"model": "assessment.criterionoption",
"fields": {
"order_num": 1,
"explanation": "",
"points": 1,
"criterion": 2,
"name": "Hunter S. Thompson"
}
},
{
"pk": 8,
"model": "assessment.criterionoption",
"fields": {
"order_num": 2,
"explanation": "",
"points": 2,
"criterion": 2,
"name": "Robert Heinlein"
}
},
{
"pk": 9,
"model": "assessment.criterionoption",
"fields": {
"order_num": 3,
"explanation": "",
"points": 3,
"criterion": 2,
"name": "Isaac Asimov"
}
},
{
"pk": 10,
"model": "assessment.criterionoption",
"fields": {
"order_num": 4,
"explanation": "\n Coolly rational, with a firm grasp of the main topics, a crystal-clear train of thought,\n and unemotional examination of the facts. This is the only item explained in this category,\n to show that explained and unexplained items can be mixed.\n ",
"points": 10,
"criterion": 2,
"name": "Spock"
}
},
{
"pk": 11,
"model": "assessment.criterionoption",
"fields": {
"order_num": 0,
"explanation": "",
"points": 0,
"criterion": 3,
"name": "lolcats"
}
},
{
"pk": 12,
"model": "assessment.criterionoption",
"fields": {
"order_num": 1,
"explanation": "",
"points": 1,
"criterion": 3,
"name": "Facebook"
}
},
{
"pk": 13,
"model": "assessment.criterionoption",
"fields": {
"order_num": 2,
"explanation": "",
"points": 2,
"criterion": 3,
"name": "Reddit"
}
},
{
"pk": 14,
"model": "assessment.criterionoption",
"fields": {
"order_num": 3,
"explanation": "",
"points": 3,
"criterion": 3,
"name": "metafilter"
}
},
{
"pk": 15,
"model": "assessment.criterionoption",
"fields": {
"order_num": 4,
"explanation": "",
"points": 4,
"criterion": 3,
"name": "Usenet, 1996"
}
},
{
"pk": 16,
"model": "assessment.criterionoption",
"fields": {
"order_num": 5,
"explanation": "",
"points": 5,
"criterion": 3,
"name": "The Elements of Style"
}
},
{
"pk": 17,
"model": "assessment.criterionoption",
"fields": {
"order_num": 0,
"explanation": "",
"points": 0,
"criterion": 4,
"name": "The Bible"
}
},
{
"pk": 18,
"model": "assessment.criterionoption",
"fields": {
"order_num": 1,
"explanation": "",
"points": 1,
"criterion": 4,
"name": "Earnest Hemingway"
}
},
{
"pk": 19,
"model": "assessment.criterionoption",
"fields": {
"order_num": 2,
"explanation": "",
"points": 3,
"criterion": 4,
"name": "Matsuo Basho"
}
},
{
"pk": 20,
"model": "assessment.criterionoption",
"fields": {
"order_num": 0,
"explanation": "",
"points": 0,
"criterion": 5,
"name": "Eric"
}
},
{
"pk": 21,
"model": "assessment.criterionoption",
"fields": {
"order_num": 1,
"explanation": "",
"points": 1,
"criterion": 5,
"name": "John"
}
},
{
"pk": 22,
"model": "assessment.criterionoption",
"fields": {
"order_num": 2,
"explanation": "",
"points": 2,
"criterion": 5,
"name": "Ian"
}
},
{
"pk": 23,
"model": "assessment.criterionoption",
"fields": {
"order_num": 0,
"explanation": "",
"points": 0,
"criterion": 6,
"name": "IRC"
}
},
{
"pk": 24,
"model": "assessment.criterionoption",
"fields": {
"order_num": 1,
"explanation": "",
"points": 1,
"criterion": 6,
"name": "Real Email"
}
},
{
"pk": 25,
"model": "assessment.criterionoption",
"fields": {
"order_num": 2,
"explanation": "",
"points": 2,
"criterion": 6,
"name": "Old-timey letters"
}
},
{
"pk": 26,
"model": "assessment.criterionoption",
"fields": {
"order_num": 0,
"explanation": "",
"points": 0,
"criterion": 7,
"name": "The Bible"
}
},
{
"pk": 27,
"model": "assessment.criterionoption",
"fields": {
"order_num": 1,
"explanation": "",
"points": 1,
"criterion": 7,
"name": "Earnest Hemingway"
}
},
{
"pk": 28,
"model": "assessment.criterionoption",
"fields": {
"order_num": 2,
"explanation": "",
"points": 3,
"criterion": 7,
"name": "Matsuo Basho"
}
},
{
"pk": 29,
"model": "assessment.criterionoption",
"fields": {
"order_num": 0,
"explanation": "\n In \"Cryptonomicon\", Stephenson spent multiple pages talking about breakfast cereal.\n While hilarious, in recent years his work has been anything but 'concise'.\n ",
"points": 0,
"criterion": 8,
"name": "Neal Stephenson (late)"
}
},
{
"pk": 30,
"model": "assessment.criterionoption",
"fields": {
"order_num": 1,
"explanation": "\n If the author wrote something cyclopean that staggers the mind, score it thus.\n ",
"points": 1,
"criterion": 8,
"name": "HP Lovecraft"
}
},
{
"pk": 31,
"model": "assessment.criterionoption",
"fields": {
"order_num": 2,
"explanation": "\n Tight prose that conveys a wealth of information about the world in relatively\n few words. Example, \"The door irised open and he stepped inside.\"\n ",
"points": 3,
"criterion": 8,
"name": "Robert Heinlein"
}
},
{
"pk": 32,
"model": "assessment.criterionoption",
"fields": {
"order_num": 3,
"explanation": "\n When Stephenson still had an editor, his prose was dense, with anecdotes about\n nitrox abuse implying main characters' whole life stories.\n ",
"points": 4,
"criterion": 8,
"name": "Neal Stephenson (early)"
}
},
{
"pk": 33,
"model": "assessment.criterionoption",
"fields": {
"order_num": 4,
"explanation": "\n Score the work this way if it makes you weep, and the removal of a single\n word would make you sneer.\n ",
"points": 5,
"criterion": 8,
"name": "Earnest Hemingway"
}
},
{
"pk": 34,
"model": "assessment.criterionoption",
"fields": {
"order_num": 0,
"explanation": "",
"points": 0,
"criterion": 9,
"name": "Yogi Berra"
}
},
{
"pk": 35,
"model": "assessment.criterionoption",
"fields": {
"order_num": 1,
"explanation": "",
"points": 1,
"criterion": 9,
"name": "Hunter S. Thompson"
}
},
{
"pk": 36,
"model": "assessment.criterionoption",
"fields": {
"order_num": 2,
"explanation": "",
"points": 2,
"criterion": 9,
"name": "Robert Heinlein"
}
},
{
"pk": 37,
"model": "assessment.criterionoption",
"fields": {
"order_num": 3,
"explanation": "",
"points": 3,
"criterion": 9,
"name": "Isaac Asimov"
}
},
{
"pk": 38,
"model": "assessment.criterionoption",
"fields": {
"order_num": 4,
"explanation": "\n Coolly rational, with a firm grasp of the main topics, a crystal-clear train of thought,\n and unemotional examination of the facts. This is the only item explained in this category,\n to show that explained and unexplained items can be mixed.\n ",
"points": 10,
"criterion": 9,
"name": "Spock"
}
},
{
"pk": 39,
"model": "assessment.criterionoption",
"fields": {
"order_num": 0,
"explanation": "",
"points": 0,
"criterion": 10,
"name": "lolcats"
}
},
{
"pk": 40,
"model": "assessment.criterionoption",
"fields": {
"order_num": 1,
"explanation": "",
"points": 1,
"criterion": 10,
"name": "Facebook"
}
},
{
"pk": 41,
"model": "assessment.criterionoption",
"fields": {
"order_num": 2,
"explanation": "",
"points": 2,
"criterion": 10,
"name": "Reddit"
}
},
{
"pk": 42,
"model": "assessment.criterionoption",
"fields": {
"order_num": 3,
"explanation": "",
"points": 3,
"criterion": 10,
"name": "metafilter"
}
},
{
"pk": 43,
"model": "assessment.criterionoption",
"fields": {
"order_num": 4,
"explanation": "",
"points": 4,
"criterion": 10,
"name": "Usenet, 1996"
}
},
{
"pk": 44,
"model": "assessment.criterionoption",
"fields": {
"order_num": 5,
"explanation": "",
"points": 5,
"criterion": 10,
"name": "The Elements of Style"
}
},
{
"pk": 1,
"model": "assessment.assessment",
"fields": {
"scorer_id": "other",
"feedback": "Donec consequat vitae ante in pellentesque.",
"submission_uuid": "cf5190b8-d0aa-11e3-a734-14109fd8dc43",
"scored_at": "2014-04-30T21:06:35.019Z",
"rubric": 4,
"score_type": "PE"
}
},
{
"pk": 1,
"model": "assessment.assessmentpart",
"fields": {
"assessment": 1,
"criterion": 8,
"option": 32,
"feedback": "Praesent ac lorem ac nunc tincidunt ultricies sit amet ut magna."
}
},
{
"pk": 2,
"model": "assessment.assessmentpart",
"fields": {
"assessment": 1,
"criterion": 10,
"option": 44,
"feedback": "Fusce varius, elit ut blandit consequat, odio ante mollis lectus"
}
},
{
"pk": 3,
"model": "assessment.assessmentpart",
"fields": {
"assessment": 1,
"criterion": 9,
"option": 37,
"feedback": ""
}
},
{
"pk": 4,
"model": "assessment.assessmentpart",
"fields": {
"assessment": 1,
"criterion": 11,
"option": null,
"feedback": "Feedback!"
}
},
{
"pk": 1,
"model": "assessment.peerworkflow",
"fields": {
"completed_at": null,
"student_id": "student_1",
"created_at": "2014-04-30T21:02:59.297Z",
"submission_uuid": "cf5190b8-d0aa-11e3-a734-14109fd8dc43",
"course_id": "edX/Enchantment_101/April_1",
"item_id": "openassessmentblock-poverty-rubric.openassessment.d0.u0",
"grading_completed_at": "2014-04-30T21:06:35.090Z"
}
},
{
"pk": 2,
"model": "assessment.peerworkflow",
"fields": {
"completed_at": "2014-04-30T21:06:35.332Z",
"student_id": "other",
"created_at": "2014-04-30T21:05:29.427Z",
"submission_uuid": "28cebeca-d0ab-11e3-a6ab-14109fd8dc43",
"course_id": "edX/Enchantment_101/April_1",
"item_id": "openassessmentblock-poverty-rubric.openassessment.d0.u0",
"grading_completed_at": null
}
},
{
"pk": 1,
"model": "assessment.peerworkflowitem",
"fields": {
"scored": false,
"author": 1,
"submission_uuid": "cf5190b8-d0aa-11e3-a734-14109fd8dc43",
"scorer": 2,
"started_at": "2014-04-30T21:05:29.994Z",
"assessment": 1
}
},
{
"pk": 1,
"model": "submissions.studentitem",
"fields": {
"course_id": "edX/Enchantment_101/April_1",
"student_id": "student_1",
"item_id": "openassessmentblock-poverty-rubric.openassessment.d0.u0",
"item_type": "openassessment"
}
},
{
"pk": 2,
"model": "submissions.studentitem",
"fields": {
"course_id": "edX/Enchantment_101/April_1",
"student_id": "other",
"item_id": "openassessmentblock-poverty-rubric.openassessment.d0.u0",
"item_type": "openassessment"
}
},
{
"pk": 2,
"model": "submissions.submission",
"fields": {
"submitted_at": "2014-04-30T21:05:29.372Z",
"student_item": 2,
"created_at": "2014-04-30T21:05:29.380Z",
"raw_answer": "{\"text\": \"Etiam vel neque id nunc lacinia tincidunt.\"}",
"attempt_number": 1,
"uuid": "28cebeca-d0ab-11e3-a6ab-14109fd8dc43"
}
},
{
"pk": 1,
"model": "submissions.submission",
"fields": {
"submitted_at": "2014-04-30T21:02:59.234Z",
"student_item": 1,
"created_at": "2014-04-30T21:02:59.241Z",
"raw_answer": "{\"text\": \"Lorem ipsum dolor sit amet\"}",
"attempt_number": 1,
"uuid": "cf5190b8-d0aa-11e3-a734-14109fd8dc43"
}
}
]
......@@ -656,6 +656,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 1,
"criterion": 8,
"option": 32,
"feedback": "Praesent ac lorem ac nunc tincidunt ultricies sit amet ut magna."
}
......@@ -665,6 +666,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 1,
"criterion": 10,
"option": 44,
"feedback": "Fusce varius, elit ut blandit consequat, odio ante mollis lectus"
}
......@@ -674,6 +676,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 1,
"criterion": 9,
"option": 37,
"feedback": ""
}
......
......@@ -692,6 +692,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 1,
"criterion": 4,
"option": 32,
"feedback": "Praesent ac lorem ac nunc tincidunt ultricies sit amet ut magna."
}
......@@ -701,6 +702,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 1,
"criterion": 10,
"option": 44,
"feedback": "Fusce varius, elit ut blandit consequat, odio ante mollis lectus"
}
......@@ -710,6 +712,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 1,
"criterion": 9,
"option": 37,
"feedback": ""
}
......@@ -719,6 +722,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 2,
"criterion": 8,
"option": 33,
"feedback": ""
}
......@@ -728,6 +732,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 2,
"criterion": 10,
"option": 44,
"feedback": ""
}
......@@ -737,6 +742,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 2,
"criterion": 9,
"option": 38,
"feedback": ""
}
......@@ -746,6 +752,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 3,
"criterion": 8,
"option": 32,
"feedback": "Aenean vehicula nunc quis semper porttitor. "
}
......@@ -755,6 +762,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 3,
"criterion": 10,
"option": 42,
"feedback": "Etiam vitae facilisis ante, in tristique lacus."
}
......@@ -764,6 +772,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 3,
"criterion": 9,
"option": 38,
"feedback": ""
}
......@@ -773,6 +782,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 4,
"criterion": 10,
"option": 43,
"feedback": ""
}
......@@ -782,6 +792,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 4,
"criterion": 9,
"option": 38,
"feedback": ""
}
......@@ -791,6 +802,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 4,
"criterion": 8,
"option": 31,
"feedback": ""
}
......
......@@ -668,6 +668,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 1,
"criterion": 8,
"option": 32,
"feedback": "Praesent ac lorem ac nunc tincidunt ultricies sit amet ut magna."
}
......@@ -677,6 +678,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 1,
"criterion": 10,
"option": 44,
"feedback": "Fusce varius, elit ut blandit consequat, odio ante mollis lectus"
}
......@@ -686,6 +688,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 1,
"criterion": 9,
"option": 37,
"feedback": ""
}
......@@ -695,6 +698,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 2,
"criterion": 8,
"option": 33,
"feedback": ""
}
......@@ -704,6 +708,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 2,
"criterion": 10,
"option": 44,
"feedback": ""
}
......@@ -713,6 +718,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 2,
"criterion": 9,
"option": 38,
"feedback": ""
}
......
......@@ -238,6 +238,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 1,
"criterion": 3,
"option": 9,
"feedback": "\u0547\ufec9\u0e23\u0547 \u0e23\u0547\u027c\u0671\u0e01\ufeed"
}
......@@ -247,6 +248,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 1,
"criterion": 4,
"option": 11,
"feedback": ""
}
......@@ -256,6 +258,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 2,
"criterion": 4,
"option": 12,
"feedback": ""
}
......@@ -265,6 +268,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 2,
"criterion": 3,
"option": 7,
"feedback": ""
}
......@@ -274,6 +278,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 3,
"criterion": 3,
"option": 9,
"feedback": "\u0547\ufec9\u0e23\u0547 \u0e23\u0547\u027c\u0671\u0e01\ufeed"
}
......@@ -283,6 +288,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 3,
"criterion": 4,
"option": 12,
"feedback": ""
}
......@@ -292,6 +298,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 4,
"criterion": 4,
"option": 12,
"feedback": ""
}
......@@ -301,6 +308,7 @@
"model": "assessment.assessmentpart",
"fields": {
"assessment": 4,
"criterion": 3,
"option": 7,
"feedback": ""
}
......
......@@ -151,5 +151,33 @@
["2", "I disagree with one or more of the peer assessments of my response."]
]
}
},
"feedback_only_criterion": {
"fixture": "db_fixtures/feedback_only_criterion.json",
"course_id": "edX/Enchantment_101/April_1",
"expected_csv": {
"assessment": [
[
"id", "submission_uuid", "scored_at", "scorer_id", "score_type",
"points_possible", "feedback"
],
[
"1", "cf5190b8-d0aa-11e3-a734-14109fd8dc43",
"2014-04-30 21:06:35.019000+00:00",
"other",
"PE",
"20",
"Donec consequat vitae ante in pellentesque."
]
],
"assessment_part": [
["assessment_id", "points_earned", "criterion_name", "option_name", "feedback"],
["1", "4", "concise", "Neal Stephenson (early)", "Praesent ac lorem ac nunc tincidunt ultricies sit amet ut magna."],
["1", "5", "form", "The Elements of Style", "Fusce varius, elit ut blandit consequat, odio ante mollis lectus"],
["1", "3", "clear-headed", "Isaac Asimov", ""],
["1", "0", "feedback only", "", "Feedback!"]
]
}
}
}
......@@ -116,6 +116,8 @@ class GradeMixin(object):
# the score for our current submission UUID.
# We look up the score by submission UUID instead of student item
# to ensure that the score always matches the rubric.
# It's possible for the score to be `None` even if the workflow status is "done"
# when all the criteria in the rubric are feedback-only (no options).
score = workflow['score']
context = {
......@@ -246,7 +248,7 @@ class GradeMixin(object):
for assessment in peer_assessments:
for part in assessment['parts']:
if part['feedback']:
part_criterion_name = part['option']['criterion']['name']
part_criterion_name = part['criterion']['name']
criteria_feedback[part_criterion_name].append(part['feedback'])
for criterion in criteria:
......
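Because the score can legitimately be None in the all-feedback-only case noted above, downstream code should not assume points are present. A minimal sketch under that assumption (the helper name is illustrative, not part of this commit):
def points_summary(workflow):
    """Return (points_earned, points_possible), or None when the rubric
    contains only feedback-only criteria and no score was assigned."""
    score = workflow.get('score')
    if score is None:
        return None
    return (score['points_earned'], score['points_possible'])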
......@@ -574,6 +574,57 @@ class OpenAssessmentBlock(
if assessment["name"] == mixin_name:
return assessment
def publish_assessment_event(self, event_name, assessment):
"""
Emit an analytics event for the peer assessment.
Args:
event_name (str): An identifier for this event type.
assessment (dict): The serialized assessment model.
Returns:
None
"""
parts_list = []
for part in assessment["parts"]:
# Some assessment parts do not include point values,
# only written feedback. In this case, the assessment
# part won't have an associated option.
option_dict = None
if part["option"] is not None:
option_dict = {
"name": part["option"]["name"],
"points": part["option"]["points"],
}
# All assessment parts are associated with criteria
criterion_dict = {
"name": part["criterion"]["name"],
"points_possible": part["criterion"]["points_possible"]
}
parts_list.append({
"option": option_dict,
"criterion": criterion_dict,
"feedback": part["feedback"]
})
self.runtime.publish(
self, event_name,
{
"feedback": assessment["feedback"],
"rubric": {
"content_hash": assessment["rubric"]["content_hash"],
},
"scorer_id": assessment["scorer_id"],
"score_type": assessment["score_type"],
"scored_at": assessment["scored_at"],
"submission_uuid": assessment["submission_uuid"],
"parts": parts_list
}
)
def _serialize_opaque_key(self, key):
"""
Gracefully handle opaque keys, both before and after the transition.
......
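With publish_assessment_event above, an event for a rubric that mixes option-based and feedback-only criteria carries parts like the following (a sketch with illustrative values; the feedback-only part has option set to None):
example_parts = [
    {
        # Criterion scored with a selected option.
        "option": {"name": "Good", "points": 1},
        "criterion": {"name": "vocabulary", "points_possible": 1},
        "feedback": "",
    },
    {
        # Feedback-only criterion: no option exists, so "option" is None.
        "option": None,
        "criterion": {"name": "feedback_only", "points_possible": 0},
        "feedback": "Enjoy the feedback!",
    },
]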
......@@ -80,7 +80,8 @@ class PeerAssessmentMixin(object):
)
# Emit analytics event...
self._publish_peer_assessment_event(assessment)
self.publish_assessment_event("openassessmentblock.peer_assess", assessment)
except (PeerAssessmentRequestError, PeerAssessmentWorkflowError):
logger.warning(
u"Peer API error for submission UUID {}".format(self.submission_uuid),
......@@ -260,42 +261,6 @@ class PeerAssessmentMixin(object):
return peer_submission
def _publish_peer_assessment_event(self, assessment):
"""
Emit an analytics event for the peer assessment.
Args:
assessment (dict): The serialized assessment model.
Returns:
None
"""
self.runtime.publish(
self,
"openassessmentblock.peer_assess",
{
"feedback": assessment["feedback"],
"rubric": {
"content_hash": assessment["rubric"]["content_hash"],
},
"scorer_id": assessment["scorer_id"],
"score_type": assessment["score_type"],
"scored_at": assessment["scored_at"],
"submission_uuid": assessment["submission_uuid"],
"parts": [
{
"option": {
"name": part["option"]["name"],
"points": part["option"]["points"],
},
"feedback": part["feedback"],
}
for part in assessment["parts"]
]
}
)
def _clean_criterion_feedback(self, criterion_feedback):
"""
Remove per-criterion feedback for criteria with feedback disabled
......@@ -312,5 +277,5 @@ class PeerAssessmentMixin(object):
criterion['name']: criterion_feedback[criterion['name']]
for criterion in self.rubric_criteria
if criterion['name'] in criterion_feedback
and criterion.get('feedback', 'disabled') == 'optional'
and criterion.get('feedback', 'disabled') in ['optional', 'required']
}
......@@ -117,29 +117,8 @@ class SelfAssessmentMixin(object):
data['options_selected'],
{"criteria": self.rubric_criteria}
)
self.runtime.publish(
self,
"openassessmentblock.self_assess",
{
"feedback": assessment["feedback"],
"rubric": {
"content_hash": assessment["rubric"]["content_hash"],
},
"scorer_id": assessment["scorer_id"],
"score_type": assessment["score_type"],
"scored_at": assessment["scored_at"],
"submission_uuid": assessment["submission_uuid"],
"parts": [
{
"option": {
"name": part["option"]["name"],
"points": part["option"]["points"]
}
}
for part in assessment["parts"]
]
}
)
self.publish_assessment_event("openassessmentblock.self_assess", assessment)
# After we've created the self-assessment, we need to update the workflow.
self.update_workflow_status()
except (self_api.SelfAssessmentRequestError, workflow_api.AssessmentWorkflowRequestError):
......
......@@ -418,5 +418,55 @@
]
},
"output": "oa_staff_info.html"
},
{
"template": "openassessmentblock/peer/oa_peer_assessment.html",
"context": {
"rubric_criteria": [
{
"name": "vocabulary",
"prompt": "vocabulary",
"order_num": 0,
"feedback": "optional",
"options": [
{
"order_num": 0,
"points": 0,
"name": "Bad"
},
{
"order_num": 1,
"points": 1,
"name": "Good"
}
]
},
{
"name": "grammar",
"prompt": "grammar",
"order_num": 1,
"options": [
{
"order_num": 0,
"points": 0,
"name": "Bad"
},
{
"order_num": 1,
"points": 1,
"name": "Good"
}
]
},
{
"name": "feedback_only",
"prompt": "Feedback only, no options!",
"order_num": 2,
"feedback": "required",
"options": []
}
]
},
"output": "oa_rubric.html"
}
]
if(typeof OpenAssessment=="undefined"||!OpenAssessment){OpenAssessment={}}if(typeof window.gettext==="undefined"){window.gettext=function(text){return text}}OpenAssessment.BaseView=function(runtime,element,server){this.runtime=runtime;this.element=element;this.server=server;this.responseView=new OpenAssessment.ResponseView(this.element,this.server,this);this.trainingView=new OpenAssessment.StudentTrainingView(this.element,this.server,this);this.selfView=new OpenAssessment.SelfView(this.element,this.server,this);this.peerView=new OpenAssessment.PeerView(this.element,this.server,this);this.gradeView=new OpenAssessment.GradeView(this.element,this.server,this);this.messageView=new OpenAssessment.MessageView(this.element,this.server,this);this.staffInfoView=new OpenAssessment.StaffInfoView(this.element,this.server,this)};OpenAssessment.BaseView.prototype={scrollToTop:function(){if($.scrollTo instanceof Function){$(window).scrollTo($("#openassessment__steps"),800,{offset:-50})}},setUpCollapseExpand:function(parentSel){parentSel.find(".ui-toggle-visibility__control").click(function(eventData){var sel=$(eventData.target).closest(".ui-toggle-visibility");sel.toggleClass("is--collapsed")})},load:function(){this.responseView.load();this.loadAssessmentModules();this.staffInfoView.load()},loadAssessmentModules:function(){this.trainingView.load();this.peerView.load();this.selfView.load();this.gradeView.load()},loadMessageView:function(){this.messageView.load()},toggleActionError:function(type,msg){var element=this.element;var container=null;if(type=="save"){container=".response__submission__actions"}else if(type=="submit"||type=="peer"||type=="self"||type=="student-training"){container=".step__actions"}else if(type=="feedback_assess"){container=".submission__feedback__actions"}if(container===null){if(msg!==null){console.log(msg)}}else{var msgHtml=msg===null?"":msg;$(container+" .message__content",element).html("<p>"+msgHtml+"</p>");$(container,element).toggleClass("has--error",msg!==null)}},showLoadError:function(step){var container="#openassessment__"+step;$(container).toggleClass("has--error",true);$(container+" .step__status__value i").removeClass().addClass("ico icon-warning-sign");$(container+" .step__status__value .copy").html(gettext("Unable to Load"))}};function OpenAssessmentBlock(runtime,element){var server=new OpenAssessment.Server(runtime,element);var view=new OpenAssessment.BaseView(runtime,element,server);view.load()}OpenAssessment.StudioView=function(runtime,element,server){this.runtime=runtime;this.server=server;this.codeBox=CodeMirror.fromTextArea($(element).find(".openassessment-editor").first().get(0),{mode:"xml",lineNumbers:true,lineWrapping:true});var view=this;$(element).find(".openassessment-save-button").click(function(eventData){view.save()});$(element).find(".openassessment-cancel-button").click(function(eventData){view.cancel()})};OpenAssessment.StudioView.prototype={load:function(){var view=this;this.server.loadXml().done(function(xml){view.codeBox.setValue(xml)}).fail(function(msg){view.showError(msg)})},save:function(){var view=this;this.server.checkReleased().done(function(isReleased){if(isReleased){view.confirmPostReleaseUpdate($.proxy(view.updateXml,view))}else{view.updateXml()}}).fail(function(errMsg){view.showError(msg)})},confirmPostReleaseUpdate:function(onConfirm){var msg=gettext("This problem has already been released. 
Any changes will apply only to future assessments.");if(confirm(msg)){onConfirm()}},updateXml:function(){this.runtime.notify("save",{state:"start"});var xml=this.codeBox.getValue();var view=this;this.server.updateXml(xml).done(function(){view.runtime.notify("save",{state:"end"});view.load()}).fail(function(msg){view.showError(msg)})},cancel:function(){this.runtime.notify("cancel",{})},showError:function(errorMsg){this.runtime.notify("error",{msg:errorMsg})}};function OpenAssessmentEditor(runtime,element){var server=new OpenAssessment.Server(runtime,element);var view=new OpenAssessment.StudioView(runtime,element,server);view.load()}OpenAssessment.GradeView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView};OpenAssessment.GradeView.prototype={load:function(){var view=this;var baseView=this.baseView;this.server.render("grade").done(function(html){$("#openassessment__grade",view.element).replaceWith(html);view.installHandlers()}).fail(function(errMsg){baseView.showLoadError("grade",errMsg)})},installHandlers:function(){var sel=$("#openassessment__grade",this.element);this.baseView.setUpCollapseExpand(sel);var view=this;sel.find("#feedback__submit").click(function(eventObject){eventObject.preventDefault();view.submitFeedbackOnAssessment()})},feedbackText:function(text){if(typeof text==="undefined"){return $("#feedback__remarks__value",this.element).val()}else{$("#feedback__remarks__value",this.element).val(text)}},feedbackOptions:function(options){var view=this;if(typeof options==="undefined"){return $.map($(".feedback__overall__value:checked",view.element),function(element,index){return $(element).val()})}else{$(".feedback__overall__value",this.element).prop("checked",false);$.each(options,function(index,opt){$("#feedback__overall__value--"+opt,view.element).prop("checked",true)})}},setHidden:function(sel,hidden){sel.toggleClass("is--hidden",hidden);sel.attr("aria-hidden",hidden?"true":"false")},isHidden:function(sel){return sel.hasClass("is--hidden")&&sel.attr("aria-hidden")=="true"},feedbackState:function(newState){var containerSel=$(".submission__feedback__content",this.element);var instructionsSel=containerSel.find(".submission__feedback__instructions");var fieldsSel=containerSel.find(".submission__feedback__fields");var actionsSel=containerSel.find(".submission__feedback__actions");var transitionSel=containerSel.find(".transition__status");var messageSel=containerSel.find(".message--complete");if(typeof newState==="undefined"){var isSubmitting=containerSel.hasClass("is--transitioning")&&containerSel.hasClass("is--submitting")&&!this.isHidden(transitionSel)&&this.isHidden(messageSel)&&this.isHidden(instructionsSel)&&this.isHidden(fieldsSel)&&this.isHidden(actionsSel);var hasSubmitted=containerSel.hasClass("is--submitted")&&this.isHidden(transitionSel)&&!this.isHidden(messageSel)&&this.isHidden(instructionsSel)&&this.isHidden(fieldsSel)&&this.isHidden(actionsSel);var isOpen=!containerSel.hasClass("is--submitted")&&!containerSel.hasClass("is--transitioning")&&!containerSel.hasClass("is--submitting")&&this.isHidden(transitionSel)&&this.isHidden(messageSel)&&!this.isHidden(instructionsSel)&&!this.isHidden(fieldsSel)&&!this.isHidden(actionsSel);if(isOpen){return"open"}else if(isSubmitting){return"submitting"}else if(hasSubmitted){return"submitted"}else{throw"Invalid feedback 
state"}}else{if(newState=="open"){containerSel.toggleClass("is--transitioning",false);containerSel.toggleClass("is--submitting",false);containerSel.toggleClass("is--submitted",false);this.setHidden(instructionsSel,false);this.setHidden(fieldsSel,false);this.setHidden(actionsSel,false);this.setHidden(transitionSel,true);this.setHidden(messageSel,true)}else if(newState=="submitting"){containerSel.toggleClass("is--transitioning",true);containerSel.toggleClass("is--submitting",true);containerSel.toggleClass("is--submitted",false);this.setHidden(instructionsSel,true);this.setHidden(fieldsSel,true);this.setHidden(actionsSel,true);this.setHidden(transitionSel,false);this.setHidden(messageSel,true)}else if(newState=="submitted"){containerSel.toggleClass("is--transitioning",false);containerSel.toggleClass("is--submitting",false);containerSel.toggleClass("is--submitted",true);this.setHidden(instructionsSel,true);this.setHidden(fieldsSel,true);this.setHidden(actionsSel,true);this.setHidden(transitionSel,true);this.setHidden(messageSel,false)}}},submitFeedbackOnAssessment:function(){var view=this;var baseView=this.baseView;$("#feedback__submit",this.element).toggleClass("is--disabled",true);view.feedbackState("submitting");this.server.submitFeedbackOnAssessment(this.feedbackText(),this.feedbackOptions()).done(function(){view.feedbackState("submitted")}).fail(function(errMsg){baseView.toggleActionError("feedback_assess",errMsg)})}};OpenAssessment.MessageView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView};OpenAssessment.MessageView.prototype={load:function(){var view=this;var baseView=this.baseView;this.server.render("message").done(function(html){$("#openassessment__message",view.element).replaceWith(html)}).fail(function(errMsg){baseView.showLoadError("message",errMsg)})}};OpenAssessment.PeerView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView;this.rubric=null};OpenAssessment.PeerView.prototype={load:function(){var view=this;this.server.render("peer_assessment").done(function(html){$("#openassessment__peer-assessment",view.element).replaceWith(html);view.installHandlers(false)}).fail(function(errMsg){view.baseView.showLoadError("peer-assessment")});view.baseView.loadMessageView()},loadContinuedAssessment:function(){var view=this;view.continueAssessmentEnabled(false);this.server.renderContinuedPeer().done(function(html){$("#openassessment__peer-assessment",view.element).replaceWith(html);view.installHandlers(true)}).fail(function(errMsg){view.baseView.showLoadError("peer-assessment");view.continueAssessmentEnabled(true)})},continueAssessmentEnabled:function(enabled){var button=$("#peer-assessment__continue__grading",this.element);if(typeof enabled==="undefined"){return!button.hasClass("is--disabled")}else{button.toggleClass("is--disabled",!enabled)}},installHandlers:function(isContinuedAssessment){var sel=$("#openassessment__peer-assessment",this.element);var view=this;this.baseView.setUpCollapseExpand(sel);var rubricSelector=$("#peer-assessment--001__assessment",this.element);if(rubricSelector.size()>0){var rubricElement=rubricSelector.get(0);this.rubric=new 
OpenAssessment.Rubric(rubricElement)}if(this.rubric!==null){this.rubric.canSubmitCallback($.proxy(view.peerSubmitEnabled,view))}sel.find("#peer-assessment--001__assessment__submit").click(function(eventObject){eventObject.preventDefault();if(!isContinuedAssessment){view.peerAssess()}else{view.continuedPeerAssess()}});sel.find("#peer-assessment__continue__grading").click(function(eventObject){eventObject.preventDefault();view.loadContinuedAssessment()})},peerSubmitEnabled:function(enabled){var button=$("#peer-assessment--001__assessment__submit",this.element);if(typeof enabled==="undefined"){return!button.hasClass("is--disabled")}else{button.toggleClass("is--disabled",!enabled)}},peerAssess:function(){var view=this;var baseView=view.baseView;this.peerAssessRequest(function(){view.load();baseView.loadAssessmentModules();baseView.scrollToTop()})},continuedPeerAssess:function(){var view=this;var gradeView=this.baseView.gradeView;var baseView=view.baseView;view.peerAssessRequest(function(){view.loadContinuedAssessment();gradeView.load();baseView.scrollToTop()})},peerAssessRequest:function(successFunction){var view=this;view.baseView.toggleActionError("peer",null);view.peerSubmitEnabled(false);this.server.peerAssess(this.rubric.optionsSelected(),this.rubric.criterionFeedback(),this.overallFeedback()).done(successFunction).fail(function(errMsg){view.baseView.toggleActionError("peer",errMsg);view.peerSubmitEnabled(true)})},overallFeedback:function(overallFeedback){var selector="#assessment__rubric__question--feedback__value";if(typeof overallFeedback==="undefined"){return $(selector,this.element).val()}else{$(selector,this.element).val(overallFeedback)}}};OpenAssessment.ResponseView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView;this.savedResponse="";this.lastChangeTime=Date.now();this.errorOnLastSave=false;this.autoSaveTimerId=null};OpenAssessment.ResponseView.prototype={AUTO_SAVE_POLL_INTERVAL:2e3,AUTO_SAVE_WAIT:3e4,load:function(){var view=this;this.server.render("submission").done(function(html){$("#openassessment__response",view.element).replaceWith(html);view.installHandlers();view.setAutoSaveEnabled(true)}).fail(function(errMsg){view.baseView.showLoadError("response")})},installHandlers:function(){var sel=$("#openassessment__response",this.element);var view=this;this.baseView.setUpCollapseExpand(sel);this.savedResponse=this.response();var handleChange=function(eventData){view.handleResponseChanged()};sel.find("#submission__answer__value").on("change keyup drop paste",handleChange);sel.find("#step--response__submit").click(function(eventObject){eventObject.preventDefault();view.submit()});sel.find("#submission__save").click(function(eventObject){eventObject.preventDefault();view.save()})},setAutoSaveEnabled:function(enabled){if(enabled){if(this.autoSaveTimerId===null){this.autoSaveTimerId=setInterval($.proxy(this.autoSave,this),this.AUTO_SAVE_POLL_INTERVAL)}}else{if(this.autoSaveTimerId!==null){clearInterval(this.autoSaveTimerId)}}},submitEnabled:function(enabled){var sel=$("#step--response__submit",this.element);if(typeof enabled==="undefined"){return!sel.hasClass("is--disabled")}else{sel.toggleClass("is--disabled",!enabled)}},saveEnabled:function(enabled){var sel=$("#submission__save",this.element);if(typeof enabled==="undefined"){return!sel.hasClass("is--disabled")}else{sel.toggleClass("is--disabled",!enabled)}},saveStatus:function(msg){var sel=$("#response__save_status h3",this.element);if(typeof msg==="undefined"){return 
sel.text()}else{var label=gettext("Status of Your Response");sel.html('<span class="sr">'+label+":"+"</span>\n"+msg)}},unsavedWarningEnabled:function(enabled){if(typeof enabled==="undefined"){return window.onbeforeunload!==null}else{if(enabled){window.onbeforeunload=function(){return gettext("If you leave this page without saving or submitting your response, you'll lose any work you've done on the response.")}}else{window.onbeforeunload=null}}},response:function(text){var sel=$("#submission__answer__value",this.element);if(typeof text==="undefined"){return sel.val()}else{sel.val(text)}},responseChanged:function(){var currentResponse=$.trim(this.response());var savedResponse=$.trim(this.savedResponse);return savedResponse!==currentResponse},autoSave:function(){var timeSinceLastChange=Date.now()-this.lastChangeTime;if(this.responseChanged()&&timeSinceLastChange>this.AUTO_SAVE_WAIT&&!this.errorOnLastSave){this.save()}},handleResponseChanged:function(){var isBlank=$.trim(this.response())!=="";this.submitEnabled(isBlank);if(this.responseChanged()){this.saveEnabled(isBlank);this.saveStatus(gettext("This response has not been saved."));this.unsavedWarningEnabled(true)}this.lastChangeTime=Date.now()},save:function(){this.errorOnLastSave=false;this.saveStatus(gettext("Saving..."));this.baseView.toggleActionError("save",null);this.unsavedWarningEnabled(false);var view=this;var savedResponse=this.response();this.server.save(savedResponse).done(function(){view.savedResponse=savedResponse;var currentResponse=view.response();view.submitEnabled(currentResponse!=="");if(currentResponse==savedResponse){view.saveEnabled(false);view.saveStatus(gettext("This response has been saved but not submitted."))}}).fail(function(errMsg){view.saveStatus(gettext("Error"));view.baseView.toggleActionError("save",errMsg);view.errorOnLastSave=true})},submit:function(){this.submitEnabled(false);var view=this;var baseView=this.baseView;this.confirmSubmission().pipe(function(){var submission=$("#submission__answer__value",view.element).val();baseView.toggleActionError("response",null);return view.server.submit(submission)}).done($.proxy(view.moveToNextStep,view)).fail(function(errCode,errMsg){if(errCode=="ENOMULTI"){view.moveToNextStep()}else{if(errMsg){baseView.toggleActionError("submit",errMsg)}view.submitEnabled(true)}})},moveToNextStep:function(){this.load();this.baseView.loadAssessmentModules();this.unsavedWarningEnabled(false)},confirmSubmission:function(){var msg="You're about to submit your response for this assignment. 
"+"After you submit this response, you can't change it or submit a new response.";return $.Deferred(function(defer){if(confirm(msg)){defer.resolve()}else{defer.reject()}})}};OpenAssessment.Rubric=function(element){this.element=element};OpenAssessment.Rubric.prototype={criterionFeedback:function(criterionFeedback){var selector="textarea.answer__value";var feedback={};$(selector,this.element).each(function(index,sel){if(typeof criterionFeedback!=="undefined"){$(sel).val(criterionFeedback[sel.name]);feedback[sel.name]=criterionFeedback[sel.name]}else{feedback[sel.name]=$(sel).val()}});return feedback},optionsSelected:function(optionsSelected){var selector="input[type=radio]";if(typeof optionsSelected==="undefined"){var options={};$(selector+":checked",this.element).each(function(index,sel){options[sel.name]=sel.value});return options}else{$(selector,this.element).prop("checked",false);$(selector,this.element).each(function(index,sel){if(optionsSelected.hasOwnProperty(sel.name)){if(sel.value==optionsSelected[sel.name]){$(sel).prop("checked",true)}}})}},canSubmitCallback:function(callback){$(this.element).change(function(){var numChecked=$("input[type=radio]:checked",this).length;var numAvailable=$(".field--radio.assessment__rubric__question",this).length;var canSubmit=numChecked==numAvailable;callback(canSubmit)})},showCorrections:function(corrections){var selector="input[type=radio]";var hasErrors=false;$(selector,this.element).each(function(index,sel){var listItem=$(sel).parents(".assessment__rubric__question");if(corrections.hasOwnProperty(sel.name)){hasErrors=true;listItem.find(".message--incorrect").removeClass("is--hidden");listItem.find(".message--correct").addClass("is--hidden")}else{listItem.find(".message--correct").removeClass("is--hidden");listItem.find(".message--incorrect").addClass("is--hidden")}});return hasErrors}};OpenAssessment.SelfView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView;this.rubric=null};OpenAssessment.SelfView.prototype={load:function(){var view=this;this.server.render("self_assessment").done(function(html){$("#openassessment__self-assessment",view.element).replaceWith(html);view.installHandlers()}).fail(function(errMsg){view.showLoadError("self-assessment")})},installHandlers:function(){var view=this;var sel=$("#openassessment__self-assessment",view.element);this.baseView.setUpCollapseExpand(sel);var rubricSelector=$("#self-assessment--001__assessment",this.element);if(rubricSelector.size()>0){var rubricElement=rubricSelector.get(0);this.rubric=new OpenAssessment.Rubric(rubricElement)}if(this.rubric!==null){this.rubric.canSubmitCallback($.proxy(this.selfSubmitEnabled,this))}sel.find("#self-assessment--001__assessment__submit").click(function(eventObject){eventObject.preventDefault();view.selfAssess()})},selfSubmitEnabled:function(enabled){var button=$("#self-assessment--001__assessment__submit",this.element);if(typeof enabled==="undefined"){return!button.hasClass("is--disabled")}else{button.toggleClass("is--disabled",!enabled)}},selfAssess:function(){var view=this;var baseView=this.baseView;baseView.toggleActionError("self",null);view.selfSubmitEnabled(false);var 
options=this.rubric.optionsSelected();this.server.selfAssess(options).done(function(){baseView.loadAssessmentModules();baseView.scrollToTop()}).fail(function(errMsg){baseView.toggleActionError("self",errMsg);view.selfSubmitEnabled(true)})}};OpenAssessment.Server=function(runtime,element){this.runtime=runtime;this.element=element};OpenAssessment.Server.prototype={url:function(handler){return this.runtime.handlerUrl(this.element,handler)},render:function(component){var url=this.url("render_"+component);return $.Deferred(function(defer){$.ajax({url:url,type:"POST",dataType:"html"}).done(function(data){defer.resolveWith(this,[data])}).fail(function(data){defer.rejectWith(this,[gettext("This section could not be loaded.")])})}).promise()},renderContinuedPeer:function(){var url=this.url("render_peer_assessment");return $.Deferred(function(defer){$.ajax({url:url,type:"POST",dataType:"html",data:{continue_grading:true}}).done(function(data){defer.resolveWith(this,[data])}).fail(function(data){defer.rejectWith(this,[gettext("This section could not be loaded.")])})}).promise()},studentInfo:function(student_id){var url=this.url("render_student_info");return $.Deferred(function(defer){$.ajax({url:url,type:"POST",dataType:"html",data:{student_id:student_id}}).done(function(data){defer.resolveWith(this,[data])}).fail(function(data){defer.rejectWith(this,[gettext("This section could not be loaded.")])})}).promise()},submit:function(submission){var url=this.url("submit");return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:JSON.stringify({submission:submission})}).done(function(data){var success=data[0];if(success){var studentId=data[1];var attemptNum=data[2];defer.resolveWith(this,[studentId,attemptNum])}else{var errorNum=data[1];var errorMsg=data[2];defer.rejectWith(this,[errorNum,errorMsg])}}).fail(function(data){defer.rejectWith(this,["AJAX",gettext("This response could not be submitted.")])})}).promise()},save:function(submission){var url=this.url("save_submission");return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:JSON.stringify({submission:submission})}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This response could not be saved.")])})}).promise()},submitFeedbackOnAssessment:function(text,options){var url=this.url("submit_feedback");var payload=JSON.stringify({feedback_text:text,feedback_options:options});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This feedback could not be submitted.")])})}).promise()},peerAssess:function(optionsSelected,criterionFeedback,overallFeedback){var url=this.url("peer_assess");var payload=JSON.stringify({options_selected:optionsSelected,criterion_feedback:criterionFeedback,overall_feedback:overallFeedback});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This assessment could not be submitted.")])})}).promise()},selfAssess:function(optionsSelected){var url=this.url("self_assess");var payload=JSON.stringify({options_selected:optionsSelected});return 
$.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This assessment could not be submitted.")])})})},trainingAssess:function(optionsSelected){var url=this.url("training_assess");var payload=JSON.stringify({options_selected:optionsSelected});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolveWith(this,[data.corrections])}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This assessment could not be submitted.")])})})},scheduleTraining:function(){var url=this.url("schedule_training");return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:'""'}).done(function(data){if(data.success){defer.resolveWith(this,[data.msg])}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This assessment could not be submitted.")])})})},rescheduleUnfinishedTasks:function(){var url=this.url("reschedule_unfinished_tasks");return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:'""'}).done(function(data){if(data.success){defer.resolveWith(this,[data.msg])}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("One or more rescheduling tasks failed.")])})})},loadXml:function(){var url=this.url("xml");return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:'""'}).done(function(data){if(data.success){defer.resolveWith(this,[data.xml])}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This problem could not be loaded.")])})}).promise()},updateXml:function(xml){var url=this.url("update_xml");var payload=JSON.stringify({xml:xml});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This problem could not be saved.")])})}).promise()},checkReleased:function(){var url=this.url("check_released");var payload='""';return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolveWith(this,[data.is_released])}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("The server could not be contacted.")])})}).promise()}};if(typeof OpenAssessment=="undefined"||!OpenAssessment){OpenAssessment={}}if(typeof window.gettext==="undefined"){window.gettext=function(text){return text}}OpenAssessment.StaffInfoView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView};OpenAssessment.StaffInfoView.prototype={load:function(){var view=this;if($("#openassessment__staff-info",view.element).length>0){this.server.render("staff_info").done(function(html){$("#openassessment__staff-info",view.element).replaceWith(html);view.installHandlers()}).fail(function(errMsg){view.baseView.showLoadError("staff_info")})}},loadStudentInfo:function(){var view=this;var sel=$("#openassessment__staff-info",this.element);var student_id=sel.find("#openassessment__student_id").val();this.server.studentInfo(student_id).done(function(html){$("#openassessment__student-info",view.element).replaceWith(html)}).fail(function(errMsg){view.showLoadError("student_info")})},installHandlers:function(){var 
sel=$("#openassessment__staff-info",this.element);var view=this;if(sel.length<=0){return}this.baseView.setUpCollapseExpand(sel,function(){});sel.find("#openassessment_student_info_form").submit(function(eventObject){eventObject.preventDefault();view.loadStudentInfo()});sel.find("#submit_student_id").click(function(eventObject){eventObject.preventDefault();view.loadStudentInfo()});sel.find("#schedule_training").click(function(eventObject){eventObject.preventDefault();view.scheduleTraining()});sel.find("#reschedule_unfinished_tasks").click(function(eventObject){eventObject.preventDefault();view.rescheduleUnfinishedTasks()})},scheduleTraining:function(){var view=this;this.server.scheduleTraining().done(function(msg){$("#schedule_training_message",this.element).text(msg)}).fail(function(errMsg){$("#schedule_training_message",this.element).text(errMsg)})},rescheduleUnfinishedTasks:function(){var view=this;this.server.rescheduleUnfinishedTasks().done(function(msg){$("#reschedule_unfinished_tasks_message",this.element).text(msg)}).fail(function(errMsg){$("#reschedule_unfinished_tasks_message",this.element).text(errMsg)})}};OpenAssessment.StudentTrainingView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView;this.rubric=null};OpenAssessment.StudentTrainingView.prototype={load:function(){var view=this;this.server.render("student_training").done(function(html){$("#openassessment__student-training",view.element).replaceWith(html);view.installHandlers()}).fail(function(errMsg){view.baseView.showLoadError("student-training")})},installHandlers:function(){var sel=$("#openassessment__student-training",this.element);var view=this;this.baseView.setUpCollapseExpand(sel);var rubricSelector=$("#student-training--001__assessment",this.element);if(rubricSelector.size()>0){var rubricElement=rubricSelector.get(0);this.rubric=new OpenAssessment.Rubric(rubricElement)}if(this.rubric!==null){this.rubric.canSubmitCallback($.proxy(this.assessButtonEnabled,this))}sel.find("#student-training--001__assessment__submit").click(function(eventObject){eventObject.preventDefault();view.assess()})},assess:function(){this.assessButtonEnabled(false);var options={};if(this.rubric!==null){options=this.rubric.optionsSelected()}var view=this;var baseView=this.baseView;this.server.trainingAssess(options).done(function(corrections){var incorrect=$("#openassessment__student-training--incorrect",this.element);var instructions=$("#openassessment__student-training--instructions",this.element);if(!view.rubric.showCorrections(corrections)){view.load();baseView.loadAssessmentModules();incorrect.addClass("is--hidden");instructions.removeClass("is--hidden")}else{instructions.addClass("is--hidden");incorrect.removeClass("is--hidden")}baseView.scrollToTop()}).fail(function(errMsg){baseView.toggleActionError("student-training",errMsg);view.assessButtonEnabled(true)})},assessButtonEnabled:function(isEnabled){var button=$("#student-training--001__assessment__submit",this.element);if(typeof isEnabled==="undefined"){return!button.hasClass("is--disabled")}else{button.toggleClass("is--disabled",!isEnabled)}}};
\ No newline at end of file
if(typeof OpenAssessment=="undefined"||!OpenAssessment){OpenAssessment={}}if(typeof window.gettext==="undefined"){window.gettext=function(text){return text}}OpenAssessment.BaseView=function(runtime,element,server){this.runtime=runtime;this.element=element;this.server=server;this.responseView=new OpenAssessment.ResponseView(this.element,this.server,this);this.trainingView=new OpenAssessment.StudentTrainingView(this.element,this.server,this);this.selfView=new OpenAssessment.SelfView(this.element,this.server,this);this.peerView=new OpenAssessment.PeerView(this.element,this.server,this);this.gradeView=new OpenAssessment.GradeView(this.element,this.server,this);this.messageView=new OpenAssessment.MessageView(this.element,this.server,this);this.staffInfoView=new OpenAssessment.StaffInfoView(this.element,this.server,this)};OpenAssessment.BaseView.prototype={scrollToTop:function(){if($.scrollTo instanceof Function){$(window).scrollTo($("#openassessment__steps"),800,{offset:-50})}},setUpCollapseExpand:function(parentSel){parentSel.find(".ui-toggle-visibility__control").click(function(eventData){var sel=$(eventData.target).closest(".ui-toggle-visibility");sel.toggleClass("is--collapsed")})},load:function(){this.responseView.load();this.loadAssessmentModules();this.staffInfoView.load()},loadAssessmentModules:function(){this.trainingView.load();this.peerView.load();this.selfView.load();this.gradeView.load()},loadMessageView:function(){this.messageView.load()},toggleActionError:function(type,msg){var element=this.element;var container=null;if(type=="save"){container=".response__submission__actions"}else if(type=="submit"||type=="peer"||type=="self"||type=="student-training"){container=".step__actions"}else if(type=="feedback_assess"){container=".submission__feedback__actions"}if(container===null){if(msg!==null){console.log(msg)}}else{var msgHtml=msg===null?"":msg;$(container+" .message__content",element).html("<p>"+msgHtml+"</p>");$(container,element).toggleClass("has--error",msg!==null)}},showLoadError:function(step){var container="#openassessment__"+step;$(container).toggleClass("has--error",true);$(container+" .step__status__value i").removeClass().addClass("ico icon-warning-sign");$(container+" .step__status__value .copy").html(gettext("Unable to Load"))}};function OpenAssessmentBlock(runtime,element){var server=new OpenAssessment.Server(runtime,element);var view=new OpenAssessment.BaseView(runtime,element,server);view.load()}OpenAssessment.StudioView=function(runtime,element,server){this.runtime=runtime;this.server=server;this.codeBox=CodeMirror.fromTextArea($(element).find(".openassessment-editor").first().get(0),{mode:"xml",lineNumbers:true,lineWrapping:true});var view=this;$(element).find(".openassessment-save-button").click(function(eventData){view.save()});$(element).find(".openassessment-cancel-button").click(function(eventData){view.cancel()})};OpenAssessment.StudioView.prototype={load:function(){var view=this;this.server.loadXml().done(function(xml){view.codeBox.setValue(xml)}).fail(function(msg){view.showError(msg)})},save:function(){var view=this;this.server.checkReleased().done(function(isReleased){if(isReleased){view.confirmPostReleaseUpdate($.proxy(view.updateXml,view))}else{view.updateXml()}}).fail(function(errMsg){view.showError(msg)})},confirmPostReleaseUpdate:function(onConfirm){var msg=gettext("This problem has already been released. 
Any changes will apply only to future assessments.");if(confirm(msg)){onConfirm()}},updateXml:function(){this.runtime.notify("save",{state:"start"});var xml=this.codeBox.getValue();var view=this;this.server.updateXml(xml).done(function(){view.runtime.notify("save",{state:"end"});view.load()}).fail(function(msg){view.showError(msg)})},cancel:function(){this.runtime.notify("cancel",{})},showError:function(errorMsg){this.runtime.notify("error",{msg:errorMsg})}};function OpenAssessmentEditor(runtime,element){var server=new OpenAssessment.Server(runtime,element);var view=new OpenAssessment.StudioView(runtime,element,server);view.load()}OpenAssessment.GradeView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView};OpenAssessment.GradeView.prototype={load:function(){var view=this;var baseView=this.baseView;this.server.render("grade").done(function(html){$("#openassessment__grade",view.element).replaceWith(html);view.installHandlers()}).fail(function(errMsg){baseView.showLoadError("grade",errMsg)})},installHandlers:function(){var sel=$("#openassessment__grade",this.element);this.baseView.setUpCollapseExpand(sel);var view=this;sel.find("#feedback__submit").click(function(eventObject){eventObject.preventDefault();view.submitFeedbackOnAssessment()})},feedbackText:function(text){if(typeof text==="undefined"){return $("#feedback__remarks__value",this.element).val()}else{$("#feedback__remarks__value",this.element).val(text)}},feedbackOptions:function(options){var view=this;if(typeof options==="undefined"){return $.map($(".feedback__overall__value:checked",view.element),function(element,index){return $(element).val()})}else{$(".feedback__overall__value",this.element).prop("checked",false);$.each(options,function(index,opt){$("#feedback__overall__value--"+opt,view.element).prop("checked",true)})}},setHidden:function(sel,hidden){sel.toggleClass("is--hidden",hidden);sel.attr("aria-hidden",hidden?"true":"false")},isHidden:function(sel){return sel.hasClass("is--hidden")&&sel.attr("aria-hidden")=="true"},feedbackState:function(newState){var containerSel=$(".submission__feedback__content",this.element);var instructionsSel=containerSel.find(".submission__feedback__instructions");var fieldsSel=containerSel.find(".submission__feedback__fields");var actionsSel=containerSel.find(".submission__feedback__actions");var transitionSel=containerSel.find(".transition__status");var messageSel=containerSel.find(".message--complete");if(typeof newState==="undefined"){var isSubmitting=containerSel.hasClass("is--transitioning")&&containerSel.hasClass("is--submitting")&&!this.isHidden(transitionSel)&&this.isHidden(messageSel)&&this.isHidden(instructionsSel)&&this.isHidden(fieldsSel)&&this.isHidden(actionsSel);var hasSubmitted=containerSel.hasClass("is--submitted")&&this.isHidden(transitionSel)&&!this.isHidden(messageSel)&&this.isHidden(instructionsSel)&&this.isHidden(fieldsSel)&&this.isHidden(actionsSel);var isOpen=!containerSel.hasClass("is--submitted")&&!containerSel.hasClass("is--transitioning")&&!containerSel.hasClass("is--submitting")&&this.isHidden(transitionSel)&&this.isHidden(messageSel)&&!this.isHidden(instructionsSel)&&!this.isHidden(fieldsSel)&&!this.isHidden(actionsSel);if(isOpen){return"open"}else if(isSubmitting){return"submitting"}else if(hasSubmitted){return"submitted"}else{throw"Invalid feedback 
state"}}else{if(newState=="open"){containerSel.toggleClass("is--transitioning",false);containerSel.toggleClass("is--submitting",false);containerSel.toggleClass("is--submitted",false);this.setHidden(instructionsSel,false);this.setHidden(fieldsSel,false);this.setHidden(actionsSel,false);this.setHidden(transitionSel,true);this.setHidden(messageSel,true)}else if(newState=="submitting"){containerSel.toggleClass("is--transitioning",true);containerSel.toggleClass("is--submitting",true);containerSel.toggleClass("is--submitted",false);this.setHidden(instructionsSel,true);this.setHidden(fieldsSel,true);this.setHidden(actionsSel,true);this.setHidden(transitionSel,false);this.setHidden(messageSel,true)}else if(newState=="submitted"){containerSel.toggleClass("is--transitioning",false);containerSel.toggleClass("is--submitting",false);containerSel.toggleClass("is--submitted",true);this.setHidden(instructionsSel,true);this.setHidden(fieldsSel,true);this.setHidden(actionsSel,true);this.setHidden(transitionSel,true);this.setHidden(messageSel,false)}}},submitFeedbackOnAssessment:function(){var view=this;var baseView=this.baseView;$("#feedback__submit",this.element).toggleClass("is--disabled",true);view.feedbackState("submitting");this.server.submitFeedbackOnAssessment(this.feedbackText(),this.feedbackOptions()).done(function(){view.feedbackState("submitted")}).fail(function(errMsg){baseView.toggleActionError("feedback_assess",errMsg)})}};OpenAssessment.MessageView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView};OpenAssessment.MessageView.prototype={load:function(){var view=this;var baseView=this.baseView;this.server.render("message").done(function(html){$("#openassessment__message",view.element).replaceWith(html)}).fail(function(errMsg){baseView.showLoadError("message",errMsg)})}};OpenAssessment.PeerView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView;this.rubric=null};OpenAssessment.PeerView.prototype={load:function(){var view=this;this.server.render("peer_assessment").done(function(html){$("#openassessment__peer-assessment",view.element).replaceWith(html);view.installHandlers(false)}).fail(function(errMsg){view.baseView.showLoadError("peer-assessment")});view.baseView.loadMessageView()},loadContinuedAssessment:function(){var view=this;view.continueAssessmentEnabled(false);this.server.renderContinuedPeer().done(function(html){$("#openassessment__peer-assessment",view.element).replaceWith(html);view.installHandlers(true)}).fail(function(errMsg){view.baseView.showLoadError("peer-assessment");view.continueAssessmentEnabled(true)})},continueAssessmentEnabled:function(enabled){var button=$("#peer-assessment__continue__grading",this.element);if(typeof enabled==="undefined"){return!button.hasClass("is--disabled")}else{button.toggleClass("is--disabled",!enabled)}},installHandlers:function(isContinuedAssessment){var sel=$("#openassessment__peer-assessment",this.element);var view=this;this.baseView.setUpCollapseExpand(sel);var rubricSelector=$("#peer-assessment--001__assessment",this.element);if(rubricSelector.size()>0){var rubricElement=rubricSelector.get(0);this.rubric=new 
OpenAssessment.Rubric(rubricElement)}if(this.rubric!==null){this.rubric.canSubmitCallback($.proxy(view.peerSubmitEnabled,view))}sel.find("#peer-assessment--001__assessment__submit").click(function(eventObject){eventObject.preventDefault();if(!isContinuedAssessment){view.peerAssess()}else{view.continuedPeerAssess()}});sel.find("#peer-assessment__continue__grading").click(function(eventObject){eventObject.preventDefault();view.loadContinuedAssessment()})},peerSubmitEnabled:function(enabled){var button=$("#peer-assessment--001__assessment__submit",this.element);if(typeof enabled==="undefined"){return!button.hasClass("is--disabled")}else{button.toggleClass("is--disabled",!enabled)}},peerAssess:function(){var view=this;var baseView=view.baseView;this.peerAssessRequest(function(){view.load();baseView.loadAssessmentModules();baseView.scrollToTop()})},continuedPeerAssess:function(){var view=this;var gradeView=this.baseView.gradeView;var baseView=view.baseView;view.peerAssessRequest(function(){view.loadContinuedAssessment();gradeView.load();baseView.scrollToTop()})},peerAssessRequest:function(successFunction){var view=this;view.baseView.toggleActionError("peer",null);view.peerSubmitEnabled(false);this.server.peerAssess(this.rubric.optionsSelected(),this.rubric.criterionFeedback(),this.overallFeedback()).done(successFunction).fail(function(errMsg){view.baseView.toggleActionError("peer",errMsg);view.peerSubmitEnabled(true)})},overallFeedback:function(overallFeedback){var selector="#assessment__rubric__question--feedback__value";if(typeof overallFeedback==="undefined"){return $(selector,this.element).val()}else{$(selector,this.element).val(overallFeedback)}}};OpenAssessment.ResponseView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView;this.savedResponse="";this.lastChangeTime=Date.now();this.errorOnLastSave=false;this.autoSaveTimerId=null};OpenAssessment.ResponseView.prototype={AUTO_SAVE_POLL_INTERVAL:2e3,AUTO_SAVE_WAIT:3e4,load:function(){var view=this;this.server.render("submission").done(function(html){$("#openassessment__response",view.element).replaceWith(html);view.installHandlers();view.setAutoSaveEnabled(true)}).fail(function(errMsg){view.baseView.showLoadError("response")})},installHandlers:function(){var sel=$("#openassessment__response",this.element);var view=this;this.baseView.setUpCollapseExpand(sel);this.savedResponse=this.response();var handleChange=function(eventData){view.handleResponseChanged()};sel.find("#submission__answer__value").on("change keyup drop paste",handleChange);sel.find("#step--response__submit").click(function(eventObject){eventObject.preventDefault();view.submit()});sel.find("#submission__save").click(function(eventObject){eventObject.preventDefault();view.save()})},setAutoSaveEnabled:function(enabled){if(enabled){if(this.autoSaveTimerId===null){this.autoSaveTimerId=setInterval($.proxy(this.autoSave,this),this.AUTO_SAVE_POLL_INTERVAL)}}else{if(this.autoSaveTimerId!==null){clearInterval(this.autoSaveTimerId)}}},submitEnabled:function(enabled){var sel=$("#step--response__submit",this.element);if(typeof enabled==="undefined"){return!sel.hasClass("is--disabled")}else{sel.toggleClass("is--disabled",!enabled)}},saveEnabled:function(enabled){var sel=$("#submission__save",this.element);if(typeof enabled==="undefined"){return!sel.hasClass("is--disabled")}else{sel.toggleClass("is--disabled",!enabled)}},saveStatus:function(msg){var sel=$("#response__save_status h3",this.element);if(typeof msg==="undefined"){return 
sel.text()}else{var label=gettext("Status of Your Response");sel.html('<span class="sr">'+label+":"+"</span>\n"+msg)}},unsavedWarningEnabled:function(enabled){if(typeof enabled==="undefined"){return window.onbeforeunload!==null}else{if(enabled){window.onbeforeunload=function(){return gettext("If you leave this page without saving or submitting your response, you'll lose any work you've done on the response.")}}else{window.onbeforeunload=null}}},response:function(text){var sel=$("#submission__answer__value",this.element);if(typeof text==="undefined"){return sel.val()}else{sel.val(text)}},responseChanged:function(){var currentResponse=$.trim(this.response());var savedResponse=$.trim(this.savedResponse);return savedResponse!==currentResponse},autoSave:function(){var timeSinceLastChange=Date.now()-this.lastChangeTime;if(this.responseChanged()&&timeSinceLastChange>this.AUTO_SAVE_WAIT&&!this.errorOnLastSave){this.save()}},handleResponseChanged:function(){var isBlank=$.trim(this.response())!=="";this.submitEnabled(isBlank);if(this.responseChanged()){this.saveEnabled(isBlank);this.saveStatus(gettext("This response has not been saved."));this.unsavedWarningEnabled(true)}this.lastChangeTime=Date.now()},save:function(){this.errorOnLastSave=false;this.saveStatus(gettext("Saving..."));this.baseView.toggleActionError("save",null);this.unsavedWarningEnabled(false);var view=this;var savedResponse=this.response();this.server.save(savedResponse).done(function(){view.savedResponse=savedResponse;var currentResponse=view.response();view.submitEnabled(currentResponse!=="");if(currentResponse==savedResponse){view.saveEnabled(false);view.saveStatus(gettext("This response has been saved but not submitted."))}}).fail(function(errMsg){view.saveStatus(gettext("Error"));view.baseView.toggleActionError("save",errMsg);view.errorOnLastSave=true})},submit:function(){this.submitEnabled(false);var view=this;var baseView=this.baseView;this.confirmSubmission().pipe(function(){var submission=$("#submission__answer__value",view.element).val();baseView.toggleActionError("response",null);return view.server.submit(submission)}).done($.proxy(view.moveToNextStep,view)).fail(function(errCode,errMsg){if(errCode=="ENOMULTI"){view.moveToNextStep()}else{if(errMsg){baseView.toggleActionError("submit",errMsg)}view.submitEnabled(true)}})},moveToNextStep:function(){this.load();this.baseView.loadAssessmentModules();this.unsavedWarningEnabled(false)},confirmSubmission:function(){var msg="You're about to submit your response for this assignment. 
"+"After you submit this response, you can't change it or submit a new response.";return $.Deferred(function(defer){if(confirm(msg)){defer.resolve()}else{defer.reject()}})}};OpenAssessment.Rubric=function(element){this.element=element};OpenAssessment.Rubric.prototype={criterionFeedback:function(criterionFeedback){var selector="textarea.answer__value";var feedback={};$(selector,this.element).each(function(index,sel){if(typeof criterionFeedback!=="undefined"){$(sel).val(criterionFeedback[sel.name]);feedback[sel.name]=criterionFeedback[sel.name]}else{feedback[sel.name]=$(sel).val()}});return feedback},optionsSelected:function(optionsSelected){var selector="input[type=radio]";if(typeof optionsSelected==="undefined"){var options={};$(selector+":checked",this.element).each(function(index,sel){options[sel.name]=sel.value});return options}else{$(selector,this.element).prop("checked",false);$(selector,this.element).each(function(index,sel){if(optionsSelected.hasOwnProperty(sel.name)){if(sel.value==optionsSelected[sel.name]){$(sel).prop("checked",true)}}})}},canSubmitCallback:function(callback){var rubric=this;callback(rubric.canSubmit());$(this.element).on("change keyup drop paste",function(){callback(rubric.canSubmit())})},canSubmit:function(){var numChecked=$("input[type=radio]:checked",this.element).length;var numAvailable=$(".field--radio.assessment__rubric__question.has--options",this.element).length;var completedRequiredComments=true;$("textarea[required]",this.element).each(function(){var trimmedText=$.trim($(this).val());if(trimmedText===""){completedRequiredComments=false}});return numChecked==numAvailable&&completedRequiredComments},showCorrections:function(corrections){var selector="input[type=radio]";var hasErrors=false;$(selector,this.element).each(function(index,sel){var listItem=$(sel).parents(".assessment__rubric__question");if(corrections.hasOwnProperty(sel.name)){hasErrors=true;listItem.find(".message--incorrect").removeClass("is--hidden");listItem.find(".message--correct").addClass("is--hidden")}else{listItem.find(".message--correct").removeClass("is--hidden");listItem.find(".message--incorrect").addClass("is--hidden")}});return hasErrors}};OpenAssessment.SelfView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView;this.rubric=null};OpenAssessment.SelfView.prototype={load:function(){var view=this;this.server.render("self_assessment").done(function(html){$("#openassessment__self-assessment",view.element).replaceWith(html);view.installHandlers()}).fail(function(errMsg){view.showLoadError("self-assessment")})},installHandlers:function(){var view=this;var sel=$("#openassessment__self-assessment",view.element);this.baseView.setUpCollapseExpand(sel);var rubricSelector=$("#self-assessment--001__assessment",this.element);if(rubricSelector.size()>0){var rubricElement=rubricSelector.get(0);this.rubric=new OpenAssessment.Rubric(rubricElement)}if(this.rubric!==null){this.rubric.canSubmitCallback($.proxy(this.selfSubmitEnabled,this))}sel.find("#self-assessment--001__assessment__submit").click(function(eventObject){eventObject.preventDefault();view.selfAssess()})},selfSubmitEnabled:function(enabled){var button=$("#self-assessment--001__assessment__submit",this.element);if(typeof enabled==="undefined"){return!button.hasClass("is--disabled")}else{button.toggleClass("is--disabled",!enabled)}},selfAssess:function(){var view=this;var baseView=this.baseView;baseView.toggleActionError("self",null);view.selfSubmitEnabled(false);var 
options=this.rubric.optionsSelected();this.server.selfAssess(options).done(function(){baseView.loadAssessmentModules();baseView.scrollToTop()}).fail(function(errMsg){baseView.toggleActionError("self",errMsg);view.selfSubmitEnabled(true)})}};OpenAssessment.Server=function(runtime,element){this.runtime=runtime;this.element=element};OpenAssessment.Server.prototype={url:function(handler){return this.runtime.handlerUrl(this.element,handler)},render:function(component){var url=this.url("render_"+component);return $.Deferred(function(defer){$.ajax({url:url,type:"POST",dataType:"html"}).done(function(data){defer.resolveWith(this,[data])}).fail(function(data){defer.rejectWith(this,[gettext("This section could not be loaded.")])})}).promise()},renderContinuedPeer:function(){var url=this.url("render_peer_assessment");return $.Deferred(function(defer){$.ajax({url:url,type:"POST",dataType:"html",data:{continue_grading:true}}).done(function(data){defer.resolveWith(this,[data])}).fail(function(data){defer.rejectWith(this,[gettext("This section could not be loaded.")])})}).promise()},studentInfo:function(student_id){var url=this.url("render_student_info");return $.Deferred(function(defer){$.ajax({url:url,type:"POST",dataType:"html",data:{student_id:student_id}}).done(function(data){defer.resolveWith(this,[data])}).fail(function(data){defer.rejectWith(this,[gettext("This section could not be loaded.")])})}).promise()},submit:function(submission){var url=this.url("submit");return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:JSON.stringify({submission:submission})}).done(function(data){var success=data[0];if(success){var studentId=data[1];var attemptNum=data[2];defer.resolveWith(this,[studentId,attemptNum])}else{var errorNum=data[1];var errorMsg=data[2];defer.rejectWith(this,[errorNum,errorMsg])}}).fail(function(data){defer.rejectWith(this,["AJAX",gettext("This response could not be submitted.")])})}).promise()},save:function(submission){var url=this.url("save_submission");return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:JSON.stringify({submission:submission})}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This response could not be saved.")])})}).promise()},submitFeedbackOnAssessment:function(text,options){var url=this.url("submit_feedback");var payload=JSON.stringify({feedback_text:text,feedback_options:options});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This feedback could not be submitted.")])})}).promise()},peerAssess:function(optionsSelected,criterionFeedback,overallFeedback){var url=this.url("peer_assess");var payload=JSON.stringify({options_selected:optionsSelected,criterion_feedback:criterionFeedback,overall_feedback:overallFeedback});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This assessment could not be submitted.")])})}).promise()},selfAssess:function(optionsSelected){var url=this.url("self_assess");var payload=JSON.stringify({options_selected:optionsSelected});return 
$.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This assessment could not be submitted.")])})})},trainingAssess:function(optionsSelected){var url=this.url("training_assess");var payload=JSON.stringify({options_selected:optionsSelected});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolveWith(this,[data.corrections])}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This assessment could not be submitted.")])})})},scheduleTraining:function(){var url=this.url("schedule_training");return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:'""'}).done(function(data){if(data.success){defer.resolveWith(this,[data.msg])}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This assessment could not be submitted.")])})})},rescheduleUnfinishedTasks:function(){var url=this.url("reschedule_unfinished_tasks");return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:'""'}).done(function(data){if(data.success){defer.resolveWith(this,[data.msg])}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("One or more rescheduling tasks failed.")])})})},loadXml:function(){var url=this.url("xml");return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:'""'}).done(function(data){if(data.success){defer.resolveWith(this,[data.xml])}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This problem could not be loaded.")])})}).promise()},updateXml:function(xml){var url=this.url("update_xml");var payload=JSON.stringify({xml:xml});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This problem could not be saved.")])})}).promise()},checkReleased:function(){var url=this.url("check_released");var payload='""';return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolveWith(this,[data.is_released])}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("The server could not be contacted.")])})}).promise()}};if(typeof OpenAssessment=="undefined"||!OpenAssessment){OpenAssessment={}}if(typeof window.gettext==="undefined"){window.gettext=function(text){return text}}OpenAssessment.StaffInfoView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView};OpenAssessment.StaffInfoView.prototype={load:function(){var view=this;if($("#openassessment__staff-info",view.element).length>0){this.server.render("staff_info").done(function(html){$("#openassessment__staff-info",view.element).replaceWith(html);view.installHandlers()}).fail(function(errMsg){view.baseView.showLoadError("staff_info")})}},loadStudentInfo:function(){var view=this;var sel=$("#openassessment__staff-info",this.element);var student_id=sel.find("#openassessment__student_id").val();this.server.studentInfo(student_id).done(function(html){$("#openassessment__student-info",view.element).replaceWith(html)}).fail(function(errMsg){view.showLoadError("student_info")})},installHandlers:function(){var 
sel=$("#openassessment__staff-info",this.element);var view=this;if(sel.length<=0){return}this.baseView.setUpCollapseExpand(sel,function(){});sel.find("#openassessment_student_info_form").submit(function(eventObject){eventObject.preventDefault();view.loadStudentInfo()});sel.find("#submit_student_id").click(function(eventObject){eventObject.preventDefault();view.loadStudentInfo()});sel.find("#schedule_training").click(function(eventObject){eventObject.preventDefault();view.scheduleTraining()});sel.find("#reschedule_unfinished_tasks").click(function(eventObject){eventObject.preventDefault();view.rescheduleUnfinishedTasks()})},scheduleTraining:function(){var view=this;this.server.scheduleTraining().done(function(msg){$("#schedule_training_message",this.element).text(msg)}).fail(function(errMsg){$("#schedule_training_message",this.element).text(errMsg)})},rescheduleUnfinishedTasks:function(){var view=this;this.server.rescheduleUnfinishedTasks().done(function(msg){$("#reschedule_unfinished_tasks_message",this.element).text(msg)}).fail(function(errMsg){$("#reschedule_unfinished_tasks_message",this.element).text(errMsg)})}};OpenAssessment.StudentTrainingView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView;this.rubric=null};OpenAssessment.StudentTrainingView.prototype={load:function(){var view=this;this.server.render("student_training").done(function(html){$("#openassessment__student-training",view.element).replaceWith(html);view.installHandlers()}).fail(function(errMsg){view.baseView.showLoadError("student-training")})},installHandlers:function(){var sel=$("#openassessment__student-training",this.element);var view=this;this.baseView.setUpCollapseExpand(sel);var rubricSelector=$("#student-training--001__assessment",this.element);if(rubricSelector.size()>0){var rubricElement=rubricSelector.get(0);this.rubric=new OpenAssessment.Rubric(rubricElement)}if(this.rubric!==null){this.rubric.canSubmitCallback($.proxy(this.assessButtonEnabled,this))}sel.find("#student-training--001__assessment__submit").click(function(eventObject){eventObject.preventDefault();view.assess()})},assess:function(){this.assessButtonEnabled(false);var options={};if(this.rubric!==null){options=this.rubric.optionsSelected()}var view=this;var baseView=this.baseView;this.server.trainingAssess(options).done(function(corrections){var incorrect=$("#openassessment__student-training--incorrect",this.element);var instructions=$("#openassessment__student-training--instructions",this.element);if(!view.rubric.showCorrections(corrections)){view.load();baseView.loadAssessmentModules();incorrect.addClass("is--hidden");instructions.removeClass("is--hidden")}else{instructions.addClass("is--hidden");incorrect.removeClass("is--hidden")}baseView.scrollToTop()}).fail(function(errMsg){baseView.toggleActionError("student-training",errMsg);view.assessButtonEnabled(true)})},assessButtonEnabled:function(isEnabled){var button=$("#student-training--001__assessment__submit",this.element);if(typeof isEnabled==="undefined"){return!button.hasClass("is--disabled")}else{button.toggleClass("is--disabled",!isEnabled)}}};
\ No newline at end of file
/**
Tests for an Open Assessment rubric.
**/
describe("OpenAssessment.Rubric", function() {
var rubric = null;
beforeEach(function() {
jasmine.getFixtures().fixturesPath = 'base/fixtures';
loadFixtures('oa_rubric.html');
var el = $("#peer-assessment--001__assessment").get(0);
rubric = new OpenAssessment.Rubric(el);
});
it("enables the submit button only when all options and required feedback have been provided", function() {
// Initially, the submit button should be disabled
expect(rubric.canSubmit()).toBe(false);
// Select some, but not all, options
rubric.optionsSelected({vocabulary: 'Good'});
expect(rubric.canSubmit()).toBe(false);
// Select all options, but do not provide required feedback
rubric.optionsSelected({
vocabulary: 'Good',
grammar: 'Bad'
});
expect(rubric.canSubmit()).toBe(false);
// Provide required feedback, but do not provide all options
rubric.optionsSelected({vocabulary: 'Good'});
rubric.criterionFeedback({
feedback_only: 'This is some feedback.'
});
expect(rubric.canSubmit()).toBe(false);
// Provide all options AND required feedback
rubric.optionsSelected({
vocabulary: 'Good',
grammar: 'Bad'
});
rubric.criterionFeedback({
feedback_only: 'This is some feedback.'
});
expect(rubric.canSubmit()).toBe(true);
});
});
......@@ -98,17 +98,40 @@ OpenAssessment.Rubric.prototype = {
    **/
    canSubmitCallback: function(callback) {
        $(this.element).change(
            function() {
                var numChecked = $('input[type=radio]:checked', this).length;
                var numAvailable = $('.field--radio.assessment__rubric__question', this).length;
                var canSubmit = numChecked == numAvailable;
                callback(canSubmit);
            }
        var rubric = this;
        // Set the initial state
        callback(rubric.canSubmit());
        // Install a handler to update on change
        $(this.element).on('change keyup drop paste',
            function() { callback(rubric.canSubmit()); }
        );
    },
    /**
    Check whether the user has filled in all the required fields
    to be able to submit an assessment.
    Returns:
        boolean
    **/
    canSubmit: function() {
        var numChecked = $('input[type=radio]:checked', this.element).length;
        var numAvailable = $('.field--radio.assessment__rubric__question.has--options', this.element).length;
        var completedRequiredComments = true;
        $('textarea[required]', this.element).each(function() {
            var trimmedText = $.trim($(this).val());
            if (trimmedText === "") {
                completedRequiredComments = false;
            }
        });
        return (numChecked == numAvailable && completedRequiredComments);
    },
    /**
    Updates the rubric to display positive and negative messages on each
    criterion. For each correction provided, the associated criterion will have
    an appropriate message displayed.
......
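For context, here is a minimal usage sketch, not part of the commit, showing how a view can wire the new canSubmitCallback/canSubmit pair to a submit button. The element IDs and the "is--disabled" class are taken from the existing views and fixtures in this diff; jQuery is assumed, and everything else is illustrative only.

    // Illustrative sketch only (assumes jQuery and the rubric markup used by the views above).
    var rubricElement = $("#peer-assessment--001__assessment").get(0);
    var rubric = new OpenAssessment.Rubric(rubricElement);

    // canSubmit() returns true only when every criterion that has options is answered
    // and every required feedback textarea is non-blank, so a feedback-only criterion
    // counts toward submission through its required comment rather than a selected option.
    rubric.canSubmitCallback(function(canSubmit) {
        // Set the initial button state, then keep it in sync on change, keyup, drop, and paste.
        $("#peer-assessment--001__assessment__submit").toggleClass("is--disabled", !canSubmit);
    });

The showCorrections method documented at the end of this hunk is used the same way by the student-training view: the server's trainingAssess handler resolves with a corrections object keyed by criterion name, and the view passes it straight through (var hasErrors = rubric.showCorrections(corrections);) to toggle the correct/incorrect messages on each criterion.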
......@@ -8,7 +8,7 @@
Read for conciseness, clarity of thought, and form.
</prompt>
<criterion feedback='optional'>
<criterion feedback="optional">
<name>concise</name>
<prompt>How concise is it?</prompt>
<option points="0">
......@@ -74,7 +74,7 @@
</explanation>
</option>
</criterion>
<criterion feedback='optional'>
<criterion feedback="optional">
<name>form</name>
<prompt>Lastly, how is its form? Punctuation, grammar, and spelling all count.</prompt>
<option points="0">
......@@ -102,6 +102,10 @@
<explanation></explanation>
</option>
</criterion>
<criterion feedback="required">
<name>Feedback only</name>
<prompt>This criterion has only written feedback, no options</prompt>
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment"
......
<openassessment>
<title>Feedback only criterion</title>
<prompt>Test prompt</prompt>
<rubric>
<prompt>Test rubric prompt</prompt>
<criterion>
<name>vocabulary</name>
<prompt>How good is the vocabulary?</prompt>
<option points="0">
<name>bad</name>
<explanation>bad</explanation>
</option>
<option points="1">
<name>good</name>
<explanation>good</explanation>
</option>
</criterion>
<criterion feedback="required">
<name>𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞</name>
<prompt>This criterion accepts only written feedback, so it has no options</prompt>
</criterion>
</rubric>
<assessments>
<assessment name="example-based-assessment" algorithm_id="fake">
<example>
<answer>This is my answer.</answer>
<select criterion="vocabulary" option="good" />
</example>
<example>
<answer>тєѕт αηѕωєя</answer>
<select criterion="vocabulary" option="bad" />
</example>
</assessment>
</assessments>
</openassessment>
<openassessment>
<title>Feedback only criterion</title>
<prompt>Test prompt</prompt>
<rubric>
<prompt>Test rubric prompt</prompt>
<criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt>
<option points="3">
<name>ﻉซƈﻉɭɭﻉกՇ</name>
<explanation>Extremely concise</explanation>
</option>
<option points="2">
<name>Ġööḋ</name>
<explanation>Concise</explanation>
</option>
<option points="1">
<name>ק๏๏г</name>
<explanation>Wordy</explanation>
</option>
</criterion>
<criterion>
<name>Form</name>
<prompt>How well-formed is it?</prompt>
<option points="3">
<name>Good</name>
<explanation>Good</explanation>
</option>
<option points="2">
<name>Fair</name>
<explanation>Fair</explanation>
</option>
<option points="1">
<name>Poor</name>
<explanation>Poor</explanation>
</option>
</criterion>
<criterion feedback="required">
<name>𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞</name>
<prompt>This criterion accepts only written feedback, so it has no options</prompt>
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment" must_grade="2" must_be_graded_by="2"/>
</assessments>
</openassessment>
<openassessment>
<title>Feedback only criterion</title>
<prompt>Test prompt</prompt>
<rubric>
<prompt>Test rubric prompt</prompt>
<criterion>
<name>vocabulary</name>
<prompt>How good is the vocabulary?</prompt>
<option points="0">
<name>bad</name>
<explanation>bad</explanation>
</option>
<option points="1">
<name>good</name>
<explanation>good</explanation>
</option>
</criterion>
<criterion feedback="required">
<name>𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞</name>
<prompt>This criterion accepts only written feedback, so it has no options</prompt>
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment" must_grade="1" must_be_graded_by="1" />
</assessments>
</openassessment>
<openassessment>
<title>Feedback only criterion</title>
<prompt>Test prompt</prompt>
<rubric>
<prompt>Test rubric prompt</prompt>
<criterion>
<name>vocabulary</name>
<prompt>How good is the vocabulary?</prompt>
<option points="0">
<name>bad</name>
<explanation>bad</explanation>
</option>
<option points="1">
<name>good</name>
<explanation>good</explanation>
</option>
</criterion>
<criterion feedback="required">
<name>𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞</name>
<prompt>This criterion accepts only written feedback, so it has no options</prompt>
</criterion>
</rubric>
<assessments>
<assessment name="self-assessment" />
</assessments>
</openassessment>
<openassessment>
<title>Feedback only criterion</title>
<prompt>Test prompt</prompt>
<rubric>
<prompt>Test rubric prompt</prompt>
<criterion>
<name>vocabulary</name>
<prompt>How good is the vocabulary?</prompt>
<option points="0">
<name>bad</name>
<explanation>bad</explanation>
</option>
<option points="1">
<name>good</name>
<explanation>good</explanation>
</option>
</criterion>
<criterion feedback="required">
<name>𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞</name>
<prompt>This criterion accepts only written feedback, so it has no options</prompt>
</criterion>
</rubric>
<assessments>
<assessment name="student-training">
<example>
<answer>This is my answer.</answer>
<select criterion="vocabulary" option="good" />
</example>
<example>
<answer>тєѕт αηѕωєя</answer>
<select criterion="vocabulary" option="bad" />
</example>
</assessment>
<assessment name="peer-assessment" must_grade="1" must_be_graded_by="1" />
</assessments>
</openassessment>
......@@ -40,28 +40,6 @@
</criterion>
</rubric>
<assessments>
<assessment name="example-based-assessment" algorithm_id="fake">
<example>
<answer>Example Answer One</answer>
<select criterion="𝓒𝓸𝓷𝓬𝓲𝓼𝓮" option="Ġööḋ" />
<select criterion="Form" option="Poor" />
</example>
<example>
<answer>Example Answer Two</answer>
<select criterion="𝓒𝓸𝓷𝓬𝓲𝓼𝓮" option="ﻉซƈﻉɭɭﻉกՇ" />
<select criterion="Form" option="Fair" />
</example>
<example>
<answer>Example Answer Three</answer>
<select criterion="𝓒𝓸𝓷𝓬𝓲𝓼𝓮" option="Ġööḋ" />
<select criterion="Form" option="Good" />
</example>
<example>
<answer>Example Answer Four</answer>
<select criterion="𝓒𝓸𝓷𝓬𝓲𝓼𝓮" option="ﻉซƈﻉɭɭﻉกՇ" />
<select criterion="Form" option="Good" />
</example>
</assessment>
<assessment name="peer-assessment" must_grade="2" must_be_graded_by="2" />
<assessment name="self-assessment" />
</assessments>
......
<openassessment>
<title>Open Assessment Test</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
</prompt>
<rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt>
<option points="3">
<name>ﻉซƈﻉɭɭﻉกՇ</name>
<explanation>Extremely concise</explanation>
</option>
<option points="2">
<name>Ġööḋ</name>
<explanation>Concise</explanation>
</option>
<option points="1">
<name>ק๏๏г</name>
<explanation>Wordy</explanation>
</option>
</criterion>
<criterion>
<name>Form</name>
<prompt>How well-formed is it?</prompt>
<option points="3">
<name>Good</name>
<explanation>Good</explanation>
</option>
<option points="2">
<name>Fair</name>
<explanation>Fair</explanation>
</option>
<option points="1">
<name>Poor</name>
<explanation>Poor</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="example-based-assessment" algorithm_id="fake">
<example>
<answer>Example Answer One</answer>
<select criterion="𝓒𝓸𝓷𝓬𝓲𝓼𝓮" option="Ġööḋ" />
<select criterion="Form" option="Poor" />
</example>
<example>
<answer>Example Answer Two</answer>
<select criterion="𝓒𝓸𝓷𝓬𝓲𝓼𝓮" option="ﻉซƈﻉɭɭﻉกՇ" />
<select criterion="Form" option="Fair" />
</example>
<example>
<answer>Example Answer Three</answer>
<select criterion="𝓒𝓸𝓷𝓬𝓲𝓼𝓮" option="Ġööḋ" />
<select criterion="Form" option="Good" />
</example>
<example>
<answer>Example Answer Four</answer>
<select criterion="𝓒𝓸𝓷𝓬𝓲𝓼𝓮" option="ﻉซƈﻉɭɭﻉกՇ" />
<select criterion="Form" option="Good" />
</example>
</assessment>
<assessment name="peer-assessment" must_grade="2" must_be_graded_by="2" />
<assessment name="self-assessment" />
</assessments>
</openassessment>
......@@ -9,7 +9,15 @@
<criterion>
<name>Test criterion</name>
<prompt>Test criterion prompt</prompt>
<!-- no options -->
<!-- duplicate option names -->
<option points="0">
<name>DUPLICATE</name>
<explanation></explanation>
</option>
<option points="1">
<name>DUPLICATE</name>
<explanation></explanation>
</option>
</criterion>
</rubric>
</openassessment>
......@@ -6,20 +6,6 @@
}
},
"zero_options": {
"rubric": {
"prompt": "Test Prompt",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": []
}
]
}
},
"negative_points": {
"rubric": {
"prompt": "Test Prompt",
......@@ -544,5 +530,49 @@
}
]
}
},
"zero_options_feedback_optional": {
"rubric": {
"prompt": "Test Prompt",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [],
"feedback": "optional"
}
]
}
},
"zero_options_feedback_disabled": {
"rubric": {
"prompt": "Test Prompt",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [],
"feedback": "disabled"
}
]
}
},
"zero_options_no_feedback": {
"rubric": {
"prompt": "Test Prompt",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": []
}
]
}
}
}
......@@ -706,6 +706,71 @@
]
},
"criterion_feedback_required": {
"title": "Foo",
"prompt": "Test prompt",
"rubric_feedback_prompt": "Test Feedback Prompt",
"start": null,
"due": null,
"submission_start": null,
"submission_due": null,
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"feedback": "required",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
}
],
"assessments": [
{
"name": "peer-assessment",
"start": null,
"due": null,
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment",
"start": null,
"due": null
}
],
"expected_xml": [
"<openassessment>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion feedback=\"required\">",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"<feedbackprompt>Test Feedback Prompt</feedbackprompt>",
"</rubric>",
"</openassessment>"
]
},
"student_training_no_examples": {
"title": "Foo",
"prompt": "Test prompt",
......
......@@ -478,28 +478,28 @@
"criterion_feedback_optional": {
"xml": [
"<openassessment>",
"<title>Foo</title>",
"<title>foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<prompt>test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"<name>test criterion</name>",
"<prompt>test criterion prompt</prompt>",
"<option points=\"0\"><name>no</name><explanation>no explanation</explanation></option>",
"<option points=\"2\"><name>yes</name><explanation>yes explanation</explanation></option>",
"</criterion>",
"<criterion feedback=\"optional\">",
"<name>Second criterion</name>",
"<prompt>Second criterion prompt</prompt>",
"<option points=\"1\"><name>Maybe</name><explanation>Maybe explanation</explanation></option>",
"<name>second criterion</name>",
"<prompt>second criterion prompt</prompt>",
"<option points=\"1\"><name>maybe</name><explanation>maybe explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
],
"title": "Foo",
"prompt": "Test prompt",
"title": "foo",
"prompt": "test prompt",
"start": "2000-01-01T00:00:00",
"due": "3000-01-01T00:00:00",
"submission_start": null,
......@@ -507,35 +507,111 @@
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"name": "test criterion",
"prompt": "test criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
"name": "no",
"explanation": "no explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
"name": "yes",
"explanation": "yes explanation"
}
]
},
{
"order_num": 1,
"name": "Second criterion",
"prompt": "Second criterion prompt",
"name": "second criterion",
"prompt": "second criterion prompt",
"feedback": "optional",
"options": [
{
"order_num": 0,
"points": 1,
"name": "Maybe",
"explanation": "Maybe explanation"
"name": "maybe",
"explanation": "maybe explanation"
}
]
}
],
"assessments": [
{
"name": "peer-assessment",
"start": "2014-02-27T09:46:28",
"due": "2014-03-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
}
]
},
"criterion_feedback_required": {
"xml": [
"<openassessment>",
"<title>foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>",
"<rubric>",
"<prompt>test prompt</prompt>",
"<criterion>",
"<name>test criterion</name>",
"<prompt>test criterion prompt</prompt>",
"<option points=\"0\"><name>no</name><explanation>no explanation</explanation></option>",
"<option points=\"2\"><name>yes</name><explanation>yes explanation</explanation></option>",
"</criterion>",
"<criterion feedback=\"required\">",
"<name>second criterion</name>",
"<prompt>second criterion prompt</prompt>",
"<option points=\"1\"><name>maybe</name><explanation>maybe explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
],
"title": "foo",
"prompt": "test prompt",
"start": "2000-01-01T00:00:00",
"due": "3000-01-01T00:00:00",
"submission_start": null,
"submission_due": null,
"criteria": [
{
"order_num": 0,
"name": "test criterion",
"prompt": "test criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
"points": 0,
"name": "no",
"explanation": "no explanation"
},
{
"order_num": 1,
"points": 2,
"name": "yes",
"explanation": "yes explanation"
}
]
},
{
"order_num": 1,
"name": "second criterion",
"prompt": "second criterion prompt",
"feedback": "required",
"options": [
{
"order_num": 0,
"points": 1,
"name": "maybe",
"explanation": "maybe explanation"
}
]
}
......
......@@ -596,5 +596,20 @@
]
},
"is_released": true
},
"zero_options": {
"rubric": {
"prompt": "Test Prompt",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [],
"feedback": "required"
}
]
}
}
}
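A minimal sketch of the "zero_options" fixture above restated as a Python dict (assuming the rubric-dict shape used elsewhere in these fixtures; the variable name is illustrative, not part of the commit). Per the validation rule added later in this diff, a criterion with an empty options list must require written feedback:

zero_options_rubric = {
    "prompt": "Test Prompt",
    "criteria": [
        {
            "order_num": 0,
            "name": "Test criterion",
            "prompt": "Test criterion prompt",
            "options": [],           # no options: written feedback only
            "feedback": "required",  # anything other than "required" fails validation
        }
    ],
}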
......@@ -36,3 +36,19 @@ class AIAssessmentIntegrationTest(XBlockHandlerTestCase):
score = sub_api.get_score(xblock.get_student_item_dict())
self.assertIsNot(score, None)
self.assertEqual(score['submission_uuid'], xblock.submission_uuid)
@mock.patch.object(OpenAssessmentBlock, 'is_admin', new_callable=mock.PropertyMock)
@override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
@scenario('data/feedback_only_criterion_ai.xml', user_id='Bob')
def test_feedback_only_criterion(self, xblock, mock_is_admin):
# Test that AI grading, which creates assessments asynchronously,
# works when the rubric has a feedback-only criterion (no options)
# and still updates the workflow so students can receive a score.
mock_is_admin.return_value = True
# Train classifiers for the problem and submit a response
self.request(xblock, 'schedule_training', json.dumps({}), response_format='json')
self.request(xblock, 'submit', self.SUBMISSION, response_format='json')
# Render the grade page
resp = self.request(xblock, 'render_grade', json.dumps({}))
self.assertIn('example-based', resp.lower())
......@@ -5,22 +5,15 @@ Tests for grade handlers in Open Assessment XBlock.
import copy
import ddt
import json
import mock
from django.test.utils import override_settings
from submissions import api as sub_api
from openassessment.workflow import api as workflow_api
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api
from openassessment.xblock.data_conversion import create_rubric_dict
from openassessment.xblock.openassessmentblock import OpenAssessmentBlock
from .base import XBlockHandlerTestCase, scenario
# Test dependency on Stub AI Algorithm configuration
from openassessment.assessment.test.test_ai import (
ALGORITHM_ID, AI_ALGORITHMS, train_classifiers
)
CLASSIFIER_SCORE_OVERRIDES = {
u"𝓒𝓸𝓷𝓬𝓲𝓼𝓮": {'score_override': 1},
u"Form": {'score_override': 2}
}
@ddt.ddt
class TestGrade(XBlockHandlerTestCase):
......@@ -52,12 +45,12 @@ class TestGrade(XBlockHandlerTestCase):
STEPS = ['peer', 'self']
AI_ALGORITHMS = {
'fake': 'openassessment.assessment.worker.algorithm.FakeAIAlgorithm'
}
@override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
@scenario('data/grade_scenario.xml', user_id='Greggs')
def test_render_grade(self, xblock):
rubric = create_rubric_dict(xblock.prompt, xblock.rubric_criteria)
train_classifiers(rubric, CLASSIFIER_SCORE_OVERRIDES)
# Submit, assess, and render the grade view
self._create_submission_and_assessments(
xblock, self.SUBMISSION, self.PEERS, self.ASSESSMENTS, self.ASSESSMENTS[0]
......@@ -84,15 +77,12 @@ class TestGrade(XBlockHandlerTestCase):
self.assertIn('self', resp.lower())
self.assertIn('complete', resp.lower())
@override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
@scenario('data/grade_scenario_self_only.xml', user_id='Greggs')
def test_render_grade_self_only(self, xblock):
rubric = create_rubric_dict(xblock.prompt, xblock.rubric_criteria)
train_classifiers(rubric, CLASSIFIER_SCORE_OVERRIDES)
# Submit, assess, and render the grade view
self._create_submission_and_assessments(
xblock, self.SUBMISSION, [], [], self.ASSESSMENTS[0],
waiting_for_peer=True, waiting_for_ai=True
waiting_for_peer=True
)
resp = self.request(xblock, 'render_grade', json.dumps(dict()))
......@@ -116,11 +106,36 @@ class TestGrade(XBlockHandlerTestCase):
self.assertIn('self', resp.lower())
self.assertIn('complete', resp.lower())
@scenario('data/feedback_only_criterion_grade.xml', user_id='Greggs')
def test_render_grade_feedback_only_criterion(self, xblock):
# Add in per-criterion feedback for the feedback-only criterion
peer_assessments = copy.deepcopy(self.ASSESSMENTS)
for asmnt in peer_assessments:
asmnt['criterion_feedback'] = {
u'𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞': u"Ṫḧïṡ ïṡ ṡöṁë ḟëëḋḅäċḳ."
}
# Submit, assess, and render the grade view
self._create_submission_and_assessments(
xblock, self.SUBMISSION, self.PEERS, peer_assessments, self.ASSESSMENTS[0]
)
# Render the grade section
resp = self.request(xblock, 'render_grade', json.dumps(dict()))
self.assertIn('your response', resp.lower())
# Verify that feedback from each scorer appears in the view
self.assertIn(u'єאςєɭɭєภՇ ฬ๏гк!', resp.decode('utf-8'))
self.assertIn(u'Good job!', resp.decode('utf-8'))
@mock.patch.object(OpenAssessmentBlock, 'is_admin', new_callable=mock.PropertyMock)
@override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
@scenario('data/grade_scenario_ai_only.xml', user_id='Greggs')
def test_render_grade_ai_only(self, xblock):
rubric = create_rubric_dict(xblock.prompt, xblock.rubric_criteria)
train_classifiers(rubric, CLASSIFIER_SCORE_OVERRIDES)
def test_render_grade_ai_only(self, xblock, mock_is_admin):
# Train classifiers using the fake AI algorithm
mock_is_admin.return_value = True
self.request(xblock, 'schedule_training', json.dumps({}), response_format='json')
# Submit, assess, and render the grade view
self._create_submission_and_assessments(
xblock, self.SUBMISSION, [], [], None, waiting_for_peer=True
......@@ -129,7 +144,7 @@ class TestGrade(XBlockHandlerTestCase):
# Verify that feedback from each scorer appears in the view
self.assertNotIn(u'єאςєɭɭєภՇ', resp.decode('utf-8'))
self.assertIn(u'Fair', resp.decode('utf-8'))
self.assertIn(u'Poor', resp.decode('utf-8'))
# Verify that the submission and peer steps show that we're graded
# This isn't strictly speaking part of the grade step rendering,
......@@ -147,7 +162,6 @@ class TestGrade(XBlockHandlerTestCase):
self.assertNotIn('self', resp.lower())
self.assertNotIn('complete', resp.lower())
@override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
@scenario('data/feedback_per_criterion.xml', user_id='Bernard')
def test_render_grade_feedback_per_criterion(self, xblock):
# Submit, assess, and render the grade view
......@@ -179,22 +193,27 @@ class TestGrade(XBlockHandlerTestCase):
self.assertIn(u'Peer 2: ฝﻉɭɭ ɗѻกﻉ!', resp.decode('utf-8'))
self.assertIn(u'Peer 2: ƒαιя נσв', resp.decode('utf-8'))
@override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
@ddt.file_data('data/waiting_scenarios.json')
@scenario('data/grade_scenario.xml', user_id='Omar')
@scenario('data/grade_waiting_scenario.xml', user_id='Omar')
def test_grade_waiting(self, xblock, data):
train_classifiers({'criteria': xblock.rubric_criteria}, CLASSIFIER_SCORE_OVERRIDES)
# If AI classifiers are not trained, then we should see a "waiting for AI" display
if not data["waiting_for_ai"]:
with mock.patch.object(
OpenAssessmentBlock, 'is_admin', new_callable=mock.PropertyMock
) as mock_is_admin:
mock_is_admin.return_value = True
self.request(xblock, 'schedule_training', json.dumps({}), response_format='json')
# Waiting to be assessed by a peer
self._create_submission_and_assessments(
xblock, self.SUBMISSION, self.PEERS, self.ASSESSMENTS, self.ASSESSMENTS[0],
waiting_for_peer=data["waiting_for_peer"], waiting_for_ai=data["waiting_for_ai"]
waiting_for_peer=data["waiting_for_peer"]
)
resp = self.request(xblock, 'render_grade', json.dumps(dict()))
# Verify that we're on the waiting template
self.assertIn(data["expected_response"], resp.decode('utf-8').lower())
@override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
@scenario('data/grade_incomplete_scenario.xml', user_id='Bunk')
def test_grade_incomplete_missing_self(self, xblock):
# Graded peers, but haven't completed self assessment
......@@ -206,7 +225,6 @@ class TestGrade(XBlockHandlerTestCase):
# Verify that we're on the right template
self.assertIn(u'not completed', resp.decode('utf-8').lower())
@override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
@scenario('data/grade_incomplete_scenario.xml', user_id='Daniels')
def test_grade_incomplete_missing_peer(self, xblock):
# Have not yet completed peer assessment
......@@ -218,7 +236,6 @@ class TestGrade(XBlockHandlerTestCase):
# Verify that we're on the right template
self.assertIn(u'not completed', resp.decode('utf-8').lower())
@override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
@scenario('data/grade_scenario.xml', user_id='Greggs')
def test_submit_feedback(self, xblock):
# Create submissions and assessments
......@@ -242,7 +259,6 @@ class TestGrade(XBlockHandlerTestCase):
feedback['options'], [{'text': u'Option 1'}, {'text': u'Option 2'}]
)
@override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
@scenario('data/grade_scenario.xml', user_id='Bob')
def test_submit_feedback_no_options(self, xblock):
# Create submissions and assessments
......@@ -263,7 +279,6 @@ class TestGrade(XBlockHandlerTestCase):
self.assertIsNot(feedback, None)
self.assertItemsEqual(feedback['options'], [])
@override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
@scenario('data/grade_scenario.xml', user_id='Bob')
def test_submit_feedback_invalid_options(self, xblock):
# Create submissions and assessments
......@@ -282,7 +297,7 @@ class TestGrade(XBlockHandlerTestCase):
def _create_submission_and_assessments(
self, xblock, submission_text, peers, peer_assessments, self_assessment,
waiting_for_peer=False, waiting_for_ai=False
waiting_for_peer=False,
):
"""
Create a submission and peer/self assessments, so that the user can receive a grade.
......@@ -296,7 +311,6 @@ class TestGrade(XBlockHandlerTestCase):
Kwargs:
waiting_for_peer (bool): If true, skip creation of peer assessments for the user's submission.
waiting_for_ai (bool): If True, skip creation of ai assessment.
Returns:
None
......@@ -305,10 +319,6 @@ class TestGrade(XBlockHandlerTestCase):
# Create a submission from the user
student_item = xblock.get_student_item_dict()
student_id = student_item['student_id']
example_based_assessment = xblock.get_assessment_module('example-based-assessment')
if not waiting_for_ai and example_based_assessment:
train_classifiers({'criteria': xblock.rubric_criteria}, CLASSIFIER_SCORE_OVERRIDES)
example_based_assessment['algorithm_id'] = ALGORITHM_ID
submission = xblock.create_submission(student_item, submission_text)
# Create submissions and assessments from other users
......
......@@ -603,6 +603,26 @@ class TestPeerAssessHandler(XBlockHandlerTestCase):
for part in assessment['parts']:
self.assertEqual(part['feedback'], '')
@scenario('data/feedback_only_criterion_peer.xml', user_id='Bob')
def test_peer_assess_feedback_only_criterion(self, xblock):
# Submit a peer assessment for a rubric with a feedback-only criterion
assessment_dict = {
'options_selected': {u'vocabulary': u'good'},
'criterion_feedback': {u'𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞': u'Ṫḧïṡ ïṡ ṡöṁë ḟëëḋḅäċḳ'},
'overall_feedback': u''
}
_, assessment = self._submit_peer_assessment(xblock, 'Sally', 'Bob', assessment_dict)
# Check the assessment for the criterion that has options
self.assertEqual(assessment['parts'][0]['criterion']['name'], 'vocabulary')
self.assertEqual(assessment['parts'][0]['option']['name'], 'good')
self.assertEqual(assessment['parts'][0]['option']['points'], 1)
# Check the feedback-only criterion score/feedback
self.assertEqual(assessment['parts'][1]['criterion']['name'], u'𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞')
self.assertIs(assessment['parts'][1]['option'], None)
self.assertEqual(assessment['parts'][1]['feedback'], u'Ṫḧïṡ ïṡ ṡöṁë ḟëëḋḅäċḳ')
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_submission_uuid_input_regression(self, xblock):
# Submit a peer assessment
......
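A hedged sketch of how client code might consume the shape asserted in the peer-assessment test above, where a feedback-only criterion's part has its option set to None (field names follow the serialized assessment used in the test; the loop itself is illustrative, not part of the commit):

for part in assessment['parts']:
    if part['option'] is not None:
        # Criterion with options: report the selected option and its points.
        print(part['criterion']['name'], part['option']['name'], part['option']['points'])
    else:
        # Feedback-only criterion: no option was selected, only written feedback.
        print(part['criterion']['name'], part['feedback'])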
......@@ -78,6 +78,32 @@ class TestSelfAssessment(XBlockHandlerTestCase):
}
mock_api.update_from_assessments.assert_called_once_with(submission['uuid'], expected_reqs)
@scenario('data/feedback_only_criterion_self.xml', user_id='Bob')
def test_self_assess_feedback_only_criterion(self, xblock):
# Create a submission for the student
student_item = xblock.get_student_item_dict()
submission = xblock.create_submission(student_item, self.SUBMISSION)
# Submit a self assessment for a rubric with a feedback-only criterion
assessment_dict = {
'options_selected': {u'vocabulary': u'good'},
'overall_feedback': u''
}
resp = self.request(xblock, 'self_assess', json.dumps(assessment_dict), response_format='json')
self.assertTrue(resp['success'])
assessment = self_api.get_assessment(submission["uuid"])
# Check the assessment for the criterion that has options
self.assertEqual(assessment['parts'][0]['criterion']['name'], 'vocabulary')
self.assertEqual(assessment['parts'][0]['option']['name'], 'good')
self.assertEqual(assessment['parts'][0]['option']['points'], 1)
# Check the feedback-only criterion score/feedback
# The written feedback should default to an empty string
self.assertEqual(assessment['parts'][1]['criterion']['name'], u'𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞')
self.assertIs(assessment['parts'][1]['option'], None)
self.assertEqual(assessment['parts'][1]['feedback'], u'')
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_self_assess_workflow_error(self, xblock):
# Create a submission for the student
......
......@@ -13,20 +13,55 @@ from openassessment.workflow import api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError
from .base import XBlockHandlerTestCase, scenario
@ddt.ddt
class StudentTrainingAssessTest(XBlockHandlerTestCase):
class StudentTrainingTest(XBlockHandlerTestCase):
"""
Tests for student training assessment.
Base class for student training tests.
"""
SUBMISSION = {
'submission': u'Thé őbjéćt őf édúćátíőń íś tő téáćh úś tő ĺővé ẃhát íś béáútífúĺ.'
}
def assert_path_and_context(self, xblock, expected_path, expected_context):
"""
Render the student training step and verify that the expected template
and context were used. Also check that the template renders without error.
Args:
xblock (OpenAssessmentBlock): The XBlock under test.
expected_path (str): The expected template path.
expected_context (dict): The expected template context.
Raises:
AssertionError
"""
path, context = xblock.training_path_and_context()
self.assertEqual(path, expected_path)
self.assertEqual(len(context), len(expected_context))
for key in expected_context.keys():
if key == 'training_due':
iso_date = context['training_due'].isoformat()
self.assertEqual(iso_date, expected_context[key])
else:
self.assertEqual(context[key], expected_context[key])
# Verify that we render without error
resp = self.request(xblock, 'render_student_training', json.dumps({}))
self.assertGreater(len(resp), 0)
@ddt.ddt
class StudentTrainingAssessTest(StudentTrainingTest):
"""
Tests for student training assessment.
"""
@scenario('data/student_training.xml', user_id="Plato")
@ddt.file_data('data/student_training_mixin.json')
def test_correct(self, xblock, data):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
self._assert_path_and_context(xblock, data["expected_template"], data["expected_context"])
self.assert_path_and_context(xblock, data["expected_template"], data["expected_context"])
# Agree with the course author's assessment
# (as defined in the scenario XML)
......@@ -46,7 +81,7 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase):
@ddt.file_data('data/student_training_mixin.json')
def test_correct_with_error(self, xblock, data):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
self._assert_path_and_context(xblock, data["expected_template"], data["expected_context"])
self.assert_path_and_context(xblock, data["expected_template"], data["expected_context"])
# Agree with the course author's assessment
# (as defined in the scenario XML)
......@@ -69,7 +104,7 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase):
@ddt.file_data('data/student_training_mixin.json')
def test_incorrect(self, xblock, data):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
self._assert_path_and_context(xblock, data["expected_template"], data["expected_context"])
self.assert_path_and_context(xblock, data["expected_template"], data["expected_context"])
# Disagree with the course author's assessment
# (as defined in the scenario XML)
......@@ -91,7 +126,7 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase):
expected_context = data["expected_context"].copy()
expected_template = data["expected_template"]
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
self._assert_path_and_context(xblock, expected_template, expected_context)
self.assert_path_and_context(xblock, expected_template, expected_context)
# Agree with the course author's assessment
# (as defined in the scenario XML)
......@@ -119,7 +154,7 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase):
expected_context["training_num_completed"] = 1
expected_context["training_num_current"] = 2
expected_context["training_essay"] = u"тєѕт αηѕωєя"
self._assert_path_and_context(xblock, expected_template, expected_context)
self.assert_path_and_context(xblock, expected_template, expected_context)
resp = self.request(xblock, 'training_assess', json.dumps(selected_data), response_format='json')
# Expect that we were correct
......@@ -127,7 +162,27 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase):
self.assertFalse(resp['corrections'])
expected_context = {}
expected_template = "openassessmentblock/student_training/student_training_complete.html"
self._assert_path_and_context(xblock, expected_template, expected_context)
self.assert_path_and_context(xblock, expected_template, expected_context)
@scenario('data/feedback_only_criterion_student_training.xml', user_id='Bob')
def test_feedback_only_criterion(self, xblock):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
self.request(xblock, 'render_student_training', json.dumps({}))
# Agree with the course author's assessment
# (as defined in the scenario XML)
# We do NOT pass in an option for the feedback-only criterion,
# because it doesn't have any options.
data = {
'options_selected': {
'vocabulary': 'good',
}
}
resp = self.request(xblock, 'training_assess', json.dumps(data), response_format='json')
# Expect that we were correct
self.assertTrue(resp['success'], msg=resp.get('msg'))
self.assertFalse(resp['corrections'])
@scenario('data/student_training.xml', user_id="Plato")
@ddt.file_data('data/student_training_mixin.json')
......@@ -135,7 +190,7 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
expected_context = data["expected_context"].copy()
expected_template = data["expected_template"]
self._assert_path_and_context(xblock, expected_template, expected_context)
self.assert_path_and_context(xblock, expected_template, expected_context)
resp = self.request(xblock, 'training_assess', json.dumps({}), response_format='json')
self.assertFalse(resp['success'], msg=resp.get('msg'))
......@@ -151,7 +206,7 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
expected_context = data["expected_context"].copy()
expected_template = data["expected_template"]
self._assert_path_and_context(xblock, expected_template, expected_context)
self.assert_path_and_context(xblock, expected_template, expected_context)
selected_data = {
'options_selected': {
......@@ -174,36 +229,8 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase):
resp = self.request(xblock, 'training_assess', json.dumps(selected_data))
self.assertIn("Your scores could not be checked", resp.decode('utf-8'))
def _assert_path_and_context(self, xblock, expected_path, expected_context):
"""
Render the student training step and verify that the expected template
and context were used. Also check that the template renders without error.
Args:
xblock (OpenAssessmentBlock): The XBlock under test.
expected_path (str): The expected template path.
expected_context (dict): The expected template context.
Raises:
AssertionError
"""
path, context = xblock.training_path_and_context()
self.assertEqual(path, expected_path)
self.assertEqual(len(context), len(expected_context))
for key in expected_context.keys():
if key == 'training_due':
iso_date = context['training_due'].isoformat()
self.assertEqual(iso_date, expected_context[key])
else:
self.assertEqual(context[key], expected_context[key])
# Verify that we render without error
resp = self.request(xblock, 'render_student_training', json.dumps({}))
self.assertGreater(len(resp), 0)
class StudentTrainingRenderTest(StudentTrainingAssessTest):
class StudentTrainingRenderTest(StudentTrainingTest):
"""
Tests for student training step rendering.
"""
......@@ -230,7 +257,7 @@ class StudentTrainingRenderTest(StudentTrainingAssessTest):
expected_context = {
'training_due': "2000-01-01T00:00:00+00:00"
}
self._assert_path_and_context(xblock, expected_template, expected_context)
self.assert_path_and_context(xblock, expected_template, expected_context)
@scenario('data/student_training.xml', user_id="Plato")
@patch.object(StudentTrainingWorkflow, "get_workflow")
......@@ -247,4 +274,4 @@ class StudentTrainingRenderTest(StudentTrainingAssessTest):
expected_context = {
'training_start': datetime.datetime(3000, 1, 1).replace(tzinfo=pytz.utc)
}
self._assert_path_and_context(xblock, expected_template, expected_context)
self.assert_path_and_context(xblock, expected_template, expected_context)
......@@ -102,7 +102,7 @@ class StudioViewTest(XBlockHandlerTestCase):
self.assertFalse(resp['success'])
self.assertIn("for this assignment", resp['msg'].lower())
@data(('data/invalid_rubric.xml', 'rubric'), ('data/invalid_assessment.xml', 'assessment'))
@data(('data/invalid_rubric.xml', 'duplicate'), ('data/invalid_assessment.xml', 'assessment'))
@scenario('data/basic_scenario.xml')
def test_update_xml_invalid(self, xblock, data):
xml_path = data[0]
......
......@@ -106,7 +106,7 @@ def validate_assessments(assessments, current_assessments, is_released):
if len(assessments) == 0:
return (False, _("This problem must include at least one assessment."))
# Right now, there are two allowed scenarios: (peer -> self) and (self)
# Ensure that we support this sequence of assessments.
if not _is_valid_assessment_sequence(assessments):
msg = _(
"For this assignment, you can set a peer assessment only, a self "
......@@ -131,7 +131,7 @@ def validate_assessments(assessments, current_assessments, is_released):
if must_grade < must_be_graded_by:
return (False, _('The "must_grade" value must be greater than or equal to the "must_be_graded_by" value.'))
# Example-based assessment MUST specify 'ease' as the algorithm ID,
# Example-based assessment MUST specify 'ease' or 'fake' as the algorithm ID,
# at least for now. Later, we may make this more flexible.
if assessment_dict.get('name') == 'example-based-assessment':
if assessment_dict.get('algorithm_id') not in ['ease', 'fake']:
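A sketch of an assessment dict that satisfies the check above (the key names are taken from the code; the values and the surrounding dict are illustrative):

example_based = {
    "name": "example-based-assessment",
    "algorithm_id": "ease",  # "fake" is also accepted, e.g. for the stub algorithm used in tests
}
assert example_based.get("algorithm_id") in ["ease", "fake"]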
......@@ -177,8 +177,8 @@ def validate_rubric(rubric_dict, current_rubric, is_released, is_example_based):
)
return (False, msg)
# No duplicate option names within a criterion
for criterion in rubric_dict['criteria']:
# No duplicate option names within a criterion
duplicates = _duplicates([option['name'] for option in criterion['options']])
if len(duplicates) > 0:
msg = _(u"Options in '{criterion}' have duplicate name(s): {duplicates}").format(
......@@ -186,6 +186,12 @@ def validate_rubric(rubric_dict, current_rubric, is_released, is_example_based):
)
return (False, msg)
# Some criteria may have no options, just written feedback.
# In this case, written feedback must be required (not optional or disabled).
if len(criterion['options']) == 0 and criterion.get('feedback', 'disabled') != 'required':
msg = _(u'Criteria with no options must require written feedback.')
return (False, msg)
# Example-based assessments impose the additional restriction
# that the point values for options must be unique within
# a particular rubric criterion.
......
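The new rule can be restated as a small predicate (a hedged sketch assuming the criterion dict shape used above; the helper name is ours, not part of the commit):

def feedback_only_criterion_is_valid(criterion):
    """Criteria with no options must require written feedback."""
    if len(criterion.get('options', [])) == 0:
        # Feedback set to "optional" or "disabled" is rejected when there are no options.
        return criterion.get('feedback', 'disabled') == 'required'
    return True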
......@@ -113,10 +113,10 @@ def _serialize_criteria(criteria_root, criteria_list):
criterion_prompt = etree.SubElement(criterion_el, 'prompt')
criterion_prompt.text = unicode(criterion.get('prompt', u''))
# Criterion feedback disabled or optional
# Criterion feedback disabled, optional, or required
# If disabled, do not set the attribute.
if criterion.get('feedback') == "optional":
criterion_el.set('feedback', 'optional')
if criterion.get('feedback') in ["optional", "required"]:
criterion_el.set('feedback', criterion['feedback'])
# Criterion options
options_list = criterion.get('options', None)
......@@ -266,12 +266,12 @@ def _parse_criteria_xml(criteria_root):
else:
raise UpdateFromXmlError(_('Every "criterion" element must contain a "prompt" element.'))
# Criterion feedback (disabled or optional)
# Criterion feedback (disabled, optional, or required)
criterion_feedback = criterion.get('feedback', 'disabled')
if criterion_feedback in ['optional', 'disabled']:
if criterion_feedback in ['optional', 'disabled', 'required']:
criterion_dict['feedback'] = criterion_feedback
else:
raise UpdateFromXmlError(_('Invalid value for "feedback" attribute: if specified, it must be set set to "optional"'))
raise UpdateFromXmlError(_('Invalid value for "feedback" attribute: if specified, it must be set to "optional" or "required".'))
# Criterion options
criterion_dict['options'] = _parse_options_xml(criterion)
......
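Putting the serializer and parser changes together, a feedback-only criterion would round-trip through XML roughly like this (a hedged sketch; the element contents are illustrative):

criterion_xml = (
    '<criterion feedback="required">'
    '<name>feedback only</name>'
    '<prompt>Leave written feedback for this criterion.</prompt>'
    '</criterion>'
)
# Expected parse result (per _parse_criteria_xml above): feedback == "required"
# and options == [], which the validation rule added in this commit then accepts.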
......@@ -18,6 +18,7 @@ django-celery==3.0.17
django-extensions==1.2.5
django-model-utils==1.4.0
djangorestframework==2.3.5
lazy==1.1
loremipsum==1.0.2
python-dateutil==2.1
pytz==2012h
......