Commit f94334ce by Will Daly

Merge pull request #323 from edx/will/student-training-def

Student Training (API and data model)
parents 40f5c887 717dc97f
"""
Public interface for student training:
* Staff can create assessments for example responses.
* Students assess an example response, then compare the scores
they gave with the instructor's assessment.
"""
import logging
from django.db import DatabaseError
from django.utils.translation import ugettext as _
from submissions import api as sub_api
from openassessment.assessment.models import StudentTrainingWorkflow
from openassessment.assessment.serializers import (
deserialize_training_examples, serialize_training_example,
validate_training_example_format,
InvalidTrainingExample, InvalidRubric
)
from openassessment.assessment.errors import (
StudentTrainingRequestError, StudentTrainingInternalError
)
logger = logging.getLogger(__name__)
def submitter_is_finished(submission_uuid, requirements): # pylint:disable=W0613
"""
Check whether the student has correctly assessed
all the training example responses.
Args:
submission_uuid (str): The UUID of the student's submission.
requirements (dict): Not used.
Returns:
bool
"""
try:
workflow = StudentTrainingWorkflow.objects.get(submission_uuid=submission_uuid)
except StudentTrainingWorkflow.DoesNotExist:
return False
else:
return workflow.is_complete
def assessment_is_finished(submission_uuid, requirements): # pylint:disable=W0613
"""
Since the student is not being assessed by others,
this always returns true.
"""
return True
def get_score(submission_uuid, requirements): # pylint:disable=W0613
"""
Training is either complete or incomplete; there is no score.
"""
return None
def validate_training_examples(rubric, examples):
"""
Validate that the training examples match the rubric.
Args:
rubric (dict): Serialized rubric model.
examples (list): List of serialized training examples.
Returns:
list of errors (unicode)
Raises:
StudentTrainingRequestError
StudentTrainingInternalError
Example usage:
>>> options = [
>>> {
>>> "order_num": 0,
>>> "name": "poor",
>>> "explanation": "Poor job!",
>>> "points": 0,
>>> },
>>> {
>>> "order_num": 1,
>>> "name": "good",
>>> "explanation": "Good job!",
>>> "points": 1,
>>> },
>>> {
>>> "order_num": 2,
>>> "name": "excellent",
>>> "explanation": "Excellent job!",
>>> "points": 2,
>>> },
>>> ]
>>>
>>> rubric = {
>>> "prompt": "Write an essay!",
>>> "criteria": [
>>> {
>>> "order_num": 0,
>>> "name": "vocabulary",
>>> "prompt": "How varied is the vocabulary?",
>>> "options": options
>>> },
>>> {
>>> "order_num": 1,
>>> "name": "grammar",
>>> "prompt": "How correct is the grammar?",
>>> "options": options
>>> }
>>> ]
>>> }
>>>
>>> examples = [
>>> {
>>> 'answer': u'Lorem ipsum',
>>> 'options_selected': {
>>> 'vocabulary': 'good',
>>> 'grammar': 'excellent'
>>> }
>>> },
>>> {
>>> 'answer': u'Doler',
>>> 'options_selected': {
>>> 'vocabulary': 'good',
>>> 'grammar': 'poor'
>>> }
>>> }
>>> ]
>>>
>>> errors = validate_training_examples(rubric, examples)
"""
errors = []
# Construct a list of valid options for each criterion
try:
criteria_options = {
unicode(criterion['name']): [
unicode(option['name'])
for option in criterion['options']
]
for criterion in rubric['criteria']
}
except (ValueError, KeyError):
msg = _(u"Could not parse serialized rubric")
return [msg]
# Check each example
for order_num, example_dict in enumerate(examples, start=1):
# Check the structure of the example dict
is_format_valid, format_errors = validate_training_example_format(example_dict)
if not is_format_valid:
format_errors = [
_(u"Example {} has a validation error: {}").format(order_num, error)
for error in format_errors
]
errors.extend(format_errors)
else:
# Check each selected option in the example (one per criterion)
options_selected = example_dict['options_selected']
for criterion_name, option_name in options_selected.iteritems():
if criterion_name in criteria_options:
valid_options = criteria_options[criterion_name]
if option_name not in valid_options:
msg = u"Example {} has an invalid option for \"{}\": \"{}\"".format(
order_num, criterion_name, option_name
)
errors.append(msg)
else:
msg = _(u"Example {} has an extra option for \"{}\"").format(
order_num, criterion_name
)
errors.append(msg)
# Check for missing criteria
for missing_criterion in set(criteria_options.keys()) - set(options_selected.keys()):
msg = _(u"Example {} is missing an option for \"{}\"").format(
order_num, missing_criterion
)
errors.append(msg)
return errors
def create_training_workflow(submission_uuid, rubric, examples):
"""
Start the training workflow.
Args:
submission_uuid (str): The UUID of the student's submission.
rubric (dict): Serialized rubric model.
examples (list): The serialized training examples the student will need to assess.
Returns:
None
Raises:
StudentTrainingRequestError
StudentTrainingInternalError
Example usage:
>>> options = [
>>> {
>>> "order_num": 0,
>>> "name": "poor",
>>> "explanation": "Poor job!",
>>> "points": 0,
>>> },
>>> {
>>> "order_num": 1,
>>> "name": "good",
>>> "explanation": "Good job!",
>>> "points": 1,
>>> },
>>> {
>>> "order_num": 2,
>>> "name": "excellent",
>>> "explanation": "Excellent job!",
>>> "points": 2,
>>> },
>>> ]
>>>
>>> rubric = {
>>> "prompt": "Write an essay!",
>>> "criteria": [
>>> {
>>> "order_num": 0,
>>> "name": "vocabulary",
>>> "prompt": "How varied is the vocabulary?",
>>> "options": options
>>> },
>>> {
>>> "order_num": 1,
>>> "name": "grammar",
>>> "prompt": "How correct is the grammar?",
>>> "options": options
>>> }
>>> ]
>>> }
>>>
>>> examples = [
>>> {
>>> 'answer': u'Lorem ipsum',
>>> 'options_selected': {
>>> 'vocabulary': 'good',
>>> 'grammar': 'excellent'
>>> }
>>> },
>>> {
>>> 'answer': u'Doler',
>>> 'options_selected': {
>>> 'vocabulary': 'good',
>>> 'grammar': 'poor'
>>> }
>>> }
>>> ]
>>>
>>> create_training_workflow("5443ebbbe2297b30f503736e26be84f6c7303c57", rubric, examples)
"""
try:
# Check that examples were provided
if len(examples) == 0:
msg = (
u"No examples provided for student training workflow "
u"(attempted to create workflow for student with submission UUID {})"
).format(submission_uuid)
raise StudentTrainingRequestError(msg)
# Ensure that a workflow doesn't already exist for this submission
already_exists = StudentTrainingWorkflow.objects.filter(
submission_uuid=submission_uuid
).exists()
if already_exists:
msg = (
u"Student training workflow already exists for the student "
u"associated with submission UUID {}"
).format(submission_uuid)
raise StudentTrainingRequestError(msg)
# Create the training examples
try:
examples = deserialize_training_examples(examples, rubric)
except (InvalidRubric, InvalidTrainingExample) as ex:
logger.exception(
"Could not deserialize training examples for submission UUID {}".format(submission_uuid)
)
raise StudentTrainingRequestError(ex.message)
# Create the workflow
try:
StudentTrainingWorkflow.create_workflow(submission_uuid, examples)
except sub_api.SubmissionNotFoundError as ex:
raise StudentTrainingRequestError(ex.message)
except DatabaseError:
msg = (
u"Could not create student training workflow "
u"with submission UUID {}"
).format(submission_uuid)
logger.exception(msg)
raise StudentTrainingInternalError(msg)
def get_workflow_status(submission_uuid):
"""
Get the student's position in the training workflow.
Args:
submission_uuid (str): The UUID of the student's submission.
Returns:
dict: Contains the keys "num_completed" and "num_total".
Raises:
StudentTrainingRequestError
StudentTrainingInternalError
Example usage:
>>> get_workflow_status("5443ebbbe2297b30f503736e26be84f6c7303c57")
{
'num_completed': 1,
'num_total': 3
}
"""
try:
try:
workflow = StudentTrainingWorkflow.objects.get(submission_uuid=submission_uuid)
except StudentTrainingWorkflow.DoesNotExist:
msg = u"Student training workflow does not exist for submission UUID {}".format(submission_uuid)
raise StudentTrainingRequestError(msg)
num_completed, num_total = workflow.status
return {
"num_completed": num_completed,
"num_total": num_total
}
except DatabaseError:
msg = (
u"An unexpected error occurred while "
u"retrieving the student training workflow status for submission UUID {}"
).format(submission_uuid)
logger.exception(msg)
raise StudentTrainingInternalError(msg)
def get_training_example(submission_uuid):
"""
Retrieve a training example for the student to assess.
Args:
submission_uuid (str): The UUID of the student's submission.
Returns:
dict: The training example with keys "answer", "rubric", and "options_selected".
If no training examples are available (the student has already assessed every example,
or no examples are defined), returns None.
Raises:
StudentTrainingRequestError
StudentTrainingInternalError
Example usage:
>>> examples = [
>>> {
>>> 'answer': u'Doler',
>>> 'options_selected': {
>>> 'vocabulary': 'good',
>>> 'grammar': 'poor'
>>> }
>>> }
>>> ]
>>>
>>> get_training_example("5443ebbbe2297b30f503736e26be84f6c7303c57")
{
'answer': u'Doler',
'rubric': {
"prompt": "Write an essay!",
"criteria": [
{
"order_num": 0,
"name": "vocabulary",
"prompt": "How varied is the vocabulary?",
"options": options
},
{
"order_num": 1,
"name": "grammar",
"prompt": "How correct is the grammar?",
"options": options
}
],
},
'options_selected': {
'vocabulary': 'good',
'grammar': 'poor'
}
}
"""
# Find a workflow for the student
try:
workflow = StudentTrainingWorkflow.objects.get(submission_uuid=submission_uuid)
# Find the next incomplete item in the workflow
item = workflow.next_incomplete_item
if item is None:
return None
else:
return serialize_training_example(item.training_example)
except StudentTrainingWorkflow.DoesNotExist:
msg = (
u"No student training workflow exists for the student "
u"associated with submission UUID {}"
).format(submission_uuid)
raise StudentTrainingRequestError(msg)
except DatabaseError:
msg = (
u"Could not retrieve next item in"
u" student training workflow with submission UUID {}"
).format(submission_uuid)
logger.exception(msg)
raise StudentTrainingInternalError(msg)
def assess_training_example(submission_uuid, options_selected, update_workflow=True):
"""
Assess a training example and update the workflow.
Args:
submission_uuid (str): The UUID of the student's submission.
options_selected (dict): The options the student selected.
Kwargs:
update_workflow (bool): If true, mark the current item complete
if the student has assessed the example correctly.
Returns:
corrections (dict): Dictionary containing the correct
options for criteria the student scored incorrectly.
Raises:
StudentTrainingRequestError
StudentTrainingInternalError
Example usage:
>>> options_selected = {
>>> 'vocabulary': 'good',
>>> 'grammar': 'excellent'
>>> }
>>> assess_training_example("5443ebbbe2297b30f503736e26be84f6c7303c57", options_selected)
{'grammar': 'poor'}
"""
# Find a workflow for the student
try:
workflow = StudentTrainingWorkflow.objects.get(submission_uuid=submission_uuid)
# Find the next incomplete item in the workflow
item = workflow.next_incomplete_item
if item is None:
msg = (
u"No items are available in the student training workflow associated with "
u"submission UUID {}"
).format(submission_uuid)
raise StudentTrainingRequestError(msg)
# Check the student's scores against the staff's scores.
corrections = item.check(options_selected)
# Mark the item as complete if the student's selection
# matches the instructor's selection
if update_workflow and len(corrections) == 0:
item.mark_complete()
return corrections
except StudentTrainingWorkflow.DoesNotExist:
msg = u"Could not find student training workflow for submission UUID {}".format(submission_uuid)
raise StudentTrainingRequestError(msg)
except DatabaseError:
msg = (
u"An error occurred while comparing the student's assessment "
u"to the training example. The submission UUID for the student is {}"
).format(submission_uuid)
logger.exception(msg)
raise StudentTrainingInternalError(msg)
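# Illustrative usage sketch for the API above (not part of the module): it walks
# the full training flow, assuming `submission_uuid`, `rubric`, and `examples`
# are built as in the docstrings. Passing the staff-selected options back to
# `assess_training_example` simply marks every step correct.
#
# >>> errors = validate_training_examples(rubric, examples)
# >>> if not errors:
# >>>     create_training_workflow(submission_uuid, rubric, examples)
# >>> while not submitter_is_finished(submission_uuid, dict()):
# >>>     example = get_training_example(submission_uuid)
# >>>     assess_training_example(submission_uuid, example['options_selected'])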
......@@ -6,3 +6,4 @@ Export errors from all modules defined in this package.
from .peer import *
from .self import *
from .student_training import *
"""
Errors for training assessment type.
"""
class StudentTrainingError(Exception):
"""
Error occurred in a training API call.
"""
pass
class StudentTrainingRequestError(StudentTrainingError):
"""
There was a problem with a request made to the training API.
"""
pass
class StudentTrainingInternalError(StudentTrainingError):
"""
An internal error occurred while processing a request to the training API.
"""
pass
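# Illustrative sketch of how a caller might distinguish these error types
# (assumes `student_training` is imported from openassessment.assessment.api;
# the handling shown is hypothetical):
#
# >>> from openassessment.assessment.api import student_training
# >>> try:
# >>>     student_training.create_training_workflow(submission_uuid, rubric, examples)
# >>> except StudentTrainingRequestError:
# >>>     pass  # bad input: surface a validation message to the caller
# >>> except StudentTrainingInternalError:
# >>>     pass  # unexpected failure: already logged by the API; show a generic error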
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'AssessmentPart.feedback'
db.alter_column('assessment_assessmentpart', 'feedback', self.gf('django.db.models.fields.TextField')())
def backwards(self, orm):
# Changing field 'AssessmentPart.feedback'
db.alter_column('assessment_assessmentpart', 'feedback', self.gf('django.db.models.fields.TextField')(max_length=10000))
models = {
'assessment.assessment': {
'Meta': {'ordering': "['-scored_at', '-id']", 'object_name': 'Assessment'},
'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"}),
'score_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'scored_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'scorer_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'assessment.assessmentfeedback': {
'Meta': {'object_name': 'AssessmentFeedback'},
'assessments': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.Assessment']"}),
'feedback_text': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.AssessmentFeedbackOption']"}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'assessment.assessmentfeedbackoption': {
'Meta': {'object_name': 'AssessmentFeedbackOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'assessment.assessmentpart': {
'Meta': {'object_name': 'AssessmentPart'},
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parts'", 'to': "orm['assessment.Assessment']"}),
'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['assessment.CriterionOption']"})
},
'assessment.criterion': {
'Meta': {'ordering': "['rubric', 'order_num']", 'object_name': 'Criterion'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'prompt': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'criteria'", 'to': "orm['assessment.Rubric']"})
},
'assessment.criterionoption': {
'Meta': {'ordering': "['criterion', 'order_num']", 'object_name': 'CriterionOption'},
'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['assessment.Criterion']"}),
'explanation': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'points': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'assessment.peerworkflow': {
'Meta': {'ordering': "['created_at', 'id']", 'object_name': 'PeerWorkflow'},
'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'grading_completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'assessment.peerworkflowitem': {
'Meta': {'ordering': "['started_at', 'id']", 'object_name': 'PeerWorkflowItem'},
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Assessment']", 'null': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded_by'", 'to': "orm['assessment.PeerWorkflow']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scored': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scorer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded'", 'to': "orm['assessment.PeerWorkflow']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'assessment.rubric': {
'Meta': {'object_name': 'Rubric'},
'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['assessment']
\ No newline at end of file
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'StudentTrainingWorkflowItem'
db.create_table('assessment_studenttrainingworkflowitem', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('workflow', self.gf('django.db.models.fields.related.ForeignKey')(related_name='items', to=orm['assessment.StudentTrainingWorkflow'])),
('order_num', self.gf('django.db.models.fields.PositiveIntegerField')()),
('started_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('completed_at', self.gf('django.db.models.fields.DateTimeField')(default=None, null=True)),
('training_example', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['assessment.TrainingExample'])),
))
db.send_create_signal('assessment', ['StudentTrainingWorkflowItem'])
# Adding model 'StudentTrainingWorkflow'
db.create_table('assessment_studenttrainingworkflow', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('submission_uuid', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)),
('student_id', self.gf('django.db.models.fields.CharField')(max_length=40, db_index=True)),
('item_id', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)),
('course_id', self.gf('django.db.models.fields.CharField')(max_length=40, db_index=True)),
))
db.send_create_signal('assessment', ['StudentTrainingWorkflow'])
# Adding model 'TrainingExample'
db.create_table('assessment_trainingexample', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('raw_answer', self.gf('django.db.models.fields.TextField')(blank=True)),
('rubric', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['assessment.Rubric'])),
('content_hash', self.gf('django.db.models.fields.CharField')(unique=True, max_length=40, db_index=True)),
))
db.send_create_signal('assessment', ['TrainingExample'])
# Adding M2M table for field options_selected on 'TrainingExample'
db.create_table('assessment_trainingexample_options_selected', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('trainingexample', models.ForeignKey(orm['assessment.trainingexample'], null=False)),
('criterionoption', models.ForeignKey(orm['assessment.criterionoption'], null=False))
))
db.create_unique('assessment_trainingexample_options_selected', ['trainingexample_id', 'criterionoption_id'])
def backwards(self, orm):
# Deleting model 'StudentTrainingWorkflowItem'
db.delete_table('assessment_studenttrainingworkflowitem')
# Deleting model 'StudentTrainingWorkflow'
db.delete_table('assessment_studenttrainingworkflow')
# Deleting model 'TrainingExample'
db.delete_table('assessment_trainingexample')
# Removing M2M table for field options_selected on 'TrainingExample'
db.delete_table('assessment_trainingexample_options_selected')
models = {
'assessment.assessment': {
'Meta': {'ordering': "['-scored_at', '-id']", 'object_name': 'Assessment'},
'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"}),
'score_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'scored_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'scorer_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'assessment.assessmentfeedback': {
'Meta': {'object_name': 'AssessmentFeedback'},
'assessments': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.Assessment']"}),
'feedback_text': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.AssessmentFeedbackOption']"}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'assessment.assessmentfeedbackoption': {
'Meta': {'object_name': 'AssessmentFeedbackOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'assessment.assessmentpart': {
'Meta': {'object_name': 'AssessmentPart'},
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parts'", 'to': "orm['assessment.Assessment']"}),
'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['assessment.CriterionOption']"})
},
'assessment.criterion': {
'Meta': {'ordering': "['rubric', 'order_num']", 'object_name': 'Criterion'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'prompt': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'criteria'", 'to': "orm['assessment.Rubric']"})
},
'assessment.criterionoption': {
'Meta': {'ordering': "['criterion', 'order_num']", 'object_name': 'CriterionOption'},
'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['assessment.Criterion']"}),
'explanation': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'points': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'assessment.peerworkflow': {
'Meta': {'ordering': "['created_at', 'id']", 'object_name': 'PeerWorkflow'},
'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'grading_completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'assessment.peerworkflowitem': {
'Meta': {'ordering': "['started_at', 'id']", 'object_name': 'PeerWorkflowItem'},
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Assessment']", 'null': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded_by'", 'to': "orm['assessment.PeerWorkflow']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scored': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scorer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded'", 'to': "orm['assessment.PeerWorkflow']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'assessment.rubric': {
'Meta': {'object_name': 'Rubric'},
'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'assessment.studenttrainingworkflow': {
'Meta': {'object_name': 'StudentTrainingWorkflow'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'assessment.studenttrainingworkflowitem': {
'Meta': {'ordering': "['workflow', 'order_num']", 'object_name': 'StudentTrainingWorkflowItem'},
'completed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'training_example': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.TrainingExample']"}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['assessment.StudentTrainingWorkflow']"})
},
'assessment.trainingexample': {
'Meta': {'object_name': 'TrainingExample'},
'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'options_selected': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['assessment.CriterionOption']", 'symmetrical': 'False'}),
'raw_answer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"})
}
}
complete_apps = ['assessment']
\ No newline at end of file
......@@ -5,3 +5,5 @@ Export models from each Python module in this package.
from .base import *
from .peer import *
from .training import *
from .student_training import *
"""
Django models specific to the student training assessment type.
"""
from django.db import models, transaction
from django.utils import timezone
from submissions import api as sub_api
from .training import TrainingExample
class StudentTrainingWorkflow(models.Model):
"""
Tracks a student's progress through the student training assessment step.
"""
# The submission UUID of the student being trained
submission_uuid = models.CharField(max_length=128, db_index=True)
# Information about the student and problem
# This duplicates information associated with the submission itself,
# but we include it here to make it easier to query workflows.
# Since submissions are immutable, we can do this without
# jeopardizing data integrity.
student_id = models.CharField(max_length=40, db_index=True)
item_id = models.CharField(max_length=128, db_index=True)
course_id = models.CharField(max_length=40, db_index=True)
class Meta:
app_label = "assessment"
@classmethod
@transaction.commit_on_success
def create_workflow(cls, submission_uuid, examples):
"""
Create a student training workflow.
Args:
submission_uuid (str): The UUID of the submission from the student being trained.
examples (list of TrainingExamples): The training examples to show the student.
Returns:
StudentTrainingWorkflow
Raises:
SubmissionError: There was an error retrieving the submission.
"""
# Retrieve the student item info
submission = sub_api.get_submission_and_student(submission_uuid)
student_item = submission['student_item']
# Create the workflow
workflow = cls.objects.create(
submission_uuid=submission_uuid,
student_id=student_item['student_id'],
item_id=student_item['item_id'],
course_id=student_item['course_id']
)
# Create workflow items for each example
for order_num, example in enumerate(examples):
StudentTrainingWorkflowItem.objects.create(
workflow=workflow,
order_num=order_num,
training_example=example,
)
return workflow
@property
def status(self):
"""
The student's status within the workflow (num steps completed / num steps available).
Returns:
tuple of `(num_completed, num_total)`, both integers
"""
items = self.items.all() # pylint:disable=E1101
num_complete = sum([1 if item.is_complete else 0 for item in items])
num_total = len(items)
return num_complete, num_total
@property
def is_complete(self):
"""
Check whether all items in the workflow are complete.
Returns:
bool
"""
num_incomplete = self.items.filter(completed_at__isnull=True).count() # pylint:disable=E1101
return num_incomplete == 0
@property
def next_incomplete_item(self):
"""
Find the next incomplete item in the workflow.
Returns:
StudentTrainingWorkflowItem or None
"""
next_incomplete = self.items.filter( # pylint:disable=E1101
completed_at__isnull=True
).order_by('order_num')[:1]
if len(next_incomplete) > 0:
return next_incomplete[0]
else:
return None
class StudentTrainingWorkflowItem(models.Model):
"""
A particular step in the training workflow. At each step,
a student must try assessing an example submission.
If the student gives the same scores as the instructor,
then the student proceeds to the next example;
if there are no examples left, the student has
successfully completed training.
"""
workflow = models.ForeignKey(StudentTrainingWorkflow, related_name="items")
order_num = models.PositiveIntegerField()
started_at = models.DateTimeField(auto_now_add=True)
completed_at = models.DateTimeField(default=None, null=True)
training_example = models.ForeignKey(TrainingExample)
class Meta:
app_label = "assessment"
ordering = ["workflow", "order_num"]
@property
def is_complete(self):
"""
Check whether the student has completed this workflow item.
Returns:
bool
"""
return self.completed_at is not None
def mark_complete(self):
"""
Mark the item as complete. Once an item is marked complete,
it should stay complete!
Returns:
None
"""
self.completed_at = timezone.now()
self.save()
def check(self, options_selected):
"""
Compare the options that the student selected to
the options set by the instructor in the training example.
Args:
options_selected (dict): Mapping of criterion names to option names.
Returns:
dict
Example usage:
>>> item.check({'vocabulary': 'good', 'grammar': 'poor'})
{'vocabulary': 'excellent'}
>>> item.check({'vocabulary': 'excellent', 'grammar': 'poor'})
{}
"""
staff_selected = self.training_example.options_selected_dict
corrections = dict()
for criterion_name, option_name in staff_selected.iteritems():
# Rely on short-circuit evaluation so a missing criterion does not raise a KeyError
missing_option = criterion_name not in options_selected
if missing_option or options_selected[criterion_name] != option_name:
corrections[criterion_name] = option_name
return corrections
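# Illustrative sketch of stepping through a workflow at the model level
# (assumes `examples` is a list of TrainingExample instances and
# `submission_uuid` refers to an existing submission; criterion and option
# names are placeholders):
#
# >>> workflow = StudentTrainingWorkflow.create_workflow(submission_uuid, examples)
# >>> item = workflow.next_incomplete_item
# >>> corrections = item.check({'vocabulary': 'good', 'grammar': 'poor'})
# >>> if not corrections:
# >>>     item.mark_complete()
# >>> workflow.status   # e.g. (1, 2) after one of two items is complete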
"""
Django models for training (both student and AI).
"""
import json
from hashlib import sha1
from django.db import models
from .base import Rubric, CriterionOption
class TrainingExample(models.Model):
"""
An example assessment used to train students (before peer assessment) or AI.
"""
# The answer (JSON-serialized)
raw_answer = models.TextField(blank=True)
rubric = models.ForeignKey(Rubric)
# Many-to-many relation: the example references existing criterion options without modifying them
options_selected = models.ManyToManyField(CriterionOption)
# SHA1 hash of the answer, selected options, and rubric; used to de-duplicate examples
content_hash = models.CharField(max_length=40, unique=True, db_index=True)
class Meta:
app_label = "assessment"
@classmethod
def create_example(cls, answer, options_ids, rubric):
"""
Create a new training example.
Args:
answer (JSON-serializable): The answer associated with the training example.
options_ids (iterable of int): Selected option IDs for the training example.
rubric (Rubric): The rubric associated with the training example.
Returns:
TrainingExample
"""
content_hash = cls.calculate_hash(answer, options_ids, rubric)
example = TrainingExample.objects.create(
content_hash=content_hash,
raw_answer=json.dumps(answer),
rubric=rubric
)
for option in CriterionOption.objects.filter(pk__in=list(options_ids)):
example.options_selected.add(option)
return example
@property
def answer(self):
"""
Return the JSON-decoded answer.
Returns:
JSON-serializable
"""
return json.loads(self.raw_answer)
@property
def options_selected_dict(self):
"""
Return a dictionary of the rubric options selected.
Returns:
dict: maps criterion names to selected option names
"""
return {
option.criterion.name: option.name
for option in self.options_selected.all() # pylint:disable=E1101
}
@staticmethod
def calculate_hash(answer, option_ids, rubric):
"""
Calculate a hash for the contents of the training example.
Args:
answer (JSON-serializable): The answer associated with the training example.
option_ids (iterable of int): Selected option IDs for the training example.
rubric (Rubric): The rubric associated with the training example.
Returns:
str
"""
contents = json.dumps({
'answer': answer,
'option_ids': list(option_ids),
'rubric': rubric.id
})
return sha1(contents).hexdigest()
......@@ -6,3 +6,4 @@ Export serializers from each module in this package.
from .base import *
from .peer import *
from .training import *
"""
Serializers for the training assessment type.
"""
import json
from django.db import transaction, IntegrityError
from openassessment.assessment.models import TrainingExample
from .base import rubric_from_dict, RubricSerializer
class InvalidTrainingExample(Exception):
"""
The training example could not be deserialized.
"""
pass
def validate_training_example_format(example):
"""
Check whether the serialized training example dict
has the correct structure.
Args:
example (dict): The serialized training example.
Returns:
tuple of (is_valid, errors), where `is_valid` is a bool
and `errors` is a list of error messages.
"""
errors = []
if not isinstance(example, dict):
errors.append(u"Training example must be a dictionary")
# Return early so the membership checks below do not fail on non-dict values
return False, errors
if 'answer' not in example:
errors.append(u'Training example must contain an "answer" field.')
if 'options_selected' not in example:
errors.append(u'Training example must contain an "options_selected" field.')
is_valid = (len(errors) == 0)
return is_valid, errors
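# Example usage (illustrative):
#
# >>> validate_training_example_format({'answer': u'Lorem', 'options_selected': {}})
# (True, [])
# >>> validate_training_example_format({'answer': u'Lorem'})
# (False, [u'Training example must contain an "options_selected" field.'])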
def serialize_training_example(example):
"""
Serialize a training example to a dictionary.
Args:
example (TrainingExample): The training example to serialize.
Returns:
dict
"""
return {
'answer': example.answer,
'options_selected': example.options_selected_dict,
'rubric': RubricSerializer.serialized_from_cache(example.rubric),
}
@transaction.commit_on_success
def deserialize_training_examples(examples, rubric_dict):
"""
Deserialize training examples to Django models.
Args:
examples (list of dict): The serialized training examples.
rubric_dict (dict): The serialized rubric.
Returns:
list of TrainingExamples
Raises:
InvalidRubric
InvalidTrainingExample
Example usage:
>>> options = [
>>> {
>>> "order_num": 0,
>>> "name": "poor",
>>> "explanation": "Poor job!",
>>> "points": 0,
>>> },
>>> {
>>> "order_num": 1,
>>> "name": "good",
>>> "explanation": "Good job!",
>>> "points": 1,
>>> },
>>> {
>>> "order_num": 2,
>>> "name": "excellent",
>>> "explanation": "Excellent job!",
>>> "points": 2,
>>> },
>>> ]
>>>
>>> rubric = {
>>> "prompt": "Write an essay!",
>>> "criteria": [
>>> {
>>> "order_num": 0,
>>> "name": "vocabulary",
>>> "prompt": "How varied is the vocabulary?",
>>> "options": options
>>> },
>>> {
>>> "order_num": 1,
>>> "name": "grammar",
>>> "prompt": "How correct is the grammar?",
>>> "options": options
>>> }
>>> ]
>>> }
>>>
>>> examples = [
>>> {
>>> 'answer': u'Lorem ipsum',
>>> 'options_selected': {
>>> 'vocabulary': 'good',
>>> 'grammar': 'excellent'
>>> }
>>> },
>>> {
>>> 'answer': u'Doler',
>>> 'options_selected': {
>>> 'vocabulary': 'good',
>>> 'grammar': 'poor'
>>> }
>>> }
>>> ]
>>>
>>> examples = deserialize_training_examples(examples, rubric)
"""
# Parse the rubric
# This will raise an exception if the serialized rubric is invalid.
rubric = rubric_from_dict(rubric_dict)
# Parse each example
created_examples = []
for example_dict in examples:
is_valid, errors = validate_training_example_format(example_dict)
if not is_valid:
raise InvalidTrainingExample("; ".join(errors))
options_ids = rubric.options_ids(example_dict['options_selected'])
# Calculate the content hash to look up the example
content_hash = TrainingExample.calculate_hash(example_dict['answer'], options_ids, rubric)
try:
example = TrainingExample.objects.get(content_hash=content_hash)
except TrainingExample.DoesNotExist:
try:
example = TrainingExample.create_example(
example_dict['answer'], options_ids, rubric
)
except IntegrityError:
example = TrainingExample.objects.get(content_hash=content_hash)
created_examples.append(example)
return created_examples
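# Illustrative note: because examples are looked up by content hash, calling
# deserialize_training_examples() twice with the same serialized examples and
# rubric should return the same database rows rather than duplicates.
#
# >>> first = deserialize_training_examples(examples, rubric)
# >>> second = deserialize_training_examples(examples, rubric)
# >>> first == second
# True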
{
"valid": {
"rubric": {
"prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽",
"criteria": [
{
"order_num": 0,
"name": "vøȼȺƀᵾłȺɍɏ",
"prompt": "Ħøw vȺɍɨɇđ ɨs ŧħɇ vøȼȺƀᵾłȺɍɏ?",
"options": [
{
"order_num": 0,
"name": "𝒑𝒐𝒐𝒓",
"explanation": "𝕻𝖔𝖔𝖗 𝖏𝖔𝖇!",
"points": 0
},
{
"order_num": 1,
"name": "𝓰𝓸𝓸𝓭",
"explanation": "ﻭѻѻɗ ﻝѻ๒!",
"points": 1
}
]
},
{
"order_num": 1,
"name": "gɼค๓๓คɼ",
"prompt": "𝕳𝖔𝖜 𝖈𝖔𝖗𝖗𝖊𝖈𝖙 𝖎𝖘 𝖙𝖍𝖊 𝖌𝖗𝖆𝖒𝖒𝖆𝖗?",
"options": [
{
"order_num": 0,
"name": "𝒑𝒐𝒐𝒓",
"explanation": "𝕻𝖔𝖔𝖗 𝖏𝖔𝖇!",
"point": 0
},
{
"order_num": 1,
"name": "𝓰𝓸𝓸𝓭",
"explanation": "ﻭѻѻɗ ﻝѻ๒!",
"points": 1
}
]
}
]
},
"examples": [
{
"answer": "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
"options_selected": {
"vøȼȺƀᵾłȺɍɏ": "𝓰𝓸𝓸𝓭",
"gɼค๓๓คɼ": "𝒑𝒐𝒐𝒓"
}
},
{
"answer": "Sed lobortis, dolor quis rhoncus malesuada, nisi lorem hendrerit lorem, id dapibus risus dolor eu augue.",
"options_selected": {
"vøȼȺƀᵾłȺɍɏ": "𝒑𝒐𝒐𝒓",
"gɼค๓๓คɼ": "𝓰𝓸𝓸𝓭"
}
},
{
"answer": "Mauris vehicula euismod porttitor. Nunc fringilla sem nec orci tincidunt consectetur",
"options_selected": {
"vøȼȺƀᵾłȺɍɏ": "𝓰𝓸𝓸𝓭",
"gɼค๓๓คɼ": "𝓰𝓸𝓸𝓭"
}
},
{
"answer": "Phasellus ornare, risus sed mollis venenatis, nibh neque placerat orci, nec pellentesque risus lorem pulvinar justo.",
"options_selected": {
"vøȼȺƀᵾłȺɍɏ": "𝒑𝒐𝒐𝒓",
"gɼค๓๓คɼ": "𝒑𝒐𝒐𝒓"
}
}
],
"errors": []
},
"missing_options": {
"rubric": {
"prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽",
"criteria": [
{
"order_num": 0,
"name": "vøȼȺƀᵾłȺɍɏ",
"prompt": "Ħøw vȺɍɨɇđ ɨs ŧħɇ vøȼȺƀᵾłȺɍɏ?",
"options": [
{
"order_num": 0,
"name": "𝒑𝒐𝒐𝒓",
"explanation": "𝕻𝖔𝖔𝖗 𝖏𝖔𝖇!",
"points": 0
},
{
"order_num": 1,
"name": "𝓰𝓸𝓸𝓭",
"explanation": "ﻭѻѻɗ ﻝѻ๒!",
"points": 1
}
]
},
{
"order_num": 1,
"name": "gɼค๓๓คɼ",
"prompt": "𝕳𝖔𝖜 𝖈𝖔𝖗𝖗𝖊𝖈𝖙 𝖎𝖘 𝖙𝖍𝖊 𝖌𝖗𝖆𝖒𝖒𝖆𝖗?",
"options": [
{
"order_num": 0,
"name": "𝒑𝒐𝒐𝒓",
"explanation": "𝕻𝖔𝖔𝖗 𝖏𝖔𝖇!",
"points": 0
},
{
"order_num": 1,
"name": "𝓰𝓸𝓸𝓭",
"explanation": "ﻭѻѻɗ ﻝѻ๒!",
"points": 1
}
]
}
]
},
"examples": [
{
"answer": "Nam elementum posuere nibh. Aliquam rhoncus diam tellus, quis luctus justo rhoncus at",
"options_selected": {
"vøȼȺƀᵾłȺɍɏ": "𝓰𝓸𝓸𝓭",
"gɼค๓๓คɼ": "𝒑𝒐𝒐𝒓"
}
},
{
"answer": "Curabitur diam elit, dictum in nunc sit amet, rhoncus suscipit felis.",
"options_selected": {
"vøȼȺƀᵾłȺɍɏ": "𝒑𝒐𝒐𝒓"
}
},
{
"answer": "Ut nec dui ac erat molestie cursus vitae in risus",
"options_selected": {}
}
],
"errors": [
"Example 2 is missing an option for \"gɼค๓๓คɼ\"",
"Example 3 is missing an option for \"vøȼȺƀᵾłȺɍɏ\"",
"Example 3 is missing an option for \"gɼค๓๓คɼ\""
]
},
"extra_options": {
"rubric": {
"prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽",
"criteria": [
{
"order_num": 0,
"name": "vøȼȺƀᵾłȺɍɏ",
"prompt": "Ħøw vȺɍɨɇđ ɨs ŧħɇ vøȼȺƀᵾłȺɍɏ?",
"options": [
{
"order_num": 0,
"name": "𝒑𝒐𝒐𝒓",
"explanation": "𝕻𝖔𝖔𝖗 𝖏𝖔𝖇!",
"points": 0
},
{
"order_num": 1,
"name": "𝓰𝓸𝓸𝓭",
"explanation": "ﻭѻѻɗ ﻝѻ๒!",
"points": 1
}
]
},
{
"order_num": 1,
"name": "gɼค๓๓คɼ",
"prompt": "𝕳𝖔𝖜 𝖈𝖔𝖗𝖗𝖊𝖈𝖙 𝖎𝖘 𝖙𝖍𝖊 𝖌𝖗𝖆𝖒𝖒𝖆𝖗?",
"options": [
{
"order_num": 0,
"name": "𝒑𝒐𝒐𝒓",
"explanation": "𝕻𝖔𝖔𝖗 𝖏𝖔𝖇!",
"points": 0
},
{
"order_num": 1,
"name": "𝓰𝓸𝓸𝓭",
"explanation": "ﻭѻѻɗ ﻝѻ๒!",
"points": 1
}
]
}
]
},
"examples": [
{
"answer": "In diam ante, ultricies in molestie quis, feugiat eget orci. Integer gravida, arcu lacinia convallis congue, ipsum eros interdum turpis, ut mollis libero purus vitae ante.",
"options_selected": {
"vøȼȺƀᵾłȺɍɏ": "𝓰𝓸𝓸𝓭",
"gɼค๓๓คɼ": "𝒑𝒐𝒐𝒓"
}
},
{
"answer": "Duis at varius lorem, nec accumsan justo.",
"options_selected": {
"vøȼȺƀᵾłȺɍɏ": "𝒑𝒐𝒐𝒓",
"gɼค๓๓คɼ": "𝒑𝒐𝒐𝒓",
"ëẍẗṛä": "ëẍẗṛä"
}
},
{
"answer": "Aenean sodales sapien eget lorem egestas, sed pulvinar elit fermentum",
"options_selected": {
"vøȼȺƀᵾłȺɍɏ": "𝒑𝒐𝒐𝒓",
"gɼค๓๓คɼ": "𝒑𝒐𝒐𝒓",
"ëẍẗṛä": "ëẍẗṛä",
"ëẍẗṛä 𝕖𝕩𝕥𝕣𝕒": "ëẍẗṛä ëẍẗṛä"
}
}
],
"errors": [
"Example 2 has an extra option for \"ëẍẗṛä\"",
"Example 3 has an extra option for \"ëẍẗṛä\"",
"Example 3 has an extra option for \"ëẍẗṛä 𝕖𝕩𝕥𝕣𝕒\""
]
},
"missing_and_extra_options": {
"rubric": {
"prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽",
"criteria": [
{
"order_num": 0,
"name": "vøȼȺƀᵾłȺɍɏ",
"prompt": "Ħøw vȺɍɨɇđ ɨs ŧħɇ vøȼȺƀᵾłȺɍɏ?",
"options": [
{
"order_num": 0,
"name": "𝒑𝒐𝒐𝒓",
"explanation": "𝕻𝖔𝖔𝖗 𝖏𝖔𝖇!",
"points": 0
},
{
"order_num": 1,
"name": "𝓰𝓸𝓸𝓭",
"explanation": "ﻭѻѻɗ ﻝѻ๒!",
"points": 1
}
]
},
{
"order_num": 1,
"name": "gɼค๓๓คɼ",
"prompt": "𝕳𝖔𝖜 𝖈𝖔𝖗𝖗𝖊𝖈𝖙 𝖎𝖘 𝖙𝖍𝖊 𝖌𝖗𝖆𝖒𝖒𝖆𝖗?",
"options": [
{
"order_num": 0,
"name": "𝒑𝒐𝒐𝒓",
"explanation": "𝕻𝖔𝖔𝖗 𝖏𝖔𝖇!",
"points": 0
},
{
"order_num": 1,
"name": "𝓰𝓸𝓸𝓭",
"explanation": "ﻭѻѻɗ ﻝѻ๒!",
"points": 1
}
]
}
]
},
"examples": [
{
"answer": " Proin eu lectus ut risus semper porttitor.",
"options_selected": {
"vøȼȺƀᵾłȺɍɏ": "𝓰𝓸𝓸𝓭",
"gɼค๓๓คɼ": "𝒑𝒐𝒐𝒓"
}
},
{
"answer": "Donec sem nunc, volutpat nec tristique semper, sodales eu enim.",
"options_selected": {
"vøȼȺƀᵾłȺɍɏ": "𝒑𝒐𝒐𝒓",
"ëẍẗṛä": "ëẍẗṛä"
}
}
],
"errors": [
"Example 2 is missing an option for \"gɼค๓๓คɼ\"",
"Example 2 has an extra option for \"ëẍẗṛä\""
]
},
"invalid_option_name": {
"rubric": {
"prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽",
"criteria": [
{
"order_num": 0,
"name": "vøȼȺƀᵾłȺɍɏ",
"prompt": "Ħøw vȺɍɨɇđ ɨs ŧħɇ vøȼȺƀᵾłȺɍɏ?",
"options": [
{
"order_num": 0,
"name": "𝒑𝒐𝒐𝒓",
"explanation": "𝕻𝖔𝖔𝖗 𝖏𝖔𝖇!",
"points": 0
},
{
"order_num": 1,
"name": "𝓰𝓸𝓸𝓭",
"explanation": "ﻭѻѻɗ ﻝѻ๒!",
"points": 1
}
]
},
{
"order_num": 1,
"name": "gɼค๓๓คɼ",
"prompt": "𝕳𝖔𝖜 𝖈𝖔𝖗𝖗𝖊𝖈𝖙 𝖎𝖘 𝖙𝖍𝖊 𝖌𝖗𝖆𝖒𝖒𝖆𝖗?",
"options": [
{
"order_num": 0,
"name": "𝒑𝒐𝒐𝒓",
"explanation": "𝕻𝖔𝖔𝖗 𝖏𝖔𝖇!",
"points": 0
},
{
"order_num": 1,
"name": "𝓰𝓸𝓸𝓭",
"explanation": "ﻭѻѻɗ ﻝѻ๒!",
"points": 1
}
]
}
]
},
"examples": [
{
"answer": "Cras blandit justo eget nunc viverra, ut dignissim nibh lobortis",
"options_selected": {
"vøȼȺƀᵾłȺɍɏ": "ïṅṿäḷïḋ",
"gɼค๓๓คɼ": "𝒑𝒐𝒐𝒓"
}
},
{
"answer": " Morbi dignissim enim et interdum congue.",
"options_selected": {
"vøȼȺƀᵾłȺɍɏ": "𝒑𝒐𝒐𝒓",
"gɼค๓๓คɼ": "ïṅṿäḷïḋ"
}
},
{
"answer": "Nunc consectetur nulla id fermentum vestibulum",
"options_selected": {
"vøȼȺƀᵾłȺɍɏ": "ïṅṿäḷïḋ",
"gɼค๓๓คɼ": "שєгץ เภשคɭเ๔"
}
},
{
"answer": "Nulla vitae ante nec sapien consequat vehicula eu scelerisque est.",
"options_selected": {
"vøȼȺƀᵾłȺɍɏ": "ïṅṿäḷïḋ"
}
}
],
"errors": [
"Example 1 has an invalid option for \"vøȼȺƀᵾłȺɍɏ\": \"ïṅṿäḷïḋ\"",
"Example 2 has an invalid option for \"gɼค๓๓คɼ\": \"ïṅṿäḷïḋ\"",
"Example 3 has an invalid option for \"vøȼȺƀᵾłȺɍɏ\": \"ïṅṿäḷïḋ\"",
"Example 3 has an invalid option for \"gɼค๓๓คɼ\": \"שєгץ เภשคɭเ๔\"",
"Example 4 is missing an option for \"gɼค๓๓คɼ\"",
"Example 4 has an invalid option for \"vøȼȺƀᵾłȺɍɏ\": \"ïṅṿäḷïḋ\""
]
},
"rubric_missing_options_list": {
"rubric": {
"prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽",
"criteria": [
{
"order_num": 0,
"name": "vøȼȺƀᵾłȺɍɏ",
"prompt": "Ħøw vȺɍɨɇđ ɨs ŧħɇ vøȼȺƀᵾłȺɍɏ?"
}
]
},
"examples": [
{
"answer": "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
"options_selected": {
"vøȼȺƀᵾłȺɍɏ": "𝓰𝓸𝓸𝓭",
"gɼค๓๓คɼ": "𝒑𝒐𝒐𝒓"
}
}
],
"errors": ["Could not parse serialized rubric"]
},
"rubric_missing_criteria_list": {
"rubric": {
"prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽"
},
"examples": [
{
"answer": "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
"options_selected": {
"vøȼȺƀᵾłȺɍɏ": "𝓰𝓸𝓸𝓭",
"gɼค๓๓คɼ": "𝒑𝒐𝒐𝒓"
}
}
],
"errors": ["Could not parse serialized rubric"]
},
"example_missing_keys": {
"rubric": {
"prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽",
"criteria": [
{
"order_num": 0,
"name": "vøȼȺƀᵾłȺɍɏ",
"prompt": "Ħøw vȺɍɨɇđ ɨs ŧħɇ vøȼȺƀᵾłȺɍɏ?",
"options": [
{
"order_num": 0,
"name": "𝒑𝒐𝒐𝒓",
"explanation": "𝕻𝖔𝖔𝖗 𝖏𝖔𝖇!",
"points": 0
},
{
"order_num": 1,
"name": "𝓰𝓸𝓸𝓭",
"explanation": "ﻭѻѻɗ ﻝѻ๒!",
"points": 1
}
]
},
{
"order_num": 1,
"name": "gɼค๓๓คɼ",
"prompt": "𝕳𝖔𝖜 𝖈𝖔𝖗𝖗𝖊𝖈𝖙 𝖎𝖘 𝖙𝖍𝖊 𝖌𝖗𝖆𝖒𝖒𝖆𝖗?",
"options": [
{
"order_num": 0,
"name": "𝒑𝒐𝒐𝒓",
"explanation": "𝕻𝖔𝖔𝖗 𝖏𝖔𝖇!",
"point": 0
},
{
"order_num": 1,
"name": "𝓰𝓸𝓸𝓭",
"explanation": "ﻭѻѻɗ ﻝѻ๒!",
"points": 1
}
]
}
]
},
"examples": [
{
"options_selected": {
"vøȼȺƀᵾłȺɍɏ": "𝓰𝓸𝓸𝓭",
"gɼค๓๓คɼ": "𝒑𝒐𝒐𝒓"
}
},
{
"answer": "Sed lobortis, dolor quis rhoncus malesuada, nisi lorem hendrerit lorem, id dapibus risus dolor eu augue."
},
{
}
],
"errors": [
"Example 1 has a validation error: Training example must contain an \"answer\" field.",
"Example 2 has a validation error: Training example must contain an \"options_selected\" field.",
"Example 3 has a validation error: Training example must contain an \"answer\" field.",
"Example 3 has a validation error: Training example must contain an \"options_selected\" field."
]
}
}
......@@ -138,7 +138,7 @@ class TestSelfApi(CacheResetTest):
scored_at=datetime.datetime(2014, 4, 1)
)
def test_create_assessment_missing_critieron(self):
def test_create_assessment_missing_criterion(self):
# Create a submission
submission = create_submission(self.STUDENT_ITEM, "Test answer")
......
# -*- coding: utf-8 -*-
"""
Tests for training assessment type.
"""
import copy
from django.db import DatabaseError
import ddt
from mock import patch
from openassessment.test_utils import CacheResetTest
from submissions import api as sub_api
from openassessment.assessment.api import student_training as training_api
from openassessment.assessment.errors import StudentTrainingRequestError, StudentTrainingInternalError
from openassessment.assessment.models import StudentTrainingWorkflow
@ddt.ddt
class StudentTrainingAssessmentTest(CacheResetTest):
"""
Tests for the training assessment type.
"""
longMessage = True
STUDENT_ITEM = {
'student_id': u'𝓽𝓮𝓼𝓽 𝓼𝓽𝓾𝓭𝓮𝓷𝓽',
'item_id': u'𝖙𝖊𝖘𝖙 𝖎𝖙𝖊𝖒',
'course_id': u'ՇєรՇ ς๏ยгรє',
'item_type': u'openassessment'
}
ANSWER = u'ẗëṡẗ äṅṡẅëṛ'
RUBRIC_OPTIONS = [
{
"order_num": 0,
"name": u"𝒑𝒐𝒐𝒓",
"explanation": u"𝕻𝖔𝖔𝖗 𝖏𝖔𝖇!",
"points": 0,
},
{
"order_num": 1,
"name": u"𝓰𝓸𝓸𝓭",
"explanation": u"ﻭѻѻɗ ﻝѻ๒!",
"points": 1,
},
{
"order_num": 2,
"name": "єχ¢єℓℓєηт",
"explanation": "乇メc乇レレ乇刀イ フo乃!",
"points": 2,
},
]
RUBRIC = {
'prompt': u"МоъЎ-ↁіск; оѓ, ГЂэ ЩЂаlэ",
'criteria': [
{
"order_num": 0,
"name": u"vøȼȺƀᵾłȺɍɏ",
"prompt": u"Ħøw vȺɍɨɇđ ɨs ŧħɇ vøȼȺƀᵾłȺɍɏ?",
"options": RUBRIC_OPTIONS
},
{
"order_num": 1,
"name": u"ﻭɼค๓๓คɼ",
"prompt": u"𝕳𝖔𝖜 𝖈𝖔𝖗𝖗𝖊𝖈𝖙 𝖎𝖘 𝖙𝖍𝖊 𝖌𝖗𝖆𝖒𝖒𝖆𝖗?",
"options": RUBRIC_OPTIONS
}
]
}
EXAMPLES = [
{
'answer': (
u"𝕿𝖍𝖊𝖗𝖊 𝖆𝖗𝖊 𝖈𝖊𝖗𝖙𝖆𝖎𝖓 𝖖𝖚𝖊𝖊𝖗 𝖙𝖎𝖒𝖊𝖘 𝖆𝖓𝖉 𝖔𝖈𝖈𝖆𝖘𝖎𝖔𝖓𝖘 𝖎𝖓 𝖙𝖍𝖎𝖘 𝖘𝖙𝖗𝖆𝖓𝖌𝖊 𝖒𝖎𝖝𝖊𝖉 𝖆𝖋𝖋𝖆𝖎𝖗 𝖜𝖊 𝖈𝖆𝖑𝖑 𝖑𝖎𝖋𝖊"
u" 𝖜𝖍𝖊𝖓 𝖆 𝖒𝖆𝖓 𝖙𝖆𝖐𝖊𝖘 𝖙𝖍𝖎𝖘 𝖜𝖍𝖔𝖑𝖊 𝖚𝖓𝖎𝖛𝖊𝖗𝖘𝖊 𝖋𝖔𝖗 𝖆 𝖛𝖆𝖘𝖙 𝖕𝖗𝖆𝖈𝖙𝖎𝖈𝖆𝖑 𝖏𝖔𝖐𝖊, 𝖙𝖍𝖔𝖚𝖌𝖍 𝖙𝖍𝖊 𝖜𝖎𝖙 𝖙𝖍𝖊𝖗𝖊𝖔𝖋"
u" 𝖍𝖊 𝖇𝖚𝖙 𝖉𝖎𝖒𝖑𝖞 𝖉𝖎𝖘𝖈𝖊𝖗𝖓𝖘, 𝖆𝖓𝖉 𝖒𝖔𝖗𝖊 𝖙𝖍𝖆𝖓 𝖘𝖚𝖘𝖕𝖊𝖈𝖙𝖘 𝖙𝖍𝖆𝖙 𝖙𝖍𝖊 𝖏𝖔𝖐𝖊 𝖎𝖘 𝖆𝖙 𝖓𝖔𝖇𝖔𝖉𝖞'𝖘 𝖊𝖝𝖕𝖊𝖓𝖘𝖊 𝖇𝖚𝖙 𝖍𝖎𝖘 𝖔𝖜𝖓."
),
'options_selected': {
u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
u"ﻭɼค๓๓คɼ": u"𝒑𝒐𝒐𝒓",
}
},
{
'answer': u"Tőṕ-héávӳ ẃáś thé śhíṕ áś á díńńéŕĺéśś śtúdéńt ẃíth áĺĺ Áŕíśtőtĺé íń híś héád.",
'options_selected': {
u"vøȼȺƀᵾłȺɍɏ": u"𝒑𝒐𝒐𝒓",
u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
}
},
]
def setUp(self):
"""
Create a submission.
"""
submission = sub_api.create_submission(self.STUDENT_ITEM, self.ANSWER)
self.submission_uuid = submission['uuid']
def test_training_workflow(self):
# Start a workflow
training_api.create_training_workflow(self.submission_uuid, self.RUBRIC, self.EXAMPLES)
# Initially, we should be on the first step
self._assert_workflow_status(self.submission_uuid, 0, 2)
# Get a training example
self._assert_get_example(self.submission_uuid, 0, self.EXAMPLES, self.RUBRIC)
# Assess the training example the same way the instructor did
corrections = training_api.assess_training_example(
self.submission_uuid,
self.EXAMPLES[0]['options_selected']
)
self.assertEqual(corrections, dict())
self._assert_workflow_status(self.submission_uuid, 1, 2)
# Get another training example to assess
self._assert_get_example(self.submission_uuid, 1, self.EXAMPLES, self.RUBRIC)
# Give the example different scores than the instructor gave
incorrect_assessment = {
u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
u"ﻭɼค๓๓คɼ": u"𝓰𝓸𝓸𝓭",
}
corrections = training_api.assess_training_example(
self.submission_uuid, incorrect_assessment
)
# Expect that we get corrected and stay on the current example
self.assertItemsEqual(corrections, self.EXAMPLES[1]['options_selected'])
self._assert_workflow_status(self.submission_uuid, 1, 2)
# Try again, and this time assess the same way as the instructor
corrections = training_api.assess_training_example(
self.submission_uuid, self.EXAMPLES[1]['options_selected']
)
self.assertEqual(corrections, dict())
# Now we should have completed both assessments
self._assert_workflow_status(self.submission_uuid, 2, 2)
def test_assess_without_update(self):
# Start a workflow
training_api.create_training_workflow(self.submission_uuid, self.RUBRIC, self.EXAMPLES)
# Assess the first training example the same way the instructor did
# but do NOT update the workflow
corrections = training_api.assess_training_example(
self.submission_uuid,
self.EXAMPLES[0]['options_selected'],
update_workflow=False
)
# Expect that we're still on the first step
self.assertEqual(corrections, dict())
self._assert_workflow_status(self.submission_uuid, 0, 2)
@ddt.file_data('data/validate_training_examples.json')
def test_validate_training_examples(self, data):
errors = training_api.validate_training_examples(
data['rubric'], data['examples']
)
msg = u"Expected errors {} but got {}".format(data['errors'], errors)
self.assertItemsEqual(errors, data['errors'], msg=msg)
def test_is_finished_no_workflow(self):
# Without creating a workflow, we should not be finished
self.assertFalse(training_api.submitter_is_finished(self.submission_uuid, dict()))
# But since we're not being assessed by others, the "assessment" should be finished.
self.assertTrue(training_api.assessment_is_finished(self.submission_uuid, dict()))
def test_get_training_example_none_available(self):
# Start a workflow and assess all training examples
training_api.create_training_workflow(self.submission_uuid, self.RUBRIC, self.EXAMPLES)
self._assert_workflow_status(self.submission_uuid, 0, 2)
for example in self.EXAMPLES:
training_api.assess_training_example(self.submission_uuid, example['options_selected'])
# Now we should be complete
self._assert_workflow_status(self.submission_uuid, 2, 2)
# ... and if we try to get another example, we should get None
self.assertIs(
training_api.get_training_example(self.submission_uuid), None
)
def test_get_training_example_no_workflow(self):
# With no workflow defined, we should get an error
with self.assertRaises(StudentTrainingRequestError):
training_api.get_training_example(self.submission_uuid)
def test_create_training_workflow_already_started(self):
# Create a workflow for training
training_api.create_training_workflow(self.submission_uuid, self.RUBRIC, self.EXAMPLES)
# Try to create a second workflow for the same submission,
# expecting an error.
with self.assertRaises(StudentTrainingRequestError):
training_api.create_training_workflow(self.submission_uuid, self.RUBRIC, self.EXAMPLES)
def test_create_training_workflow_no_examples(self):
# Try to create a training workflow with no examples
# and expect an error.
with self.assertRaises(StudentTrainingRequestError):
training_api.create_training_workflow(self.submission_uuid, self.RUBRIC, [])
def test_create_training_workflow_no_submission(self):
# Try to create a training workflow with an invalid submission UUID
with self.assertRaises(StudentTrainingRequestError):
training_api.create_training_workflow("not a submission!", self.RUBRIC, self.EXAMPLES)
def test_assess_training_example_completed_workflow(self):
# Start a workflow and assess all training examples
training_api.create_training_workflow(self.submission_uuid, self.RUBRIC, self.EXAMPLES)
self._assert_workflow_status(self.submission_uuid, 0, 2)
for example in self.EXAMPLES:
training_api.assess_training_example(self.submission_uuid, example['options_selected'])
# Try to assess again, and expect an error
with self.assertRaises(StudentTrainingRequestError):
training_api.assess_training_example(
self.submission_uuid, self.EXAMPLES[0]['options_selected']
)
def test_assess_training_example_no_workflow(self):
# With no workflow defined, we should get an error
with self.assertRaises(StudentTrainingRequestError):
training_api.assess_training_example(
self.submission_uuid, self.EXAMPLES[0]['options_selected']
)
def test_get_workflow_status_no_workflow(self):
# With no workflow defined, we should get an error
# when we try to request the status.
with self.assertRaises(StudentTrainingRequestError):
training_api.get_workflow_status(self.submission_uuid)
def test_create_workflow_invalid_rubric(self):
# Rubric is missing a very important key!
invalid_rubric = copy.deepcopy(self.RUBRIC)
del invalid_rubric['criteria']
with self.assertRaises(StudentTrainingRequestError):
training_api.create_training_workflow(self.submission_uuid, invalid_rubric, self.EXAMPLES)
def test_create_workflow_invalid_examples(self):
# Training example is not a dictionary!
with self.assertRaises(StudentTrainingRequestError):
training_api.create_training_workflow(self.submission_uuid, self.RUBRIC, ["not a dict!"])
@patch.object(StudentTrainingWorkflow, 'create_workflow')
def test_create_workflow_database_error(self, mock_db):
mock_db.side_effect = DatabaseError("Kaboom!")
with self.assertRaises(StudentTrainingInternalError):
training_api.create_training_workflow(self.submission_uuid, self.RUBRIC, self.EXAMPLES)
@patch.object(StudentTrainingWorkflow.objects, 'get')
def test_get_workflow_status_database_error(self, mock_db):
training_api.create_training_workflow(self.submission_uuid, self.RUBRIC, self.EXAMPLES)
mock_db.side_effect = DatabaseError("Kaboom!")
with self.assertRaises(StudentTrainingInternalError):
training_api.get_workflow_status(self.submission_uuid)
@patch.object(StudentTrainingWorkflow.objects, 'get')
def test_get_training_example_database_error(self, mock_db):
training_api.create_training_workflow(self.submission_uuid, self.RUBRIC, self.EXAMPLES)
mock_db.side_effect = DatabaseError("Kaboom!")
with self.assertRaises(StudentTrainingInternalError):
training_api.get_training_example(self.submission_uuid)
@patch.object(StudentTrainingWorkflow.objects, 'get')
def test_assess_training_example_database_error(self, mock_db):
training_api.create_training_workflow(self.submission_uuid, self.RUBRIC, self.EXAMPLES)
mock_db.side_effect = DatabaseError("Kaboom!")
with self.assertRaises(StudentTrainingInternalError):
training_api.assess_training_example(self.submission_uuid, self.EXAMPLES[0]['options_selected'])
def _assert_workflow_status(self, submission_uuid, num_completed, num_total):
"""
Check that the training workflow is on the expected step.
Args:
submission_uuid (str): Submission UUID of the student being trained.
num_completed (int): The expected number of examples assessed correctly.
num_total (int): The expected number of available examples.
Returns:
None
Raises:
AssertionError
"""
# Check the workflow status (what step are we on?)
status = training_api.get_workflow_status(submission_uuid)
self.assertEqual(status['num_completed'], num_completed)
self.assertEqual(status['num_total'], num_total)
# Check whether the assessment step is completed
# (used by the workflow API)
is_finished = bool(num_completed == num_total)
self.assertEqual(
training_api.submitter_is_finished(submission_uuid, dict()),
is_finished
)
# assessment_is_finished should always return True,
# since the student is not assessed by others during training.
self.assertTrue(
training_api.assessment_is_finished(submission_uuid, dict()),
)
# At no point should we receive a score!
self.assertIs(training_api.get_score(submission_uuid, dict()), None)
def _expected_example(self, input_example, rubric):
"""
Return the training example we would expect the API to return for a given input example.
The retrieved example will include the rubric.
Args:
input_example (dict): The example dict we passed to the API.
rubric (dict): The rubric for the example.
Returns:
dict
"""
output_dict = copy.deepcopy(input_example)
output_dict['rubric'] = rubric
return output_dict
def _assert_get_example(self, submission_uuid, order_num, input_examples, input_rubric):
"""
Check the training example we get from the API.
Args:
submission_uuid (str): The submission UUID associated with the student being trained.
order_num (int): The order number of the example we expect to retrieve.
input_examples (list of dict): The examples we used to configure the training workflow.
input_rubric (dict): The rubric we used to configure the training workflow.
Returns:
None
Raises:
AssertionError
"""
example = training_api.get_training_example(submission_uuid)
expected_example = self._expected_example(input_examples[order_num], input_rubric)
self.assertItemsEqual(example, expected_example)
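For orientation, the happy-path flow these tests exercise can be summarized in the short sketch below. This is a minimal illustration only: it assumes the training_api functions behave exactly as the tests above describe, and the import path is taken from the documentation section added later in this commit (openassessment.assessment.api.student_training); submission_uuid, rubric, and examples are placeholders supplied by the caller.

# Minimal sketch (not part of the test suite): drive a student through training.
# Assumes the API behaves as described by the tests above; the import path is
# taken from the docs added in this commit and may differ in practice.
from openassessment.assessment.api import student_training as training_api

def train_student(submission_uuid, rubric, examples):
    # Create the workflow once per submission; a second call raises
    # StudentTrainingRequestError.
    training_api.create_training_workflow(submission_uuid, rubric, examples)

    # Step through the examples until the workflow reports completion.
    example = training_api.get_training_example(submission_uuid)
    while example is not None:
        # In the real UI the student picks options; here we echo the
        # instructor's selections so every attempt is scored as correct.
        training_api.assess_training_example(
            submission_uuid, example['options_selected']
        )
        example = training_api.get_training_example(submission_uuid)

    # Training is pass/fail from the workflow's perspective: completion is
    # reported through submitter_is_finished, and there is never a score.
    assert training_api.submitter_is_finished(submission_uuid, {})
    assert training_api.assessment_is_finished(submission_uuid, {})
    assert training_api.get_score(submission_uuid, {}) is None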
# -*- coding: utf-8 -*-
"""
Tests for training models and serializers (common to student and AI training).
"""
import copy
import mock
from django.db import IntegrityError
from openassessment.test_utils import CacheResetTest
from openassessment.assessment.models import TrainingExample
from openassessment.assessment.serializers import deserialize_training_examples
class TrainingExampleSerializerTest(CacheResetTest):
"""
Tests for serialization and deserialization of TrainingExample.
These functions are pretty well-covered by API-level tests,
so we focus on edge cases.
"""
RUBRIC_OPTIONS = [
{
"order_num": 0,
"name": u"𝒑𝒐𝒐𝒓",
"explanation": u"𝕻𝖔𝖔𝖗 𝖏𝖔𝖇!",
"points": 0,
},
{
"order_num": 1,
"name": u"𝓰𝓸𝓸𝓭",
"explanation": u"ﻭѻѻɗ ﻝѻ๒!",
"points": 1,
},
{
"order_num": 2,
"name": "єχ¢єℓℓєηт",
"explanation": "乇メc乇レレ乇刀イ フo乃!",
"points": 2,
},
]
RUBRIC = {
'prompt': u"МоъЎ-ↁіск; оѓ, ГЂэ ЩЂаlэ",
'criteria': [
{
"order_num": 0,
"name": u"vøȼȺƀᵾłȺɍɏ",
"prompt": u"Ħøw vȺɍɨɇđ ɨs ŧħɇ vøȼȺƀᵾłȺɍɏ?",
"options": RUBRIC_OPTIONS
},
{
"order_num": 1,
"name": u"ﻭɼค๓๓คɼ",
"prompt": u"𝕳𝖔𝖜 𝖈𝖔𝖗𝖗𝖊𝖈𝖙 𝖎𝖘 𝖙𝖍𝖊 𝖌𝖗𝖆𝖒𝖒𝖆𝖗?",
"options": RUBRIC_OPTIONS
}
]
}
EXAMPLES = [
{
'answer': (
u"𝕿𝖍𝖊𝖗𝖊 𝖆𝖗𝖊 𝖈𝖊𝖗𝖙𝖆𝖎𝖓 𝖖𝖚𝖊𝖊𝖗 𝖙𝖎𝖒𝖊𝖘 𝖆𝖓𝖉 𝖔𝖈𝖈𝖆𝖘𝖎𝖔𝖓𝖘 𝖎𝖓 𝖙𝖍𝖎𝖘 𝖘𝖙𝖗𝖆𝖓𝖌𝖊 𝖒𝖎𝖝𝖊𝖉 𝖆𝖋𝖋𝖆𝖎𝖗 𝖜𝖊 𝖈𝖆𝖑𝖑 𝖑𝖎𝖋𝖊"
u" 𝖜𝖍𝖊𝖓 𝖆 𝖒𝖆𝖓 𝖙𝖆𝖐𝖊𝖘 𝖙𝖍𝖎𝖘 𝖜𝖍𝖔𝖑𝖊 𝖚𝖓𝖎𝖛𝖊𝖗𝖘𝖊 𝖋𝖔𝖗 𝖆 𝖛𝖆𝖘𝖙 𝖕𝖗𝖆𝖈𝖙𝖎𝖈𝖆𝖑 𝖏𝖔𝖐𝖊, 𝖙𝖍𝖔𝖚𝖌𝖍 𝖙𝖍𝖊 𝖜𝖎𝖙 𝖙𝖍𝖊𝖗𝖊𝖔𝖋"
u" 𝖍𝖊 𝖇𝖚𝖙 𝖉𝖎𝖒𝖑𝖞 𝖉𝖎𝖘𝖈𝖊𝖗𝖓𝖘, 𝖆𝖓𝖉 𝖒𝖔𝖗𝖊 𝖙𝖍𝖆𝖓 𝖘𝖚𝖘𝖕𝖊𝖈𝖙𝖘 𝖙𝖍𝖆𝖙 𝖙𝖍𝖊 𝖏𝖔𝖐𝖊 𝖎𝖘 𝖆𝖙 𝖓𝖔𝖇𝖔𝖉𝖞'𝖘 𝖊𝖝𝖕𝖊𝖓𝖘𝖊 𝖇𝖚𝖙 𝖍𝖎𝖘 𝖔𝖜𝖓."
),
'options_selected': {
u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
u"ﻭɼค๓๓คɼ": u"𝒑𝒐𝒐𝒓",
}
},
{
'answer': u"Tőṕ-héávӳ ẃáś thé śhíṕ áś á díńńéŕĺéśś śtúdéńt ẃíth áĺĺ Áŕíśtőtĺé íń híś héád.",
'options_selected': {
u"vøȼȺƀᵾłȺɍɏ": u"𝒑𝒐𝒐𝒓",
u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
}
},
{
'answer': (
u"Consider the subtleness of the sea; how its most dreaded creatures glide under water, "
u"unapparent for the most part, and treacherously hidden beneath the loveliest tints of "
u"azure..... Consider all this; and then turn to this green, gentle, and most docile earth; "
u"consider them both, the sea and the land; and do you not find a strange analogy to something in yourself?"
),
'options_selected': {
u"vøȼȺƀᵾłȺɍɏ": u"𝒑𝒐𝒐𝒓",
u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
}
},
]
def test_duplicate_training_example(self):
# Deserialize some examples for a rubric
deserialize_training_examples(self.EXAMPLES[0:2], self.RUBRIC)
# Deserialize some more examples, of which two are duplicates
examples = deserialize_training_examples(self.EXAMPLES, self.RUBRIC)
# Check that only three examples were created in the database
db_examples = TrainingExample.objects.all()
self.assertEqual(len(db_examples), 3)
# Check that the examples match what we got from the deserializer
self.assertItemsEqual(examples, db_examples)
def test_similar_training_examples_different_rubric(self):
# Deserialize some examples
first_examples = deserialize_training_examples(self.EXAMPLES, self.RUBRIC)
# Deserialize one more example with the rubric mutated slightly
mutated_rubric = copy.deepcopy(self.RUBRIC)
mutated_rubric['criteria'][0]['options'][0]['points'] = 5
second_examples = deserialize_training_examples(self.EXAMPLES[0:2], mutated_rubric)
# There should be a total of 5 examples (3 for the first rubric + 2 for the second)
db_examples = TrainingExample.objects.all()
self.assertEqual(len(db_examples), 5)
# Check that each of the examples from the deserializer is in the database
for example in (first_examples + second_examples):
self.assertIn(example, db_examples)
def test_similar_training_examples_different_options(self):
# Deserialize some examples
first_examples = deserialize_training_examples(self.EXAMPLES, self.RUBRIC)
# Deserialize another example that's identical to the first example,
# with one option changed
mutated_examples = copy.deepcopy(self.EXAMPLES)
mutated_examples[0]['options_selected'][u'vøȼȺƀᵾłȺɍɏ'] = u"єχ¢єℓℓєηт"
second_examples = deserialize_training_examples(mutated_examples, self.RUBRIC)
# Expect a total of 4 examples (3 from the first call, plus one new example from the second call)
db_examples = TrainingExample.objects.all()
self.assertEqual(len(db_examples), 4)
# Check that all the examples are in the database
for example in (first_examples + second_examples):
self.assertIn(example, db_examples)
def test_similar_training_examples_different_answer(self):
# Deserialize some examples
first_examples = deserialize_training_examples(self.EXAMPLES, self.RUBRIC)
# Deserialize another example that's identical to the first example,
# with a different answer
mutated_examples = copy.deepcopy(self.EXAMPLES)
mutated_examples[0]['answer'] = u"MUTATED!"
second_examples = deserialize_training_examples(mutated_examples, self.RUBRIC)
# Expect a total of 4 examples (3 from the first call, plus one new example from the second call)
db_examples = TrainingExample.objects.all()
self.assertEqual(len(db_examples), 4)
# Check that all the examples are in the database
for example in (first_examples + second_examples):
self.assertIn(example, db_examples)
@mock.patch.object(TrainingExample.objects, 'get')
@mock.patch.object(TrainingExample, 'create_example')
def test_deserialize_integrity_error(self, mock_create, mock_get):
# Simulate an integrity error when creating the training example.
# This can occur under repeatable-read isolation, when another
# transaction creates the same example concurrently.
mock_example = mock.MagicMock(TrainingExample)
mock_get.side_effect = [TrainingExample.DoesNotExist, mock_example]
mock_create.side_effect = IntegrityError
# Expect that we get the mock example back
# (proves that the function tried to retrieve the object again after
# catching the integrity error)
examples = deserialize_training_examples(self.EXAMPLES[:1], self.RUBRIC)
self.assertEqual(examples, [mock_example])
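The integrity-error test above implies a get-or-create-with-retry pattern inside the deserializer. The sketch below shows the general shape of that pattern; it is a hypothetical helper for illustration only, not the actual deserialize_training_examples implementation.

# Hypothetical sketch of the retry-on-IntegrityError pattern that
# test_deserialize_integrity_error exercises; not the real implementation.
from django.db import IntegrityError

def get_or_create_row(model_cls, **lookup):
    """Fetch a row, creating it if missing; if a concurrent transaction
    created the same row first, catch the IntegrityError and fetch again."""
    try:
        return model_cls.objects.get(**lookup)
    except model_cls.DoesNotExist:
        try:
            return model_cls.objects.create(**lookup)
        except IntegrityError:
            # Another transaction won the race; the row exists now.
            return model_cls.objects.get(**lookup)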
@@ -32,6 +32,11 @@ Self Assessment
.. automodule:: openassessment.assessment.api.self
:members:
Student Training
****************
.. automodule:: openassessment.assessment.api.student_training
:members:
Workflow Assessment
*******************
.. automodule:: openassessment.workflow
@@ -72,6 +77,15 @@ Models
.. automodule:: openassessment.assessment.models.peer
:members:
.. automodule:: openassessment.assessment.models.training
:members:
.. automodule:: openassessment.assessment.models.student_training
:members:
Workflow