Commit 717dc97f by Will Daly

Implement public API and data model for student training assessment

parent 406ca899
@@ -6,3 +6,4 @@ Export errors from all modules defined in this package.
from .peer import *
from .self import *
from .student_training import *
"""
Errors for training assessment type.
"""
class StudentTrainingError(Exception):
"""
Error occurred in a training API call.
"""
pass
class StudentTrainingRequestError(StudentTrainingError):
"""
There was a problem with a request made to the training API.
"""
pass
class StudentTrainingInternalError(StudentTrainingError):
"""
An internal error occurred while processing a request to the training API.
"""
pass
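A minimal, hypothetical sketch (not part of this commit) of how calling code might distinguish these error types; `api_call` is a stand-in for any student training API function, and nothing below is defined by this changeset.

from openassessment.assessment.errors import (
    StudentTrainingError,
    StudentTrainingRequestError,
    StudentTrainingInternalError,
)


def call_training_api(api_call, *args, **kwargs):
    """Invoke a training API function, returning (result, error_message)."""
    try:
        return api_call(*args, **kwargs), None
    except StudentTrainingRequestError as ex:
        # The caller supplied bad input (for example, an unknown submission UUID).
        return None, u"Invalid request: {}".format(ex)
    except StudentTrainingInternalError as ex:
        # Something failed inside the API; real code would log this.
        return None, u"Internal error: {}".format(ex)
    except StudentTrainingError as ex:
        # Catch-all for any other error derived from the base class.
        return None, u"Training error: {}".format(ex)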
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'AssessmentPart.feedback'
db.alter_column('assessment_assessmentpart', 'feedback', self.gf('django.db.models.fields.TextField')())
def backwards(self, orm):
# Changing field 'AssessmentPart.feedback'
db.alter_column('assessment_assessmentpart', 'feedback', self.gf('django.db.models.fields.TextField')(max_length=10000))
models = {
'assessment.assessment': {
'Meta': {'ordering': "['-scored_at', '-id']", 'object_name': 'Assessment'},
'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"}),
'score_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'scored_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'scorer_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'assessment.assessmentfeedback': {
'Meta': {'object_name': 'AssessmentFeedback'},
'assessments': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.Assessment']"}),
'feedback_text': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.AssessmentFeedbackOption']"}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'assessment.assessmentfeedbackoption': {
'Meta': {'object_name': 'AssessmentFeedbackOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'assessment.assessmentpart': {
'Meta': {'object_name': 'AssessmentPart'},
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parts'", 'to': "orm['assessment.Assessment']"}),
'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['assessment.CriterionOption']"})
},
'assessment.criterion': {
'Meta': {'ordering': "['rubric', 'order_num']", 'object_name': 'Criterion'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'prompt': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'criteria'", 'to': "orm['assessment.Rubric']"})
},
'assessment.criterionoption': {
'Meta': {'ordering': "['criterion', 'order_num']", 'object_name': 'CriterionOption'},
'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['assessment.Criterion']"}),
'explanation': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'points': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'assessment.peerworkflow': {
'Meta': {'ordering': "['created_at', 'id']", 'object_name': 'PeerWorkflow'},
'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'grading_completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'assessment.peerworkflowitem': {
'Meta': {'ordering': "['started_at', 'id']", 'object_name': 'PeerWorkflowItem'},
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Assessment']", 'null': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded_by'", 'to': "orm['assessment.PeerWorkflow']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scored': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scorer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded'", 'to': "orm['assessment.PeerWorkflow']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'assessment.rubric': {
'Meta': {'object_name': 'Rubric'},
'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['assessment']
\ No newline at end of file
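For orientation only (not part of this commit): the column change above corresponds roughly to the model-level edit sketched below. The real AssessmentPart model is not shown in this diff, so the field definition is reconstructed from the forwards() and backwards() calls and the frozen model data.

from django.db import models


class AssessmentPart(models.Model):
    """Reconstructed sketch of the model this migration alters (not the real file)."""
    # Before this migration (see backwards() above):
    #     feedback = models.TextField(default="", max_length=10000, blank=True)
    # After this migration (see forwards() above), the 10,000-character cap is gone:
    feedback = models.TextField(default="", blank=True)

    class Meta:
        app_label = "assessment"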
@@ -5,3 +5,5 @@ Export models from each Python module in this package.
from .base import *
from .peer import *
from .training import *
from .student_training import *
"""
Django models specific to the student training assessment type.
"""
from django.db import models, transaction
from django.utils import timezone
from submissions import api as sub_api
from .training import TrainingExample
class StudentTrainingWorkflow(models.Model):
"""
Tracks a student's progress through the student training assessment step.
"""
# The submission UUID of the student being trained
submission_uuid = models.CharField(max_length=128, db_index=True)
# Information about the student and problem
# This duplicates information associated with the submission itself,
# but we include it here to make it easier to query workflows.
# Since submissions are immutable, we can do this without
# jeopardizing data integrity.
student_id = models.CharField(max_length=40, db_index=True)
item_id = models.CharField(max_length=128, db_index=True)
course_id = models.CharField(max_length=40, db_index=True)
class Meta:
app_label = "assessment"
@classmethod
@transaction.commit_on_success
def create_workflow(cls, submission_uuid, examples):
"""
Create a student training workflow.
Args:
submission_uuid (str): The UUID of the submission from the student being trained.
examples (list of TrainingExamples): The training examples to show the student.
Returns:
StudentTrainingWorkflow
Raises:
SubmissionError: There was an error retrieving the submission.
"""
# Retrieve the student item info
submission = sub_api.get_submission_and_student(submission_uuid)
student_item = submission['student_item']
# Create the workflow
workflow = cls.objects.create(
submission_uuid=submission_uuid,
student_id=student_item['student_id'],
item_id=student_item['item_id'],
course_id=student_item['course_id']
)
# Create workflow items for each example
for order_num, example in enumerate(examples):
StudentTrainingWorkflowItem.objects.create(
workflow=workflow,
order_num=order_num,
training_example=example,
)
return workflow
@property
def status(self):
"""
The student's status within the workflow (num steps completed / num steps available).
Returns:
tuple of `(num_completed, num_total)`, both integers
"""
items = self.items.all() # pylint:disable=E1101
num_complete = sum([1 if item.is_complete else 0 for item in items])
num_total = len(items)
return num_complete, num_total
@property
def is_complete(self):
"""
Check whether all items in the workflow are complete.
Returns:
bool
"""
num_incomplete = self.items.filter(completed_at__isnull=True).count() # pylint:disable=E1101
return num_incomplete == 0
@property
def next_incomplete_item(self):
"""
Find the next incomplete item in the workflow.
Returns:
StudentTrainingWorkflowItem or None
"""
next_incomplete = self.items.filter( # pylint:disable=E1101
completed_at__isnull=True
).order_by('order_num')[:1]
if len(next_incomplete) > 0:
return next_incomplete[0]
else:
return None
class StudentTrainingWorkflowItem(models.Model):
"""
A particular step in the training workflow. At each step,
a student must try assessing an example submission.
If the student gives the same scores as the instructor,
then the student proceeds to the next example;
if there are no examples left, the student has
successfully completed training.
"""
workflow = models.ForeignKey(StudentTrainingWorkflow, related_name="items")
order_num = models.PositiveIntegerField()
started_at = models.DateTimeField(auto_now_add=True)
completed_at = models.DateTimeField(default=None, null=True)
training_example = models.ForeignKey(TrainingExample)
class Meta:
app_label = "assessment"
ordering = ["workflow", "order_num"]
@property
def is_complete(self):
"""
Check whether the student has completed this workflow item.
Returns:
bool
"""
return self.completed_at is not None
def mark_complete(self):
"""
Mark the item as complete. Once an item is marked complete,
it should stay complete!
Returns:
None
"""
self.completed_at = timezone.now()
self.save()
def check(self, options_selected):
"""
Compare the options that the student selected to
the options set by the instructor in the training example.
Args:
options_selected (dict): Mapping of criterion names to option names.
Returns:
dict
Example usage:
>>> item.check({'vocabulary': 'good', 'grammar': 'poor'})
{'vocabulary': 'excellent'}
>>> item.check({'vocabulary': 'excellent', 'grammar': 'poor'})
{}
"""
staff_selected = self.training_example.options_selected_dict
corrections = dict()
for criterion_name, option_name in staff_selected.iteritems():
missing_option = criterion_name not in options_selected
incorrect_option = options_selected.get(criterion_name) != option_name
if missing_option or incorrect_option:
corrections[criterion_name] = option_name
return corrections
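A hedged end-to-end sketch (not part of this commit) of how an API layer might drive the two models above; the function name and its `examples` argument are hypothetical, but every model method used here appears in the code above.

from openassessment.assessment.models import StudentTrainingWorkflow


def assess_next_training_example(submission_uuid, examples, options_selected):
    """
    Hypothetical helper: check the student's next training example.
    Returns (corrections, num_completed, num_total).
    """
    # Look up the workflow for this submission, creating it on first use.
    try:
        workflow = StudentTrainingWorkflow.objects.get(submission_uuid=submission_uuid)
    except StudentTrainingWorkflow.DoesNotExist:
        workflow = StudentTrainingWorkflow.create_workflow(submission_uuid, examples)

    item = workflow.next_incomplete_item
    if item is None:
        # The student has already completed training.
        num_completed, num_total = workflow.status
        return {}, num_completed, num_total

    # Compare the student's selections against the instructor's choices.
    corrections = item.check(options_selected)
    if not corrections:
        item.mark_complete()

    num_completed, num_total = workflow.status
    return corrections, num_completed, num_total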
"""
Django models for training (both student and AI).
"""
import json
from hashlib import sha1
from django.db import models
from .base import Rubric, CriterionOption
class TrainingExample(models.Model):
"""
An example assessment used to train students (before peer assessment) or AI.
"""
# The answer (JSON-serialized)
raw_answer = models.TextField(blank=True)
rubric = models.ForeignKey(Rubric)
# Use a m2m to avoid changing the criterion option
options_selected = models.ManyToManyField(CriterionOption)
# SHA1 hash
content_hash = models.CharField(max_length=40, unique=True, db_index=True)
class Meta:
app_label = "assessment"
@classmethod
def create_example(cls, answer, options_ids, rubric):
"""
Create a new training example.
Args:
answer (JSON-serializable): The answer associated with the training example.
options_ids (iterable of int): Selected option IDs for the training example.
rubric (Rubric): The rubric associated with the training example.
Returns:
TrainingExample
"""
content_hash = cls.calculate_hash(answer, options_ids, rubric)
example = TrainingExample.objects.create(
content_hash=content_hash,
raw_answer=json.dumps(answer),
rubric=rubric
)
for option in CriterionOption.objects.filter(pk__in=list(options_ids)):
example.options_selected.add(option)
return example
@property
def answer(self):
"""
Return the JSON-decoded answer.
Returns:
JSON-serializable
"""
return json.loads(self.raw_answer)
@property
def options_selected_dict(self):
"""
Return a dictionary of the rubric options selected.
Returns:
dict: maps criterion names to selected option names
"""
return {
option.criterion.name: option.name
for option in self.options_selected.all() # pylint:disable=E1101
}
@staticmethod
def calculate_hash(answer, option_ids, rubric):
"""
Calculate a hash for the contents of a training example.
Args:
answer (JSON-serializable): The answer associated with the training example.
option_ids (iterable of int): Selected option IDs for the training example.
rubric (Rubric): The rubric associated with the training example.
Returns:
str
"""
contents = json.dumps({
'answer': answer,
'option_ids': list(option_ids),
'rubric': rubric.id
})
return sha1(contents).hexdigest()
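A small hedged sketch (not in this commit) of the get-or-create pattern that calculate_hash enables; it mirrors the deduplication logic in deserialize_training_examples later in this diff.

from openassessment.assessment.models import TrainingExample


def get_or_create_example(answer, option_ids, rubric):
    """
    Hypothetical helper: reuse an existing TrainingExample when an identical
    one (same answer, selected options, and rubric) is already stored.
    """
    content_hash = TrainingExample.calculate_hash(answer, option_ids, rubric)
    try:
        return TrainingExample.objects.get(content_hash=content_hash)
    except TrainingExample.DoesNotExist:
        return TrainingExample.create_example(answer, option_ids, rubric)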
@@ -6,3 +6,4 @@ Export serializers from each module in this package.
from .base import *
from .peer import *
from .training import *
"""
Serializers for the training assessment type.
"""
import json
from django.db import transaction, IntegrityError
from openassessment.assessment.models import TrainingExample
from .base import rubric_from_dict, RubricSerializer
class InvalidTrainingExample(Exception):
"""
The training example could not be deserialized.
"""
pass
def validate_training_example_format(example):
"""
Check whether the serialized training example dict
has the correct structure.
Args:
example (dict): The serialized training example.
Returns:
tuple of (is_valid, errors), where `is_valid` is a bool
and `errors` is a list of error messages.
"""
errors = []
if not isinstance(example, dict):
    errors.append(u"Training example must be a dictionary")
    return False, errors
if 'answer' not in example:
errors.append(u'Training example must contain an "answer" field.')
if 'options_selected' not in example:
errors.append(u'Training example must contain an "options_selected" field.')
is_valid = (len(errors) == 0)
return is_valid, errors
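# Hedged usage sketch (not part of this commit): one way a caller might
# pre-validate a whole batch of serialized examples before deserializing,
# collecting every error message. The helper name below is hypothetical.
def validate_training_examples_format(examples):
    """Return (is_valid, errors) aggregated across a list of serialized examples."""
    all_errors = []
    for index, example in enumerate(examples):
        is_valid, errors = validate_training_example_format(example)
        if not is_valid:
            all_errors.extend(
                u"Example {}: {}".format(index, error) for error in errors
            )
    return len(all_errors) == 0, all_errors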
def serialize_training_example(example):
"""
Serialize a training example to a dictionary.
Args:
example (TrainingExample): The training example to serialize.
Returns:
dict
"""
return {
'answer': example.answer,
'options_selected': example.options_selected_dict,
'rubric': RubricSerializer.serialized_from_cache(example.rubric),
}
@transaction.commit_on_success
def deserialize_training_examples(examples, rubric_dict):
"""
Deserialize training examples to Django models.
Args:
examples (list of dict): The serialized training examples.
rubric_dict (dict): The serialized rubric.
Returns:
list of TrainingExamples
Raises:
InvalidRubric
InvalidTrainingExample
Example usage:
>>> options = [
>>> {
>>> "order_num": 0,
>>> "name": "poor",
>>> "explanation": "Poor job!",
>>> "points": 0,
>>> },
>>> {
>>> "order_num": 1,
>>> "name": "good",
>>> "explanation": "Good job!",
>>> "points": 1,
>>> },
>>> {
>>> "order_num": 2,
>>> "name": "excellent",
>>> "explanation": "Excellent job!",
>>> "points": 2,
>>> },
>>> ]
>>>
>>> rubric = {
>>> "prompt": "Write an essay!",
>>> "criteria": [
>>> {
>>> "order_num": 0,
>>> "name": "vocabulary",
>>> "prompt": "How varied is the vocabulary?",
>>> "options": options
>>> },
>>> {
>>> "order_num": 1,
>>> "name": "grammar",
>>> "prompt": "How correct is the grammar?",
>>> "options": options
>>> }
>>> ]
>>> }
>>>
>>> examples = [
>>> {
>>> 'answer': u'Lorem ipsum',
>>> 'options_selected': {
>>> 'vocabulary': 'good',
>>> 'grammar': 'excellent'
>>> }
>>> },
>>> {
>>> 'answer': u'Doler',
>>> 'options_selected': {
>>> 'vocabulary': 'good',
>>> 'grammar': 'poor'
>>> }
>>> }
>>> ]
>>>
>>> examples = deserialize_training_examples(examples, rubric)
"""
# Parse the rubric
# This will raise an exception if the serialized rubric is invalid.
rubric = rubric_from_dict(rubric_dict)
# Parse each example
created_examples = []
for example_dict in examples:
is_valid, errors = validate_training_example_format(example_dict)
if not is_valid:
raise InvalidTrainingExample("; ".join(errors))
options_ids = rubric.options_ids(example_dict['options_selected'])
# Calculate the content hash to look up the example
content_hash = TrainingExample.calculate_hash(example_dict['answer'], options_ids, rubric)
try:
example = TrainingExample.objects.get(content_hash=content_hash)
except TrainingExample.DoesNotExist:
try:
example = TrainingExample.create_example(
example_dict['answer'], options_ids, rubric
)
except IntegrityError:
example = TrainingExample.objects.get(content_hash=content_hash)
created_examples.append(example)
return created_examples
@@ -138,7 +138,7 @@ class TestSelfApi(CacheResetTest):
scored_at=datetime.datetime(2014, 4, 1)
)
def test_create_assessment_missing_critieron(self):
def test_create_assessment_missing_criterion(self):
# Create a submission
submission = create_submission(self.STUDENT_ITEM, "Test answer")
......
# -*- coding: utf-8 -*-
"""
Tests for training models and serializers (common to student and AI training).
"""
import copy
import mock
from django.db import IntegrityError
from openassessment.test_utils import CacheResetTest
from openassessment.assessment.models import TrainingExample
from openassessment.assessment.serializers import deserialize_training_examples
class TrainingExampleSerializerTest(CacheResetTest):
"""
Tests for serialization and deserialization of TrainingExample.
These functions are pretty well-covered by API-level tests,
so we focus on edge cases.
"""
RUBRIC_OPTIONS = [
{
"order_num": 0,
"name": u"𝒑𝒐𝒐𝒓",
"explanation": u"𝕻𝖔𝖔𝖗 𝖏𝖔𝖇!",
"points": 0,
},
{
"order_num": 1,
"name": u"𝓰𝓸𝓸𝓭",
"explanation": u"ﻭѻѻɗ ﻝѻ๒!",
"points": 1,
},
{
"order_num": 2,
"name": "єχ¢єℓℓєηт",
"explanation": "乇メc乇レレ乇刀イ フo乃!",
"points": 2,
},
]
RUBRIC = {
'prompt': u"МоъЎ-ↁіск; оѓ, ГЂэ ЩЂаlэ",
'criteria': [
{
"order_num": 0,
"name": u"vøȼȺƀᵾłȺɍɏ",
"prompt": u"Ħøw vȺɍɨɇđ ɨs ŧħɇ vøȼȺƀᵾłȺɍɏ?",
"options": RUBRIC_OPTIONS
},
{
"order_num": 1,
"name": u"ﻭɼค๓๓คɼ",
"prompt": u"𝕳𝖔𝖜 𝖈𝖔𝖗𝖗𝖊𝖈𝖙 𝖎𝖘 𝖙𝖍𝖊 𝖌𝖗𝖆𝖒𝖒𝖆𝖗?",
"options": RUBRIC_OPTIONS
}
]
}
EXAMPLES = [
{
'answer': (
u"𝕿𝖍𝖊𝖗𝖊 𝖆𝖗𝖊 𝖈𝖊𝖗𝖙𝖆𝖎𝖓 𝖖𝖚𝖊𝖊𝖗 𝖙𝖎𝖒𝖊𝖘 𝖆𝖓𝖉 𝖔𝖈𝖈𝖆𝖘𝖎𝖔𝖓𝖘 𝖎𝖓 𝖙𝖍𝖎𝖘 𝖘𝖙𝖗𝖆𝖓𝖌𝖊 𝖒𝖎𝖝𝖊𝖉 𝖆𝖋𝖋𝖆𝖎𝖗 𝖜𝖊 𝖈𝖆𝖑𝖑 𝖑𝖎𝖋𝖊"
u" 𝖜𝖍𝖊𝖓 𝖆 𝖒𝖆𝖓 𝖙𝖆𝖐𝖊𝖘 𝖙𝖍𝖎𝖘 𝖜𝖍𝖔𝖑𝖊 𝖚𝖓𝖎𝖛𝖊𝖗𝖘𝖊 𝖋𝖔𝖗 𝖆 𝖛𝖆𝖘𝖙 𝖕𝖗𝖆𝖈𝖙𝖎𝖈𝖆𝖑 𝖏𝖔𝖐𝖊, 𝖙𝖍𝖔𝖚𝖌𝖍 𝖙𝖍𝖊 𝖜𝖎𝖙 𝖙𝖍𝖊𝖗𝖊𝖔𝖋"
u" 𝖍𝖊 𝖇𝖚𝖙 𝖉𝖎𝖒𝖑𝖞 𝖉𝖎𝖘𝖈𝖊𝖗𝖓𝖘, 𝖆𝖓𝖉 𝖒𝖔𝖗𝖊 𝖙𝖍𝖆𝖓 𝖘𝖚𝖘𝖕𝖊𝖈𝖙𝖘 𝖙𝖍𝖆𝖙 𝖙𝖍𝖊 𝖏𝖔𝖐𝖊 𝖎𝖘 𝖆𝖙 𝖓𝖔𝖇𝖔𝖉𝖞'𝖘 𝖊𝖝𝖕𝖊𝖓𝖘𝖊 𝖇𝖚𝖙 𝖍𝖎𝖘 𝖔𝖜𝖓."
),
'options_selected': {
u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
u"ﻭɼค๓๓คɼ": u"𝒑𝒐𝒐𝒓",
}
},
{
'answer': u"Tőṕ-héávӳ ẃáś thé śhíṕ áś á díńńéŕĺéśś śtúdéńt ẃíth áĺĺ Áŕíśtőtĺé íń híś héád.",
'options_selected': {
u"vøȼȺƀᵾłȺɍɏ": u"𝒑𝒐𝒐𝒓",
u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
}
},
{
'answer': (
u"Consider the subtleness of the sea; how its most dreaded creatures glide under water, "
u"unapparent for the most part, and treacherously hidden beneath the loveliest tints of "
u"azure..... Consider all this; and then turn to this green, gentle, and most docile earth; "
u"consider them both, the sea and the land; and do you not find a strange analogy to something in yourself?"
),
'options_selected': {
u"vøȼȺƀᵾłȺɍɏ": u"𝒑𝒐𝒐𝒓",
u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
}
},
]
def test_duplicate_training_example(self):
# Deserialize some examples for a rubric
deserialize_training_examples(self.EXAMPLES[0:2], self.RUBRIC)
# Deserialize some more examples, of which two are duplicates
examples = deserialize_training_examples(self.EXAMPLES, self.RUBRIC)
# Check that only three examples were created in the database
db_examples = TrainingExample.objects.all()
self.assertEqual(len(db_examples), 3)
# Check that the examples match what we got from the deserializer
self.assertItemsEqual(examples, db_examples)
def test_similar_training_examples_different_rubric(self):
# Deserialize some examples
first_examples = deserialize_training_examples(self.EXAMPLES, self.RUBRIC)
# Deserialize one more example with the rubric mutated slightly
mutated_rubric = copy.deepcopy(self.RUBRIC)
mutated_rubric['criteria'][0]['options'][0]['points'] = 5
second_examples = deserialize_training_examples(self.EXAMPLES[0:2], mutated_rubric)
# There should be a total of 5 examples (3 for the first rubric + 2 for the second)
db_examples = TrainingExample.objects.all()
self.assertEqual(len(db_examples), 5)
# Check that each of the examples from the deserializer are in the database
for example in (first_examples + second_examples):
self.assertIn(example, db_examples)
def test_similar_training_examples_different_options(self):
# Deserialize some examples
first_examples = deserialize_training_examples(self.EXAMPLES, self.RUBRIC)
# Deserialize another example that's identical to the first example,
# with one option changed
mutated_examples = copy.deepcopy(self.EXAMPLES)
mutated_examples[0]['options_selected'][u'vøȼȺƀᵾłȺɍɏ'] = u"єχ¢єℓℓєηт"
second_examples = deserialize_training_examples(mutated_examples, self.RUBRIC)
# Expect a total of 4 examples (3 from the first call, plus one new example from the second call)
db_examples = TrainingExample.objects.all()
self.assertEqual(len(db_examples), 4)
# Check that all the examples are in the database
for example in (first_examples + second_examples):
self.assertIn(example, db_examples)
def test_similar_training_examples_different_answer(self):
# Deserialize some examples
first_examples = deserialize_training_examples(self.EXAMPLES, self.RUBRIC)
# Deserialize another example that's identical to the first example,
# with a different answer
mutated_examples = copy.deepcopy(self.EXAMPLES)
mutated_examples[0]['answer'] = u"MUTATED!"
second_examples = deserialize_training_examples(mutated_examples, self.RUBRIC)
# Expect a total of 4 examples (3 from the first call, plus one new example from the second call)
db_examples = TrainingExample.objects.all()
self.assertEqual(len(db_examples), 4)
# Check that all the examples are in the database
for example in (first_examples + second_examples):
self.assertIn(example, db_examples)
@mock.patch.object(TrainingExample.objects, 'get')
@mock.patch.object(TrainingExample, 'create_example')
def test_deserialize_integrity_error(self, mock_create, mock_get):
# Simulate an integrity error when creating the training example
# This can occur when using repeatable-read isolation mode.
mock_example = mock.MagicMock(TrainingExample)
mock_get.side_effect = [TrainingExample.DoesNotExist, mock_example]
mock_create.side_effect = IntegrityError
# Expect that we get the mock example back
# (proves that the function tried to retrieve the object again after
# catching the integrity error)
examples = deserialize_training_examples(self.EXAMPLES[:1], self.RUBRIC)
self.assertEqual(examples, [mock_example])
@@ -32,6 +32,11 @@ Self Assessment
.. automodule:: openassessment.assessment.api.self
:members:
Student Training
****************
.. automodule:: openassessment.assessment.api.student_training
:members:
Workflow Assessment
*******************
.. automodule:: openassessment.workflow
@@ -72,6 +77,15 @@ Models
.. automodule:: openassessment.assessment.models.peer
:members:
.. automodule:: openassessment.assessment.models.training
:members:
.. automodule:: openassessment.assessment.models.student_training
:members:
Workflow
......