Commit d8bcde15 by Will Daly

Add uniqueness constraints for student training models; check for integrity errors

parent 2b224a74
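The pattern underlying this commit: pair a database-level uniqueness constraint with an IntegrityError handler so that get-or-create logic is safe against concurrent writers. A minimal sketch of the idea (model and field names here are illustrative, not the committed code):

# Minimal sketch of the get-or-create race handling this commit adopts.
# `Workflow` and its fields are illustrative, not the committed models.
from django.db import models, IntegrityError

class Workflow(models.Model):
    submission_uuid = models.CharField(max_length=128, unique=True)

    @classmethod
    def get_or_create_workflow(cls, submission_uuid):
        try:
            # Fast path: the row already exists.
            return cls.objects.get(submission_uuid=submission_uuid)
        except cls.DoesNotExist:
            pass
        try:
            return cls.objects.create(submission_uuid=submission_uuid)
        except IntegrityError:
            # Another writer inserted the row between our existence check
            # and the INSERT; the unique constraint guarantees exactly one
            # row exists, so fetch the winner's.
            return cls.objects.get(submission_uuid=submission_uuid)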
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding unique constraint on 'StudentTrainingWorkflowItem', fields ['order_num', 'workflow']
        db.create_unique('assessment_studenttrainingworkflowitem', ['order_num', 'workflow_id'])

    def backwards(self, orm):
        # Removing unique constraint on 'StudentTrainingWorkflowItem', fields ['order_num', 'workflow']
        db.delete_unique('assessment_studenttrainingworkflowitem', ['order_num', 'workflow_id'])

    models = {
        'assessment.assessment': {
            'Meta': {'ordering': "['-scored_at', '-id']", 'object_name': 'Assessment'},
            'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"}),
            'score_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'scored_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'scorer_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
        },
        'assessment.assessmentfeedback': {
            'Meta': {'object_name': 'AssessmentFeedback'},
            'assessments': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.Assessment']"}),
            'feedback_text': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'options': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.AssessmentFeedbackOption']"}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
        },
        'assessment.assessmentfeedbackoption': {
            'Meta': {'object_name': 'AssessmentFeedbackOption'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'text': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        'assessment.assessmentpart': {
            'Meta': {'object_name': 'AssessmentPart'},
            'assessment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parts'", 'to': "orm['assessment.Assessment']"}),
            'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'option': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['assessment.CriterionOption']"})
        },
        'assessment.criterion': {
            'Meta': {'ordering': "['rubric', 'order_num']", 'object_name': 'Criterion'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'prompt': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
            'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'criteria'", 'to': "orm['assessment.Rubric']"})
        },
        'assessment.criterionoption': {
            'Meta': {'ordering': "['criterion', 'order_num']", 'object_name': 'CriterionOption'},
            'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['assessment.Criterion']"}),
            'explanation': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'points': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        'assessment.peerworkflow': {
            'Meta': {'ordering': "['created_at', 'id']", 'object_name': 'PeerWorkflow'},
            'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'grading_completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
        },
        'assessment.peerworkflowitem': {
            'Meta': {'ordering': "['started_at', 'id']", 'object_name': 'PeerWorkflowItem'},
            'assessment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Assessment']", 'null': 'True'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded_by'", 'to': "orm['assessment.PeerWorkflow']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'scored': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'scorer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded'", 'to': "orm['assessment.PeerWorkflow']"}),
            'started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
        },
        'assessment.rubric': {
            'Meta': {'object_name': 'Rubric'},
            'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'assessment.studenttrainingworkflow': {
            'Meta': {'object_name': 'StudentTrainingWorkflow'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
        },
        'assessment.studenttrainingworkflowitem': {
            'Meta': {'ordering': "['workflow', 'order_num']", 'unique_together': "(('workflow', 'order_num'),)", 'object_name': 'StudentTrainingWorkflowItem'},
            'completed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'started_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'training_example': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.TrainingExample']"}),
            'workflow': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['assessment.StudentTrainingWorkflow']"})
        },
        'assessment.trainingexample': {
            'Meta': {'object_name': 'TrainingExample'},
            'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'options_selected': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['assessment.CriterionOption']", 'symmetrical': 'False'}),
            'raw_answer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"})
        }
    }

    complete_apps = ['assessment']
\ No newline at end of file
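One caveat worth knowing about `db.create_unique`: it issues an ALTER TABLE, which fails if duplicate ('order_num', 'workflow_id') pairs already exist in the table. If existing data could contain duplicates, a data migration would need to dedupe first; a rough sketch using South's frozen ORM (hypothetical, not part of this commit):

# Hypothetical dedupe pass (not in the commit): keep the lowest-id row
# for each (workflow_id, order_num) pair and delete the rest, so the
# subsequent ALTER TABLE adding the unique constraint cannot fail.
def forwards(self, orm):
    seen = set()
    items = orm['assessment.StudentTrainingWorkflowItem'].objects.order_by('id')
    for item in items:
        key = (item.workflow_id, item.order_num)
        if key in seen:
            item.delete()
        else:
            seen.add(key)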
"""
Django models specific to the student training assessment type.
"""
from django.db import models
from django.db import models, IntegrityError
from django.utils import timezone
from submissions import api as sub_api
from .training import TrainingExample
@@ -53,12 +53,18 @@ class StudentTrainingWorkflow(models.Model):
        student_item = submission['student_item']

        # Create the workflow
+        try:
            return cls.objects.create(
                submission_uuid=submission_uuid,
                student_id=student_item['student_id'],
                item_id=student_item['item_id'],
                course_id=student_item['course_id']
            )
+        # If we get an integrity error, it means we've violated a uniqueness constraint
+        # (someone has created this object after we checked if it existed)
+        # We can therefore assume that the object exists and we can retrieve it.
+        except IntegrityError:
+            return cls.objects.get(submission_uuid=submission_uuid)
    @property
    def num_completed(self):
@@ -116,11 +122,24 @@ class StudentTrainingWorkflow(models.Model):
        else:
            order_num = len(items) + 1

        next_example = available_examples[0]
+        try:
            StudentTrainingWorkflowItem.objects.create(
                workflow=self,
                order_num=order_num,
                training_example=next_example
            )
+        # If we get an integrity error, it means we've violated a uniqueness constraint
+        # (someone has created this object after we checked if it existed).
+        # Since the object already exists, we don't need to do anything.
+        # However, the example might not be the one we intended to use, so
+        # we need to retrieve the actual training example.
+        except IntegrityError:
+            workflow = StudentTrainingWorkflowItem.objects.get(
+                workflow=self, order_num=order_num
+            )
+            return workflow.training_example
+        else:
            return next_example
    @property
@@ -161,6 +180,7 @@ class StudentTrainingWorkflowItem(models.Model):
    class Meta:
        app_label = "assessment"
        ordering = ["workflow", "order_num"]
+        unique_together = ('workflow', 'order_num')

    @property
    def is_complete(self):
......
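Aside: Django's built-in `QuerySet.get_or_create` performs a similar IntegrityError-and-retry dance internally, so the explicit handler above mainly buys unambiguous control flow and one place to set all the workflow fields. Roughly the built-in equivalent (a sketch, assuming the same fields as the committed model):

# Sketch of the built-in alternative; `defaults` supplies the fields used
# only when the row actually has to be created.
workflow, created = StudentTrainingWorkflow.objects.get_or_create(
    submission_uuid=submission_uuid,
    defaults={
        'student_id': student_item['student_id'],
        'item_id': student_item['item_id'],
        'course_id': student_item['course_id'],
    }
)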
# -*- coding: utf-8 -*-
"""
Constants used as test data.
"""

STUDENT_ITEM = {
    'student_id': u'𝓽𝓮𝓼𝓽 𝓼𝓽𝓾𝓭𝓮𝓷𝓽',
    'item_id': u'𝖙𝖊𝖘𝖙 𝖎𝖙𝖊𝖒',
    'course_id': u'ՇєรՇ ς๏ยгรє',
    'item_type': u'openassessment'
}

ANSWER = u'ẗëṡẗ äṅṡẅëṛ'

RUBRIC_OPTIONS = [
    {
        "order_num": 0,
        "name": u"𝒑𝒐𝒐𝒓",
        "explanation": u"𝕻𝖔𝖔𝖗 𝖏𝖔𝖇!",
        "points": 0,
    },
    {
        "order_num": 1,
        "name": u"𝓰𝓸𝓸𝓭",
        "explanation": u"ﻭѻѻɗ ﻝѻ๒!",
        "points": 1,
    },
    {
        "order_num": 2,
        "name": u"єχ¢єℓℓєηт",
        "explanation": u"乇メc乇レレ乇刀イ フo乃!",
        "points": 2,
    },
]

RUBRIC = {
    'prompt': u"МоъЎ-ↁіск; оѓ, ГЂэ ЩЂаlэ",
    'criteria': [
        {
            "order_num": 0,
            "name": u"vøȼȺƀᵾłȺɍɏ",
            "prompt": u"Ħøw vȺɍɨɇđ ɨs ŧħɇ vøȼȺƀᵾłȺɍɏ?",
            "options": RUBRIC_OPTIONS
        },
        {
            "order_num": 1,
            "name": u"ﻭɼค๓๓คɼ",
            "prompt": u"𝕳𝖔𝖜 𝖈𝖔𝖗𝖗𝖊𝖈𝖙 𝖎𝖘 𝖙𝖍𝖊 𝖌𝖗𝖆𝖒𝖒𝖆𝖗?",
            "options": RUBRIC_OPTIONS
        }
    ]
}

EXAMPLES = [
    {
        'answer': (
            u"𝕿𝖍𝖊𝖗𝖊 𝖆𝖗𝖊 𝖈𝖊𝖗𝖙𝖆𝖎𝖓 𝖖𝖚𝖊𝖊𝖗 𝖙𝖎𝖒𝖊𝖘 𝖆𝖓𝖉 𝖔𝖈𝖈𝖆𝖘𝖎𝖔𝖓𝖘 𝖎𝖓 𝖙𝖍𝖎𝖘 𝖘𝖙𝖗𝖆𝖓𝖌𝖊 𝖒𝖎𝖝𝖊𝖉 𝖆𝖋𝖋𝖆𝖎𝖗 𝖜𝖊 𝖈𝖆𝖑𝖑 𝖑𝖎𝖋𝖊"
            u" 𝖜𝖍𝖊𝖓 𝖆 𝖒𝖆𝖓 𝖙𝖆𝖐𝖊𝖘 𝖙𝖍𝖎𝖘 𝖜𝖍𝖔𝖑𝖊 𝖚𝖓𝖎𝖛𝖊𝖗𝖘𝖊 𝖋𝖔𝖗 𝖆 𝖛𝖆𝖘𝖙 𝖕𝖗𝖆𝖈𝖙𝖎𝖈𝖆𝖑 𝖏𝖔𝖐𝖊, 𝖙𝖍𝖔𝖚𝖌𝖍 𝖙𝖍𝖊 𝖜𝖎𝖙 𝖙𝖍𝖊𝖗𝖊𝖔𝖋"
            u" 𝖍𝖊 𝖇𝖚𝖙 𝖉𝖎𝖒𝖑𝖞 𝖉𝖎𝖘𝖈𝖊𝖗𝖓𝖘, 𝖆𝖓𝖉 𝖒𝖔𝖗𝖊 𝖙𝖍𝖆𝖓 𝖘𝖚𝖘𝖕𝖊𝖈𝖙𝖘 𝖙𝖍𝖆𝖙 𝖙𝖍𝖊 𝖏𝖔𝖐𝖊 𝖎𝖘 𝖆𝖙 𝖓𝖔𝖇𝖔𝖉𝖞'𝖘 𝖊𝖝𝖕𝖊𝖓𝖘𝖊 𝖇𝖚𝖙 𝖍𝖎𝖘 𝖔𝖜𝖓."
        ),
        'options_selected': {
            u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
            u"ﻭɼค๓๓คɼ": u"𝒑𝒐𝒐𝒓",
        }
    },
    {
        'answer': u"Tőṕ-héávӳ ẃáś thé śhíṕ áś á díńńéŕĺéśś śtúdéńt ẃíth áĺĺ Áŕíśtőtĺé íń híś héád.",
        'options_selected': {
            u"vøȼȺƀᵾłȺɍɏ": u"𝒑𝒐𝒐𝒓",
            u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
        }
    },
]
@@ -3,10 +3,14 @@
Tests for assessment models.
"""
+import mock
+from django.db import IntegrityError
from openassessment.test_utils import CacheResetTest
+from submissions import api as sub_api
from openassessment.assessment.models import (
    Rubric, Criterion, CriterionOption, InvalidOptionSelection,
    AssessmentFeedback, AssessmentFeedbackOption,
+    StudentTrainingWorkflow
)
......
@@ -7,6 +7,7 @@ from django.db import DatabaseError
import ddt
from mock import patch
from openassessment.test_utils import CacheResetTest
+from .constants import STUDENT_ITEM, ANSWER, RUBRIC, EXAMPLES
from submissions import api as sub_api
from openassessment.assessment.api import student_training as training_api
from openassessment.assessment.errors import StudentTrainingRequestError, StudentTrainingInternalError
@@ -20,80 +21,11 @@ class StudentTrainingAssessmentTest(CacheResetTest):
    """
    longMessage = True

-    STUDENT_ITEM = {
-        'student_id': u'𝓽𝓮𝓼𝓽 𝓼𝓽𝓾𝓭𝓮𝓷𝓽',
-        'item_id': u'𝖙𝖊𝖘𝖙 𝖎𝖙𝖊𝖒',
-        'course_id': u'ՇєรՇ ς๏ยгรє',
-        'item_type': u'openassessment'
-    }
-
-    ANSWER = u'ẗëṡẗ äṅṡẅëṛ'
-
-    RUBRIC_OPTIONS = [
-        {
-            "order_num": 0,
-            "name": u"𝒑𝒐𝒐𝒓",
-            "explanation": u"𝕻𝖔𝖔𝖗 𝖏𝖔𝖇!",
-            "points": 0,
-        },
-        {
-            "order_num": 1,
-            "name": u"𝓰𝓸𝓸𝓭",
-            "explanation": u"ﻭѻѻɗ ﻝѻ๒!",
-            "points": 1,
-        },
-        {
-            "order_num": 2,
-            "name": u"єχ¢єℓℓєηт",
-            "explanation": u"乇メc乇レレ乇刀イ フo乃!",
-            "points": 2,
-        },
-    ]
-
-    RUBRIC = {
-        'prompt': u"МоъЎ-ↁіск; оѓ, ГЂэ ЩЂаlэ",
-        'criteria': [
-            {
-                "order_num": 0,
-                "name": u"vøȼȺƀᵾłȺɍɏ",
-                "prompt": u"Ħøw vȺɍɨɇđ ɨs ŧħɇ vøȼȺƀᵾłȺɍɏ?",
-                "options": RUBRIC_OPTIONS
-            },
-            {
-                "order_num": 1,
-                "name": u"ﻭɼค๓๓คɼ",
-                "prompt": u"𝕳𝖔𝖜 𝖈𝖔𝖗𝖗𝖊𝖈𝖙 𝖎𝖘 𝖙𝖍𝖊 𝖌𝖗𝖆𝖒𝖒𝖆𝖗?",
-                "options": RUBRIC_OPTIONS
-            }
-        ]
-    }
-
-    EXAMPLES = [
-        {
-            'answer': (
-                u"𝕿𝖍𝖊𝖗𝖊 𝖆𝖗𝖊 𝖈𝖊𝖗𝖙𝖆𝖎𝖓 𝖖𝖚𝖊𝖊𝖗 𝖙𝖎𝖒𝖊𝖘 𝖆𝖓𝖉 𝖔𝖈𝖈𝖆𝖘𝖎𝖔𝖓𝖘 𝖎𝖓 𝖙𝖍𝖎𝖘 𝖘𝖙𝖗𝖆𝖓𝖌𝖊 𝖒𝖎𝖝𝖊𝖉 𝖆𝖋𝖋𝖆𝖎𝖗 𝖜𝖊 𝖈𝖆𝖑𝖑 𝖑𝖎𝖋𝖊"
-                u" 𝖜𝖍𝖊𝖓 𝖆 𝖒𝖆𝖓 𝖙𝖆𝖐𝖊𝖘 𝖙𝖍𝖎𝖘 𝖜𝖍𝖔𝖑𝖊 𝖚𝖓𝖎𝖛𝖊𝖗𝖘𝖊 𝖋𝖔𝖗 𝖆 𝖛𝖆𝖘𝖙 𝖕𝖗𝖆𝖈𝖙𝖎𝖈𝖆𝖑 𝖏𝖔𝖐𝖊, 𝖙𝖍𝖔𝖚𝖌𝖍 𝖙𝖍𝖊 𝖜𝖎𝖙 𝖙𝖍𝖊𝖗𝖊𝖔𝖋"
-                u" 𝖍𝖊 𝖇𝖚𝖙 𝖉𝖎𝖒𝖑𝖞 𝖉𝖎𝖘𝖈𝖊𝖗𝖓𝖘, 𝖆𝖓𝖉 𝖒𝖔𝖗𝖊 𝖙𝖍𝖆𝖓 𝖘𝖚𝖘𝖕𝖊𝖈𝖙𝖘 𝖙𝖍𝖆𝖙 𝖙𝖍𝖊 𝖏𝖔𝖐𝖊 𝖎𝖘 𝖆𝖙 𝖓𝖔𝖇𝖔𝖉𝖞'𝖘 𝖊𝖝𝖕𝖊𝖓𝖘𝖊 𝖇𝖚𝖙 𝖍𝖎𝖘 𝖔𝖜𝖓."
-            ),
-            'options_selected': {
-                u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
-                u"ﻭɼค๓๓คɼ": u"𝒑𝒐𝒐𝒓",
-            }
-        },
-        {
-            'answer': u"Tőṕ-héávӳ ẃáś thé śhíṕ áś á díńńéŕĺéśś śtúdéńt ẃíth áĺĺ Áŕíśtőtĺé íń híś héád.",
-            'options_selected': {
-                u"vøȼȺƀᵾłȺɍɏ": u"𝒑𝒐𝒐𝒓",
-                u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
-            }
-        },
-    ]
-
    def setUp(self):
        """
        Create a submission.
        """
-        submission = sub_api.create_submission(self.STUDENT_ITEM, self.ANSWER)
+        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
        self.submission_uuid = submission['uuid']

    def test_training_workflow(self):
@@ -101,18 +33,18 @@ class StudentTrainingAssessmentTest(CacheResetTest):
        self._assert_workflow_status(self.submission_uuid, 0, 2)

        # Get a training example
-        self._assert_get_example(self.submission_uuid, 0, self.EXAMPLES, self.RUBRIC)
+        self._assert_get_example(self.submission_uuid, 0, EXAMPLES, RUBRIC)

        # Assess the training example the same way the instructor did
        corrections = training_api.assess_training_example(
            self.submission_uuid,
-            self.EXAMPLES[0]['options_selected']
+            EXAMPLES[0]['options_selected']
        )
        self.assertEqual(corrections, dict())
        self._assert_workflow_status(self.submission_uuid, 1, 2)

        # Get another training example to assess
-        self._assert_get_example(self.submission_uuid, 1, self.EXAMPLES, self.RUBRIC)
+        self._assert_get_example(self.submission_uuid, 1, EXAMPLES, RUBRIC)

        # Give the example different scores than the instructor gave
        incorrect_assessment = {
@@ -124,12 +56,12 @@ class StudentTrainingAssessmentTest(CacheResetTest):
        )

        # Expect that we get corrected and stay on the current example
-        self.assertItemsEqual(corrections, self.EXAMPLES[1]['options_selected'])
+        self.assertItemsEqual(corrections, EXAMPLES[1]['options_selected'])
        self._assert_workflow_status(self.submission_uuid, 1, 2)

        # Try again, and this time assess the same way as the instructor
        corrections = training_api.assess_training_example(
-            self.submission_uuid, self.EXAMPLES[1]['options_selected']
+            self.submission_uuid, EXAMPLES[1]['options_selected']
        )
        self.assertEqual(corrections, dict())
@@ -139,10 +71,10 @@ class StudentTrainingAssessmentTest(CacheResetTest):
    def test_assess_without_update(self):
        # Assess the first training example the same way the instructor did
        # but do NOT update the workflow
-        training_api.get_training_example(self.submission_uuid, self.RUBRIC, self.EXAMPLES)
+        training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
        corrections = training_api.assess_training_example(
            self.submission_uuid,
-            self.EXAMPLES[0]['options_selected'],
+            EXAMPLES[0]['options_selected'],
            update_workflow=False
        )
@@ -152,11 +84,11 @@ class StudentTrainingAssessmentTest(CacheResetTest):
    def test_get_same_example(self):
        # Retrieve a training example
-        retrieved = training_api.get_training_example(self.submission_uuid, self.RUBRIC, self.EXAMPLES)
+        retrieved = training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)

        # If we retrieve an example without completing the current example,
        # we should get the same one.
-        next_retrieved = training_api.get_training_example(self.submission_uuid, self.RUBRIC, self.EXAMPLES)
+        next_retrieved = training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
        self.assertEqual(retrieved, next_retrieved)

    def test_get_training_example_num_queries(self):
@@ -164,33 +96,33 @@ class StudentTrainingAssessmentTest(CacheResetTest):
        # Run through the training example once using a different submission
        # Training examples and rubrics will be cached and shared for other
        # students working on the same problem.
-        self._warm_cache(self.RUBRIC, self.EXAMPLES)
+        self._warm_cache(RUBRIC, EXAMPLES)

        # First training example
        # This will need to create the student training workflow and the first item
        # NOTE: we *could* cache the rubric model to reduce the number of queries here,
        # but we're selecting it by content hash, which is indexed and should be plenty fast.
        with self.assertNumQueries(6):
-            training_api.get_training_example(self.submission_uuid, self.RUBRIC, self.EXAMPLES)
+            training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)

        # Without assessing the first training example, try to retrieve a training example.
        # This should return the same example as before, so we won't need to create
        # any workflows or workflow items.
        with self.assertNumQueries(3):
-            training_api.get_training_example(self.submission_uuid, self.RUBRIC, self.EXAMPLES)
+            training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)

        # Assess the current training example
-        training_api.assess_training_example(self.submission_uuid, self.EXAMPLES[0]['options_selected'])
+        training_api.assess_training_example(self.submission_uuid, EXAMPLES[0]['options_selected'])

        # Retrieve the next training example, which requires us to create
        # a new workflow item (but not a new workflow).
        with self.assertNumQueries(4):
-            training_api.get_training_example(self.submission_uuid, self.RUBRIC, self.EXAMPLES)
+            training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)

    def test_submitter_is_finished_num_queries(self):
        # Complete the first training example
-        training_api.get_training_example(self.submission_uuid, self.RUBRIC, self.EXAMPLES)
-        training_api.assess_training_example(self.submission_uuid, self.EXAMPLES[0]['options_selected'])
+        training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
+        training_api.assess_training_example(self.submission_uuid, EXAMPLES[0]['options_selected'])

        # Check whether we've completed the requirements
        requirements = {'num_required': 2}
@@ -199,8 +131,8 @@ class StudentTrainingAssessmentTest(CacheResetTest):
    def test_get_num_completed_num_queries(self):
        # Complete the first training example
-        training_api.get_training_example(self.submission_uuid, self.RUBRIC, self.EXAMPLES)
-        training_api.assess_training_example(self.submission_uuid, self.EXAMPLES[0]['options_selected'])
+        training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
+        training_api.assess_training_example(self.submission_uuid, EXAMPLES[0]['options_selected'])

        # Check the number completed
        with self.assertNumQueries(2):
@@ -208,10 +140,10 @@ class StudentTrainingAssessmentTest(CacheResetTest):
    def test_assess_training_example_num_queries(self):
        # Populate the cache with training examples and rubrics
-        self._warm_cache(self.RUBRIC, self.EXAMPLES)
-        training_api.get_training_example(self.submission_uuid, self.RUBRIC, self.EXAMPLES)
+        self._warm_cache(RUBRIC, EXAMPLES)
+        training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)

        with self.assertNumQueries(4):
-            training_api.assess_training_example(self.submission_uuid, self.EXAMPLES[0]['options_selected'])
+            training_api.assess_training_example(self.submission_uuid, EXAMPLES[0]['options_selected'])

    @ddt.file_data('data/validate_training_examples.json')
    def test_validate_training_examples(self, data):
@@ -230,8 +162,8 @@ class StudentTrainingAssessmentTest(CacheResetTest):
        self.assertTrue(training_api.assessment_is_finished(self.submission_uuid, requirements))

    def test_get_training_example_none_available(self):
-        for example in self.EXAMPLES:
-            training_api.get_training_example(self.submission_uuid, self.RUBRIC, self.EXAMPLES)
+        for example in EXAMPLES:
+            training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
            training_api.assess_training_example(self.submission_uuid, example['options_selected'])

        # Now we should be complete
@@ -239,19 +171,19 @@ class StudentTrainingAssessmentTest(CacheResetTest):
        # ... and if we try to get another example, we should get None
        self.assertIs(
-            training_api.get_training_example(self.submission_uuid, self.RUBRIC, self.EXAMPLES),
+            training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES),
            None
        )

    def test_assess_training_example_completed_workflow(self):
-        for example in self.EXAMPLES:
-            training_api.get_training_example(self.submission_uuid, self.RUBRIC, self.EXAMPLES)
+        for example in EXAMPLES:
+            training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
            training_api.assess_training_example(self.submission_uuid, example['options_selected'])

        # Try to assess again, and expect an error
        with self.assertRaises(StudentTrainingRequestError):
            training_api.assess_training_example(
-                self.submission_uuid, self.EXAMPLES[0]['options_selected']
+                self.submission_uuid, EXAMPLES[0]['options_selected']
            )

    def test_assess_training_example_no_workflow(self):
@@ -260,7 +192,7 @@ class StudentTrainingAssessmentTest(CacheResetTest):
        # then we should get a request error.
        with self.assertRaises(StudentTrainingRequestError):
            training_api.assess_training_example(
-                self.submission_uuid, self.EXAMPLES[0]['options_selected']
+                self.submission_uuid, EXAMPLES[0]['options_selected']
            )

    def test_get_num_completed_no_workflow(self):
@@ -269,15 +201,15 @@ class StudentTrainingAssessmentTest(CacheResetTest):
    def test_get_training_example_invalid_rubric(self):
        # Rubric is missing a very important key!
-        invalid_rubric = copy.deepcopy(self.RUBRIC)
+        invalid_rubric = copy.deepcopy(RUBRIC)
        del invalid_rubric['criteria']

        with self.assertRaises(StudentTrainingRequestError):
-            training_api.get_training_example(self.submission_uuid, invalid_rubric, self.EXAMPLES)
+            training_api.get_training_example(self.submission_uuid, invalid_rubric, EXAMPLES)

    def test_get_training_example_no_submission(self):
        with self.assertRaises(StudentTrainingRequestError):
-            training_api.get_training_example("no_such_submission", self.RUBRIC, self.EXAMPLES)
+            training_api.get_training_example("no_such_submission", RUBRIC, EXAMPLES)

    @patch.object(StudentTrainingWorkflow.objects, 'get')
    def test_get_num_completed_database_error(self, mock_db):
@@ -289,14 +221,14 @@ class StudentTrainingAssessmentTest(CacheResetTest):
    def test_get_training_example_database_error(self, mock_db):
        mock_db.side_effect = DatabaseError("Kaboom!")
        with self.assertRaises(StudentTrainingInternalError):
-            training_api.get_training_example(self.submission_uuid, self.RUBRIC, self.EXAMPLES)
+            training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)

    @patch.object(StudentTrainingWorkflow.objects, 'get')
    def test_assess_training_example_database_error(self, mock_db):
-        training_api.get_training_example(self.submission_uuid, self.RUBRIC, self.EXAMPLES)
+        training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
        mock_db.side_effect = DatabaseError("Kaboom!")
        with self.assertRaises(StudentTrainingInternalError):
-            training_api.assess_training_example(self.submission_uuid, self.EXAMPLES[0]['options_selected'])
+            training_api.assess_training_example(self.submission_uuid, EXAMPLES[0]['options_selected'])

    @ddt.data({}, {'num_required': 'not an integer!'})
    def test_submitter_is_finished_invalid_requirements(self, requirements):
@@ -388,7 +320,7 @@ class StudentTrainingAssessmentTest(CacheResetTest):
            None
        """
-        pre_submission = sub_api.create_submission(self.STUDENT_ITEM, self.ANSWER)
+        pre_submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
        for example in examples:
            training_api.get_training_example(pre_submission['uuid'], rubric, examples)
            training_api.assess_training_example(pre_submission['uuid'], example['options_selected'])
"""
Tests for student training models.
"""
import mock
from django.db import IntegrityError
from submissions import api as sub_api
from openassessment.test_utils import CacheResetTest
from openassessment.assessment.models import (
StudentTrainingWorkflow, StudentTrainingWorkflowItem
)
from .constants import STUDENT_ITEM, ANSWER, EXAMPLES
class StudentTrainingWorkflowTest(CacheResetTest):
"""
Tests for the student training workflow model.
"""
@mock.patch.object(StudentTrainingWorkflow.objects, 'get')
@mock.patch.object(StudentTrainingWorkflow.objects, 'create')
def test_create_workflow_integrity_error(self, mock_create, mock_get):
# Simulate a race condition in which someone creates a workflow
# after we check if it exists. This will violate the database uniqueness
# constraints, so we need to handle this case gracefully.
mock_create.side_effect = IntegrityError
# The first time we check, we should see that no workflow exists.
# The second time, we should get the workflow created by someone else
mock_workflow = mock.MagicMock(StudentTrainingWorkflow)
mock_get.side_effect = [
StudentTrainingWorkflow.DoesNotExist,
mock_workflow
]
# Expect that we retry and retrieve the workflow that someone else created
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
workflow = StudentTrainingWorkflow.get_or_create_workflow(submission['uuid'])
self.assertEqual(workflow, mock_workflow)
@mock.patch.object(StudentTrainingWorkflowItem.objects, 'get')
@mock.patch.object(StudentTrainingWorkflowItem.objects, 'create')
def test_create_workflow_item_integrity_error(self, mock_create, mock_get):
# Create a submission and workflow
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
workflow = StudentTrainingWorkflow.get_or_create_workflow(submission['uuid'])
# Simulate a race condition in which someone creates a workflow item
# after we check if it exists.
mock_workflow_item = mock.MagicMock(StudentTrainingWorkflowItem)
mock_create.side_effect = IntegrityError
mock_get.return_value = mock_workflow_item
# Expect that we retry and retrieve the workflow item created by someone else
self.assertEqual(workflow.next_training_example(EXAMPLES), mock_workflow_item.training_example)
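To spell out the race that test_create_workflow_integrity_error simulates with its two mocks, here is the interleaving as a timeline (an editorial illustration, with A and B standing for two processes handling the same submission_uuid):

# Interleaving simulated by the mocks above (illustration only):
#
#   A: Workflow.objects.get(...)     -> DoesNotExist   (mock_get, first call)
#   B: Workflow.objects.create(...)  -> succeeds       (the competing writer)
#   A: Workflow.objects.create(...)  -> IntegrityError (mock_create)
#   A: Workflow.objects.get(...)     -> B's row        (mock_get, second call)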