Commit 506ed2e4 by Will Daly

Cache the dictionary mapping criteria/points to option IDs

Reorganize assessment model tests.
parent c9909779
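
The core change below swaps a per-call database query for a cache lookup keyed on the rubric's content hash. A minimal sketch of the pattern, assuming Django's low-level cache API; `cached_points_dict` and `load_points_dict` are hypothetical stand-ins for illustration, not names from this commit:

# Sketch of the caching pattern this commit applies, assuming Django's
# low-level cache API. `load_points_dict` is a hypothetical stand-in for
# the real database query.
from django.core.cache import cache

def cached_points_dict(rubric):
    # Keying on the content hash means structurally identical rubrics share
    # one cache entry, and any edit to the rubric yields a fresh key.
    cache_key = "assessment.rubric_points_dict.{}".format(rubric.content_hash)
    points_dict = cache.get(cache_key)
    if not points_dict:
        points_dict = load_points_dict(rubric)  # hypothetical query helper
        cache.set(cache_key, points_dict)
    return points_dict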
@@ -182,17 +182,26 @@ class Rubric(models.Model):
             InvalidOptionSelection
         """
-        # This is a really inefficient initial implementation
-        # TODO -- refactor to add caching
-        rubric_options = CriterionOption.objects.filter(
-            criterion__rubric=self
-        ).select_related()
-        rubric_points_dict = defaultdict(dict)
-        for option in rubric_options:
-            if option.points not in rubric_points_dict[option.criterion.name]:
-                rubric_points_dict[option.criterion.name][option.points] = option.id
+        # Retrieve the mapping of criterion names/points to option IDs
+        # from the cache, if it's available
+        cache_key = "assessment.rubric_points_dict.{}".format(self.content_hash)
+        rubric_points_dict = cache.get(cache_key)
+
+        # Otherwise, create the dict by querying the database
+        if not rubric_points_dict:
+            rubric_options = CriterionOption.objects.filter(
+                criterion__rubric=self
+            ).select_related()
+            rubric_points_dict = defaultdict(dict)
+            for option in rubric_options:
+                if option.points not in rubric_points_dict[option.criterion.name]:
+                    rubric_points_dict[option.criterion.name][option.points] = option.id
+
+            # Store the dict in the cache
+            cache.set(cache_key, rubric_points_dict)
+
+        # Find the IDs for the options matching the specified point value
         option_id_set = set()
         for criterion_name, option_points in criterion_points.iteritems():
             if (criterion_name in rubric_points_dict and
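
Because the cache key embeds `self.content_hash`, there is no explicit invalidation step: editing a rubric changes its hash, so stale entries are never read again and simply age out of the backend. (Note that `cache.get` returns None on a miss; an empty dict is also falsy, so a rubric with no options would re-query on every call, which is harmless here.) A sketch of one plausible way such a content hash could be derived; the actual `content_hash` computation in this codebase may differ:

# Illustrative only: one plausible derivation of a rubric content hash.
import hashlib
import json

def rubric_content_hash(criteria):
    # `criteria` is assumed to be a list of dicts describing criterion names
    # and option point values; sorting keys makes the serialization stable.
    serialized = json.dumps(criteria, sort_keys=True)
    return hashlib.sha1(serialized).hexdigest()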
@@ -11,7 +11,7 @@ from nose.tools import raises
 from openassessment.test_utils import CacheResetTest
 from openassessment.assessment.api import peer as peer_api
 from openassessment.assessment.models import (
-    Assessment, AssessmentPart, AssessmentFeedback,
+    Assessment, AssessmentPart, AssessmentFeedback, AssessmentFeedbackOption,
     PeerWorkflow, PeerWorkflowItem
 )
 from openassessment.workflow import api as workflow_api
@@ -1177,3 +1177,142 @@ class TestPeerApi(CacheResetTest):
         peer_api.create_peer_workflow(submission["uuid"])
         workflow_api.create_workflow(submission["uuid"], STEPS)
         return submission, new_student_item
+
+
+class PeerWorkflowTest(CacheResetTest):
+    """
+    Tests for the peer workflow model.
+    """
+    STUDENT_ITEM = {
+        'student_id': 'test_student',
+        'course_id': 'test_course',
+        'item_type': 'openassessment',
+        'item_id': 'test_item'
+    }
+
+    OTHER_STUDENT = {
+        'student_id': 'test_student_2',
+        'course_id': 'test_course',
+        'item_type': 'openassessment',
+        'item_id': 'test_item'
+    }
+
+    def test_create_item_multiple_available(self):
+        # Bugfix TIM-572
+        submitter_sub = sub_api.create_submission(self.STUDENT_ITEM, 'test answer')
+        submitter_workflow = PeerWorkflow.objects.create(
+            student_id=self.STUDENT_ITEM['student_id'],
+            item_id=self.STUDENT_ITEM['item_id'],
+            course_id=self.STUDENT_ITEM['course_id'],
+            submission_uuid=submitter_sub['uuid']
+        )
+        scorer_sub = sub_api.create_submission(self.OTHER_STUDENT, 'test answer 2')
+        scorer_workflow = PeerWorkflow.objects.create(
+            student_id=self.OTHER_STUDENT['student_id'],
+            item_id=self.OTHER_STUDENT['item_id'],
+            course_id=self.OTHER_STUDENT['course_id'],
+            submission_uuid=scorer_sub['uuid']
+        )
+
+        for _ in range(2):
+            PeerWorkflowItem.objects.create(
+                scorer=scorer_workflow,
+                author=submitter_workflow,
+                submission_uuid=submitter_sub['uuid']
+            )
+
+        # This used to cause an error when `get_or_create` returned multiple workflow items
+        PeerWorkflow.create_item(scorer_workflow, submitter_sub['uuid'])
+
+
+class AssessmentFeedbackTest(CacheResetTest):
+    """
+    Tests for assessment feedback.
+    This is feedback that students give in response to the peer assessments they receive.
+    """
+
+    def setUp(self):
+        self.feedback = AssessmentFeedback.objects.create(
+            submission_uuid='test_submission',
+            feedback_text='test feedback',
+        )
+
+    def test_default_options(self):
+        self.assertEqual(self.feedback.options.count(), 0)
+
+    def test_add_options_all_new(self):
+        # We haven't created any feedback options yet, so these should be created.
+        self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
+
+        # Check the feedback options
+        options = self.feedback.options.all()
+        self.assertEqual(len(options), 2)
+        self.assertEqual(options[0].text, 'I liked my assessment')
+        self.assertEqual(options[1].text, 'I thought my assessment was unfair')
+
+    def test_add_options_some_new(self):
+        # Create one feedback option in the database
+        AssessmentFeedbackOption.objects.create(text='I liked my assessment')
+
+        # Add feedback options.  The one that's new should be created.
+        self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
+
+        # Check the feedback options
+        options = self.feedback.options.all()
+        self.assertEqual(len(options), 2)
+        self.assertEqual(options[0].text, 'I liked my assessment')
+        self.assertEqual(options[1].text, 'I thought my assessment was unfair')
+
+    def test_add_options_empty(self):
+        # No options
+        self.feedback.add_options([])
+        self.assertEqual(len(self.feedback.options.all()), 0)
+
+        # Add an option
+        self.feedback.add_options(['test'])
+        self.assertEqual(len(self.feedback.options.all()), 1)
+
+        # Add an empty list of options
+        self.feedback.add_options([])
+        self.assertEqual(len(self.feedback.options.all()), 1)
+
+    def test_add_options_duplicates(self):
+        # Add some options, which will be created
+        self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
+
+        # Add some more options, one of which is a duplicate
+        self.feedback.add_options(['I liked my assessment', 'I disliked my assessment'])
+
+        # There should be three options
+        options = self.feedback.options.all()
+        self.assertEqual(len(options), 3)
+        self.assertEqual(options[0].text, 'I liked my assessment')
+        self.assertEqual(options[1].text, 'I thought my assessment was unfair')
+        self.assertEqual(options[2].text, 'I disliked my assessment')
+
+        # There should be only three options in the database
+        self.assertEqual(AssessmentFeedbackOption.objects.count(), 3)
+
+    def test_add_options_all_old(self):
+        # Add some options, which will be created
+        self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
+
+        # Add some more options, all of which are duplicates
+        self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
+
+        # There should be two options
+        options = self.feedback.options.all()
+        self.assertEqual(len(options), 2)
+        self.assertEqual(options[0].text, 'I liked my assessment')
+        self.assertEqual(options[1].text, 'I thought my assessment was unfair')
+
+        # There should be two options in the database
+        self.assertEqual(AssessmentFeedbackOption.objects.count(), 2)
+
+    def test_unicode(self):
+        # Create options with unicode
+        self.feedback.add_options([u'𝓘 𝓵𝓲𝓴𝓮𝓭 𝓶𝔂 𝓪𝓼𝓼𝓮𝓼𝓼𝓶𝓮𝓷𝓽', u'ノ イんougんイ ᄊリ ム丂丂乇丂丂ᄊ乇刀イ wム丂 u刀キムノ尺'])
+
+        # There should be two options in the database
+        self.assertEqual(AssessmentFeedbackOption.objects.count(), 2)
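
The `test_create_item_multiple_available` regression test above (bugfix TIM-572) covers the case where duplicate `PeerWorkflowItem` rows already exist, which makes a bare `get_or_create` raise `MultipleObjectsReturned`. A common defensive pattern for this situation, shown as an illustrative sketch rather than the exact fix used in `PeerWorkflow.create_item`:

# Illustrative sketch of tolerating duplicate rows instead of relying on
# `get_or_create`; the actual implementation in this codebase may differ.
def create_item(scorer_workflow, submission_uuid):
    author_workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
    items = PeerWorkflowItem.objects.filter(
        scorer=scorer_workflow,
        author=author_workflow,
        submission_uuid=submission_uuid,
    ).order_by('pk')
    if items:
        return items[0]  # reuse the earliest duplicate instead of raising
    return PeerWorkflowItem.objects.create(
        scorer=scorer_workflow,
        author=author_workflow,
        submission_uuid=submission_uuid,
    )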
@@ -4,11 +4,8 @@ Tests for assessment models.
 """
 from openassessment.test_utils import CacheResetTest
-from submissions import api as sub_api
 from openassessment.assessment.models import (
-    Rubric, Criterion, CriterionOption, InvalidOptionSelection,
-    AssessmentFeedback, AssessmentFeedbackOption,
-    PeerWorkflow, PeerWorkflowItem
+    Rubric, Criterion, CriterionOption, InvalidOptionSelection
 )
@@ -108,141 +105,61 @@ class TestRubricOptionIds(CacheResetTest):
             "test criterion 3": "test option 1",
         })
 
-
-class AssessmentFeedbackTest(CacheResetTest):
-    """
-    Tests for assessment feedback.
-    This is feedback that students give in response to the peer assessments they receive.
-    """
-
-    def setUp(self):
-        self.feedback = AssessmentFeedback.objects.create(
-            submission_uuid='test_submission',
-            feedback_text='test feedback',
-        )
-
-    def test_default_options(self):
-        self.assertEqual(self.feedback.options.count(), 0)
-
-    def test_add_options_all_new(self):
-        # We haven't created any feedback options yet, so these should be created.
-        self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
-
-        # Check the feedback options
-        options = self.feedback.options.all()
-        self.assertEqual(len(options), 2)
-        self.assertEqual(options[0].text, 'I liked my assessment')
-        self.assertEqual(options[1].text, 'I thought my assessment was unfair')
-
-    def test_add_options_some_new(self):
-        # Create one feedback option in the database
-        AssessmentFeedbackOption.objects.create(text='I liked my assessment')
-
-        # Add feedback options.  The one that's new should be created.
-        self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
-
-        # Check the feedback options
-        options = self.feedback.options.all()
-        self.assertEqual(len(options), 2)
-        self.assertEqual(options[0].text, 'I liked my assessment')
-        self.assertEqual(options[1].text, 'I thought my assessment was unfair')
-
-    def test_add_options_empty(self):
-        # No options
-        self.feedback.add_options([])
-        self.assertEqual(len(self.feedback.options.all()), 0)
-
-        # Add an option
-        self.feedback.add_options(['test'])
-        self.assertEqual(len(self.feedback.options.all()), 1)
-
-        # Add an empty list of options
-        self.feedback.add_options([])
-        self.assertEqual(len(self.feedback.options.all()), 1)
-
-    def test_add_options_duplicates(self):
-        # Add some options, which will be created
-        self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
-
-        # Add some more options, one of which is a duplicate
-        self.feedback.add_options(['I liked my assessment', 'I disliked my assessment'])
-
-        # There should be three options
-        options = self.feedback.options.all()
-        self.assertEqual(len(options), 3)
-        self.assertEqual(options[0].text, 'I liked my assessment')
-        self.assertEqual(options[1].text, 'I thought my assessment was unfair')
-        self.assertEqual(options[2].text, 'I disliked my assessment')
-
-        # There should be only three options in the database
-        self.assertEqual(AssessmentFeedbackOption.objects.count(), 3)
-
-    def test_add_options_all_old(self):
-        # Add some options, which will be created
-        self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
-
-        # Add some more options, all of which are duplicates
-        self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
-
-        # There should be two options
-        options = self.feedback.options.all()
-        self.assertEqual(len(options), 2)
-        self.assertEqual(options[0].text, 'I liked my assessment')
-        self.assertEqual(options[1].text, 'I thought my assessment was unfair')
-
-        # There should be two options in the database
-        self.assertEqual(AssessmentFeedbackOption.objects.count(), 2)
-
-    def test_unicode(self):
-        # Create options with unicode
-        self.feedback.add_options([u'𝓘 𝓵𝓲𝓴𝓮𝓭 𝓶𝔂 𝓪𝓼𝓼𝓮𝓼𝓼𝓶𝓮𝓷𝓽', u'ノ イんougんイ ᄊリ ム丂丂乇丂丂ᄊ乇刀イ wム丂 u刀キムノ尺'])
-
-        # There should be two options in the database
-        self.assertEqual(AssessmentFeedbackOption.objects.count(), 2)
-
-
-class PeerWorkflowTest(CacheResetTest):
-    """
-    Tests for the peer workflow model.
-    """
-    STUDENT_ITEM = {
-        'student_id': 'test_student',
-        'course_id': 'test_course',
-        'item_type': 'openassessment',
-        'item_id': 'test_item'
-    }
-
-    OTHER_STUDENT = {
-        'student_id': 'test_student_2',
-        'course_id': 'test_course',
-        'item_type': 'openassessment',
-        'item_id': 'test_item'
-    }
-
-    def test_create_item_multiple_available(self):
-        # Bugfix TIM-572
-        submitter_sub = sub_api.create_submission(self.STUDENT_ITEM, 'test answer')
-        submitter_workflow = PeerWorkflow.objects.create(
-            student_id=self.STUDENT_ITEM['student_id'],
-            item_id=self.STUDENT_ITEM['item_id'],
-            course_id=self.STUDENT_ITEM['course_id'],
-            submission_uuid=submitter_sub['uuid']
-        )
-        scorer_sub = sub_api.create_submission(self.OTHER_STUDENT, 'test answer 2')
-        scorer_workflow = PeerWorkflow.objects.create(
-            student_id=self.OTHER_STUDENT['student_id'],
-            item_id=self.OTHER_STUDENT['item_id'],
-            course_id=self.OTHER_STUDENT['course_id'],
-            submission_uuid=scorer_sub['uuid']
-        )
-
-        for _ in range(2):
-            PeerWorkflowItem.objects.create(
-                scorer=scorer_workflow,
-                author=submitter_workflow,
-                submission_uuid=submitter_sub['uuid']
-            )
-
-        # This used to cause an error when `get_or_create` returned multiple workflow items
-        PeerWorkflow.create_item(scorer_workflow, submitter_sub['uuid'])
+    def test_options_ids_points(self):
+        options_ids = self.rubric.options_ids_for_points({
+            'test criterion 0': 0,
+            'test criterion 1': 1,
+            'test criterion 2': 2,
+            'test criterion 3': 1
+        })
+        self.assertEqual(options_ids, set([
+            self.options['test criterion 0'][0].id,
+            self.options['test criterion 1'][1].id,
+            self.options['test criterion 2'][2].id,
+            self.options['test criterion 3'][1].id
+        ]))
+
+    def test_options_ids_points_caching(self):
+        # First call: the dict is not cached
+        with self.assertNumQueries(1):
+            options_ids = self.rubric.options_ids_for_points({
+                'test criterion 0': 0,
+                'test criterion 1': 1,
+                'test criterion 2': 2,
+                'test criterion 3': 1
+            })
+
+        # Second call: the dict should be cached
+        with self.assertNumQueries(0):
+            options_ids = self.rubric.options_ids_for_points({
+                'test criterion 0': 1,
+                'test criterion 1': 2,
+                'test criterion 2': 1,
+                'test criterion 3': 0
+            })
+
+    def test_options_ids_first_of_duplicate_points(self):
+        # Change the first criterion options so that the second and third
+        # option have the same point value
+        self.options['test criterion 0'][1].points = 5
+        self.options['test criterion 0'][1].save()
+        self.options['test criterion 0'][2].points = 5
+        self.options['test criterion 0'][2].save()
+
+        # Should get the first option back
+        options_ids = self.rubric.options_ids_for_points({
+            'test criterion 0': 5,
+            'test criterion 1': 1,
+            'test criterion 2': 2,
+            'test criterion 3': 1
+        })
+        self.assertIn(self.options['test criterion 0'][1].id, options_ids)
+
+    def test_options_ids_points_invalid_selection(self):
+        with self.assertRaises(InvalidOptionSelection):
+            self.rubric.options_ids_for_points({
+                'test criterion 0': self.NUM_OPTIONS + 1,
+                'test criterion 1': 2,
+                'test criterion 2': 1,
+                'test criterion 3': 0
+            })
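
All the test classes touched here inherit from `CacheResetTest`, which keeps the `assertNumQueries` assertions in `test_options_ids_points_caching` deterministic by clearing cached rubric data between tests. A plausible sketch of such a helper; the real one lives in `openassessment.test_utils` and may differ:

# Plausible sketch of a cache-resetting test base class; illustrative only.
from django.core.cache import cache
from django.test import TestCase

class CacheResetTest(TestCase):
    """Test case that clears the Django cache before and after each test."""

    def setUp(self):
        super(CacheResetTest, self).setUp()
        cache.clear()

    def tearDown(self):
        super(CacheResetTest, self).tearDown()
        cache.clear()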