Commit 9e6c4491 by Phil McGachey

[LTI Provider] Grade passback for non-leaf blocks.

This change allows graded assignments to be added to a campus LMS
regardless of the granularity at which the problem sits. Previously
a grade could only be returned if the usage ID for the problem itself
was specified in the LTI launch.

The code assumes that courses taking advantage of this functionality
are arranged in a hierarchy (with sections being parents to verticals,
and verticals being parents to problems). When a grading event occurs
it traverses the parent hierarchy to identify any previous graded LTI
launches for which the new scoring event should generate a grade
update. It then calculates and sends scores to each of those outcome
services.

Since grade calculation is an expensive operation, the code optimizes
the case where a problem has been added only once as a leaf unit. In
that case it is able to behave as before, just taking the grade from
the signal without having to calculate grades for the whole course.
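
In outline, the signal handler added in lti_provider/tasks.py dispatches
as follows (a condensed sketch with error handling omitted; the task
signatures and setting name match the diff below):

    def handle_score_changed(user_id, course_id, usage_id, earned, possible):
        course_key, usage_key = parse_course_and_usage_keys(course_id, usage_id)
        # Bump version numbers so stale delayed tasks can detect supersession.
        assignments = increment_assignment_versions(course_key, usage_key, user_id)
        for assignment in assignments:
            if assignment.usage_key == usage_key:
                # Leaf case: the launched block is the problem itself; pass
                # the signal's score straight through (no grade calculation).
                send_leaf_outcome.delay(assignment.id, earned, possible)
            else:
                # Composite case: an ancestor block was launched; defer the
                # expensive course-grade calculation and collapse bursts.
                send_composite_outcome.apply_async(
                    (user_id, course_id, assignment.id, assignment.version_number),
                    countdown=settings.LTI_AGGREGATE_SCORE_PASSBACK_DELAY,
                )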
parent 45832b98
......@@ -127,6 +127,51 @@ class MaxScoresCache(object):
return max_score
class ProgressSummary(object):
"""
Wrapper class for the computation of a user's scores across a course.
Attributes
chapters: a summary of all sections with problems in the course. It is
organized as an array of chapters, each containing an array of sections,
each containing an array of scores. This contains information for graded
and ungraded problems, and is good for displaying a course summary with
due dates, etc.
weighted_scores: a dictionary mapping module locations to weighted Score
objects.
locations_to_children: a dictionary mapping module locations to their
direct descendants.
"""
def __init__(self, chapters, weighted_scores, locations_to_children):
self.chapters = chapters
self.weighted_scores = weighted_scores
self.locations_to_children = locations_to_children
def score_for_module(self, location):
"""
Calculate the aggregate weighted score for any location in the course.
This method returns a tuple containing (earned_score, possible_score).
If the location is of 'problem' type, this method will return the
possible and earned scores for that problem. If the location refers to a
composite module (a vertical or section), the scores will be the sums of
all scored problems that are children of the chosen location.
"""
if location in self.weighted_scores:
score = self.weighted_scores[location]
return score.earned, score.possible
children = self.locations_to_children[location]
earned = 0.0
possible = 0.0
for child in children:
child_earned, child_possible = self.score_for_module(child)
earned += child_earned
possible += child_possible
return earned, possible
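# Worked example (illustrative only; 'problem_1', 'problem_2' and
# 'vertical_x' are hypothetical locations, and the Score tuples follow the
# (correct, total, graded, display_name, location) shape used below):
#
#   weighted = {problem_1: Score(2, 5, True, 'P1', problem_1),
#               problem_2: Score(3, 5, True, 'P2', problem_2)}
#   children = {vertical_x: [problem_1, problem_2]}
#   summary = ProgressSummary(None, weighted, children)
#   summary.score_for_module(problem_1)   # -> (2, 5): direct lookup
#   summary.score_for_module(vertical_x)  # -> (5.0, 10.0): summed leaves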
def descriptor_affects_grading(block_types_affecting_grading, descriptor):
"""
Returns True if the descriptor could have any impact on grading, else False.
......@@ -459,6 +504,21 @@ def progress_summary(student, request, course, field_data_cache=None, scores_cli
in case there are unanticipated errors.
"""
with manual_transaction():
progress = _progress_summary(student, request, course, field_data_cache, scores_client)
if progress:
return progress.chapters
else:
return None
@transaction.commit_manually
def get_weighted_scores(student, course, field_data_cache=None, scores_client=None):
"""
Uses the _progress_summary function to return a ProgressSummary object
containing details of a student's weighted scores for the course.
"""
with manual_transaction():
request = _get_mock_request(student)
return _progress_summary(student, request, course, field_data_cache, scores_client)
......@@ -509,6 +569,8 @@ def _progress_summary(student, request, course, field_data_cache=None, scores_cl
max_scores_cache.fetch_from_remote(field_data_cache.scorable_locations)
chapters = []
locations_to_children = defaultdict(list)
locations_to_weighted_scores = {}
# Don't include chapters that aren't displayable (e.g. due to error)
for chapter_module in course_module.get_display_items():
# Skip if the chapter is hidden
......@@ -516,7 +578,6 @@ def _progress_summary(student, request, course, field_data_cache=None, scores_cl
continue
sections = []
for section_module in chapter_module.get_display_items():
# Skip if the section is hidden
with manual_transaction():
......@@ -531,7 +592,7 @@ def _progress_summary(student, request, course, field_data_cache=None, scores_cl
for module_descriptor in yield_dynamic_descriptor_descendants(
section_module, student.id, module_creator
):
course_id = course.id
locations_to_children[module_descriptor.parent].append(module_descriptor.location)
(correct, total) = get_score(
student,
module_descriptor,
......@@ -543,16 +604,17 @@ def _progress_summary(student, request, course, field_data_cache=None, scores_cl
if correct is None and total is None:
continue
scores.append(
Score(
correct,
total,
graded,
module_descriptor.display_name_with_default,
module_descriptor.location
)
weighted_location_score = Score(
correct,
total,
graded,
module_descriptor.display_name_with_default,
module_descriptor.location
)
scores.append(weighted_location_score)
locations_to_weighted_scores[module_descriptor.location] = weighted_location_score
scores.reverse()
section_total, _ = graders.aggregate_scores(
scores, section_module.display_name_with_default)
......@@ -577,7 +639,7 @@ def _progress_summary(student, request, course, field_data_cache=None, scores_cl
max_scores_cache.push_to_remote()
return chapters
return ProgressSummary(chapters, locations_to_weighted_scores, locations_to_children)
def weighted_score(raw_correct, raw_total, weight):
......@@ -705,15 +767,10 @@ def iterate_grades_for(course_or_id, students, keep_raw_scores=False):
else:
course = course_or_id
# We make a fake request because grading code expects to be able to look at
# the request. We have to attach the correct user to the request before
# grading that student.
request = RequestFactory().get('/')
for student in students:
with dog_stats_api.timer('lms.grades.iterate_grades_for', tags=[u'action:{}'.format(course.id)]):
try:
request.user = student
request = _get_mock_request(student)
# Grading calls problem rendering, which calls masquerading,
# which checks session vars -- thus the empty session dict below.
# It's not pretty, but untangling that is currently beyond the
......@@ -732,3 +789,14 @@ def iterate_grades_for(course_or_id, students, keep_raw_scores=False):
exc.message
)
yield student, {}, exc.message
def _get_mock_request(student):
"""
Make a fake request because grading code expects to be able to look at
the request. We have to attach the correct user to the request before
grading that student.
"""
request = RequestFactory().get('/')
request.user = student
return request
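# Hypothetical usage of the new get_weighted_scores API ('student' and
# 'course' are assumed to come from the ORM and the modulestore):
#
#   progress = get_weighted_scores(student, course)
#   earned, possible = progress.score_for_module(some_usage_key)
#
# Unlike progress_summary(), which returns only the chapter list, this
# returns the full ProgressSummary so any block's aggregate can be queried.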
......@@ -2,13 +2,15 @@
Test grade calculation.
"""
from django.http import Http404
from django.test import TestCase
from django.test.client import RequestFactory
from mock import patch
from mock import patch, MagicMock
from nose.plugins.attrib import attr
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import CourseLocator, BlockUsageLocator
from courseware.grades import field_data_cache_for_grading, grade, iterate_grades_for, MaxScoresCache
from courseware.grades import field_data_cache_for_grading, grade, iterate_grades_for, MaxScoresCache, ProgressSummary
from student.tests.factories import UserFactory
from student.models import CourseEnrollment
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
......@@ -194,3 +196,125 @@ class TestFieldDataCacheScorableLocations(ModuleStoreTestCase):
self.assertNotIn('html', block_types)
self.assertNotIn('discussion', block_types)
self.assertIn('problem', block_types)
class TestProgressSummary(TestCase):
"""
Test the method that calculates the score for a given block based on the
cumulative scores of its children. This test class uses a hard-coded block
hierarchy with scores as follows:
                                a
                    +-----------+-----------+
                    b                       c
     +--------------+-------------+         |
     d              e             f         g
  +--+--+     +-----+-----+       |         |
  h     i     j     k     l       m         n
(2/5) (3/5) (0/1)   -   (1/3)     -       (3/10)
"""
def setUp(self):
super(TestProgressSummary, self).setUp()
self.course_key = CourseLocator(
org='some_org',
course='some_course',
run='some_run'
)
self.loc_a = self.create_location('chapter', 'a')
self.loc_b = self.create_location('section', 'b')
self.loc_c = self.create_location('section', 'c')
self.loc_d = self.create_location('vertical', 'd')
self.loc_e = self.create_location('vertical', 'e')
self.loc_f = self.create_location('vertical', 'f')
self.loc_g = self.create_location('vertical', 'g')
self.loc_h = self.create_location('problem', 'h')
self.loc_i = self.create_location('problem', 'i')
self.loc_j = self.create_location('problem', 'j')
self.loc_k = self.create_location('html', 'k')
self.loc_l = self.create_location('problem', 'l')
self.loc_m = self.create_location('html', 'm')
self.loc_n = self.create_location('problem', 'n')
weighted_scores = {
self.loc_h: self.create_score(2, 5),
self.loc_i: self.create_score(3, 5),
self.loc_j: self.create_score(0, 1),
self.loc_l: self.create_score(1, 3),
self.loc_n: self.create_score(3, 10),
}
locations_to_scored_children = {
self.loc_a: [self.loc_h, self.loc_i, self.loc_j, self.loc_l, self.loc_n],
self.loc_b: [self.loc_h, self.loc_i, self.loc_j, self.loc_l],
self.loc_c: [self.loc_n],
self.loc_d: [self.loc_h, self.loc_i],
self.loc_e: [self.loc_j, self.loc_l],
self.loc_f: [],
self.loc_g: [self.loc_n],
self.loc_k: [],
self.loc_m: [],
}
self.progress_summary = ProgressSummary(
None, weighted_scores, locations_to_scored_children
)
def create_score(self, earned, possible):
"""
Create a new mock Score object with specified earned and possible values
"""
score = MagicMock()
score.possible = possible
score.earned = earned
return score
def create_location(self, block_type, block_id):
"""
Create a new BlockUsageLocation with the given type and ID.
"""
return BlockUsageLocator(
course_key=self.course_key, block_type=block_type, block_id=block_id
)
def test_score_chapter(self):
earned, possible = self.progress_summary.score_for_module(self.loc_a)
self.assertEqual(earned, 9)
self.assertEqual(possible, 24)
def test_score_section_many_leaves(self):
earned, possible = self.progress_summary.score_for_module(self.loc_b)
self.assertEqual(earned, 6)
self.assertEqual(possible, 14)
def test_score_section_one_leaf(self):
earned, possible = self.progress_summary.score_for_module(self.loc_c)
self.assertEqual(earned, 3)
self.assertEqual(possible, 10)
def test_score_vertical_two_leaves(self):
earned, possible = self.progress_summary.score_for_module(self.loc_d)
self.assertEqual(earned, 5)
self.assertEqual(possible, 10)
def test_score_vertical_two_leaves_one_unscored(self):
earned, possible = self.progress_summary.score_for_module(self.loc_e)
self.assertEqual(earned, 1)
self.assertEqual(possible, 4)
def test_score_vertical_no_score(self):
earned, possible = self.progress_summary.score_for_module(self.loc_f)
self.assertEqual(earned, 0)
self.assertEqual(possible, 0)
def test_score_vertical_one_leaf(self):
earned, possible = self.progress_summary.score_for_module(self.loc_g)
self.assertEqual(earned, 3)
self.assertEqual(possible, 10)
def test_score_leaf(self):
earned, possible = self.progress_summary.score_for_module(self.loc_h)
self.assertEqual(earned, 2)
self.assertEqual(possible, 5)
def test_score_leaf_no_score(self):
earned, possible = self.progress_summary.score_for_module(self.loc_m)
self.assertEqual(earned, 0)
self.assertEqual(possible, 0)
......@@ -140,6 +140,10 @@ class TestSubmittingProblems(ModuleStoreTestCase, LoginEnrollmentTestCase, Probl
self.enroll(self.course)
self.student_user = User.objects.get(email=self.student)
self.factory = RequestFactory()
# Disable the score change signal to prevent other components from being pulled into tests.
signal_patch = patch('courseware.module_render.SCORE_CHANGED.send')
signal_patch.start()
self.addCleanup(signal_patch.stop)
def add_dropdown_to_section(self, section_location, name, num_inputs=2):
"""
......
......@@ -390,7 +390,10 @@ class TestInstructorEnrollmentStudentModule(ModuleStoreTestCase):
module_state_key=msk
).count(), 0)
def test_delete_submission_scores(self):
# Disable the score change signal to prevent other components from being
# pulled into tests.
@mock.patch('courseware.module_render.SCORE_CHANGED.send')
def test_delete_submission_scores(self, _lti_mock):
user = UserFactory()
problem_location = self.course_key.make_usage_key('dummy', 'module')
......
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name, missing-docstring, unused-argument, unused-import, line-too-long
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'GradedAssignment.version_number'
db.add_column('lti_provider_gradedassignment', 'version_number',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'GradedAssignment.version_number'
db.delete_column('lti_provider_gradedassignment', 'version_number')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lti_provider.gradedassignment': {
'Meta': {'unique_together': "(('outcome_service', 'lis_result_sourcedid'),)", 'object_name': 'GradedAssignment'},
'course_key': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lis_result_sourcedid': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'outcome_service': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lti_provider.OutcomeService']"}),
'usage_key': ('xmodule_django.models.UsageKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'version_number': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'lti_provider.lticonsumer': {
'Meta': {'object_name': 'LtiConsumer'},
'consumer_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'consumer_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'consumer_secret': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_guid': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True'})
},
'lti_provider.ltiuser': {
'Meta': {'unique_together': "(('lti_consumer', 'lti_user_id'),)", 'object_name': 'LtiUser'},
'edx_user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lti_consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lti_provider.LtiConsumer']"}),
'lti_user_id': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'lti_provider.outcomeservice': {
'Meta': {'object_name': 'OutcomeService'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lis_outcome_service_url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'lti_consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lti_provider.LtiConsumer']"})
}
}
complete_apps = ['lti_provider']
......@@ -112,6 +112,7 @@ class GradedAssignment(models.Model):
usage_key = UsageKeyField(max_length=255, db_index=True)
outcome_service = models.ForeignKey(OutcomeService)
lis_result_sourcedid = models.CharField(max_length=255, db_index=True)
version_number = models.IntegerField(default=0)
class Meta(object):
"""
......
......@@ -7,6 +7,7 @@ import logging
from lxml import etree
from lxml.builder import ElementMaker
import requests
from requests.exceptions import RequestException
import requests_oauthlib
import uuid
......@@ -95,6 +96,61 @@ def generate_replace_result_xml(result_sourcedid, score):
return etree.tostring(xml, xml_declaration=True, encoding='UTF-8')
def get_assignments_for_problem(problem_descriptor, user_id, course_key):
"""
Trace the parent hierarchy from a given problem to find all blocks that
correspond to graded assignment launches for this user. A problem may
show up multiple times for a given user; the problem could be embedded in
multiple courses (or multiple times in the same course), or the block could
be embedded more than once at different granularities (as an individual
problem and as a problem in a vertical, for example).
Returns a list of GradedAssignment objects that are associated with the
given descriptor for the current user.
"""
locations = []
current_descriptor = problem_descriptor
while current_descriptor:
locations.append(current_descriptor.location)
current_descriptor = current_descriptor.get_parent()
assignments = GradedAssignment.objects.filter(
user=user_id, course_key=course_key, usage_key__in=locations
)
return assignments
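# Illustrative call (hypothetical objects): for a problem nested in a graded
# vertical, the traversal above collects the problem's own location plus
# every ancestor location, so launches recorded at any granularity match.
#
#   assignments = get_assignments_for_problem(
#       problem_descriptor, user.id, course_key
#   )
#   # -> GradedAssignment rows whose usage_key is the problem itself, its
#   #    vertical, its section, or any other ancestor block.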
def send_score_update(assignment, score):
"""
Create and send the XML message to the campus LMS system to update the grade
for a single graded assignment.
"""
xml = generate_replace_result_xml(
assignment.lis_result_sourcedid, score
)
try:
response = sign_and_send_replace_result(assignment, xml)
except RequestException:
# failed to send result. 'response' is None, so more detail will be
# logged at the end of the method.
response = None
log.exception("Outcome Service: Error when sending result.")
# If something went wrong, make sure that we have a complete log record.
# That way we can manually fix things up on the campus system later if
# necessary.
if not (response and check_replace_result_response(response)):
log.error(
"Outcome Service: Failed to update score on LTI consumer. "
"User: %s, course: %s, usage: %s, score: %s, status: %s, body: %s",
assignment.user,
assignment.course_key,
assignment.usage_key,
score,
response,
response.text if response else 'Unknown'
)
def sign_and_send_replace_result(assignment, xml):
"""
Take the XML document generated in generate_replace_result_xml, and sign it
......
......@@ -2,15 +2,19 @@
Asynchronous tasks for the LTI provider app.
"""
from django.conf import settings
from django.contrib.auth.models import User
from django.dispatch import receiver
import logging
from requests.exceptions import RequestException
from courseware.grades import get_weighted_scores
from courseware.models import SCORE_CHANGED
from lms import CELERY_APP
from lti_provider.models import GradedAssignment
import lti_provider.outcomes
import lti_provider.outcomes as outcomes
from lti_provider.views import parse_course_and_usage_keys
from opaque_keys.edx.keys import CourseKey
from xmodule.modulestore.django import modulestore
log = logging.getLogger("edx.lti_provider")
......@@ -28,13 +32,18 @@ def score_changed_handler(sender, **kwargs): # pylint: disable=unused-argument
usage_id = kwargs.get('usage_id', None)
if None not in (points_earned, points_possible, user_id, course_id, usage_id):
send_outcome.delay(
points_possible,
points_earned,
user_id,
course_id,
usage_id
)
course_key, usage_key = parse_course_and_usage_keys(course_id, usage_id)
assignments = increment_assignment_versions(course_key, usage_key, user_id)
for assignment in assignments:
if assignment.usage_key == usage_key:
send_leaf_outcome.delay(
assignment.id, points_earned, points_possible
)
else:
send_composite_outcome.apply_async(
(user_id, course_id, assignment.id, assignment.version_number),
countdown=settings.LTI_AGGREGATE_SCORE_PASSBACK_DELAY
)
else:
log.error(
"Outcome Service: Required signal parameter is None. "
......@@ -44,55 +53,86 @@ def score_changed_handler(sender, **kwargs): # pylint: disable=unused-argument
)
@CELERY_APP.task
def send_outcome(points_possible, points_earned, user_id, course_id, usage_id):
def increment_assignment_versions(course_key, usage_key, user_id):
"""
Calculate the score for a given user in a problem and send it to the
appropriate LTI consumer's outcome service.
Update the version numbers for all assignments that are affected by a score
change event. Returns a list of all affected assignments.
"""
course_key, usage_key = parse_course_and_usage_keys(course_id, usage_id)
assignments = GradedAssignment.objects.filter(
user=user_id, course_key=course_key, usage_key=usage_key
problem_descriptor = modulestore().get_item(usage_key)
# Get all assignments involving the current problem for which the campus LMS
# is expecting a grade. There may be many possible graded assignments, if
# a problem has been added several times to a course at different
# granularities (such as the unit or the vertical).
assignments = outcomes.get_assignments_for_problem(
problem_descriptor, user_id, course_key
)
# Calculate the user's score, on a scale of 0.0 - 1.0.
score = float(points_earned) / float(points_possible)
# There may be zero or more assignment records. We would expect there to
# be zero if the user/course/usage combination does not relate to a
# previous graded LTI launch. This can happen if an LTI consumer embeds some
# gradable content in a context that doesn't require a score (maybe by
# including an exercise as a sample that students may complete but that
# doesn't count towards their grade).
# There could be more than one GradedAssignment record if the same content
# is embedded more than once in a single course. This would be a strange
# course design on the consumer's part, but we handle it by sending update
# messages for all launches of the content.
for assignment in assignments:
xml = lti_provider.outcomes.generate_replace_result_xml(
assignment.lis_result_sourcedid, score
assignment.version_number += 1
assignment.save()
return assignments
@CELERY_APP.task
def send_composite_outcome(user_id, course_id, assignment_id, version):
"""
Calculate and transmit the score for a composite module (such as a
vertical).
A composite module may contain multiple problems, so we need to
calculate the total points earned and possible for all child problems. This
requires calculating the scores for the whole course, which is an expensive
operation.
Callers should be aware that the score calculation code accesses the latest
scores from the database. This can lead to a race condition between a view
that updates a user's score and the calculation of the grade. If the Celery
task attempts to read the score from the database before the view exits (and
its transaction is committed), it will see a stale value. Care should be
taken that this task is not triggered until the view exits.
The GradedAssignment model has a version_number field that is incremented
whenever the score is updated. It is used by this method for two purposes.
First, it allows the task to exit if it detects that it has been superseded
by another task that will transmit the score for the same assignment.
Second, it prevents a race condition where two tasks calculate different
scores for a single assignment, and may potentially update the campus LMS
in the wrong order.
"""
assignment = GradedAssignment.objects.get(id=assignment_id)
if version != assignment.version_number:
log.info(
"Score passback for GradedAssignment %s skipped. More recent score available.",
assignment.id
)
try:
response = lti_provider.outcomes.sign_and_send_replace_result(assignment, xml)
except RequestException:
# failed to send result. 'response' is None, so more detail will be
# logged at the end of the method.
response = None
log.exception("Outcome Service: Error when sending result.")
# If something went wrong, make sure that we have a complete log record.
# That way we can manually fix things up on the campus system later if
# necessary.
if not (response and lti_provider.outcomes.check_replace_result_response(response)):
log.error(
"Outcome Service: Failed to update score on LTI consumer. "
"User: %s, course: %s, usage: %s, score: %s, possible: %s "
"status: %s, body: %s",
user_id,
course_key,
usage_key,
points_earned,
points_possible,
response,
response.text if response else 'Unknown'
)
return
course_key = CourseKey.from_string(course_id)
mapped_usage_key = assignment.usage_key.map_into_course(course_key)
user = User.objects.get(id=user_id)
course = modulestore().get_course(course_key, depth=0)
progress_summary = get_weighted_scores(user, course)
earned, possible = progress_summary.score_for_module(mapped_usage_key)
if possible == 0:
weighted_score = 0
else:
weighted_score = float(earned) / float(possible)
assignment = GradedAssignment.objects.get(id=assignment_id)
if assignment.version_number == version:
outcomes.send_score_update(assignment, weighted_score)
@CELERY_APP.task
def send_leaf_outcome(assignment_id, points_earned, points_possible):
"""
Calculate and transmit the score for a single problem. This method assumes
that the individual problem was the source of a score update, and so it
directly takes the points earned and possible values. As such it does not
have to calculate the scores for the course, making this method far faster
than send_composite_outcome.
"""
assignment = GradedAssignment.objects.get(id=assignment_id)
if points_possible == 0:
weighted_score = 0
else:
weighted_score = float(points_earned) / float(points_possible)
outcomes.send_score_update(assignment, weighted_score)
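# A minimal sketch of the version-number guard pattern used above
# (standalone illustration, not part of this commit; compute_weighted_score
# is a hypothetical stand-in for the course-grade calculation):
#
#   def passback_if_current(assignment_id, version):
#       assignment = GradedAssignment.objects.get(id=assignment_id)
#       if version != assignment.version_number:
#           return  # superseded by a newer score event; skip this passback
#       weighted_score = compute_weighted_score(assignment)  # expensive step
#       # Re-fetch: the version may have moved on during the calculation.
#       assignment = GradedAssignment.objects.get(id=assignment_id)
#       if assignment.version_number == version:
#           outcomes.send_score_update(assignment, weighted_score)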
......@@ -9,8 +9,19 @@ from student.tests.factories import UserFactory
from lti_provider.models import GradedAssignment, LtiConsumer, OutcomeService
import lti_provider.outcomes as outcomes
import lti_provider.tasks as tasks
from opaque_keys.edx.locator import CourseLocator, BlockUsageLocator
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import ItemFactory, CourseFactory, check_mongo_calls
def create_score(earned, possible):
"""
Create a new mock Score object with specified earned and possible values
"""
score = MagicMock()
score.possible = possible
score.earned = earned
return score
class StoreOutcomeParametersTest(TestCase):
......@@ -177,81 +188,6 @@ class SignAndSendReplaceResultTest(TestCase):
self.assertEqual(response, 'response')
class SendOutcomeTest(TestCase):
"""
Tests for the send_outcome method in tasks.py
"""
def setUp(self):
super(SendOutcomeTest, self).setUp()
self.course_key = CourseLocator(
org='some_org',
course='some_course',
run='some_run'
)
self.usage_key = BlockUsageLocator(
course_key=self.course_key,
block_type='problem',
block_id='block_id'
)
self.user = UserFactory.create()
self.points_possible = 10
self.points_earned = 3
self.generate_xml_mock = self.setup_patch(
'lti_provider.outcomes.generate_replace_result_xml',
'replace result XML'
)
self.replace_result_mock = self.setup_patch(
'lti_provider.outcomes.sign_and_send_replace_result',
'replace result response'
)
self.check_result_mock = self.setup_patch(
'lti_provider.outcomes.check_replace_result_response',
True
)
consumer = LtiConsumer(
consumer_name='Lti Consumer Name',
consumer_key='consumer_key',
consumer_secret='consumer_secret',
instance_guid='tool_instance_guid'
)
consumer.save()
outcome = OutcomeService(
lis_outcome_service_url='http://example.com/service_url',
lti_consumer=consumer
)
outcome.save()
self.assignment = GradedAssignment(
user=self.user,
course_key=self.course_key,
usage_key=self.usage_key,
outcome_service=outcome,
lis_result_sourcedid='sourcedid',
)
self.assignment.save()
def setup_patch(self, function_name, return_value):
"""
Patch a method with a given return value, and return the mock
"""
mock = MagicMock(return_value=return_value)
new_patch = patch(function_name, new=mock)
new_patch.start()
self.addCleanup(new_patch.stop)
return mock
def test_send_outcome(self):
tasks.send_outcome(
self.points_possible,
self.points_earned,
self.user.id,
unicode(self.course_key),
unicode(self.usage_key)
)
self.generate_xml_mock.assert_called_once_with('sourcedid', 0.3)
self.replace_result_mock.assert_called_once_with(self.assignment, 'replace result XML')
class XmlHandlingTest(TestCase):
"""
Tests for the generate_replace_result_xml and check_replace_result_response
......@@ -363,3 +299,125 @@ class XmlHandlingTest(TestCase):
major_code='<imsx_codeMajor>failure</imsx_codeMajor>'
)
self.assertFalse(outcomes.check_replace_result_response(response))
class TestAssignmentsForProblem(ModuleStoreTestCase):
"""
Test cases for the get_assignments_for_problem method in outcomes.py
"""
def setUp(self):
super(TestAssignmentsForProblem, self).setUp()
self.user = UserFactory.create()
self.user_id = self.user.id
self.outcome_service = self.create_outcome_service('outcomes')
self.course = CourseFactory.create()
with self.store.bulk_operations(self.course.id, emit_signals=False):
self.chapter = ItemFactory.create(parent=self.course, category="chapter")
self.vertical = ItemFactory.create(parent=self.chapter, category="vertical")
self.unit = ItemFactory.create(parent=self.vertical, category="unit")
def create_outcome_service(self, id_suffix):
"""
Create and save a new OutcomeService model in the test database. The
OutcomeService model requires an LtiConsumer model, so we create one of
those as well. The method takes an ID string that is used to ensure that
unique fields do not conflict.
"""
lti_consumer = LtiConsumer(
consumer_name='lti_consumer_name' + id_suffix,
consumer_key='lti_consumer_key' + id_suffix,
consumer_secret='lti_consumer_secret' + id_suffix,
instance_guid='lti_instance_guid' + id_suffix
)
lti_consumer.save()
outcome_service = OutcomeService(
lis_outcome_service_url='https://example.com/outcomes/' + id_suffix,
lti_consumer=lti_consumer
)
outcome_service.save()
return outcome_service
def create_graded_assignment(self, desc, result_id, outcome_service):
"""
Create and save a new GradedAssignment model in the test database.
"""
assignment = GradedAssignment(
user=self.user,
course_key=self.course.id,
usage_key=desc.location,
outcome_service=outcome_service,
lis_result_sourcedid=result_id,
version_number=0
)
assignment.save()
return assignment
def test_with_no_graded_assignments(self):
with check_mongo_calls(3):
assignments = outcomes.get_assignments_for_problem(
self.unit, self.user_id, self.course.id
)
self.assertEqual(len(assignments), 0)
def test_with_graded_unit(self):
self.create_graded_assignment(self.unit, 'graded_unit', self.outcome_service)
with check_mongo_calls(3):
assignments = outcomes.get_assignments_for_problem(
self.unit, self.user_id, self.course.id
)
self.assertEqual(len(assignments), 1)
self.assertEqual(assignments[0].lis_result_sourcedid, 'graded_unit')
def test_with_graded_vertical(self):
self.create_graded_assignment(self.vertical, 'graded_vertical', self.outcome_service)
with check_mongo_calls(3):
assignments = outcomes.get_assignments_for_problem(
self.unit, self.user_id, self.course.id
)
self.assertEqual(len(assignments), 1)
self.assertEqual(assignments[0].lis_result_sourcedid, 'graded_vertical')
def test_with_graded_unit_and_vertical(self):
self.create_graded_assignment(self.unit, 'graded_unit', self.outcome_service)
self.create_graded_assignment(self.vertical, 'graded_vertical', self.outcome_service)
with check_mongo_calls(3):
assignments = outcomes.get_assignments_for_problem(
self.unit, self.user_id, self.course.id
)
self.assertEqual(len(assignments), 2)
self.assertEqual(assignments[0].lis_result_sourcedid, 'graded_unit')
self.assertEqual(assignments[1].lis_result_sourcedid, 'graded_vertical')
def test_with_unit_used_twice(self):
self.create_graded_assignment(self.unit, 'graded_unit', self.outcome_service)
self.create_graded_assignment(self.unit, 'graded_unit2', self.outcome_service)
with check_mongo_calls(3):
assignments = outcomes.get_assignments_for_problem(
self.unit, self.user_id, self.course.id
)
self.assertEqual(len(assignments), 2)
self.assertEqual(assignments[0].lis_result_sourcedid, 'graded_unit')
self.assertEqual(assignments[1].lis_result_sourcedid, 'graded_unit2')
def test_with_unit_graded_for_different_user(self):
self.create_graded_assignment(self.unit, 'graded_unit', self.outcome_service)
other_user = UserFactory.create()
with check_mongo_calls(3):
assignments = outcomes.get_assignments_for_problem(
self.unit, other_user.id, self.course.id
)
self.assertEqual(len(assignments), 0)
def test_with_unit_graded_for_multiple_consumers(self):
other_outcome_service = self.create_outcome_service('second_consumer')
self.create_graded_assignment(self.unit, 'graded_unit', self.outcome_service)
self.create_graded_assignment(self.unit, 'graded_unit2', other_outcome_service)
with check_mongo_calls(3):
assignments = outcomes.get_assignments_for_problem(
self.unit, self.user_id, self.course.id
)
self.assertEqual(len(assignments), 2)
self.assertEqual(assignments[0].lis_result_sourcedid, 'graded_unit')
self.assertEqual(assignments[1].lis_result_sourcedid, 'graded_unit2')
self.assertEqual(assignments[0].outcome_service, self.outcome_service)
self.assertEqual(assignments[1].outcome_service, other_outcome_service)
"""
Tests for the LTI outcome service handlers, both in outcomes.py and in tasks.py
"""
import ddt
from django.test import TestCase
from mock import patch, MagicMock
from student.tests.factories import UserFactory
from lti_provider.models import GradedAssignment, LtiConsumer, OutcomeService
import lti_provider.tasks as tasks
from opaque_keys.edx.locator import CourseLocator, BlockUsageLocator
class BaseOutcomeTest(TestCase):
"""
Super type for tests of both the leaf and composite outcome celery tasks.
"""
def setUp(self):
super(BaseOutcomeTest, self).setUp()
self.course_key = CourseLocator(
org='some_org',
course='some_course',
run='some_run'
)
self.usage_key = BlockUsageLocator(
course_key=self.course_key,
block_type='problem',
block_id='block_id'
)
self.user = UserFactory.create()
self.consumer = LtiConsumer(
consumer_name='Lti Consumer Name',
consumer_key='consumer_key',
consumer_secret='consumer_secret',
instance_guid='tool_instance_guid'
)
self.consumer.save()
outcome = OutcomeService(
lis_outcome_service_url='http://example.com/service_url',
lti_consumer=self.consumer
)
outcome.save()
self.assignment = GradedAssignment(
user=self.user,
course_key=self.course_key,
usage_key=self.usage_key,
outcome_service=outcome,
lis_result_sourcedid='sourcedid',
version_number=1,
)
self.assignment.save()
self.send_score_update_mock = self.setup_patch(
'lti_provider.outcomes.send_score_update', None
)
def setup_patch(self, function_name, return_value):
"""
Patch a method with a given return value, and return the mock
"""
mock = MagicMock(return_value=return_value)
new_patch = patch(function_name, new=mock)
new_patch.start()
self.addCleanup(new_patch.stop)
return mock
@ddt.ddt
class SendLeafOutcomeTest(BaseOutcomeTest):
"""
Tests for the send_leaf_outcome method in tasks.py
"""
@ddt.data(
(2.0, 2.0, 1.0),
(2.0, 0.0, 0.0),
(1, 2, 0.5),
)
@ddt.unpack
def test_outcome_with_score(self, earned, possible, expected):
tasks.send_leaf_outcome(
self.assignment.id, # pylint: disable=no-member
earned,
possible
)
self.send_score_update_mock.assert_called_once_with(self.assignment, expected)
@ddt.ddt
class SendCompositeOutcomeTest(BaseOutcomeTest):
"""
Tests for the send_composite_outcome method in tasks.py
"""
def setUp(self):
super(SendCompositeOutcomeTest, self).setUp()
self.descriptor = MagicMock()
self.descriptor.location = BlockUsageLocator(
course_key=self.course_key,
block_type='problem',
block_id='problem',
)
self.weighted_scores = MagicMock()
self.weighted_scores_mock = self.setup_patch(
'lti_provider.tasks.get_weighted_scores', self.weighted_scores
)
self.module_store = MagicMock()
self.module_store.get_item = MagicMock(return_value=self.descriptor)
self.check_result_mock = self.setup_patch(
'lti_provider.tasks.modulestore',
self.module_store
)
@ddt.data(
(2.0, 2.0, 1.0),
(2.0, 0.0, 0.0),
(1, 2, 0.5),
)
@ddt.unpack
def test_outcome_with_score(self, earned, possible, expected):
self.weighted_scores.score_for_module = MagicMock(return_value=(earned, possible))
tasks.send_composite_outcome(
self.user.id, unicode(self.course_key), self.assignment.id, 1 # pylint: disable=no-member
)
self.send_score_update_mock.assert_called_once_with(self.assignment, expected)
def test_outcome_with_outdated_version(self):
self.assignment.version_number = 2
self.assignment.save()
tasks.send_composite_outcome(
self.user.id, unicode(self.course_key), self.assignment.id, 1 # pylint: disable=no-member
)
self.assertEqual(self.weighted_scores_mock.call_count, 0)
......@@ -2632,6 +2632,18 @@ CREDIT_HELP_LINK_URL = "#"
# route any messages intended for LTI users to a common domain.
LTI_USER_EMAIL_DOMAIN = 'lti.example.com'
# An aggregate score is one derived from multiple problems (such as the
# cumulative score for a vertical element containing many problems). Sending
# aggregate scores immediately introduces two issues: one is a race condition
# between the view method and the Celery task where the updated score may not
# yet be visible to the database if the view has not yet returned (and committed
# its transaction). The other is that the student is likely to receive a stream
# of notifications as the score is updated with every problem. Waiting a
# reasonable period of time allows the view transaction to end, and allows us to
# collapse multiple score updates into a single message.
# The time value is in seconds.
LTI_AGGREGATE_SCORE_PASSBACK_DELAY = 15 * 60
# Number of seconds before JWT tokens expire
JWT_EXPIRATION = 30
JWT_ISSUER = None
......