Commit bc551b9b by Nimisha Asthagiri, committed by GitHub

Merge pull request #14001 from edx/beryl/get-max-score-opt

Optimize max_score by moving from XModule to Descriptor
parents 0f7f83ac 257fcc03
......@@ -129,7 +129,7 @@ class LoncapaProblem(object):
Main class for capa Problems.
"""
def __init__(self, problem_text, id, capa_system, capa_module, # pylint: disable=redefined-builtin
state=None, seed=None):
state=None, seed=None, minimal_init=False):
"""
Initializes capa Problem.
......@@ -186,7 +186,10 @@ class LoncapaProblem(object):
self._process_includes()
# construct script processor context (eg for customresponse problems)
self.context = self._extract_context(self.tree)
if minimal_init:
self.context = {'script_code': ""}
else:
self.context = self._extract_context(self.tree)
# Pre-parse the XML tree: modifies it to add ID's and perform some in-place
# transformations. This also creates the dict (self.responders) of Response
......@@ -209,7 +212,8 @@ class LoncapaProblem(object):
if hasattr(response, 'late_transforms'):
response.late_transforms(self)
self.extracted_tree = self._extract_html(self.tree)
if not minimal_init:
self.extracted_tree = self._extract_html(self.tree)
def make_xml_compatible(self, tree):
"""
......
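A note on the change above: with minimal_init=True, LoncapaProblem skips building the script execution context and rendering the HTML tree, so it can be constructed cheaply when only structural data such as the maximum score is needed. A minimal sketch of that standalone use, with illustrative values (the problem XML and capa_system are assumed to exist already, and passing capa_module=None is an assumption for this sketch, not taken from the diff):

    from capa.capa_problem import LoncapaProblem

    problem = LoncapaProblem(
        problem_text=problem_xml,   # raw <problem> XML string (assumed available)
        id='sample_problem',        # hypothetical block id
        capa_system=capa_system,    # a LoncapaSystem instance (assumed available)
        capa_module=None,           # not exercised during a minimal init (assumption)
        state={},
        seed=1,
        minimal_init=True,          # skip script context and HTML extraction
    )
    print(problem.get_max_score())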
......@@ -357,12 +357,6 @@ class CapaMixin(CapaFields):
"""
return self.lcp.get_score()
def max_score(self):
"""
Access the problem's max score
"""
return self.lcp.get_max_score()
def get_progress(self):
"""
For now, just return score / max_score
......
......@@ -275,6 +275,38 @@ class CapaDescriptor(CapaFields, RawDescriptor):
)
return False
def max_score(self):
"""
Return the problem's max score
"""
from capa.capa_problem import LoncapaProblem, LoncapaSystem
capa_system = LoncapaSystem(
ajax_url=None,
anonymous_student_id=None,
cache=None,
can_execute_unsafe_code=None,
get_python_lib_zip=None,
DEBUG=None,
filestore=self.runtime.resources_fs,
i18n=self.runtime.service(self, "i18n"),
node_path=None,
render_template=None,
seed=None,
STATIC_URL=None,
xqueue=None,
matlab_api_key=None,
)
lcp = LoncapaProblem(
problem_text=self.data,
id=self.location.html_id(),
capa_system=capa_system,
capa_module=self,
state={},
seed=1,
minimal_init=True,
)
return lcp.get_max_score()
# Proxy to CapaModule for access to any of its attributes
answer_available = module_attr('answer_available')
submit_button_name = module_attr('submit_button_name')
......
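The new CapaDescriptor.max_score above parses the problem with minimal_init=True, so callers no longer need a bound XModule (and hence no user or request) to learn a problem's maximum score. A hedged sketch of a hypothetical caller, using the standard modulestore API and Python 2-era unicode() to match the surrounding code:

    from xmodule.modulestore.django import modulestore

    def max_scores_for_course(course_key):
        """Collect each problem's max score without instantiating any XModules."""
        scores = {}
        for block in modulestore().get_items(course_key, qualifiers={'category': 'problem'}):
            if getattr(block, 'has_score', False):
                scores[unicode(block.location)] = block.max_score()
        return scores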
......@@ -604,7 +604,7 @@
case 'incorrect':
case 'correct':
window.SR.readTexts(that.get_sr_status(response.contents));
that.el.trigger('contentChanged', [that.id, response.contents]);
that.el.trigger('contentChanged', [that.id, response.contents, response]);
that.render(response.contents, that.focus_on_submit_notification);
that.updateProgress(response);
break;
......@@ -662,7 +662,7 @@
id: this.id
}, function(response) {
if (response.success) {
that.el.trigger('contentChanged', [that.id, response.html]);
that.el.trigger('contentChanged', [that.id, response.html, response]);
that.render(response.html, that.scroll_to_problem_meta);
that.updateProgress(response);
return window.SR.readText(gettext('This problem has been reset.'));
......@@ -773,7 +773,7 @@
var saveMessage;
saveMessage = response.msg;
if (response.success) {
that.el.trigger('contentChanged', [that.id, response.html]);
that.el.trigger('contentChanged', [that.id, response.html, response]);
edx.HtmlUtils.setHtml(
that.el.find('.notification-save .notification-message'),
edx.HtmlUtils.HTML(saveMessage)
......
......@@ -45,23 +45,24 @@ class @Sequence
hookUpContentStateChangeEvent: ->
$('.problems-wrapper').bind(
'contentChanged',
(event, problem_id, new_content_state) =>
@addToUpdatedProblems problem_id, new_content_state
(event, problem_id, new_content_state, new_state) =>
@addToUpdatedProblems problem_id, new_content_state, new_state
)
addToUpdatedProblems: (problem_id, new_content_state) =>
addToUpdatedProblems: (problem_id, new_content_state, new_state) =>
# Used to keep updated problem's state temporarily.
# params:
# 'problem_id' is problem id.
# 'new_content_state' is updated problem's state.
# 'new_content_state' is the updated content of the problem.
# 'new_state' is the updated state of the problem.
# initialize for the current sequence if there isn't any updated problem
# for this position.
if not @anyUpdatedProblems @position
@updatedProblems[@position] = {}
# Now, put problem content against problem id for current active sequence.
@updatedProblems[@position][problem_id] = new_content_state
# Now, put the problem content and score against the problem id for the current active sequence.
@updatedProblems[@position][problem_id] = [new_content_state, new_state]
anyUpdatedProblems:(position) ->
# check for the updated problems for given sequence position.
......@@ -161,10 +162,15 @@ class @Sequence
# update the data-attributes with latest contents only for updated problems.
if @anyUpdatedProblems new_position
$.each @updatedProblems[new_position], (problem_id, latest_content) =>
$.each @updatedProblems[new_position], (problem_id, latest_data) =>
latest_content = latest_data[0]
latest_response = latest_data[1]
@content_container
.find("[data-problem-id='#{ problem_id }']")
.data('content', latest_content)
.data('problem-score', latest_response.current_score)
.data('problem-total-possible', latest_response.total_possible)
.data('attempts-used', latest_response.attempts_used)
XBlock.initializeBlocks(@content_container, @requestToken)
......
......@@ -649,9 +649,6 @@ oauth_consumer_key="", oauth_signature="frVp4JuvT1mVXlxktiAUjQ7%2F1cw%3D"'}
params.update(body)
return params
def max_score(self):
return self.weight if self.has_score else None
@XBlock.handler
def grade_handler(self, request, suffix): # pylint: disable=unused-argument
"""
......@@ -898,6 +895,10 @@ class LTIDescriptor(LTIFields, MetadataOnlyEditingDescriptor, EmptyDataRawDescri
"""
Descriptor for LTI Xmodule.
"""
def max_score(self):
return self.weight if self.has_score else None
module_class = LTIModule
resources_dir = None
grade_handler = module_attr('grade_handler')
......
......@@ -72,6 +72,7 @@ class LTIModuleTest(LogicTest):
self.xmodule.due = None
self.xmodule.graceperiod = None
self.xmodule.descriptor = self.system.construct_xblock_from_class(self.descriptor_class, self.xmodule.scope_ids)
def get_request_body(self, params=None):
"""Fetches the body of a request specified by params"""
......
......@@ -806,6 +806,7 @@ class XModule(HTMLSnippet, XModuleMixin):
entry_point = "xmodule.v1"
has_score = descriptor_attr('has_score')
max_score = descriptor_attr('max_score')
show_in_read_only_mode = descriptor_attr('show_in_read_only_mode')
_field_data_cache = descriptor_attr('_field_data_cache')
_field_data = descriptor_attr('_field_data')
......@@ -1201,7 +1202,6 @@ class XModuleDescriptor(HTMLSnippet, ResourceTemplates, XModuleMixin):
get_progress = module_attr('get_progress')
get_score = module_attr('get_score')
handle_ajax = module_attr('handle_ajax')
max_score = module_attr('max_score')
student_view = module_attr(STUDENT_VIEW)
get_child_descriptors = module_attr('get_child_descriptors')
xmodule_handler = module_attr('xmodule_handler')
......
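For context on the proxy lines changed above: module_attr and descriptor_attr build attribute proxies between the paired XModule and XModuleDescriptor objects, and reading a module_attr from a descriptor forces the XModule to be instantiated. Dropping max_score from the descriptor's module_attr list (and adding it to the module's descriptor_attr list) is what lets max_score be read without that instantiation. Roughly, as a simplification of the real proxy helpers:

    def descriptor_attr(name):
        # Simplified: expose self.descriptor.<name> on the XModule.
        return property(lambda self: getattr(self.descriptor, name))

    def module_attr(name):
        # Simplified: expose the paired XModule's <name> on the descriptor,
        # instantiating the XModule on first access.
        return property(lambda self: getattr(self._xmodule, name))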
......@@ -124,13 +124,14 @@ def enqueue_subsection_update(sender, **kwargs): # pylint: disable=unused-argum
Handles the PROBLEM_SCORE_CHANGED signal by enqueueing a subsection update operation to occur asynchronously.
"""
recalculate_subsection_grade.apply_async(
args=(
kwargs['user_id'],
kwargs['course_id'],
kwargs['usage_id'],
kwargs.get('only_if_higher'),
kwargs.get('points_earned'),
kwargs.get('points_possible'),
kwargs=dict(
user_id=kwargs['user_id'],
course_id=kwargs['course_id'],
usage_id=kwargs['usage_id'],
only_if_higher=kwargs.get('only_if_higher'),
raw_earned=kwargs.get('points_earned'),
raw_possible=kwargs.get('points_possible'),
score_deleted=kwargs.get('score_deleted', False),
)
)
......
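Switching the enqueue call from a positional args tuple to kwargs keeps producer and consumer loosely coupled: the task, which now accepts **kwargs, can grow optional fields such as score_deleted without a lock-step change to every caller. A hedged sketch of the resulting call, with illustrative values only:

    recalculate_subsection_grade.apply_async(
        kwargs=dict(
            user_id=42,                                   # illustrative values
            course_id='course-v1:Org+Course+Run',
            usage_id='block-v1:Org+Course+Run+type@problem+block@abc123',
            only_if_higher=False,
            raw_earned=1.0,
            raw_possible=2.0,
            score_deleted=False,
        )
    )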
......@@ -23,50 +23,40 @@ log = getLogger(__name__)
@task(default_retry_delay=30, routing_key=settings.RECALCULATE_GRADES_ROUTING_KEY)
def recalculate_subsection_grade(user_id, course_id, usage_id, only_if_higher, raw_earned, raw_possible):
def recalculate_subsection_grade(user_id, course_id, usage_id, only_if_higher, raw_earned, raw_possible, **kwargs):
"""
Updates a saved subsection grade.
This method expects the following parameters:
- user_id: serialized id of applicable User object
- course_id: Unicode string identifying the course
- usage_id: Unicode string identifying the course block
- only_if_higher: boolean indicating whether grades should
be updated only if the new raw_earned is higher than the previous
value.
- raw_earned: the raw points the learner earned on the problem that
triggered the update
- raw_possible: the max raw points the learner could have earned
on the problem
Arguments:
user_id (int): id of applicable User object
course_id (string): identifying the course
usage_id (string): identifying the course block
only_if_higher (boolean): indicating whether grades should
be updated only if the new raw_earned is higher than the
previous value.
raw_earned (float): the raw points the learner earned on the
problem that triggered the update.
raw_possible (float): the max raw points the learner could have
earned on the problem.
score_deleted (boolean): indicating whether the grade change is
a result of the problem's score being deleted.
"""
course_key = CourseLocator.from_string(course_id)
if not PersistentGradesEnabledFlag.feature_enabled(course_key):
return
score_deleted = kwargs['score_deleted']
scored_block_usage_key = UsageKey.from_string(usage_id).replace(course_key=course_key)
score = get_score(user_id, scored_block_usage_key)
# If the score is None, it has not been saved at all yet
# and we need to retry until it has been saved.
if score is None:
# Verify the database has been updated with the scores when the task was
# created. This race condition occurs if the transaction in the task
# creator's process hasn't committed before the task initiates in the worker
# process.
if not _has_database_updated_with_new_score(
user_id, scored_block_usage_key, raw_earned, raw_possible, score_deleted,
):
raise _retry_recalculate_subsection_grade(
user_id, course_id, usage_id, only_if_higher, raw_earned, raw_possible,
)
else:
module_raw_earned, module_raw_possible = score # pylint: disable=unpacking-non-sequence
# Validate that the retrieved scores match the scores when the task was created.
# This race condition occurs if the transaction in the task creator's process hasn't
# committed before the task initiates in the worker process.
grades_match = module_raw_earned == raw_earned and module_raw_possible == raw_possible
# We have to account for the situation where a student's state
# has been deleted- in this case, get_score returns (None, None), but
# the score change signal will contain 0 for raw_earned.
state_deleted = module_raw_earned is None and module_raw_possible is None and raw_earned == 0
if not (state_deleted or grades_match):
raise _retry_recalculate_subsection_grade(
user_id, course_id, usage_id, only_if_higher, raw_earned, raw_possible,
user_id, course_id, usage_id, only_if_higher, raw_earned, raw_possible, score_deleted,
)
_update_subsection_grades(
......@@ -78,6 +68,28 @@ def recalculate_subsection_grade(user_id, course_id, usage_id, only_if_higher, r
usage_id,
raw_earned,
raw_possible,
score_deleted,
)
def _has_database_updated_with_new_score(
user_id, scored_block_usage_key, expected_raw_earned, expected_raw_possible, score_deleted,
):
"""
Returns whether the database has been updated with the
expected new score values for the given problem and user.
"""
score = get_score(user_id, scored_block_usage_key)
if score is None:
# score should be None only if it was deleted.
# Otherwise, it hasn't yet been saved.
return score_deleted
found_raw_earned, found_raw_possible = score # pylint: disable=unpacking-non-sequence
return (
found_raw_earned == expected_raw_earned and
found_raw_possible == expected_raw_possible
)
......@@ -90,6 +102,7 @@ def _update_subsection_grades(
usage_id,
raw_earned,
raw_possible,
score_deleted,
):
"""
A helper function to update subsection grades in the database
......@@ -124,23 +137,26 @@ def _update_subsection_grades(
except DatabaseError as exc:
raise _retry_recalculate_subsection_grade(
user_id, course_id, usage_id, only_if_higher, raw_earned, raw_possible, exc,
user_id, course_id, usage_id, only_if_higher, raw_earned, raw_possible, score_deleted, exc,
)
def _retry_recalculate_subsection_grade(user_id, course_id, usage_id, only_if_higher, grade, max_grade, exc=None):
def _retry_recalculate_subsection_grade(
user_id, course_id, usage_id, only_if_higher, raw_earned, raw_possible, score_deleted, exc=None,
):
"""
Calls retry for the recalculate_subsection_grade task with the
given inputs.
"""
recalculate_subsection_grade.retry(
args=[
user_id,
course_id,
usage_id,
only_if_higher,
grade,
max_grade,
],
exc=exc
kwargs=dict(
user_id=user_id,
course_id=course_id,
usage_id=usage_id,
only_if_higher=only_if_higher,
raw_earned=raw_earned,
raw_possible=raw_possible,
score_deleted=score_deleted,
),
exc=exc,
)
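The extracted _has_database_updated_with_new_score helper centralizes the race-condition check: the task recalculates only when the persisted score matches the values that triggered it, and otherwise retries. A condensed, hypothetical form of that decision, assuming get_score returns either None or an (earned, possible) pair:

    def _should_recalculate(persisted_score, raw_earned, raw_possible, score_deleted):
        """Hypothetical condensed form of the pre-recalculation check."""
        if persisted_score is None:
            # A missing score is expected only when the score was deleted.
            return score_deleted
        found_earned, found_possible = persisted_score
        return found_earned == raw_earned and found_possible == raw_possible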
......@@ -8,19 +8,16 @@ import ddt
from django.conf import settings
from django.db.utils import IntegrityError
from mock import patch
from uuid import uuid4
from unittest import skip
from opaque_keys.edx.locator import CourseLocator
from student.models import anonymous_id_for_user
from student.tests.factories import UserFactory
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, check_mongo_calls
from lms.djangoapps.grades.config.models import PersistentGradesEnabledFlag
from lms.djangoapps.grades.signals.signals import PROBLEM_SCORE_CHANGED, SUBSECTION_SCORE_CHANGED
from lms.djangoapps.grades.signals.signals import PROBLEM_SCORE_CHANGED
from lms.djangoapps.grades.tasks import recalculate_subsection_grade
......@@ -66,8 +63,9 @@ class RecalculateSubsectionGradeTest(ModuleStoreTestCase):
('course_id', unicode(self.course.id)),
('usage_id', unicode(self.problem.location)),
('only_if_higher', None),
('grade', 1.0),
('max_grade', 2.0),
('raw_earned', 1.0),
('raw_possible', 2.0),
('score_deleted', False),
])
# this call caches the anonymous id on the user object, saving 4 queries in all happy path tests
......@@ -83,30 +81,18 @@ class RecalculateSubsectionGradeTest(ModuleStoreTestCase):
with patch("lms.djangoapps.grades.tasks.get_score", return_value=score):
yield
@ddt.data(
('lms.djangoapps.grades.tasks.recalculate_subsection_grade.apply_async', PROBLEM_SCORE_CHANGED),
)
@ddt.unpack
def test_signal_queues_task(self, enqueue_op, test_signal):
def test_problem_score_changed_queues_task(self):
"""
Ensures that the PROBLEM_SCORE_CHANGED and SUBSECTION_SCORE_CHANGED signals enqueue the correct tasks.
Ensures that the PROBLEM_SCORE_CHANGED signal enqueues the correct task.
"""
self.set_up_course()
if test_signal == PROBLEM_SCORE_CHANGED:
send_args = self.problem_score_changed_kwargs
expected_args = tuple(self.recalculate_subsection_grade_kwargs.values())
else:
send_args = {'user': self.user, 'course': self.course}
expected_args = (
self.problem_score_changed_kwargs['user_id'],
self.problem_score_changed_kwargs['course_id']
)
send_args = self.problem_score_changed_kwargs
with self.mock_get_score() and patch(
enqueue_op,
'lms.djangoapps.grades.tasks.recalculate_subsection_grade.apply_async',
return_value=None
) as mock_task_apply:
test_signal.send(sender=None, **send_args)
mock_task_apply.assert_called_once_with(args=expected_args)
PROBLEM_SCORE_CHANGED.send(sender=None, **send_args)
mock_task_apply.assert_called_once_with(kwargs=self.recalculate_subsection_grade_kwargs)
@patch('lms.djangoapps.grades.signals.signals.SUBSECTION_SCORE_CHANGED.send')
def test_subsection_update_triggers_course_update(self, mock_course_signal):
......@@ -174,7 +160,7 @@ class RecalculateSubsectionGradeTest(ModuleStoreTestCase):
with self.store.default_store(default_store):
self.set_up_course()
with check_mongo_calls(0) and self.assertNumQueries(3 if feature_flag else 2):
recalculate_subsection_grade.apply(args=tuple(self.recalculate_subsection_grade_kwargs.values()))
recalculate_subsection_grade.apply(kwargs=self.recalculate_subsection_grade_kwargs)
@patch('lms.djangoapps.grades.tasks.recalculate_subsection_grade.retry')
@patch('lms.djangoapps.grades.new.subsection_grade.SubsectionGradeFactory.update')
......@@ -185,26 +171,43 @@ class RecalculateSubsectionGradeTest(ModuleStoreTestCase):
self.set_up_course()
mock_update.side_effect = IntegrityError("WHAMMY")
self._apply_recalculate_subsection_grade()
self.assertTrue(mock_retry.called)
self._assert_retry_called(mock_retry)
@patch('lms.djangoapps.grades.tasks.recalculate_subsection_grade.retry')
def test_retry_subsection_grade_on_update_not_complete(self, mock_retry):
self.set_up_course()
with self.mock_get_score(score=(0.5, 3.0)):
recalculate_subsection_grade.apply(args=tuple(self.recalculate_subsection_grade_kwargs.values()))
self.assertTrue(mock_retry.called)
self._apply_recalculate_subsection_grade(mock_score=(0.5, 3.0))
self._assert_retry_called(mock_retry)
@patch('lms.djangoapps.grades.tasks.recalculate_subsection_grade.retry')
def test_retry_subsection_grade_on_no_score(self, mock_retry):
self.set_up_course()
with self.mock_get_score(score=None):
recalculate_subsection_grade.apply(args=tuple(self.recalculate_subsection_grade_kwargs.values()))
self.assertTrue(mock_retry.called)
self._apply_recalculate_subsection_grade(mock_score=None)
self._assert_retry_called(mock_retry)
def _apply_recalculate_subsection_grade(self):
@patch('lms.djangoapps.grades.signals.signals.SUBSECTION_SCORE_CHANGED.send')
@patch('lms.djangoapps.grades.new.subsection_grade.SubsectionGradeFactory.update')
def test_retry_first_time_only(self, mock_update, mock_course_signal):
"""
Ensures that a task retry completes after a one-time failure.
"""
self.set_up_course()
mock_update.side_effect = [IntegrityError("WHAMMY"), None]
self._apply_recalculate_subsection_grade()
self.assertEquals(mock_course_signal.call_count, 1)
def _apply_recalculate_subsection_grade(self, mock_score=(1.0, 2.0)):
"""
Calls the recalculate_subsection_grade task with necessary
mocking in place.
"""
with self.mock_get_score():
recalculate_subsection_grade.apply(args=tuple(self.recalculate_subsection_grade_kwargs.values()))
with self.mock_get_score(mock_score):
recalculate_subsection_grade.apply(kwargs=self.recalculate_subsection_grade_kwargs)
def _assert_retry_called(self, mock_retry):
"""
Verifies the task was retried with the correct
number of arguments.
"""
self.assertTrue(mock_retry.called)
self.assertEquals(len(mock_retry.call_args[1]['kwargs']), len(self.recalculate_subsection_grade_kwargs))
......@@ -118,8 +118,10 @@ class GradesTransformer(BlockStructureTransformer):
"""
Collect the `max_score` for every block in the provided `block_structure`.
"""
for module in cls._iter_scorable_xmodules(block_structure):
cls._collect_max_score(block_structure, module)
for block_locator in block_structure.post_order_traversal():
block = block_structure.get_xblock(block_locator)
if getattr(block, 'has_score', False):
cls._collect_max_score(block_structure, block)
@classmethod
def _collect_max_score(cls, block_structure, module):
......@@ -171,20 +173,7 @@ class GradesTransformer(BlockStructureTransformer):
XModule, even though the data is not user specific. Here we bind the
data to a SystemUser.
"""
request = RequestFactory().get('/dummy-collect-max-grades')
user = SystemUser()
request.user = user
request.session = {}
root_block = block_structure.get_xblock(block_structure.root_block_usage_key)
course_key = block_structure.root_block_usage_key.course_key
cache = FieldDataCache.cache_for_descriptor_descendents(
course_id=course_key,
user=request.user,
descriptor=root_block,
descriptor_filter=lambda descriptor: descriptor.has_score,
)
for block_locator in block_structure.post_order_traversal():
block = block_structure.get_xblock(block_locator)
if getattr(block, 'has_score', False):
module = courseware.module_render.get_module_for_descriptor(user, request, block, cache, course_key)
yield module
yield block
......@@ -4,7 +4,6 @@ Enrollment operations for use by instructor APIs.
Does not include any access control, be sure to check access before calling.
"""
import crum
import json
import logging
from django.contrib.auth.models import User
......@@ -15,8 +14,6 @@ from django.utils.translation import override as override_language
from course_modes.models import CourseMode
from courseware.models import StudentModule
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from edxmako.shortcuts import render_to_string
from lms.djangoapps.grades.scores import weighted_score
from lms.djangoapps.grades.signals.signals import PROBLEM_SCORE_CHANGED
......@@ -298,37 +295,22 @@ def _fire_score_changed_for_block(course_id, student, block, module_state_key):
noted below.
"""
if block and block.has_score:
cache = FieldDataCache.cache_for_descriptor_descendents(
course_id=course_id,
user=student,
descriptor=block,
depth=0
)
# For implementation reasons, we need to pull the max_score from the XModule,
# even though the data is not user-specific. Here we bind the data to the
# current user.
request = crum.get_current_request()
module = get_module_for_descriptor(
user=student,
request=request,
descriptor=block,
field_data_cache=cache,
course_key=course_id
)
max_score = module.max_score()
max_score = block.max_score()
if max_score is None:
return
else:
points_earned, points_possible = weighted_score(0, max_score, getattr(module, 'weight', None))
points_earned, points_possible = weighted_score(0, max_score, getattr(block, 'weight', None))
else:
points_earned, points_possible = 0, 0
PROBLEM_SCORE_CHANGED.send(
sender=None,
points_possible=points_possible,
points_earned=points_earned,
user_id=student.id,
course_id=unicode(course_id),
usage_id=unicode(module_state_key)
usage_id=unicode(module_state_key),
score_deleted=True,
)
......
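The rescore-to-zero path above now reads max_score directly from the descriptor and rescales it with weighted_score from lms.djangoapps.grades.scores. As a reference point, a sketch of what weighted_score is assumed to do here (an approximation, not the verified implementation):

    def weighted_score(raw_earned, raw_possible, weight):
        """Assumed behavior: rescale the raw (earned, possible) pair to the problem's weight."""
        if weight is None or raw_possible == 0:
            return raw_earned, raw_possible
        return float(raw_earned) * weight / raw_possible, float(weight)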