Commit aa000c1a by Nimisha Asthagiri

Support for rescoring a problem only if the new score is higher

TNL-5046
parent 0e3e1b47
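Overview (not part of the patch): a minimal sketch of the comparison this commit relies on. The is_score_higher helper is imported from openedx.core.lib.grade_utils in the hunks below, but its implementation is not shown in this diff, so the percentage-based comparison here is an assumption inferred from how the tests exercise it; maybe_update_score is a hypothetical wrapper added only for illustration.

def is_score_higher(earned_1, possible_1, earned_2, possible_2):
    """Assumed semantics: True only if the second score is a strictly higher percentage."""
    percentage_1 = float(earned_1) / possible_1 if possible_1 > 0 else 0.0
    percentage_2 = float(earned_2) / possible_2 if possible_2 > 0 else 0.0
    return percentage_2 > percentage_1

def maybe_update_score(previous, raw_earned, raw_possible, only_if_higher):
    """Hypothetical guard mirroring score_published_handler further down."""
    if only_if_higher and previous is not None:
        prev_earned, prev_possible = previous
        if not is_score_higher(prev_earned, prev_possible, raw_earned, raw_possible):
            return False  # existing score is at least as good; leave it alone
    return True  # caller proceeds with set_score(...)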
......@@ -1048,7 +1048,7 @@ class CapaMixin(CapaFields):
return answers
def publish_grade(self):
def publish_grade(self, only_if_higher=None):
"""
Publishes the student's current grade to the system as an event
"""
......@@ -1059,6 +1059,7 @@ class CapaMixin(CapaFields):
{
'value': score['score'],
'max_value': score['total'],
'only_if_higher': only_if_higher,
}
)
......@@ -1370,13 +1371,16 @@ class CapaMixin(CapaFields):
return input_metadata
def rescore_problem(self):
def rescore_problem(self, only_if_higher):
"""
Checks whether the existing answers to a problem are correct.
This is called when the correct answer to a problem has been changed,
and the grade should be re-evaluated.
If only_if_higher is True, the answer and grade are updated
only if the resulting score is higher than before.
Returns a dict with one key:
{'success' : 'correct' | 'incorrect' | AJAX alert msg string }
......@@ -1428,7 +1432,7 @@ class CapaMixin(CapaFields):
# need to increment here, or mark done. Just save.
self.set_state_from_lcp()
self.publish_grade()
self.publish_grade(only_if_higher)
new_score = self.lcp.get_score()
event_info['new_score'] = new_score['score']
......
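For orientation (not part of the patch), a minimal sketch of driving the updated rescore_problem entry point; the CapaFactory import path is an assumption based on the test module patched below, and the returned 'success' values are taken from the docstring above.

from xmodule.tests.test_capa_module import CapaFactory  # import path assumed

module = CapaFactory.create(done=True)  # a CAPA module with a saved submission
result = module.rescore_problem(only_if_higher=True)
# 'correct' / 'incorrect' indicate the stored grade was re-evaluated; any other
# value is an AJAX alert message explaining why rescoring could not proceed.
assert result['success'] in ('correct', 'incorrect'), result['success']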
......@@ -487,15 +487,14 @@ class CapaModuleTest(unittest.TestCase):
# Simulate that all answers are marked correct, no matter
# what the input is, by patching CorrectMap.is_correct()
# Also simulate rendering the HTML
# TODO: pep8 thinks the following line has invalid syntax
with patch('capa.correctmap.CorrectMap.is_correct') as mock_is_correct, \
patch('xmodule.capa_module.CapaModule.get_problem_html') as mock_html:
mock_is_correct.return_value = True
mock_html.return_value = "Test HTML"
with patch('capa.correctmap.CorrectMap.is_correct') as mock_is_correct:
with patch('xmodule.capa_module.CapaModule.get_problem_html') as mock_html:
mock_is_correct.return_value = True
mock_html.return_value = "Test HTML"
# Check the problem
get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.submit_problem(get_request_dict)
# Check the problem
get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.submit_problem(get_request_dict)
# Expect that the problem is marked correct
self.assertEqual(result['success'], 'correct')
......@@ -861,7 +860,7 @@ class CapaModuleTest(unittest.TestCase):
# what the input is, by patching LoncapaResponse.evaluate_answers()
with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'correct')
result = module.rescore_problem()
result = module.rescore_problem(only_if_higher=False)
# Expect that the problem is marked correct
self.assertEqual(result['success'], 'correct')
......@@ -881,7 +880,7 @@ class CapaModuleTest(unittest.TestCase):
# what the input is, by patching LoncapaResponse.evaluate_answers()
with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'incorrect')
result = module.rescore_problem()
result = module.rescore_problem(only_if_higher=False)
# Expect that the problem is marked incorrect
self.assertEqual(result['success'], 'incorrect')
......@@ -895,7 +894,7 @@ class CapaModuleTest(unittest.TestCase):
# Try to rescore the problem, and get exception
with self.assertRaises(xmodule.exceptions.NotFoundError):
module.rescore_problem()
module.rescore_problem(only_if_higher=False)
def test_rescore_problem_not_supported(self):
module = CapaFactory.create(done=True)
......@@ -904,7 +903,7 @@ class CapaModuleTest(unittest.TestCase):
with patch('capa.capa_problem.LoncapaProblem.supports_rescoring') as mock_supports_rescoring:
mock_supports_rescoring.return_value = False
with self.assertRaises(NotImplementedError):
module.rescore_problem()
module.rescore_problem(only_if_higher=False)
def _rescore_problem_error_helper(self, exception_class):
"""Helper to allow testing all errors that rescoring might return."""
......@@ -914,7 +913,7 @@ class CapaModuleTest(unittest.TestCase):
# Simulate answering a problem that raises the exception
with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
mock_rescore.side_effect = exception_class(u'test error \u03a9')
result = module.rescore_problem()
result = module.rescore_problem(only_if_higher=False)
# Expect an AJAX alert message in 'success'
expected_msg = u'Error: test error \u03a9'
......@@ -1656,7 +1655,7 @@ class CapaModuleTest(unittest.TestCase):
module.submit_problem(get_request_dict)
# On rescore, state/student_answers should use unmasked names
with patch.object(module.runtime, 'track_function') as mock_track_function:
module.rescore_problem()
module.rescore_problem(only_if_higher=False)
mock_call = mock_track_function.mock_calls[0]
event_info = mock_call[1][1]
self.assertEquals(mock_call[1][0], 'problem_rescore')
......
......@@ -107,6 +107,15 @@ class StaffDebugPage(PageObject):
self.q(css='input[id^=sd_fu_]').first.fill(user)
self.q(css='.staff-modal .staff-debug-rescore').click()
def rescore_if_higher(self, user=None):
"""
This clicks on the rescore-if-higher link with an optionally
specified user.
"""
if user:
self.q(css='input[id^=sd_fu_]').first.fill(user)
self.q(css='.staff-modal .staff-debug-rescore-if-higher').click()
@property
def idash_msg(self):
"""
......
......@@ -169,6 +169,13 @@ class ContainerPage(PageObject, HelpMixin):
"""
return self.q(css='.action-publish').first
def publish(self):
"""
Publishes the container.
"""
self.publish_action.click()
self.wait_for_ajax()
def discard_changes(self):
"""
Discards draft changes (which will then re-render the page).
......
......@@ -353,17 +353,29 @@ def is_404_page(browser):
return 'Page not found (404)' in browser.find_element_by_tag_name('h1').text
def create_multiple_choice_problem(problem_name):
def create_multiple_choice_xml(correct_choice=2, num_choices=4):
"""
Return the Multiple Choice Problem Descriptor, given the name of the problem.
Return the Multiple Choice Problem XML, given the correct choice index and the number of choices.
"""
factory = MultipleChoiceResponseXMLFactory()
xml_data = factory.build_xml(
question_text='The correct answer is Choice 2',
choices=[False, False, True, False],
choice_names=['choice_0', 'choice_1', 'choice_2', 'choice_3']
# all choices are incorrect except for correct_choice
choices = [False for _ in range(num_choices)]
choices[correct_choice] = True
choice_names = ['choice_{}'.format(index) for index in range(num_choices)]
question_text = 'The correct answer is Choice {}'.format(correct_choice)
return MultipleChoiceResponseXMLFactory().build_xml(
question_text=question_text,
choices=choices,
choice_names=choice_names,
)
def create_multiple_choice_problem(problem_name):
"""
Return the Multiple Choice Problem Descriptor, given the name of the problem.
"""
xml_data = create_multiple_choice_xml()
return XBlockFixtureDesc(
'problem',
problem_name,
......
......@@ -116,8 +116,9 @@ class StaffDebugTest(CourseWithoutContentGroupsTest):
staff_debug_page = self._goto_staff_page().open_staff_debug_info()
staff_debug_page.reset_attempts()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully reset the attempts '
'for user {}'.format(self.USERNAME), msg)
self.assertEqual(
u'Successfully reset the attempts for user {}'.format(self.USERNAME), msg,
)
def test_delete_state_empty(self):
"""
......@@ -126,8 +127,9 @@ class StaffDebugTest(CourseWithoutContentGroupsTest):
staff_debug_page = self._goto_staff_page().open_staff_debug_info()
staff_debug_page.delete_state()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully deleted student state '
'for user {}'.format(self.USERNAME), msg)
self.assertEqual(
u'Successfully deleted student state for user {}'.format(self.USERNAME), msg,
)
def test_reset_attempts_state(self):
"""
......@@ -139,10 +141,11 @@ class StaffDebugTest(CourseWithoutContentGroupsTest):
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.reset_attempts()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully reset the attempts '
'for user {}'.format(self.USERNAME), msg)
self.assertEqual(
u'Successfully reset the attempts for user {}'.format(self.USERNAME), msg,
)
def test_rescore_state(self):
def test_rescore_problem(self):
"""
Rescore the student
"""
......@@ -152,7 +155,19 @@ class StaffDebugTest(CourseWithoutContentGroupsTest):
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.rescore()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully rescored problem for user STAFF_TESTER', msg)
self.assertEqual(u'Successfully rescored problem for user {}'.format(self.USERNAME), msg)
def test_rescore_problem_if_higher(self):
"""
Rescore the student, updating the grade only if the new score is higher
"""
staff_page = self._goto_staff_page()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.rescore_if_higher()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully rescored problem to improve score for user {}'.format(self.USERNAME), msg)
def test_student_state_delete(self):
"""
......@@ -164,8 +179,7 @@ class StaffDebugTest(CourseWithoutContentGroupsTest):
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.delete_state()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully deleted student state '
'for user {}'.format(self.USERNAME), msg)
self.assertEqual(u'Successfully deleted student state for user {}'.format(self.USERNAME), msg)
def test_student_by_email(self):
"""
......@@ -177,8 +191,7 @@ class StaffDebugTest(CourseWithoutContentGroupsTest):
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.reset_attempts(self.EMAIL)
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully reset the attempts '
'for user {}'.format(self.EMAIL), msg)
self.assertEqual(u'Successfully reset the attempts for user {}'.format(self.EMAIL), msg)
def test_bad_student(self):
"""
......@@ -189,8 +202,7 @@ class StaffDebugTest(CourseWithoutContentGroupsTest):
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.delete_state('INVALIDUSER')
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Failed to delete student state. '
'User does not exist.', msg)
self.assertEqual(u'Failed to delete student state for user. User does not exist.', msg)
def test_reset_attempts_for_problem_loaded_via_ajax(self):
"""
......@@ -203,8 +215,7 @@ class StaffDebugTest(CourseWithoutContentGroupsTest):
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.reset_attempts()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully reset the attempts '
'for user {}'.format(self.USERNAME), msg)
self.assertEqual(u'Successfully reset the attempts for user {}'.format(self.USERNAME), msg)
def test_rescore_state_for_problem_loaded_via_ajax(self):
"""
......@@ -217,7 +228,7 @@ class StaffDebugTest(CourseWithoutContentGroupsTest):
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.rescore()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully rescored problem for user STAFF_TESTER', msg)
self.assertEqual(u'Successfully rescored problem for user {}'.format(self.USERNAME), msg)
def test_student_state_delete_for_problem_loaded_via_ajax(self):
"""
......@@ -230,8 +241,7 @@ class StaffDebugTest(CourseWithoutContentGroupsTest):
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.delete_state()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully deleted student state '
'for user {}'.format(self.USERNAME), msg)
self.assertEqual(u'Successfully deleted student state for user {}'.format(self.USERNAME), msg)
class CourseWithContentGroupsTest(StaffViewTest):
......
......@@ -172,8 +172,9 @@ class MilestonesTransformerTestCase(CourseStructureTestCase, MilestonesTestCaseM
# block passed in, so we pass in a child of the gating block
lms_gating_api.evaluate_prerequisite(
self.course,
UsageKey.from_string(unicode(self.blocks[gating_block_child].location)),
self.user.id)
self.blocks[gating_block_child],
self.user.id,
)
with self.assertNumQueries(2):
self.get_blocks_and_check_against_expected(self.user, self.ALL_BLOCKS_EXCEPT_SPECIAL)
......
......@@ -16,7 +16,6 @@ from edxmako.shortcuts import render_to_string
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from static_replace import replace_static_urls
from xmodule.modulestore import ModuleStoreEnum
from xmodule.x_module import STUDENT_VIEW
from courseware.access import has_access
......
......@@ -1007,3 +1007,20 @@ def set_score(user_id, usage_key, score, max_score):
student_module.grade = score
student_module.max_grade = max_score
student_module.save()
def get_score(user_id, usage_key):
"""
Get the score and max_score for the specified user and xblock usage.
Returns None if not found.
"""
try:
student_module = StudentModule.objects.get(
student_id=user_id,
module_state_key=usage_key,
course_id=usage_key.course_key,
)
except StudentModule.DoesNotExist:
return None
else:
return student_module.grade, student_module.max_grade
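The new get_score mirrors the existing set_score above it. A short, illustrative helper (not in the patch) shows the pair in use; the record_score_once name is hypothetical.

from courseware.model_data import get_score, set_score

def record_score_once(user_id, usage_key, earned, possible):
    """Hypothetical helper: persist a score only when none has been stored yet."""
    if get_score(user_id, usage_key) is None:
        set_score(user_id, usage_key, earned, possible)
        return True
    return False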
......@@ -8,7 +8,6 @@ import logging
from collections import OrderedDict
from functools import partial
import dogstats_wrapper as dog_stats_api
import newrelic.agent
from capa.xqueue_interface import XQueueInterface
from django.conf import settings
......@@ -18,7 +17,6 @@ from django.core.context_processors import csrf
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponse
from django.test.client import RequestFactory
from django.views.decorators.csrf import csrf_exempt
from edx_proctoring.services import ProctoringService
from eventtracking import tracker
......@@ -34,7 +32,6 @@ from xblock.reference.plugins import FSService
import static_replace
from courseware.access import has_access, get_user_role
from courseware.entrance_exams import (
get_entrance_exam_score,
user_must_complete_entrance_exam,
user_has_passed_entrance_exam
)
......@@ -44,14 +41,14 @@ from courseware.masquerade import (
is_masquerading_as_specific_student,
setup_masquerade,
)
from courseware.model_data import DjangoKeyValueStore, FieldDataCache, set_score
from lms.djangoapps.grades.signals.signals import SCORE_CHANGED
from courseware.model_data import DjangoKeyValueStore, FieldDataCache
from edxmako.shortcuts import render_to_string
from lms.djangoapps.grades.signals.signals import SCORE_PUBLISHED
from lms.djangoapps.lms_xblock.field_data import LmsFieldData
from lms.djangoapps.lms_xblock.models import XBlockAsidesConfig
from openedx.core.djangoapps.bookmarks.services import BookmarksService
from lms.djangoapps.lms_xblock.runtime import LmsModuleSystem
from lms.djangoapps.verify_student.services import VerificationService, ReverificationService
from openedx.core.djangoapps.bookmarks.services import BookmarksService
from openedx.core.djangoapps.credit.services import CreditService
from openedx.core.djangoapps.util.user_utils import SystemUser
from openedx.core.lib.xblock_utils import (
......@@ -466,89 +463,17 @@ def get_module_system_for_user(user, student_data, # TODO # pylint: disable=to
course=course
)
def _fulfill_content_milestones(user, course_key, content_key):
"""
Internal helper to handle milestone fulfillments for the specified content module
"""
# Fulfillment Use Case: Entrance Exam
# If this module is part of an entrance exam, we'll need to see if the student
# has reached the point at which they can collect the associated milestone
if milestones_helpers.is_entrance_exams_enabled():
course = modulestore().get_course(course_key)
content = modulestore().get_item(content_key)
entrance_exam_enabled = getattr(course, 'entrance_exam_enabled', False)
in_entrance_exam = getattr(content, 'in_entrance_exam', False)
if entrance_exam_enabled and in_entrance_exam:
# We don't have access to the true request object in this context, but we can use a mock
request = RequestFactory().request()
request.user = user
exam_pct = get_entrance_exam_score(request, course)
if exam_pct >= course.entrance_exam_minimum_score_pct:
exam_key = UsageKey.from_string(course.entrance_exam_id)
relationship_types = milestones_helpers.get_milestone_relationship_types()
content_milestones = milestones_helpers.get_course_content_milestones(
course_key,
exam_key,
relationship=relationship_types['FULFILLS']
)
# Add each milestone to the user's set...
user = {'id': request.user.id}
for milestone in content_milestones:
milestones_helpers.add_user_milestone(user, milestone)
def handle_grade_event(block, event_type, event): # pylint: disable=unused-argument
"""
Manages the workflow for recording and updating of student module grade state
"""
user_id = user.id
grade = event.get('value')
max_grade = event.get('max_value')
set_score(
user_id,
descriptor.location,
grade,
max_grade,
)
# Bin score into range and increment stats
score_bucket = get_score_bucket(grade, max_grade)
tags = [
u"org:{}".format(course_id.org),
u"course:{}".format(course_id),
u"score_bucket:{0}".format(score_bucket)
]
if grade_bucket_type is not None:
tags.append('type:%s' % grade_bucket_type)
dog_stats_api.increment("lms.courseware.question_answered", tags=tags)
# Cycle through the milestone fulfillment scenarios to see if any are now applicable
# thanks to the updated grading information that was just submitted
_fulfill_content_milestones(
user,
course_id,
descriptor.location,
)
# Send a signal out to any listeners who are waiting for score change
# events.
SCORE_CHANGED.send(
sender=None,
points_possible=event['max_value'],
points_earned=event['value'],
user_id=user.id,
course_id=unicode(course_id),
usage_id=unicode(descriptor.location)
)
def publish(block, event_type, event):
"""A function that allows XModules to publish events."""
if event_type == 'grade' and not is_masquerading_as_specific_student(user, course_id):
handle_grade_event(block, event_type, event)
SCORE_PUBLISHED.send(
sender=None,
block=block,
user=user,
raw_earned=event['value'],
raw_possible=event['max_value'],
only_if_higher=event.get('only_if_higher'),
)
else:
aside_context = {}
for aside in block.runtime.get_asides(block):
......@@ -1138,20 +1063,6 @@ def xblock_view(request, course_id, usage_id, view_name):
})
def get_score_bucket(grade, max_grade):
"""
Function to split arbitrary score ranges into 3 buckets.
Used with statsd tracking.
"""
score_bucket = "incorrect"
if grade > 0 and grade < max_grade:
score_bucket = "partial"
elif grade == max_grade:
score_bucket = "correct"
return score_bucket
def _check_files_limits(files):
"""
Check if the files in a request are under the limits defined by
......
"""
Helpers for courseware tests.
"""
import crum
import json
from django.contrib.auth.models import User
......@@ -19,6 +23,9 @@ def get_request_for_user(user):
request.is_secure = lambda: True
request.get_host = lambda: "edx.org"
request.method = 'GET'
request.GET = {}
request.POST = {}
crum.set_current_request(request)
return request
......
......@@ -253,14 +253,6 @@ class ModuleRenderTestCase(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
self.dispatch
)
def test_get_score_bucket(self):
self.assertEquals(render.get_score_bucket(0, 10), 'incorrect')
self.assertEquals(render.get_score_bucket(1, 10), 'partial')
self.assertEquals(render.get_score_bucket(10, 10), 'correct')
# get_score_bucket calls error cases 'incorrect'
self.assertEquals(render.get_score_bucket(11, 10), 'incorrect')
self.assertEquals(render.get_score_bucket(-1, 10), 'incorrect')
def test_anonymous_handle_xblock_callback(self):
dispatch_url = reverse(
'xblock_handler',
......@@ -1839,7 +1831,7 @@ class TestXmoduleRuntimeEvent(TestSubmittingProblems):
self.assertIsNone(student_module.grade)
self.assertIsNone(student_module.max_grade)
@patch('courseware.module_render.SCORE_CHANGED.send')
@patch('lms.djangoapps.grades.signals.handlers.SCORE_CHANGED.send')
def test_score_change_signal(self, send_mock):
"""Test that a Django signal is generated when a score changes"""
self.set_module_grade_using_publish(self.grade_dict)
......@@ -1849,7 +1841,8 @@ class TestXmoduleRuntimeEvent(TestSubmittingProblems):
'points_earned': self.grade_dict['value'],
'user_id': self.student_user.id,
'course_id': unicode(self.course.id),
'usage_id': unicode(self.problem.location)
'usage_id': unicode(self.problem.location),
'only_if_higher': None,
}
send_mock.assert_called_with(**expected_signal_kwargs)
......
......@@ -153,7 +153,7 @@ class TestSubmittingProblems(ModuleStoreTestCase, LoginEnrollmentTestCase, Probl
self.student_user = User.objects.get(email=self.student)
self.factory = RequestFactory()
# Disable the score change signal to prevent other components from being pulled into tests.
self.score_changed_signal_patch = patch('courseware.module_render.SCORE_CHANGED.send')
self.score_changed_signal_patch = patch('lms.djangoapps.grades.signals.handlers.SCORE_CHANGED.send')
self.score_changed_signal_patch.start()
def tearDown(self):
......
"""
API for the gating djangoapp
"""
import logging
import json
from collections import defaultdict
from django.contrib.auth.models import User
from xmodule.modulestore.django import modulestore
from django.test.client import RequestFactory
import json
import logging
from openedx.core.lib.gating import api as gating_api
from opaque_keys.edx.keys import UsageKey
from lms.djangoapps.courseware.entrance_exams import get_entrance_exam_score
from lms.djangoapps.grades.module_grades import get_module_score
from util import milestones_helpers
log = logging.getLogger(__name__)
......@@ -34,7 +37,7 @@ def _get_xblock_parent(xblock, category=None):
@gating_api.gating_enabled(default=False)
def evaluate_prerequisite(course, prereq_content_key, user_id):
def evaluate_prerequisite(course, block, user_id):
"""
Finds the parent subsection of the content in the course and evaluates
any milestone relationships attached to that subsection. If the calculated
......@@ -42,15 +45,14 @@ def evaluate_prerequisite(course, prereq_content_key, user_id):
dependent subsections, the related milestone will be fulfilled for the user.
Arguments:
user_id (int): ID of User for which evaluation should occur
course (CourseModule): The course
prereq_content_key (UsageKey): The prerequisite content usage key
user_id (int): ID of User for which evaluation should occur
Returns:
None
"""
xblock = modulestore().get_item(prereq_content_key)
sequential = _get_xblock_parent(xblock, 'sequential')
sequential = _get_xblock_parent(block, 'sequential')
if sequential:
prereq_milestone = gating_api.get_gating_milestone(
course.id,
......@@ -83,3 +85,32 @@ def evaluate_prerequisite(course, prereq_content_key, user_id):
milestones_helpers.add_user_milestone({'id': user_id}, prereq_milestone)
else:
milestones_helpers.remove_user_milestone({'id': user_id}, prereq_milestone)
def evaluate_entrance_exam(course, block, user_id):
"""
Update milestone fulfillments for the specified content module
"""
# Fulfillment Use Case: Entrance Exam
# If this module is part of an entrance exam, we'll need to see if the student
# has reached the point at which they can collect the associated milestone
if milestones_helpers.is_entrance_exams_enabled():
entrance_exam_enabled = getattr(course, 'entrance_exam_enabled', False)
in_entrance_exam = getattr(block, 'in_entrance_exam', False)
if entrance_exam_enabled and in_entrance_exam:
# We don't have access to the true request object in this context, but we can use a mock
request = RequestFactory().request()
request.user = User.objects.get(id=user_id)
exam_pct = get_entrance_exam_score(request, course)
if exam_pct >= course.entrance_exam_minimum_score_pct:
exam_key = UsageKey.from_string(course.entrance_exam_id)
relationship_types = milestones_helpers.get_milestone_relationship_types()
content_milestones = milestones_helpers.get_course_content_milestones(
course.id,
exam_key,
relationship=relationship_types['FULFILLS']
)
# Add each milestone to the user's set...
user = {'id': request.user.id}
for milestone in content_milestones:
milestones_helpers.add_user_milestone(user, milestone)
......@@ -2,10 +2,11 @@
Signal handlers for the gating djangoapp
"""
from django.dispatch import receiver
from gating import api as gating_api
from lms.djangoapps.grades.signals.signals import SCORE_CHANGED
from opaque_keys.edx.keys import CourseKey, UsageKey
from xmodule.modulestore.django import modulestore
from lms.djangoapps.grades.signals.signals import SCORE_CHANGED
from gating import api as gating_api
@receiver(SCORE_CHANGED)
......@@ -22,9 +23,6 @@ def handle_score_changed(**kwargs):
None
"""
course = modulestore().get_course(CourseKey.from_string(kwargs.get('course_id')))
if course.enable_subsection_gating:
gating_api.evaluate_prerequisite(
course,
UsageKey.from_string(kwargs.get('usage_id')),
kwargs.get('user_id'),
)
block = modulestore().get_item(UsageKey.from_string(kwargs.get('usage_id')))
gating_api.evaluate_prerequisite(course, block, kwargs.get('user_id'))
gating_api.evaluate_entrance_exam(course, block, kwargs.get('user_id'))
......@@ -134,7 +134,7 @@ class TestEvaluatePrerequisite(GatingTestCase, MilestonesTestCaseMixin):
self._setup_gating_milestone(50)
mock_module_score.return_value = module_score
evaluate_prerequisite(self.course, self.prob1.location, self.user.id)
evaluate_prerequisite(self.course, self.prob1, self.user.id)
self.assertEqual(milestones_api.user_has_milestone(self.user_dict, self.prereq_milestone), result)
@patch('gating.api.log.warning')
......@@ -147,7 +147,7 @@ class TestEvaluatePrerequisite(GatingTestCase, MilestonesTestCaseMixin):
self._setup_gating_milestone(None)
mock_module_score.return_value = module_score
evaluate_prerequisite(self.course, self.prob1.location, self.user.id)
evaluate_prerequisite(self.course, self.prob1, self.user.id)
self.assertEqual(milestones_api.user_has_milestone(self.user_dict, self.prereq_milestone), result)
self.assertTrue(mock_log.called)
......@@ -155,14 +155,14 @@ class TestEvaluatePrerequisite(GatingTestCase, MilestonesTestCaseMixin):
def test_orphaned_xblock(self, mock_module_score):
""" Test test_orphaned_xblock """
evaluate_prerequisite(self.course, self.prob2.location, self.user.id)
evaluate_prerequisite(self.course, self.prob2, self.user.id)
self.assertFalse(mock_module_score.called)
@patch('gating.api.get_module_score')
def test_no_prerequisites(self, mock_module_score):
""" Test test_no_prerequisites """
evaluate_prerequisite(self.course, self.prob1.location, self.user.id)
evaluate_prerequisite(self.course, self.prob1, self.user.id)
self.assertFalse(mock_module_score.called)
@patch('gating.api.get_module_score')
......@@ -172,5 +172,5 @@ class TestEvaluatePrerequisite(GatingTestCase, MilestonesTestCaseMixin):
# Setup gating milestones data
gating_api.add_prerequisite(self.course.id, self.seq1.location)
evaluate_prerequisite(self.course, self.prob1.location, self.user.id)
evaluate_prerequisite(self.course, self.prob1, self.user.id)
self.assertFalse(mock_module_score.called)
......@@ -20,7 +20,7 @@ class TestHandleScoreChanged(ModuleStoreTestCase):
super(TestHandleScoreChanged, self).setUp()
self.course = CourseFactory.create(org='TestX', number='TS01', run='2016_Q1')
self.user = UserFactory.create()
self.test_usage_key = UsageKey.from_string('i4x://the/content/key/12345678')
self.test_usage_key = self.course.location
@patch('gating.signals.gating_api.evaluate_prerequisite')
def test_gating_enabled(self, mock_evaluate):
......@@ -35,7 +35,7 @@ class TestHandleScoreChanged(ModuleStoreTestCase):
course_id=unicode(self.course.id),
usage_id=unicode(self.test_usage_key)
)
mock_evaluate.assert_called_with(self.course, self.test_usage_key, self.user.id) # pylint: disable=no-member
mock_evaluate.assert_called_with(self.course, self.course, self.user.id) # pylint: disable=no-member
@patch('gating.signals.gating_api.evaluate_prerequisite')
def test_gating_disabled(self, mock_evaluate):
......
......@@ -11,6 +11,7 @@ from courseware.model_data import ScoresClient
from lms.djangoapps.grades.scores import get_score, possibly_scored
from lms.djangoapps.grades.models import BlockRecord, PersistentSubsectionGrade
from lms.djangoapps.grades.config.models import PersistentGradesEnabledFlag
from openedx.core.lib.grade_utils import is_score_higher
from student.models import anonymous_id_for_user, User
from submissions import api as submissions_api
from traceback import format_exc
......@@ -74,6 +75,7 @@ class SubsectionGrade(object):
self.all_total, self.graded_total = graders.aggregate_scores(self.scores, self.display_name, self.location)
self._log_event(log.debug, u"init_from_structure", student)
return self
def init_from_model(self, student, model, course_structure, submissions_scores, csm_scores):
"""
......@@ -97,6 +99,7 @@ class SubsectionGrade(object):
module_id=self.location,
)
self._log_event(log.debug, u"init_from_model", student)
return self
@classmethod
def bulk_create_models(cls, student, subsection_grades, course_key):
......@@ -222,10 +225,9 @@ class SubsectionGradeFactory(object):
)
block_structure = self._get_block_structure(block_structure)
subsection_grade = self._get_saved_grade(subsection, block_structure)
subsection_grade = self._get_bulk_cached_grade(subsection, block_structure)
if not subsection_grade:
subsection_grade = SubsectionGrade(subsection, self.course)
subsection_grade.init_from_structure(
subsection_grade = SubsectionGrade(subsection, self.course).init_from_structure(
self.student, block_structure, self._submissions_scores, self._csm_scores,
)
if PersistentGradesEnabledFlag.feature_enabled(self.course.id):
......@@ -247,7 +249,7 @@ class SubsectionGradeFactory(object):
SubsectionGrade.bulk_create_models(self.student, self._unsaved_subsection_grades, self.course.id)
self._unsaved_subsection_grades = []
def update(self, subsection, block_structure=None):
def update(self, subsection, block_structure=None, only_if_higher=None):
"""
Updates the SubsectionGrade object for the student and subsection.
"""
......@@ -259,14 +261,30 @@ class SubsectionGradeFactory(object):
self._log_event(log.warning, u"update, subsection: {}".format(subsection.location))
block_structure = self._get_block_structure(block_structure)
subsection_grade = SubsectionGrade(subsection, self.course)
subsection_grade.init_from_structure(
self.student, block_structure, self._submissions_scores, self._csm_scores
calculated_grade = SubsectionGrade(subsection, self.course).init_from_structure(
self.student, block_structure, self._submissions_scores, self._csm_scores,
)
grade_model = subsection_grade.update_or_create_model(self.student)
if only_if_higher:
try:
grade_model = PersistentSubsectionGrade.read_grade(self.student.id, subsection.location)
except PersistentSubsectionGrade.DoesNotExist:
pass
else:
orig_subsection_grade = SubsectionGrade(subsection, self.course).init_from_model(
self.student, grade_model, block_structure, self._submissions_scores, self._csm_scores,
)
if not is_score_higher(
orig_subsection_grade.graded_total.earned,
orig_subsection_grade.graded_total.possible,
calculated_grade.graded_total.earned,
calculated_grade.graded_total.possible,
):
return orig_subsection_grade
grade_model = calculated_grade.update_or_create_model(self.student)
self._update_saved_subsection_grade(subsection.location, grade_model)
return subsection_grade
return calculated_grade
@lazy
def _csm_scores(self):
......@@ -286,33 +304,34 @@ class SubsectionGradeFactory(object):
anonymous_user_id = anonymous_id_for_user(self.student, self.course.id)
return submissions_api.get_scores(unicode(self.course.id), anonymous_user_id)
def _get_saved_grade(self, subsection, block_structure): # pylint: disable=unused-argument
def _get_bulk_cached_grade(self, subsection, block_structure): # pylint: disable=unused-argument
"""
Returns the saved grade for the student and subsection.
Returns the student's SubsectionGrade for the subsection,
while caching the results of a bulk retrieval for the
course, for future access of other subsections.
Returns None if not found.
"""
if not PersistentGradesEnabledFlag.feature_enabled(self.course.id):
return
saved_subsection_grade = self._get_saved_subsection_grade(subsection.location)
if saved_subsection_grade:
subsection_grade = SubsectionGrade(subsection, self.course)
subsection_grade.init_from_model(
self.student, saved_subsection_grade, block_structure, self._submissions_scores, self._csm_scores,
saved_subsection_grades = self._get_bulk_cached_subsection_grades()
subsection_grade = saved_subsection_grades.get(subsection.location)
if subsection_grade:
return SubsectionGrade(subsection, self.course).init_from_model(
self.student, subsection_grade, block_structure, self._submissions_scores, self._csm_scores,
)
return subsection_grade
def _get_saved_subsection_grade(self, subsection_usage_key):
def _get_bulk_cached_subsection_grades(self):
"""
Returns the saved value of the subsection grade for
the given subsection usage key, caching the value.
Returns None if not found.
Returns and caches (for future access) the results of
a bulk retrieval of all subsection grades in the course.
"""
if self._cached_subsection_grades is None:
self._cached_subsection_grades = {
record.full_usage_key: record
for record in PersistentSubsectionGrade.bulk_read_grades(self.student.id, self.course.id)
}
return self._cached_subsection_grades.get(subsection_usage_key)
return self._cached_subsection_grades
def _update_saved_subsection_grade(self, subsection_usage_key, subsection_model):
"""
......
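Not part of the patch: a fragment showing the new only_if_higher path on SubsectionGradeFactory.update, mirroring test_update_if_higher further down. The subsection_grade_factory and sequence names are assumed to be set up as in the grades tests; they are not defined in this hunk.

# Assumed setup (see GradeTestBase in the tests below): a SubsectionGradeFactory
# for a student/course and a graded `sequence` block.
grade = subsection_grade_factory.update(sequence, only_if_higher=True)
# When a persisted grade already exists and is at least as high as the freshly
# calculated one, the original SubsectionGrade is returned and nothing is written;
# otherwise the higher, recalculated grade is saved and returned.
earned, possible = grade.graded_total.earned, grade.graded_total.possible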
......@@ -5,12 +5,15 @@ Grades related signals.
from django.dispatch import receiver
from logging import getLogger
from courseware.model_data import get_score, set_score
from openedx.core.lib.grade_utils import is_score_higher
from student.models import user_by_anonymous_id
from submissions.models import score_set, score_reset
from .signals import SCORE_CHANGED
from .signals import SCORE_CHANGED, SCORE_PUBLISHED
from ..tasks import recalculate_subsection_grade
log = getLogger(__name__)
......@@ -40,8 +43,8 @@ def submissions_score_set_handler(sender, **kwargs): # pylint: disable=unused-a
SCORE_CHANGED.send(
sender=None,
points_possible=points_possible,
points_earned=points_earned,
points_possible=points_possible,
user_id=user.id,
course_id=course_id,
usage_id=usage_id
......@@ -70,17 +73,61 @@ def submissions_score_reset_handler(sender, **kwargs): # pylint: disable=unused
SCORE_CHANGED.send(
sender=None,
points_possible=0,
points_earned=0,
points_possible=0,
user_id=user.id,
course_id=course_id,
usage_id=usage_id
)
@receiver(SCORE_PUBLISHED)
def score_published_handler(sender, block, user, raw_earned, raw_possible, only_if_higher, **kwargs): # pylint: disable=unused-argument
"""
Handles whenever a block's score is published.
Returns whether the score was actually updated.
"""
update_score = True
if only_if_higher:
previous_score = get_score(user.id, block.location)
if previous_score:
prev_raw_earned, prev_raw_possible = previous_score # pylint: disable=unpacking-non-sequence
if not is_score_higher(prev_raw_earned, prev_raw_possible, raw_earned, raw_possible):
update_score = False
log.warning(
u"Grades: Rescore is not higher than previous: "
u"user: {}, block: {}, previous: {}/{}, new: {}/{} ".format(
user, block.location, prev_raw_earned, prev_raw_possible, raw_earned, raw_possible,
)
)
if update_score:
set_score(user.id, block.location, raw_earned, raw_possible)
SCORE_CHANGED.send(
sender=None,
points_earned=raw_earned,
points_possible=raw_possible,
user_id=user.id,
course_id=unicode(block.location.course_key),
usage_id=unicode(block.location),
only_if_higher=only_if_higher,
)
return update_score
@receiver(SCORE_CHANGED)
def enqueue_update(sender, **kwargs): # pylint: disable=unused-argument
def enqueue_grade_update(sender, **kwargs): # pylint: disable=unused-argument
"""
Handles the SCORE_CHANGED signal by enqueueing an update operation to occur asynchronously.
"""
recalculate_subsection_grade.apply_async(args=(kwargs['user_id'], kwargs['course_id'], kwargs['usage_id']))
recalculate_subsection_grade.apply_async(
args=(
kwargs['user_id'],
kwargs['course_id'],
kwargs['usage_id'],
kwargs.get('only_if_higher'),
)
)
......@@ -12,10 +12,24 @@ from django.dispatch import Signal
# receives the same score).
SCORE_CHANGED = Signal(
providing_args=[
'points_possible', # Maximum score available for the exercise
'points_earned', # Score obtained by the user
'user_id', # Integer User ID
'course_id', # Unicode string representing the course
'usage_id' # Unicode string indicating the courseware instance
'usage_id', # Unicode string indicating the courseware instance
'points_earned', # Score obtained by the user
'points_possible', # Maximum score available for the exercise
'only_if_higher', # Boolean indicating whether updates should be
# made only if the new score is higher than previous.
]
)
SCORE_PUBLISHED = Signal(
providing_args=[
'block', # Course block object
'user', # User object
'raw_earned', # Score obtained by the user
'raw_possible', # Maximum score available for the exercise
'only_if_higher', # Boolean indicating whether updates should be
# made only if the new score is higher than previous.
]
)
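Minimal sketch (not part of the patch): subscribing to the new SCORE_PUBLISHED signal. The handler's keyword arguments follow the providing_args above; the logging receiver itself is purely illustrative.

from logging import getLogger

from django.dispatch import receiver

from lms.djangoapps.grades.signals.signals import SCORE_PUBLISHED

log = getLogger(__name__)

@receiver(SCORE_PUBLISHED)
def log_published_score(sender, block, user, raw_earned, raw_possible, only_if_higher, **kwargs):  # pylint: disable=unused-argument
    """Illustrative receiver: log every raw score published by a block."""
    log.info(
        u"score published: user=%s, block=%s, score=%s/%s, only_if_higher=%s",
        user.id, block.location, raw_earned, raw_possible, only_if_higher,
    )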
......@@ -8,10 +8,10 @@ from django.contrib.auth.models import User
from django.db.utils import IntegrityError
from lms.djangoapps.course_blocks.api import get_course_blocks
from lms.djangoapps.courseware.courses import get_course_by_id
from opaque_keys.edx.keys import UsageKey
from opaque_keys.edx.locator import CourseLocator
from openedx.core.djangoapps.content.block_structure.api import get_course_in_cache
from xmodule.modulestore.django import modulestore
from .config.models import PersistentGradesEnabledFlag
from .transformer import GradesTransformer
......@@ -19,13 +19,16 @@ from .new.subsection_grade import SubsectionGradeFactory
@task(default_retry_delay=30, routing_key=settings.RECALCULATE_GRADES_ROUTING_KEY)
def recalculate_subsection_grade(user_id, course_id, usage_id):
def recalculate_subsection_grade(user_id, course_id, usage_id, only_if_higher):
"""
Updates a saved subsection grade.
This method expects the following parameters:
- user_id: serialized id of applicable User object
- course_id: Unicode string representing the course
- usage_id: Unicode string indicating the courseware instance
- only_if_higher: boolean indicating whether grades should
be updated only if the new grade is higher than the previous
value.
"""
course_key = CourseLocator.from_string(course_id)
if not PersistentGradesEnabledFlag.feature_enabled(course_key):
......@@ -35,7 +38,7 @@ def recalculate_subsection_grade(user_id, course_id, usage_id):
scored_block_usage_key = UsageKey.from_string(usage_id).replace(course_key=course_key)
collected_block_structure = get_course_in_cache(course_key)
course = get_course_by_id(course_key, depth=0)
course = modulestore().get_course(course_key, depth=0)
subsection_grade_factory = SubsectionGradeFactory(student, course, collected_block_structure)
subsections_to_update = collected_block_structure.get_transformer_block_field(
scored_block_usage_key,
......@@ -52,7 +55,9 @@ def recalculate_subsection_grade(user_id, course_id, usage_id):
collected_block_structure=collected_block_structure,
)
subsection_grade_factory.update(
transformed_subsection_structure[subsection_usage_key], transformed_subsection_structure
transformed_subsection_structure[subsection_usage_key],
transformed_subsection_structure,
only_if_higher,
)
except IntegrityError as exc:
raise recalculate_subsection_grade.retry(args=[user_id, course_id, usage_id, only_if_higher], exc=exc)
......@@ -106,7 +106,7 @@ class TestCourseGradeFactory(GradeTestBase):
@ddt.ddt
class SubsectionGradeFactoryTest(GradeTestBase):
class TestSubsectionGradeFactory(GradeTestBase, ProblemSubmissionTestMixin):
"""
Tests for SubsectionGradeFactory functionality.
......@@ -115,17 +115,36 @@ class SubsectionGradeFactoryTest(GradeTestBase):
enable saving subsection grades blocks/enables that feature as expected.
"""
def assert_grade(self, grade, expected_earned, expected_possible):
"""
Asserts that the given grade object has the expected score.
"""
self.assertEqual(
(grade.all_total.earned, grade.all_total.possible),
(expected_earned, expected_possible),
)
def test_create(self):
"""
Tests to ensure that a persistent subsection grade is created, saved, then fetched on re-request.
Assuming the underlying score reporting methods work,
test that the score is calculated properly.
"""
with mock_get_score(1, 2):
grade = self.subsection_grade_factory.create(self.sequence)
self.assert_grade(grade, 1, 2)
def test_create_internals(self):
"""
Tests to ensure that a persistent subsection grade is
created, saved, then fetched on re-request.
"""
with patch(
'lms.djangoapps.grades.new.subsection_grade.PersistentSubsectionGrade.create_grade',
wraps=PersistentSubsectionGrade.create_grade
) as mock_create_grade:
with patch(
'lms.djangoapps.grades.new.subsection_grade.SubsectionGradeFactory._get_saved_grade',
wraps=self.subsection_grade_factory._get_saved_grade
'lms.djangoapps.grades.new.subsection_grade.SubsectionGradeFactory._get_bulk_cached_grade',
wraps=self.subsection_grade_factory._get_bulk_cached_grade
) as mock_get_saved_grade:
with self.assertNumQueries(14):
grade_a = self.subsection_grade_factory.create(self.sequence)
......@@ -143,6 +162,30 @@ class SubsectionGradeFactoryTest(GradeTestBase):
self.assertEqual(grade_a.url_name, grade_b.url_name)
self.assertEqual(grade_a.all_total, grade_b.all_total)
def test_update(self):
"""
Assuming the underlying score reporting methods work,
test that the score is calculated properly.
"""
with mock_get_score(1, 2):
grade = self.subsection_grade_factory.update(self.sequence)
self.assert_grade(grade, 1, 2)
def test_update_if_higher(self):
def verify_update_if_higher(mock_score, expected_grade):
"""
Updates the subsection grade and verifies the
resulting grade is as expected.
"""
with mock_get_score(*mock_score):
grade = self.subsection_grade_factory.update(self.sequence, only_if_higher=True)
self.assert_grade(grade, *expected_grade)
verify_update_if_higher((1, 2), (1, 2)) # previous value was non-existent
verify_update_if_higher((2, 4), (1, 2)) # previous value was equivalent
verify_update_if_higher((1, 4), (1, 2)) # previous value was greater
verify_update_if_higher((3, 4), (3, 4)) # previous value was less
@ddt.data(
(
'lms.djangoapps.grades.new.subsection_grade.SubsectionGrade.create_model',
......@@ -162,7 +205,8 @@ class SubsectionGradeFactoryTest(GradeTestBase):
with patch(underlying_method) as underlying:
underlying.side_effect = DatabaseError("I'm afraid I can't do that")
method_to_test(self)
# By making it this far, we implicitly assert "the factory method swallowed the exception correctly"
# By making it this far, we implicitly assert
# "the factory method swallowed the exception correctly"
self.assertTrue(
log_mock.warning.call_args_list[0].startswith("Persistent Grades: Persistence Error, falling back.")
)
......@@ -196,18 +240,10 @@ class SubsectionGradeTest(GradeTestBase):
Tests SubsectionGrade functionality.
"""
def test_compute(self):
"""
Assuming the underlying score reporting methods work, test that the score is calculated properly.
"""
with mock_get_score(1, 2):
grade = self.subsection_grade_factory.create(self.sequence)
self.assertEqual(grade.all_total.earned, 1)
self.assertEqual(grade.all_total.possible, 2)
def test_save_and_load(self):
"""
Test that grades are persisted to the database properly, and that loading saved grades returns the same data.
Test that grades are persisted to the database properly,
and that loading saved grades returns the same data.
"""
# Create a grade that *isn't* saved to the database
input_grade = SubsectionGrade(self.sequence, self.course)
......
......@@ -2,6 +2,7 @@
Tests for the functionality and infrastructure of grades tasks.
"""
from collections import OrderedDict
import ddt
from django.conf import settings
from django.db.utils import IntegrityError
......@@ -47,11 +48,12 @@ class RecalculateSubsectionGradeTest(ModuleStoreTestCase):
self.sequential = ItemFactory.create(parent=self.chapter, category='sequential', display_name="Open Sequential")
self.problem = ItemFactory.create(parent=self.sequential, category='problem', display_name='problem')
self.score_changed_kwargs = {
'user_id': self.user.id,
'course_id': unicode(self.course.id),
'usage_id': unicode(self.problem.location),
}
self.score_changed_kwargs = OrderedDict([
('user_id', self.user.id),
('course_id', unicode(self.course.id)),
('usage_id', unicode(self.problem.location)),
('only_if_higher', None),
])
# this call caches the anonymous id on the user object, saving 4 queries in all happy path tests
_ = anonymous_id_for_user(self.user, self.course.id)
......@@ -67,13 +69,7 @@ class RecalculateSubsectionGradeTest(ModuleStoreTestCase):
return_value=None
) as mock_task_apply:
SCORE_CHANGED.send(sender=None, **self.score_changed_kwargs)
mock_task_apply.assert_called_once_with(
args=(
self.score_changed_kwargs['user_id'],
self.score_changed_kwargs['course_id'],
self.score_changed_kwargs['usage_id'],
)
)
mock_task_apply.assert_called_once_with(args=tuple(self.score_changed_kwargs.values()))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_subsection_grade_updated(self, default_store):
......@@ -81,13 +77,7 @@ class RecalculateSubsectionGradeTest(ModuleStoreTestCase):
self.set_up_course()
self.assertTrue(PersistentGradesEnabledFlag.feature_enabled(self.course.id))
with check_mongo_calls(2) and self.assertNumQueries(13):
recalculate_subsection_grade.apply(
args=(
self.score_changed_kwargs['user_id'],
self.score_changed_kwargs['course_id'],
self.score_changed_kwargs['usage_id'],
)
)
recalculate_subsection_grade.apply(args=tuple(self.score_changed_kwargs.values()))
def test_single_call_to_create_block_structure(self):
self.set_up_course()
......@@ -96,13 +86,7 @@ class RecalculateSubsectionGradeTest(ModuleStoreTestCase):
'openedx.core.lib.block_structure.factory.BlockStructureFactory.create_from_cache',
return_value=None,
) as mock_block_structure_create:
recalculate_subsection_grade.apply(
args=(
self.score_changed_kwargs['user_id'],
self.score_changed_kwargs['course_id'],
self.score_changed_kwargs['usage_id'],
)
)
recalculate_subsection_grade.apply(args=tuple(self.score_changed_kwargs.values()))
self.assertEquals(mock_block_structure_create.call_count, 1)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
......@@ -113,13 +97,7 @@ class RecalculateSubsectionGradeTest(ModuleStoreTestCase):
ItemFactory.create(parent=self.sequential, category='problem', display_name='problem2')
ItemFactory.create(parent=self.sequential, category='problem', display_name='problem3')
with check_mongo_calls(2) and self.assertNumQueries(13):
recalculate_subsection_grade.apply(
args=(
self.score_changed_kwargs['user_id'],
self.score_changed_kwargs['course_id'],
self.score_changed_kwargs['usage_id'],
)
)
recalculate_subsection_grade.apply(args=tuple(self.score_changed_kwargs.values()))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_subsection_grades_not_enabled_on_course(self, default_store):
......@@ -127,13 +105,7 @@ class RecalculateSubsectionGradeTest(ModuleStoreTestCase):
self.set_up_course(enable_subsection_grades=False)
self.assertFalse(PersistentGradesEnabledFlag.feature_enabled(self.course.id))
with check_mongo_calls(2) and self.assertNumQueries(0):
recalculate_subsection_grade.apply(
args=(
self.score_changed_kwargs['user_id'],
self.score_changed_kwargs['course_id'],
self.score_changed_kwargs['usage_id'],
)
)
recalculate_subsection_grade.apply(args=tuple(self.score_changed_kwargs.values()))
@skip("Pending completion of TNL-5089")
@ddt.data(
......@@ -148,13 +120,7 @@ class RecalculateSubsectionGradeTest(ModuleStoreTestCase):
with self.store.default_store(default_store):
self.set_up_course()
with check_mongo_calls(0) and self.assertNumQueries(3 if feature_flag else 2):
recalculate_subsection_grade.apply(
args=(
self.score_changed_kwargs['user_id'],
self.score_changed_kwargs['course_id'],
self.score_changed_kwargs['usage_id'],
)
)
recalculate_subsection_grade.apply(args=tuple(self.score_changed_kwargs.values()))
@patch('lms.djangoapps.grades.tasks.recalculate_subsection_grade.retry')
@patch('lms.djangoapps.grades.new.subsection_grade.SubsectionGradeFactory.update')
......@@ -164,11 +130,5 @@ class RecalculateSubsectionGradeTest(ModuleStoreTestCase):
"""
self.set_up_course()
mock_update.side_effect = IntegrityError("WHAMMY")
recalculate_subsection_grade.apply(
args=(
self.score_changed_kwargs['user_id'],
self.score_changed_kwargs['course_id'],
self.score_changed_kwargs['usage_id'],
)
)
recalculate_subsection_grade.apply(args=tuple(self.score_changed_kwargs.values()))
self.assertTrue(mock_retry.called)
......@@ -9,7 +9,7 @@ from logging import getLogger
import json
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
import courseware.module_render
from lms.djangoapps.course_blocks.transformers.utils import collect_unioned_set_field, get_field_on_block
from openedx.core.lib.block_structure.transformer import BlockStructureTransformer
from openedx.core.djangoapps.util.user_utils import SystemUser
......@@ -186,5 +186,5 @@ class GradesTransformer(BlockStructureTransformer):
for block_locator in block_structure.post_order_traversal():
block = block_structure.get_xblock(block_locator)
if getattr(block, 'has_score', False):
module = get_module_for_descriptor(user, request, block, cache, course_key)
module = courseware.module_render.get_module_for_descriptor(user, request, block, cache, course_key)
yield module
......@@ -3190,7 +3190,7 @@ class TestInstructorAPIRegradeTask(SharedModuleStoreTestCase, LoginEnrollmentTes
})
self.assertEqual(response.status_code, 400)
@patch('courseware.module_render.SCORE_CHANGED.send')
@patch('lms.djangoapps.grades.signals.handlers.SCORE_CHANGED.send')
def test_reset_student_attempts_delete(self, _mock_signal):
""" Test delete single student state. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
......@@ -3469,6 +3469,15 @@ class TestEntranceExamInstructorAPIRegradeTask(SharedModuleStoreTestCase, LoginE
})
self.assertEqual(response.status_code, 200)
def test_rescore_entrance_exam_if_higher_all_student(self):
""" Test rescoring for all students only if higher. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'all_students': True,
'only_if_higher': True,
})
self.assertEqual(response.status_code, 200)
def test_rescore_entrance_exam_all_student_and_single(self):
""" Test re-scoring with both all students and single student parameters. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
......
......@@ -378,7 +378,7 @@ class TestInstructorEnrollmentStudentModule(SharedModuleStoreTestCase):
reset_student_attempts(self.course_key, self.user, msk, requesting_user=self.user)
self.assertEqual(json.loads(module().state)['attempts'], 0)
@mock.patch('courseware.module_render.SCORE_CHANGED.send')
@mock.patch('lms.djangoapps.grades.signals.handlers.SCORE_CHANGED.send')
def test_delete_student_attempts(self, _mock_signal):
msk = self.course_key.make_usage_key('dummy', 'module')
original_state = json.dumps({'attempts': 32, 'otherstuff': 'alsorobots'})
......@@ -404,7 +404,7 @@ class TestInstructorEnrollmentStudentModule(SharedModuleStoreTestCase):
# Disable the score change signal to prevent other components from being
# pulled into tests.
@mock.patch('courseware.module_render.SCORE_CHANGED.send')
@mock.patch('lms.djangoapps.grades.signals.handlers.SCORE_CHANGED.send')
def test_delete_submission_scores(self, _mock_signal):
user = UserFactory()
problem_location = self.course_key.make_usage_key('dummy', 'module')
......
......@@ -49,7 +49,7 @@ class InstructorServiceTests(SharedModuleStoreTestCase):
state=json.dumps({'attempts': 2}),
)
@mock.patch('courseware.module_render.SCORE_CHANGED.send')
@mock.patch('lms.djangoapps.grades.signals.handlers.SCORE_CHANGED.send')
def test_reset_student_attempts_delete(self, _mock_signal):
"""
Test delete student state.
......
......@@ -602,8 +602,8 @@ def students_update_enrollment(request, course_id):
action = request.POST.get('action')
identifiers_raw = request.POST.get('identifiers')
identifiers = _split_input_list(identifiers_raw)
auto_enroll = request.POST.get('auto_enroll') in ['true', 'True', True]
email_students = request.POST.get('email_students') in ['true', 'True', True]
auto_enroll = _get_boolean_param(request, 'auto_enroll')
email_students = _get_boolean_param(request, 'email_students')
is_white_label = CourseMode.is_white_label(course_id)
reason = request.POST.get('reason')
if is_white_label:
......@@ -743,8 +743,8 @@ def bulk_beta_modify_access(request, course_id):
action = request.POST.get('action')
identifiers_raw = request.POST.get('identifiers')
identifiers = _split_input_list(identifiers_raw)
email_students = request.POST.get('email_students') in ['true', 'True', True]
auto_enroll = request.POST.get('auto_enroll') in ['true', 'True', True]
email_students = _get_boolean_param(request, 'email_students')
auto_enroll = _get_boolean_param(request, 'auto_enroll')
results = []
rolename = 'beta'
course = get_course_by_id(course_id)
......@@ -1939,8 +1939,8 @@ def reset_student_attempts(request, course_id):
student = None
if student_identifier is not None:
student = get_student_from_identifier(student_identifier)
all_students = request.POST.get('all_students', False) in ['true', 'True', True]
delete_module = request.POST.get('delete_module', False) in ['true', 'True', True]
all_students = _get_boolean_param(request, 'all_students')
delete_module = _get_boolean_param(request, 'delete_module')
# parameter combinations
if all_students and student:
......@@ -2027,8 +2027,8 @@ def reset_student_attempts_for_entrance_exam(request, course_id): # pylint: dis
student = None
if student_identifier is not None:
student = get_student_from_identifier(student_identifier)
all_students = request.POST.get('all_students', False) in ['true', 'True', True]
delete_module = request.POST.get('delete_module', False) in ['true', 'True', True]
all_students = _get_boolean_param(request, 'all_students')
delete_module = _get_boolean_param(request, 'delete_module')
# parameter combinations
if all_students and student:
......@@ -2092,7 +2092,8 @@ def rescore_problem(request, course_id):
if student_identifier is not None:
student = get_student_from_identifier(student_identifier)
all_students = request.POST.get('all_students') in ['true', 'True', True]
all_students = _get_boolean_param(request, 'all_students')
only_if_higher = _get_boolean_param(request, 'only_if_higher')
if not (problem_to_reset and (all_students or student)):
return HttpResponseBadRequest("Missing query parameters.")
......@@ -2107,19 +2108,26 @@ def rescore_problem(request, course_id):
except InvalidKeyError:
return HttpResponseBadRequest("Unable to parse problem id")
response_payload = {}
response_payload['problem_to_reset'] = problem_to_reset
response_payload = {'problem_to_reset': problem_to_reset}
if student:
response_payload['student'] = student_identifier
lms.djangoapps.instructor_task.api.submit_rescore_problem_for_student(request, module_state_key, student)
response_payload['task'] = 'created'
lms.djangoapps.instructor_task.api.submit_rescore_problem_for_student(
request,
module_state_key,
student,
only_if_higher,
)
elif all_students:
lms.djangoapps.instructor_task.api.submit_rescore_problem_for_all_students(request, module_state_key)
response_payload['task'] = 'created'
lms.djangoapps.instructor_task.api.submit_rescore_problem_for_all_students(
request,
module_state_key,
only_if_higher,
)
else:
return HttpResponseBadRequest()
response_payload['task'] = 'created'
return JsonResponse(response_payload)
......@@ -2146,11 +2154,12 @@ def rescore_entrance_exam(request, course_id):
)
student_identifier = request.POST.get('unique_student_identifier', None)
only_if_higher = _get_boolean_param(request, 'only_if_higher')
student = None
if student_identifier is not None:
student = get_student_from_identifier(student_identifier)
all_students = request.POST.get('all_students') in ['true', 'True', True]
all_students = _get_boolean_param(request, 'all_students')
if not course.entrance_exam_id:
return HttpResponseBadRequest(
......@@ -2172,7 +2181,10 @@ def rescore_entrance_exam(request, course_id):
response_payload['student'] = student_identifier
else:
response_payload['student'] = _("All Students")
lms.djangoapps.instructor_task.api.submit_rescore_entrance_exam_for_student(request, entrance_exam_key, student)
lms.djangoapps.instructor_task.api.submit_rescore_entrance_exam_for_student(
request, entrance_exam_key, student, only_if_higher,
)
response_payload['task'] = 'created'
return JsonResponse(response_payload)
......@@ -3318,3 +3330,12 @@ def validate_request_data_and_get_certificate(certificate_invalidation, course_k
"username/email and the selected course are correct and try again."
).format(student=student.username, course=course_key.course))
return certificate
def _get_boolean_param(request, param_name):
"""
Returns the value of the boolean parameter with the given
name in the POST request. Handles translation from string
values to boolean values.
"""
return request.POST.get(param_name, False) in ['true', 'True', True]
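A minimal behaviour sketch for this helper, assuming _get_boolean_param is importable from the instructor API views module; the fake request object is purely illustrative.
# Only the literal strings 'true'/'True' (or the boolean True) count as truthy;
# anything else, including a missing parameter, yields False.
from django.http import QueryDict


class _FakeRequest(object):
    """Hypothetical stand-in for an HttpRequest, for illustration only."""
    def __init__(self, query_string):
        self.POST = QueryDict(query_string)


assert _get_boolean_param(_FakeRequest('flag=true'), 'flag') is True
assert _get_boolean_param(_FakeRequest('flag=True'), 'flag') is True
assert _get_boolean_param(_FakeRequest('flag=false'), 'flag') is False
assert _get_boolean_param(_FakeRequest('flag=1'), 'flag') is False
assert _get_boolean_param(_FakeRequest(''), 'flag') is False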
......@@ -95,7 +95,7 @@ def get_entrance_exam_instructor_task_history(course_id, usage_key=None, student
# Disabling invalid-name because this fn name is longer than 30 chars.
def submit_rescore_problem_for_student(request, usage_key, student): # pylint: disable=invalid-name
def submit_rescore_problem_for_student(request, usage_key, student, only_if_higher=False): # pylint: disable=invalid-name
"""
Request a problem to be rescored as a background task.
......@@ -110,13 +110,14 @@ def submit_rescore_problem_for_student(request, usage_key, student): # pylint:
# check arguments: let exceptions return up to the caller.
check_arguments_for_rescoring(usage_key)
task_type = 'rescore_problem'
task_type = 'rescore_problem_if_higher' if only_if_higher else 'rescore_problem'
task_class = rescore_problem
task_input, task_key = encode_problem_and_student_input(usage_key, student)
task_input.update({'only_if_higher': only_if_higher})
return submit_task(request, task_type, task_class, usage_key.course_key, task_input, task_key)
def submit_rescore_problem_for_all_students(request, usage_key): # pylint: disable=invalid-name
def submit_rescore_problem_for_all_students(request, usage_key, only_if_higher=False): # pylint: disable=invalid-name
"""
Request a problem to be rescored as a background task.
......@@ -136,10 +137,11 @@ def submit_rescore_problem_for_all_students(request, usage_key): # pylint: disa
task_type = 'rescore_problem'
task_class = rescore_problem
task_input, task_key = encode_problem_and_student_input(usage_key)
task_input.update({'only_if_higher': only_if_higher})
return submit_task(request, task_type, task_class, usage_key.course_key, task_input, task_key)
def submit_rescore_entrance_exam_for_student(request, usage_key, student=None): # pylint: disable=invalid-name
def submit_rescore_entrance_exam_for_student(request, usage_key, student=None, only_if_higher=False): # pylint: disable=invalid-name
"""
Request entrance exam problems to be re-scored as a background task.
......@@ -161,6 +163,7 @@ def submit_rescore_entrance_exam_for_student(request, usage_key, student=None):
task_type = 'rescore_problem'
task_class = rescore_problem
task_input, task_key = encode_entrance_exam_and_student_input(usage_key, student)
task_input.update({'only_if_higher': only_if_higher})
return submit_task(request, task_type, task_class, usage_key.course_key, task_input, task_key)
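With these changes, an improve-only rescore differs at submission time only in the task_type label and the only_if_higher entry added to task_input; a quick call sketch for the per-student variant, with request, usage_key, and student assumed to be in scope:
# Illustrative calls only.
submit_rescore_problem_for_student(request, usage_key, student)
# task_type == 'rescore_problem'; task_input carries only_if_higher=False
submit_rescore_problem_for_student(request, usage_key, student, only_if_higher=True)
# task_type == 'rescore_problem_if_higher'; task_input carries only_if_higher=True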
......
......@@ -310,9 +310,9 @@ def perform_module_state_update(update_fcn, filter_fcn, _entry_id, course_id, ta
argument, which is the query being filtered, and returns the filtered version of the query.
The `update_fcn` is called on each StudentModule that passes the resulting filtering.
It is passed three arguments: the module_descriptor for the module pointed to by the
module_state_key, the particular StudentModule to update, and the xmodule_instance_args being
passed through. If the value returned by the update function evaluates to a boolean True,
It is passed four arguments: the module_descriptor for the module pointed to by the
module_state_key, the particular StudentModule to update, the xmodule_instance_args, and the task_input
being passed through. If the value returned by the update function evaluates to a boolean True,
the update is successful; False indicates the update on the particular student module failed.
A raised exception indicates a fatal condition -- that no other student modules should be considered.
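A minimal stub of an update_fcn under the new four-argument contract, for illustration only; a real implementation (rescore_problem_module_state below) acts on the student module and honors task_input flags such as only_if_higher.
def example_update_module_state(xmodule_instance_args, module_descriptor, student_module, task_input):
    """
    Illustrative stub: inspect task_input (for example task_input.get('only_if_higher'))
    and return one of the UPDATE_STATUS_* constants used by the surrounding code.
    """
    return UPDATE_STATUS_SUCCEEDED
The docstring above describes four arguments while the call site below passes three; presumably the first argument is pre-bound (for example via functools.partial) before the callable reaches this function.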
......@@ -382,7 +382,7 @@ def perform_module_state_update(update_fcn, filter_fcn, _entry_id, course_id, ta
# There is no try here: if there's an error, we let it throw, and the task will
# be marked as FAILED, with a stack trace.
with dog_stats_api.timer('instructor_tasks.module.time.step', tags=[u'action:{name}'.format(name=action_name)]):
update_status = update_fcn(module_descriptor, module_to_update)
update_status = update_fcn(module_descriptor, module_to_update, task_input)
if update_status == UPDATE_STATUS_SUCCEEDED:
# If the update_fcn returns true, then it performed some kind of work.
# Logging of failures is left to the update_fcn itself.
......@@ -470,7 +470,7 @@ def _get_module_instance_for_task(course_id, student, module_descriptor, xmodule
@outer_atomic
def rescore_problem_module_state(xmodule_instance_args, module_descriptor, student_module):
def rescore_problem_module_state(xmodule_instance_args, module_descriptor, student_module, task_input):
'''
Takes an XModule descriptor and a corresponding StudentModule object, and
performs rescoring on the student's problem submission.
......@@ -517,7 +517,7 @@ def rescore_problem_module_state(xmodule_instance_args, module_descriptor, stude
msg = "Specified problem does not support rescoring."
raise UpdateProblemModuleStateError(msg)
result = instance.rescore_problem()
result = instance.rescore_problem(only_if_higher=task_input['only_if_higher'])
instance.save()
if 'success' not in result:
# don't consider these fatal, but false means that the individual call didn't complete:
......@@ -559,7 +559,7 @@ def rescore_problem_module_state(xmodule_instance_args, module_descriptor, stude
@outer_atomic
def reset_attempts_module_state(xmodule_instance_args, _module_descriptor, student_module):
def reset_attempts_module_state(xmodule_instance_args, _module_descriptor, student_module, _task_input):
"""
Resets problem attempts to zero for specified `student_module`.
......@@ -586,7 +586,7 @@ def reset_attempts_module_state(xmodule_instance_args, _module_descriptor, stude
@outer_atomic
def delete_problem_module_state(xmodule_instance_args, _module_descriptor, student_module):
def delete_problem_module_state(xmodule_instance_args, _module_descriptor, student_module, _task_input):
"""
Delete the StudentModule entry.
......
......@@ -140,35 +140,27 @@ class TestRescoringTask(TestIntegrationTask):
expected_subsection_grade,
)
def submit_rescore_all_student_answers(self, instructor, problem_url_name):
def submit_rescore_all_student_answers(self, instructor, problem_url_name, only_if_higher=False):
"""Submits the particular problem for rescoring"""
return submit_rescore_problem_for_all_students(self.create_task_request(instructor),
InstructorTaskModuleTestCase.problem_location(problem_url_name))
return submit_rescore_problem_for_all_students(
self.create_task_request(instructor),
InstructorTaskModuleTestCase.problem_location(problem_url_name),
only_if_higher,
)
def submit_rescore_one_student_answer(self, instructor, problem_url_name, student):
def submit_rescore_one_student_answer(self, instructor, problem_url_name, student, only_if_higher=False):
"""Submits the particular problem for rescoring for a particular student"""
return submit_rescore_problem_for_student(self.create_task_request(instructor),
InstructorTaskModuleTestCase.problem_location(problem_url_name),
student)
RescoreTestData = namedtuple('RescoreTestData', 'edit, new_expected_scores, new_expected_max')
return submit_rescore_problem_for_student(
self.create_task_request(instructor),
InstructorTaskModuleTestCase.problem_location(problem_url_name),
student,
only_if_higher,
)
@ddt.data(
RescoreTestData(edit=dict(correct_answer=OPTION_2), new_expected_scores=(0, 1, 1, 2), new_expected_max=2),
RescoreTestData(edit=dict(num_inputs=2), new_expected_scores=(2, 1, 1, 0), new_expected_max=4),
RescoreTestData(edit=dict(num_inputs=4), new_expected_scores=(2, 1, 1, 0), new_expected_max=8),
RescoreTestData(edit=dict(num_responses=4), new_expected_scores=(2, 1, 1, 0), new_expected_max=4),
RescoreTestData(edit=dict(num_inputs=2, num_responses=4), new_expected_scores=(2, 1, 1, 0), new_expected_max=8),
)
@ddt.unpack
def test_rescoring_option_problem(self, problem_edit, new_expected_scores, new_expected_max):
def verify_rescore_results(self, problem_edit, new_expected_scores, new_expected_max, rescore_if_higher):
"""
Run rescore scenario on option problem.
Verify rescoring updates grade after content change.
Original problem definition has:
num_inputs = 1
num_responses = 2
correct_answer = OPTION_1
Common helper that verifies the results of rescoring a single
student and rescoring all students are as expected.
"""
# get descriptor:
problem_url_name = 'H1P1'
......@@ -196,16 +188,50 @@ class TestRescoringTask(TestIntegrationTask):
self.check_state(self.user1, descriptor, expected_original_scores[0], expected_original_max)
# rescore the problem for only one student -- only that student's grade should change:
self.submit_rescore_one_student_answer('instructor', problem_url_name, self.user1)
self.submit_rescore_one_student_answer('instructor', problem_url_name, self.user1, rescore_if_higher)
self.check_state(self.user1, descriptor, new_expected_scores[0], new_expected_max)
for i, user in enumerate(self.users[1:], start=1): # everyone other than user1
self.check_state(user, descriptor, expected_original_scores[i], expected_original_max)
# rescore the problem for all students
self.submit_rescore_all_student_answers('instructor', problem_url_name)
self.submit_rescore_all_student_answers('instructor', problem_url_name, rescore_if_higher)
for i, user in enumerate(self.users):
self.check_state(user, descriptor, new_expected_scores[i], new_expected_max)
RescoreTestData = namedtuple('RescoreTestData', 'edit, new_expected_scores, new_expected_max')
@ddt.data(
RescoreTestData(edit=dict(correct_answer=OPTION_2), new_expected_scores=(0, 1, 1, 2), new_expected_max=2),
RescoreTestData(edit=dict(num_inputs=2), new_expected_scores=(2, 1, 1, 0), new_expected_max=4),
RescoreTestData(edit=dict(num_inputs=4), new_expected_scores=(2, 1, 1, 0), new_expected_max=8),
RescoreTestData(edit=dict(num_responses=4), new_expected_scores=(2, 1, 1, 0), new_expected_max=4),
RescoreTestData(edit=dict(num_inputs=2, num_responses=4), new_expected_scores=(2, 1, 1, 0), new_expected_max=8),
)
@ddt.unpack
def test_rescoring_option_problem(self, problem_edit, new_expected_scores, new_expected_max):
"""
Run rescore scenario on option problem.
Verify rescoring updates grade after content change.
Original problem definition has:
num_inputs = 1
num_responses = 2
correct_answer = OPTION_1
"""
self.verify_rescore_results(
problem_edit, new_expected_scores, new_expected_max, rescore_if_higher=False,
)
@ddt.data(
RescoreTestData(edit=dict(), new_expected_scores=(2, 1, 1, 0), new_expected_max=2),
RescoreTestData(edit=dict(correct_answer=OPTION_2), new_expected_scores=(2, 1, 1, 2), new_expected_max=2),
RescoreTestData(edit=dict(num_inputs=2), new_expected_scores=(2, 1, 1, 0), new_expected_max=2),
)
@ddt.unpack
def test_rescoring_if_higher(self, problem_edit, new_expected_scores, new_expected_max):
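"""
Run rescore-if-higher scenario on the option problem.
Verify that rescoring updates a grade only when the recomputed score is higher
than the existing one.
"""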
self.verify_rescore_results(
problem_edit, new_expected_scores, new_expected_max, rescore_if_higher=True,
)
def test_rescoring_failure(self):
"""Simulate a failure in rescoring a problem"""
problem_url_name = 'H1P1'
......
......@@ -52,10 +52,10 @@ class TestInstructorTasks(InstructorTaskModuleTestCase):
self.instructor = self.create_instructor('instructor')
self.location = self.problem_location(PROBLEM_URL_NAME)
def _create_input_entry(self, student_ident=None, use_problem_url=True, course_id=None):
def _create_input_entry(self, student_ident=None, use_problem_url=True, course_id=None, only_if_higher=False):
"""Creates a InstructorTask entry for testing."""
task_id = str(uuid4())
task_input = {}
task_input = {'only_if_higher': only_if_higher}
if use_problem_url:
task_input['problem_url'] = self.location
if student_ident is not None:
......
......@@ -84,7 +84,7 @@ define(['jquery', 'js/instructor_dashboard/student_admin', 'edx-ui-toolkit/js/ut
it('initiates rescoring of the entrance exam when the button is clicked', function() {
var successMessage = gettext("Started entrance exam rescore task for student '{student_id}'." +
" Click the 'Show Background Task History for Student' button to see the status of the task."); // eslint-disable-line max-len
" Click the 'Show Task Status' button to see the status of the task."); // eslint-disable-line max-len
var fullSuccessMessage = interpolate_text(successMessage, {
student_id: uniqStudentIdentifier
});
......@@ -94,7 +94,8 @@ define(['jquery', 'js/instructor_dashboard/student_admin', 'edx-ui-toolkit/js/ut
var requests = AjaxHelpers.requests(this);
// Verify that the client contacts the server to start instructor task
var params = $.param({
unique_student_identifier: uniqStudentIdentifier
unique_student_identifier: uniqStudentIdentifier,
only_if_higher: false
});
studentadmin.$btn_rescore_entrance_exam.click();
......@@ -121,7 +122,8 @@ define(['jquery', 'js/instructor_dashboard/student_admin', 'edx-ui-toolkit/js/ut
var requests = AjaxHelpers.requests(this);
// Verify that the client contacts the server to start instructor task
var params = $.param({
unique_student_identifier: uniqStudentIdentifier
unique_student_identifier: uniqStudentIdentifier,
only_if_higher: false
});
var errorMessage = gettext(
"Error starting a task to rescore entrance exam for student '{student_id}'." +
......
// Build StaffDebug object
var StaffDebug = (function() {
get_current_url = function() {
return window.location.pathname;
};
get_url = function(action) {
var pathname = this.get_current_url();
var url = pathname.substr(0, pathname.indexOf('/courseware')) + '/instructor/api/' + action;
return url;
/* global getCurrentUrl:true */
var getURL = function(action) {
var pathname = this.getCurrentUrl();
return pathname.substr(0, pathname.indexOf('/courseware')) + '/instructor/api/' + action;
};
sanitized_string = function(string) {
var sanitizeString = function(string) {
return string.replace(/[.*+?^:${}()|[\]\\]/g, '\\$&');
};
get_user = function(locname) {
locname = sanitized_string(locname);
var uname = $('#sd_fu_' + locname).val();
var getUser = function(locationName) {
var sanitizedLocationName = sanitizeString(locationName);
var uname = $('#sd_fu_' + sanitizedLocationName).val();
if (uname === '') {
uname = $('#sd_fu_' + locname).attr('placeholder');
uname = $('#sd_fu_' + sanitizedLocationName).attr('placeholder');
}
return uname;
};
do_idash_action = function(action) {
var doInstructorDashAction = function(action) {
var pdata = {
'problem_to_reset': action.location,
'unique_student_identifier': get_user(action.locationName),
'delete_module': action.delete_module
problem_to_reset: action.location,
unique_student_identifier: getUser(action.locationName),
delete_module: action.delete_module,
only_if_higher: action.only_if_higher
};
$.ajax({
type: 'POST',
url: get_url(action.method),
url: getURL(action.method),
data: pdata,
success: function(data) {
var text = _.template(action.success_msg, {interpolate: /\{(.+?)\}/g})(
......@@ -40,72 +37,90 @@ var StaffDebug = (function() {
var html = _.template('<p id="idash_msg" class="success">{text}</p>', {interpolate: /\{(.+?)\}/g})(
{text: text}
);
$('#result_' + sanitized_string(action.locationName)).html(html);
$('#result_' + sanitizeString(action.locationName)).html(html);
},
error: function(request, status, error) {
var response_json;
var responseJSON;
try {
response_json = $.parseJSON(request.responseText);
responseJSON = $.parseJSON(request.responseText);
} catch (e) {
response_json = {error: gettext('Unknown Error Occurred.')};
responseJSON = {error: gettext('Unknown Error Occurred.')};
}
var text = _.template('{error_msg} {error}', {interpolate: /\{(.+?)\}/g})(
{
error_msg: action.error_msg,
error: response_json.error
error: responseJSON.error
}
);
var html = _.template('<p id="idash_msg" class="error">{text}</p>', {interpolate: /\{(.+?)\}/g})(
{text: text}
);
$('#result_' + sanitized_string(action.locationName)).html(html);
$('#result_' + sanitizeString(action.locationName)).html(html);
},
dataType: 'json'
});
};
reset = function(locname, location) {
this.do_idash_action({
var reset = function(locname, location) {
this.doInstructorDashAction({
locationName: locname,
location: location,
method: 'reset_student_attempts',
success_msg: gettext('Successfully reset the attempts for user {user}'),
error_msg: gettext('Failed to reset attempts.'),
error_msg: gettext('Failed to reset attempts for user.'),
delete_module: false
});
};
sdelete = function(locname, location) {
this.do_idash_action({
var deleteStudentState = function(locname, location) {
this.doInstructorDashAction({
locationName: locname,
location: location,
method: 'reset_student_attempts',
success_msg: gettext('Successfully deleted student state for user {user}'),
error_msg: gettext('Failed to delete student state.'),
error_msg: gettext('Failed to delete student state for user.'),
delete_module: true
});
};
rescore = function(locname, location) {
this.do_idash_action({
var rescore = function(locname, location) {
this.doInstructorDashAction({
locationName: locname,
location: location,
method: 'rescore_problem',
success_msg: gettext('Successfully rescored problem for user {user}'),
error_msg: gettext('Failed to rescore problem.'),
delete_module: false
error_msg: gettext('Failed to rescore problem for user.'),
only_if_higher: false
});
};
var rescoreIfHigher = function(locname, location) {
this.doInstructorDashAction({
locationName: locname,
location: location,
method: 'rescore_problem',
success_msg: gettext('Successfully rescored problem to improve score for user {user}'),
error_msg: gettext('Failed to rescore problem to improve score for user.'),
only_if_higher: true
});
};
getCurrentUrl = function() {
return window.location.pathname;
};
return {
reset: reset,
sdelete: sdelete,
deleteStudentState: deleteStudentState,
rescore: rescore,
do_idash_action: do_idash_action,
get_current_url: get_current_url,
get_url: get_url,
get_user: get_user,
sanitized_string: sanitized_string
rescoreIfHigher: rescoreIfHigher,
// export for testing
doInstructorDashAction: doInstructorDashAction,
getCurrentUrl: getCurrentUrl,
getURL: getURL,
getUser: getUser,
sanitizeString: sanitizeString
}; })();
// Register click handlers
......@@ -116,11 +131,15 @@ $(document).ready(function() {
return false;
});
$courseContent.on('click', '.staff-debug-sdelete', function() {
StaffDebug.sdelete($(this).parent().data('location-name'), $(this).parent().data('location'));
StaffDebug.deleteStudentState($(this).parent().data('location-name'), $(this).parent().data('location'));
return false;
});
$courseContent.on('click', '.staff-debug-rescore', function() {
StaffDebug.rescore($(this).parent().data('location-name'), $(this).parent().data('location'));
return false;
});
$courseContent.on('click', '.staff-debug-rescore-if-higher', function() {
StaffDebug.rescoreIfHigher($(this).parent().data('location-name'), $(this).parent().data('location'));
return false;
});
});
......@@ -1283,7 +1283,8 @@
// view - student admin
// --------------------
.instructor-dashboard-wrapper-2 section.idash-section#student_admin > {
.instructor-dashboard-wrapper-2 section.idash-section#student_admin {
.action-type-container{
margin-bottom: $baseline * 2;
}
......@@ -1295,12 +1296,23 @@
.task-history-all-table {
margin-top: 1em;
}
.task-history-single-table {
margin-top: 1em;
}
.running-tasks-table {
margin-top: 1em;
}
input[type="text"] {
width: 651px;
}
.location-example {
font-style: italic;
font-size: 0.9em;
}
}
// view - data download
......
......@@ -69,14 +69,16 @@ ${block_content}
<div data-location="${location | h}" data-location-name="${location.name | h}">
[
% if can_reset_attempts:
<button type="button" class="btn-link staff-debug-reset">${_('Reset Student Attempts')}</button>
<button type="button" class="btn-link staff-debug-reset">${_('Reset Learner\'s Attempts to Zero')}</button>
|
% endif
% if has_instructor_access:
<button type="button" class="btn-link staff-debug-sdelete">${_('Delete Student State')}</button>
<button type="button" class="btn-link staff-debug-sdelete">${_('Delete Learner\'s State')}</button>
% if can_rescore_problem:
|
<button type="button" class="btn-link staff-debug-rescore">${_('Rescore Student Submission')}</button>
<button type="button" class="btn-link staff-debug-rescore">${_('Rescore Learner\'s Submission')}</button>
|
<button type="button" class="btn-link staff-debug-rescore-if-higher">${_('Rescore Only If Score Improves')}</button>
% endif
% endif
]
......
......@@ -297,7 +297,7 @@ def set_credit_requirement_status(user, course_key, req_namespace, req_name, sta
try:
send_credit_notifications(user.username, course_key)
except Exception: # pylint: disable=broad-except
log.error("Error sending email")
log.exception("Error sending email")
# pylint: disable=invalid-name
......
"""
Helper functions for grades and scores.
"""
def compare_scores(earned1, possible1, earned2, possible2):
"""
Returns a tuple of:
1. Whether the 2nd set of scores is higher than the first.
2. Grade percentage of 1st set of scores.
3. Grade percentage of 2nd set of scores.
"""
percentage1 = float(earned1) / float(possible1)
percentage2 = float(earned2) / float(possible2)
is_higher = percentage2 > percentage1
return is_higher, percentage1, percentage2
def is_score_higher(earned1, possible1, earned2, possible2):
"""
Returns whether the 2nd set of scores is higher than the first.
"""
is_higher, _, _ = compare_scores(earned1, possible1, earned2, possible2)
return is_higher
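A quick worked example of these helpers; the earned/possible values are illustrative. Comparison is by percentage, so an equal or lower percentage does not count as higher.
# Worked example (illustrative values).
is_higher, pct1, pct2 = compare_scores(1, 2, 2, 2)
assert (is_higher, pct1, pct2) == (True, 0.5, 1.0)
assert is_score_higher(1, 2, 2, 2) is True
assert is_score_higher(1, 2, 1, 2) is False  # equal percentages are not higher
assert is_score_higher(2, 2, 1, 2) is False  # a lower rescore is not higher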
......@@ -204,8 +204,7 @@ class GradePublishTestMixin(object):
'max_score': max_score})
self.scores = []
patcher = mock.patch("courseware.module_render.set_score",
capture_score)
patcher = mock.patch("lms.djangoapps.grades.signals.handlers.set_score", capture_score)
patcher.start()
self.addCleanup(patcher.stop)
......