edx-platform · Commit ea0027f3

Authored Apr 18, 2017 by J. Cliff Dyer; committed May 01, 2017 by Sanford Student.
refactor CAPA to use scorable xblock mixin
for TNL-6594
Parent: bf8aef33

Showing 11 changed files with 125 additions and 190 deletions (+125, -190):
common/lib/capa/capa/capa_problem.py                             +15  -21
common/lib/xmodule/xmodule/capa_base.py                           +0   -0
common/lib/xmodule/xmodule/capa_module.py                         +2   -1
common/lib/xmodule/xmodule/tests/test_capa_module.py             +30  -29
common/lib/xmodule/xmodule/tests/test_delay_between_attempts.py   +3   -2
lms/djangoapps/grades/signals/handlers.py                        +32   -6
lms/djangoapps/grades/tests/integration/test_events.py            +4   -8
lms/djangoapps/instructor_task/api_helper.py                     +13   -6
lms/djangoapps/instructor_task/tasks_helper/module_state.py      +17  -50
lms/djangoapps/instructor_task/tests/test_integration.py          +3   -2
lms/djangoapps/instructor_task/tests/test_tasks.py                +6  -65
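A note for orientation before the per-file diffs: the Score, calculate_score, set_score, has_submitted_answer, and rescore names that recur below come from the XBlock scorable mixin (xblock.scorable), which this commit moves CAPA onto. The following is a sketch of that contract as exercised by this diff, not verbatim library code; the method bodies are illustrative only.

    # Sketch of the ScorableXBlockMixin contract as used in this commit.
    from collections import namedtuple

    # Replaces the old {'score': ..., 'total': ...} dicts.
    Score = namedtuple('Score', ['raw_earned', 'raw_possible'])

    class ScorableXBlockMixin(object):
        def has_submitted_answer(self):
            """Return True if the learner has submitted an answer."""
            raise NotImplementedError

        def get_score(self):
            """Return the persisted Score without recomputing it."""
            raise NotImplementedError

        def set_score(self, score):
            """Persist a Score on the block."""
            raise NotImplementedError

        def calculate_score(self):
            """Recompute a Score from current problem state; do not save it."""
            raise NotImplementedError

        def rescore(self, only_if_higher):
            """Recompute, optionally keep only an improved score, and publish.
            Grading errors propagate as exceptions rather than status dicts."""
            self.set_score(self.calculate_score())  # illustrative body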
common/lib/capa/capa/capa_problem.py

@@ -304,26 +304,25 @@ class LoncapaProblem(object):
             maxscore += responder.get_max_score()
         return maxscore
 
-    def get_score(self):
+    def calculate_score(self, correct_map=None):
         """
         Compute score for this problem. The score is the number of points awarded.
         Returns a dictionary {'score': integer, from 0 to get_max_score(),
         'total': get_max_score()}.
+
+        Takes an optional correctness map for use in the rescore workflow.
         """
+        if correct_map is None:
+            correct_map = self.correct_map
         correct = 0
-        for key in self.correct_map:
+        for key in correct_map:
             try:
-                correct += self.correct_map.get_npoints(key)
+                correct += correct_map.get_npoints(key)
             except Exception:
-                log.error('key=%s, correct_map = %s', key, self.correct_map)
+                log.error('key=%s, correct_map = %s', key, correct_map)
                 raise
 
-        if (not self.student_answers) or len(self.student_answers) == 0:
-            return {'score': 0, 'total': self.get_max_score()}
-        else:
-            return {'score': correct, 'total': self.get_max_score()}
+        return {'score': correct, 'total': self.get_max_score()}
 
     def update_score(self, score_msg, queuekey):
         """

@@ -397,7 +396,9 @@ class LoncapaProblem(object):
         # if answers include File objects, convert them to filenames.
         self.student_answers = convert_files_to_filenames(answers)
-        return self._grade_answers(answers)
+        new_cmap = self.get_grade_from_current_answers(answers)
+        self.correct_map = new_cmap
+        return self.correct_map
 
     def supports_rescoring(self):
         """

@@ -418,16 +419,10 @@ class LoncapaProblem(object):
         """
         return all('filesubmission' not in responder.allowed_inputfields for responder in self.responders.values())
 
-    def rescore_existing_answers(self):
-        """
-        Rescore student responses. Called by capa_module.rescore_problem.
-        """
-        return self._grade_answers(None)
-
-    def _grade_answers(self, student_answers):
+    def get_grade_from_current_answers(self, student_answers):
         """
-        Internal grading call used for checking new 'student_answers' and also
-        rescoring existing student_answers.
+        Gets the grade for the currently-saved problem state, but does not save it
+        to the block.
 
         For new student_answers being graded, `student_answers` is a dict of all the
         entries from request.POST, but with the first part of each key removed

@@ -462,7 +457,6 @@ class LoncapaProblem(object):
         results = responder.evaluate_answers(self.student_answers, oldcmap)
         newcmap.update(results)
 
-        self.correct_map = newcmap
         return newcmap
 
     def get_question_answers(self):
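The net effect in LoncapaProblem is a separation of computing a grade from committing it: get_grade_from_current_answers no longer writes self.correct_map (that assignment moved up into grade_answers, the first hunk's caller), and calculate_score can now score an arbitrary correctness map. A hypothetical only-if-higher rescore built on this split, where `problem` stands for a LoncapaProblem with saved answers; this is illustrative, not code from the commit:

    # Hypothetical caller of the refactored API.
    new_cmap = problem.get_grade_from_current_answers(None)    # recompute; nothing saved
    candidate = problem.calculate_score(correct_map=new_cmap)  # {'score': ..., 'total': ...}
    current = problem.calculate_score()                        # defaults to problem.correct_map
    if candidate['score'] >= current['score']:
        problem.correct_map = new_cmap                         # caller decides when to commit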
common/lib/xmodule/xmodule/capa_base.py

This diff is collapsed.
common/lib/xmodule/xmodule/capa_module.py

@@ -324,6 +324,7 @@ class CapaDescriptor(CapaFields, RawDescriptor):
     hint_button = module_attr('hint_button')
     handle_problem_html_error = module_attr('handle_problem_html_error')
     handle_ungraded_response = module_attr('handle_ungraded_response')
+    has_submitted_answer = module_attr('has_submitted_answer')
     is_attempted = module_attr('is_attempted')
     is_correct = module_attr('is_correct')
     is_past_due = module_attr('is_past_due')

@@ -332,7 +333,7 @@ class CapaDescriptor(CapaFields, RawDescriptor):
     make_dict_of_responses = module_attr('make_dict_of_responses')
     new_lcp = module_attr('new_lcp')
     publish_grade = module_attr('publish_grade')
-    rescore_problem = module_attr('rescore_problem')
+    rescore = module_attr('rescore')
     reset_problem = module_attr('reset_problem')
     save_problem = module_attr('save_problem')
     set_state_from_lcp = module_attr('set_state_from_lcp')
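CapaDescriptor proxies these names to its runtime XModule via module_attr, so adding has_submitted_answer and renaming rescore_problem to rescore here is what lets the hasattr-based checks in the instructor-task code (api_helper.py below) see the new interface on the descriptor. Roughly, under the assumption that module_attr forwards attribute access to the bound module:

    # Assumed shape of the module_attr helper (illustrative, not verbatim).
    def module_attr(attr):
        def getter(descriptor):
            # the bound-module attribute name is an assumption
            return getattr(descriptor._xmodule, attr)
        return property(getter)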
common/lib/xmodule/xmodule/tests/test_capa_module.py

@@ -29,6 +29,7 @@ from xmodule.capa_module import CapaModule, CapaDescriptor, ComplexEncoder
 from opaque_keys.edx.locations import Location
 from xblock.field_data import DictFieldData
 from xblock.fields import ScopeIds
+from xblock.scorable import Score
 from . import get_test_system
 from pytz import UTC

@@ -130,9 +131,9 @@ class CapaFactory(object):
         if override_get_score:
             if correct:
                 # TODO: probably better to actually set the internal state properly, but...
-                module.get_score = lambda: {'score': 1, 'total': 1}
+                module.score = Score(raw_earned=1, raw_possible=1)
             else:
-                module.get_score = lambda: {'score': 0, 'total': 1}
+                module.score = Score(raw_earned=0, raw_possible=1)
             module.graded = 'False'
         return module

@@ -196,10 +197,10 @@ class CapaModuleTest(unittest.TestCase):
     def test_import(self):
         module = CapaFactory.create()
-        self.assertEqual(module.get_score()['score'], 0)
+        self.assertEqual(module.get_score().raw_earned, 0)
         other_module = CapaFactory.create()
-        self.assertEqual(module.get_score()['score'], 0)
+        self.assertEqual(module.get_score().raw_earned, 0)
         self.assertNotEqual(module.url_name, other_module.url_name,
                             "Factory should be creating unique names for each problem")

@@ -208,31 +209,33 @@ class CapaModuleTest(unittest.TestCase):
         Check that the factory creates correct and incorrect problems properly.
         """
         module = CapaFactory.create()
-        self.assertEqual(module.get_score()['score'], 0)
+        self.assertEqual(module.get_score().raw_earned, 0)
         other_module = CapaFactory.create(correct=True)
-        self.assertEqual(other_module.get_score()['score'], 1)
+        self.assertEqual(other_module.get_score().raw_earned, 1)
 
     def test_get_score(self):
         """
-        Do 1 test where the internals of get_score are properly set
-
-        @jbau Note: this obviously depends on a particular implementation of get_score, but I think this is actually
-        useful as unit-code coverage for this current implementation. I don't see a layer where LoncapaProblem
-        is tested directly
+        Tests the internals of get_score. In keeping with the ScorableXBlock spec,
+        Capa modules store their score independently of the LCP internals, so it must
+        be explicitly updated.
         """
         student_answers = {'1_2_1': 'abcd'}
         correct_map = CorrectMap(answer_id='1_2_1', correctness="correct", npoints=0.9)
         module = CapaFactory.create(correct=True, override_get_score=False)
         module.lcp.correct_map = correct_map
         module.lcp.student_answers = student_answers
-        self.assertEqual(module.get_score()['score'], 0.9)
+        self.assertEqual(module.get_score().raw_earned, 0.0)
+        module.set_score(module.score_from_lcp())
+        self.assertEqual(module.get_score().raw_earned, 0.9)
 
         other_correct_map = CorrectMap(answer_id='1_2_1', correctness="incorrect", npoints=0.1)
         other_module = CapaFactory.create(correct=False, override_get_score=False)
         other_module.lcp.correct_map = other_correct_map
         other_module.lcp.student_answers = student_answers
-        self.assertEqual(other_module.get_score()['score'], 0.1)
+        self.assertEqual(other_module.get_score().raw_earned, 0.0)
+        other_module.set_score(other_module.score_from_lcp())
+        self.assertEqual(other_module.get_score().raw_earned, 0.1)
 
     def test_showanswer_default(self):
         """

@@ -1007,14 +1010,15 @@ class CapaModuleTest(unittest.TestCase):
         # Simulate that all answers are marked correct, no matter
         # what the input is, by patching LoncapaResponse.evaluate_answers()
         with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
-            mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'correct')
-            result = module.rescore_problem(only_if_higher=False)
+            mock_evaluate_answers.return_value = CorrectMap(
+                answer_id=CapaFactory.answer_key(),
+                correctness='correct',
+                npoints=1,
+            )
+            module.rescore(only_if_higher=False)
 
         # Expect that the problem is marked correct
-        self.assertEqual(result['success'], 'correct')
-
-        # Expect that we get no HTML
-        self.assertNotIn('contents', result)
+        self.assertEqual(module.is_correct(), True)
 
         # Expect that the number of attempts is not incremented
         self.assertEqual(module.attempts, 1)

@@ -1028,10 +1032,10 @@ class CapaModuleTest(unittest.TestCase):
         # what the input is, by patching LoncapaResponse.evaluate_answers()
         with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
             mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'incorrect')
-            result = module.rescore_problem(only_if_higher=False)
+            module.rescore(only_if_higher=False)
 
         # Expect that the problem is marked incorrect
-        self.assertEqual(result['success'], 'incorrect')
+        self.assertEqual(module.is_correct(), False)
 
         # Expect that the number of attempts is not incremented
         self.assertEqual(module.attempts, 0)

@@ -1042,7 +1046,7 @@ class CapaModuleTest(unittest.TestCase):
         # Try to rescore the problem, and get exception
         with self.assertRaises(xmodule.exceptions.NotFoundError):
-            module.rescore_problem(only_if_higher=False)
+            module.rescore(only_if_higher=False)
 
     def test_rescore_problem_not_supported(self):
         module = CapaFactory.create(done=True)

@@ -1051,7 +1055,7 @@ class CapaModuleTest(unittest.TestCase):
         with patch('capa.capa_problem.LoncapaProblem.supports_rescoring') as mock_supports_rescoring:
             mock_supports_rescoring.return_value = False
             with self.assertRaises(NotImplementedError):
-                module.rescore_problem(only_if_higher=False)
+                module.rescore(only_if_higher=False)
 
     def _rescore_problem_error_helper(self, exception_class):
         """Helper to allow testing all errors that rescoring might return."""

@@ -1059,13 +1063,10 @@ class CapaModuleTest(unittest.TestCase):
         module = CapaFactory.create(attempts=1, done=True)
 
         # Simulate answering a problem that raises the exception
-        with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
+        with patch('capa.capa_problem.LoncapaProblem.get_grade_from_current_answers') as mock_rescore:
             mock_rescore.side_effect = exception_class(u'test error \u03a9')
-            result = module.rescore_problem(only_if_higher=False)
-
-        # Expect an AJAX alert message in 'success'
-        expected_msg = u'Error: test error \u03a9'
-        self.assertEqual(result['success'], expected_msg)
+            with self.assertRaises(exception_class):
+                module.rescore(only_if_higher=False)
 
         # Expect that the number of attempts is NOT incremented
         self.assertEqual(module.attempts, 1)
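The reworked test_get_score pins down the key behavioral change: mutating module.lcp no longer changes the reported score until the block's stored Score is refreshed via set_score. The score_from_lcp helper the test calls is presumably the dict-to-Score conversion; an assumed equivalent, for illustration only:

    # Assumed equivalent of the score_from_lcp conversion the test relies on.
    from xblock.scorable import Score

    def score_from_lcp(module):
        lcp_score = module.lcp.calculate_score()  # {'score': ..., 'total': ...}
        return Score(raw_earned=lcp_score['score'], raw_possible=lcp_score['total'])

Until set_score(score_from_lcp(...)) runs, get_score() still reports the previously persisted value (0.0 in the test), which is exactly what the added assertions check.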
common/lib/xmodule/xmodule/tests/test_delay_between_attempts.py

@@ -18,6 +18,7 @@ from xmodule.capa_module import CapaModule
 from opaque_keys.edx.locations import Location
 from xblock.field_data import DictFieldData
 from xblock.fields import ScopeIds
+from xblock.scorable import Score
 from . import get_test_system
 from pytz import UTC

@@ -111,9 +112,9 @@ class CapaFactoryWithDelay(object):
         if correct:
             # Could set the internal state formally, but here we just jam in the score.
-            module.get_score = lambda: {'score': 1, 'total': 1}
+            module.score = Score(raw_earned=1, raw_possible=1)
         else:
-            module.get_score = lambda: {'score': 0, 'total': 1}
+            module.score = Score(raw_earned=0, raw_possible=1)
         return module
lms/djangoapps/grades/signals/handlers.py

@@ -2,6 +2,7 @@
 Grades related signals.
 """
 from contextlib import contextmanager
+from crum import get_current_user
 from logging import getLogger
 
 from django.dispatch import receiver

@@ -32,6 +33,8 @@ from ..tasks import recalculate_subsection_grade_v3, RECALCULATE_GRADE_DELAY
 log = getLogger(__name__)
 
+# define values to be used in grading events
+GRADES_RESCORE_EVENT_TYPE = 'edx.grades.problem.rescored'
 PROBLEM_SUBMITTED_EVENT_TYPE = 'edx.grades.problem.submitted'

@@ -209,7 +212,7 @@ def enqueue_subsection_update(sender, **kwargs):  # pylint: disable=unused-argum
     Handles the PROBLEM_WEIGHTED_SCORE_CHANGED signal by
     enqueueing a subsection update operation to occur asynchronously.
     """
-    _emit_problem_submitted_event(kwargs)
+    _emit_event(kwargs)
     result = recalculate_subsection_grade_v3.apply_async(
         kwargs=dict(
             user_id=kwargs['user_id'],

@@ -241,12 +244,14 @@ def recalculate_course_grade(sender, course, course_structure, user, **kwargs):
     CourseGradeFactory().update(user, course=course, course_structure=course_structure)
 
 
-def _emit_problem_submitted_event(kwargs):
+def _emit_event(kwargs):
     """
-    Emits a problem submitted event only if
-    there is no current event transaction type,
-    i.e. we have not reached this point in the
-    code via a rescore or student state deletion.
+    Emits a problem submitted event only if there is no current event
+    transaction type, i.e. we have not reached this point in the code via a
+    rescore or student state deletion.
+
+    If the event transaction type has already been set and the transaction is
+    a rescore, emits a problem rescored event.
     """
     root_type = get_event_transaction_type()

@@ -267,3 +272,24 @@ def _emit_problem_submitted_event(kwargs):
             'weighted_possible': kwargs.get('weighted_possible'),
         }
     )
+
+    if root_type == 'edx.grades.problem.rescored':
+        current_user = get_current_user()
+        if current_user is not None and hasattr(current_user, 'id'):
+            instructor_id = unicode(current_user.id)
+        else:
+            instructor_id = None
+        tracker.emit(
+            unicode(GRADES_RESCORE_EVENT_TYPE),
+            {
+                'course_id': unicode(kwargs['course_id']),
+                'user_id': unicode(kwargs['user_id']),
+                'problem_id': unicode(kwargs['usage_id']),
+                'new_weighted_earned': kwargs.get('weighted_earned'),
+                'new_weighted_possible': kwargs.get('weighted_possible'),
+                'only_if_higher': kwargs.get('only_if_higher'),
+                'instructor_id': instructor_id,
+                'event_transaction_id': unicode(get_event_transaction_id()),
+                'event_transaction_type': unicode(GRADES_RESCORE_EVENT_TYPE),
+            }
+        )
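_emit_event now dispatches on the root event transaction type: a fresh learner submission (no transaction type set yet) produces edx.grades.problem.submitted, while a signal arriving inside a rescore transaction produces edx.grades.problem.rescored, stamped with the acting instructor's id from crum's get_current_user. The dispatch reduced to a sketch; the emit_* helper names here are hypothetical:

    # Reduced sketch of the dispatch in _emit_event.
    def _emit_event(kwargs):
        root_type = get_event_transaction_type()
        if not root_type:
            emit_problem_submitted(kwargs)   # ordinary learner submission
        elif root_type == GRADES_RESCORE_EVENT_TYPE:
            emit_problem_rescored(kwargs)    # reached via an instructor rescore
        # other transaction types (e.g. state deletion) emit nothing here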
lms/djangoapps/grades/tests/integration/test_events.py

@@ -160,10 +160,9 @@ class GradesEventIntegrationTest(ProblemSubmissionTestMixin, SharedModuleStoreTe
         }
     )
 
-    @patch('lms.djangoapps.instructor_task.tasks_helper.module_state.tracker')
     @patch('lms.djangoapps.grades.signals.handlers.tracker')
     @patch('lms.djangoapps.grades.models.tracker')
-    def test_rescoring_events(self, models_tracker, handlers_tracker, instructor_task_tracker):
+    def test_rescoring_events(self, models_tracker, handlers_tracker):
         # submit answer
         self.submit_question_answer('p1', {'2_1': 'choice_choice_3'})
         models_tracker.reset_mock()

@@ -187,11 +186,8 @@ class GradesEventIntegrationTest(ProblemSubmissionTestMixin, SharedModuleStoreTe
         )
         # check logging to make sure id's are tracked correctly across
         # events
-        event_transaction_id = instructor_task_tracker.emit.mock_calls[0][1][1]['event_transaction_id']
-        self.assertEqual(
-            instructor_task_tracker.get_tracker().context.call_args[0],
-            ('edx.grades.problem.rescored', {'course_id': unicode(self.course.id), 'org_id': unicode(self.course.org)})
-        )
+        event_transaction_id = handlers_tracker.emit.mock_calls[0][1][1]['event_transaction_id']
+
         # make sure the id is propagated throughout the event flow
         for call in models_tracker.emit.mock_calls:
             self.assertEqual(event_transaction_id, call[1][1]['event_transaction_id'])

@@ -206,7 +202,7 @@ class GradesEventIntegrationTest(ProblemSubmissionTestMixin, SharedModuleStoreTe
         handlers_tracker.assert_not_called()
 
-        instructor_task_tracker.emit.assert_called_with(
+        handlers_tracker.emit.assert_called_with(
             unicode(RESCORE_TYPE),
             {
                 'course_id': unicode(self.course.id),
lms/djangoapps/instructor_task/api_helper.py

@@ -100,6 +100,17 @@ def _get_xmodule_instance_args(request, task_id):
     return xmodule_instance_args
 
 
+def _supports_rescore(descriptor):
+    """
+    Helper method to determine whether a given item supports rescoring.
+    In order to accommodate both XModules and XBlocks, we have to check
+    the descriptor itself then fall back on its module class.
+    """
+    return hasattr(descriptor, 'rescore') or (
+        hasattr(descriptor, 'module_class') and
+        hasattr(descriptor.module_class, 'rescore')
+    )
+
+
 def _update_instructor_task(instructor_task, task_result):
     """
     Updates and possibly saves an InstructorTask entry based on a task Result.

@@ -246,10 +257,7 @@ def check_arguments_for_rescoring(usage_key):
     corresponding module doesn't support rescoring calls.
     """
     descriptor = modulestore().get_item(usage_key)
-    # TODO: Clean this up as part of TNL-6594 when CAPA uses the ScorableXBlockMixin
-    if (not hasattr(descriptor, 'module_class') or
-            not hasattr(descriptor.module_class, 'rescore_problem')) and not hasattr(descriptor, 'rescore'):
+    if not _supports_rescore(descriptor):
         msg = "Specified module does not support rescoring."
         raise NotImplementedError(msg)

@@ -264,8 +272,7 @@ def check_entrance_exam_problems_for_rescoring(exam_key):  # pylint: disable=inv
     any of the problems in the entrance exam doesn't support re-scoring calls.
     """
     problems = get_problems_in_section(exam_key).values()
-    if any(not hasattr(problem, 'module_class') or not hasattr(problem.module_class, 'rescore_problem')
-           for problem in problems):
+    if any(not _supports_rescore(problem) for problem in problems):
         msg = _("Not all problems in entrance exam support re-scoring.")
         raise NotImplementedError(msg)
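During the transition an item can be a new-style block, with rescore on the object itself, or a legacy XModule descriptor, where the method lives on module_class; _supports_rescore duck-types across both. Illustrative stand-ins for the two shapes it accepts, with _supports_rescore as defined above:

    # Illustrative stand-ins for the two shapes _supports_rescore accepts.
    class NewStyleBlock(object):
        def rescore(self, only_if_higher):
            pass

    class LegacyDescriptor(object):
        class module_class(object):            # XModule-style: method on the class
            def rescore(self, only_if_higher):
                pass

    assert _supports_rescore(NewStyleBlock())
    assert _supports_rescore(LegacyDescriptor())
    assert not _supports_rescore(object())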
lms/djangoapps/instructor_task/tasks_helper/module_state.py

@@ -10,6 +10,7 @@ from time import time
 from eventtracking import tracker
 from opaque_keys.edx.keys import UsageKey
 from xmodule.modulestore.django import modulestore
+from capa.responsetypes import StudentInputError, ResponseError, LoncapaProblemError
 from courseware.courses import get_course_by_id, get_problems_in_section
 from courseware.models import StudentModule
 from courseware.model_data import DjangoKeyValueStore, FieldDataCache

@@ -174,38 +175,28 @@ def rescore_problem_module_state(xmodule_instance_args, module_descriptor, stude
         TASK_LOG.warning(msg)
         return UPDATE_STATUS_FAILED
 
-    # TODO: (TNL-6594) Remove this switch once rescore_problem support
-    # once CAPA uses ScorableXBlockMixin.
-    for method in ['rescore', 'rescore_problem']:
-        rescore_method = getattr(instance, method, None)
-        if rescore_method is not None:
-            break
-    else:
-        # for-else: Neither method exists on the block.
+    if not hasattr(instance, 'rescore'):
         # This should not happen, since it should be already checked in the
         # caller, but check here to be sure.
         msg = "Specified problem does not support rescoring."
         raise UpdateProblemModuleStateError(msg)
 
-    # TODO: Remove the first part of this if-else with TNL-6594
     # We check here to see if the problem has any submissions. If it does not, we don't want to rescore it
-    if hasattr(instance, "done"):
-        if not instance.done:
-            return UPDATE_STATUS_SKIPPED
-    elif not instance.has_submitted_answer():
+    if not instance.has_submitted_answer():
         return UPDATE_STATUS_SKIPPED
 
     # Set the tracking info before this call, because it makes downstream
     # calls that create events. We retrieve and store the id here because
     # the request cache will be erased during downstream calls.
-    event_transaction_id = create_new_event_transaction_id()
+    create_new_event_transaction_id()
     set_event_transaction_type(GRADES_RESCORE_EVENT_TYPE)
 
-    result = rescore_method(only_if_higher=task_input['only_if_higher'])
-    instance.save()
-    if result is None or result.get(u'success') in {u'correct', u'incorrect'}:
-        TASK_LOG.debug(
-            u"successfully processed rescore call for course %(course)s, problem %(loc)s "
+    # specific events from CAPA are not propagated up the stack. Do we want this?
+    try:
+        instance.rescore(only_if_higher=task_input['only_if_higher'])
+    except (LoncapaProblemError, StudentInputError, ResponseError):
+        TASK_LOG.warning(
+            u"error processing rescore call for course %(course)s, problem %(loc)s "
             u"and student %(student)s",
             dict(
                 course=course_id,

@@ -213,44 +204,20 @@ def rescore_problem_module_state(xmodule_instance_args, module_descriptor, stude
                 student=student
             )
         )
-        if result is not None:
-            new_weighted_earned, new_weighted_possible = weighted_score(
-                result['new_raw_earned'] if result else None,
-                result['new_raw_possible'] if result else None,
-                module_descriptor.weight,
-            )
-
-            # TODO: remove this context manager after completion of AN-6134
-            context = course_context_from_course_id(course_id)
-            with tracker.get_tracker().context(GRADES_RESCORE_EVENT_TYPE, context):
-                tracker.emit(
-                    unicode(GRADES_RESCORE_EVENT_TYPE),
-                    {
-                        'course_id': unicode(course_id),
-                        'user_id': unicode(student.id),
-                        'problem_id': unicode(usage_key),
-                        'new_weighted_earned': new_weighted_earned,
-                        'new_weighted_possible': new_weighted_possible,
-                        'only_if_higher': task_input['only_if_higher'],
-                        'instructor_id': unicode(xmodule_instance_args['request_info']['user_id']),
-                        'event_transaction_id': unicode(event_transaction_id),
-                        'event_transaction_type': unicode(GRADES_RESCORE_EVENT_TYPE),
-                    }
-                )
-            return UPDATE_STATUS_SUCCEEDED
-        else:
-            TASK_LOG.warning(
-                u"error processing rescore call for course %(course)s, problem %(loc)s "
-                u"and student %(student)s: %(msg)s",
-                dict(
-                    msg=result.get('success', result),
-                    course=course_id,
-                    loc=usage_key,
-                    student=student
-                )
-            )
-            return UPDATE_STATUS_FAILED
+        return UPDATE_STATUS_FAILED
+
+    # Only for CAPA. This will get moved to the grade handler.
+    instance.save()
+
+    TASK_LOG.debug(
+        u"successfully processed rescore call for course %(course)s, problem %(loc)s "
+        u"and student %(student)s",
+        dict(
+            course=course_id,
+            loc=usage_key,
+            student=student
+        )
+    )
+    return UPDATE_STATUS_SUCCEEDED
 
 
 @outer_atomic
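The task helper's contract with the block changes shape here: instead of inspecting a returned dict for a 'success' key, it treats a clean return from rescore as success and the three CAPA grading exceptions as a per-student failure. Condensed, the rewritten control flow is (paraphrase, not verbatim):

    if not hasattr(instance, 'rescore'):
        raise UpdateProblemModuleStateError("Specified problem does not support rescoring.")
    if not instance.has_submitted_answer():
        return UPDATE_STATUS_SKIPPED
    try:
        instance.rescore(only_if_higher=task_input['only_if_higher'])
    except (LoncapaProblemError, StudentInputError, ResponseError):
        return UPDATE_STATUS_FAILED   # logged per student; the task moves on
    instance.save()                   # only for CAPA, per the comment above
    return UPDATE_STATUS_SUCCEEDED

This is also why the rescore event emission moved out of this helper and into the grades signal handler: the helper no longer sees earned/possible values, only success or an exception.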
lms/djangoapps/instructor_task/tests/test_integration.py

@@ -18,6 +18,7 @@ from django.contrib.auth.models import User
 from django.core.urlresolvers import reverse
 from openedx.core.djangoapps.util.testing import TestConditionalContent
+from openedx.core.djangolib.testing.utils import get_mock_request
 from capa.tests.response_xml_factory import (CodeResponseXMLFactory,
                                              CustomResponseXMLFactory)
 from xmodule.modulestore.tests.factories import ItemFactory

@@ -275,7 +276,7 @@ class TestRescoringTask(TestIntegrationTask):
         self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])
 
         expected_message = "bad things happened"
-        with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
+        with patch('capa.capa_problem.LoncapaProblem.get_grade_from_current_answers') as mock_rescore:
             mock_rescore.side_effect = ZeroDivisionError(expected_message)
             instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)
         self._assert_task_failure(instructor_task.id, 'rescore_problem', problem_url_name, expected_message)

@@ -293,7 +294,7 @@ class TestRescoringTask(TestIntegrationTask):
         # return an input error as if it were a numerical response, with an embedded unicode character:
         expected_message = u"Could not interpret '2/3\u03a9' as a number"
-        with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
+        with patch('capa.capa_problem.LoncapaProblem.get_grade_from_current_answers') as mock_rescore:
             mock_rescore.side_effect = StudentInputError(expected_message)
             instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)
lms/djangoapps/instructor_task/tests/test_tasks.py

@@ -307,30 +307,21 @@ class TestRescoreInstructorTask(TestInstructorTasks):
             action_name='rescored'
         )
 
-    @ddt.data(
-        ('rescore', None),
-        ('rescore_problem', {'success': 'correct', 'new_raw_earned': 1, 'new_raw_possible': 1})
-    )
-    @ddt.unpack
-    def test_rescoring_success(self, rescore_method, rescore_result):
+    def test_rescoring_success(self):
         """
         Tests rescores a problem in a course, for all students succeeds.
         """
         mock_instance = MagicMock()
-        other_method = ({'rescore', 'rescore_problem'} - {rescore_method}).pop()
-        getattr(mock_instance, rescore_method).return_value = rescore_result
-        delattr(mock_instance, other_method)
-        if rescore_method == 'rescore':
-            del mock_instance.done
-            mock_instance.has_submitted_answer.return_value = True
-        else:
-            mock_instance.done = True
+        getattr(mock_instance, 'rescore').return_value = None
+        mock_instance.has_submitted_answer.return_value = True
+        del mock_instance.done  # old CAPA code used to use this value so we delete it here to be sure
 
         num_students = 10
         self._create_students_with_state(num_students)
         task_entry = self._create_input_entry()
         with patch('lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal') as mock_get_module:
             mock_get_module.return_value = mock_instance
             self._run_task_with_mock_celery(rescore_problem, task_entry.id, task_entry.task_id)

@@ -344,56 +335,6 @@ class TestRescoreInstructorTask(TestInstructorTasks):
             action_name='rescored'
         )
 
-    def test_rescoring_bad_result(self):
-        """
-        Tests and confirm that rescoring does not succeed if "success" key is not an expected value.
-        """
-        input_state = json.dumps({'done': True})
-        num_students = 10
-        self._create_students_with_state(num_students, input_state)
-        task_entry = self._create_input_entry()
-        mock_instance = Mock()
-        mock_instance.rescore_problem = Mock(return_value={'success': 'bogus'})
-        del mock_instance.rescore
-        with patch('lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal') as mock_get_module:
-            mock_get_module.return_value = mock_instance
-            self._run_task_with_mock_celery(rescore_problem, task_entry.id, task_entry.task_id)
-        self.assert_task_output(
-            output=self.get_task_output(task_entry.id),
-            total=num_students,
-            attempted=num_students,
-            succeeded=0,
-            skipped=0,
-            failed=num_students,
-            action_name='rescored'
-        )
-
-    def test_rescoring_missing_result(self):
-        """
-        Tests and confirm that rescoring does not succeed if "success" key is not returned.
-        """
-        input_state = json.dumps({'done': True})
-        num_students = 10
-        self._create_students_with_state(num_students, input_state)
-        task_entry = self._create_input_entry()
-        mock_instance = Mock()
-        mock_instance.rescore_problem = Mock(return_value={'bogus': 'value'})
-        del mock_instance.rescore
-        with patch('lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal') as mock_get_module:
-            mock_get_module.return_value = mock_instance
-            self._run_task_with_mock_celery(rescore_problem, task_entry.id, task_entry.task_id)
-        self.assert_task_output(
-            output=self.get_task_output(task_entry.id),
-            total=num_students,
-            attempted=num_students,
-            succeeded=0,
-            skipped=0,
-            failed=num_students,
-            action_name='rescored'
-        )
-
 
 @attr(shard=3)
 class TestResetAttemptsInstructorTask(TestInstructorTasks):
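The surviving success test leans on a unittest.mock detail: deleting an attribute from a MagicMock makes later hasattr checks return False, letting the mock impersonate a pure ScorableXBlockMixin block with no legacy done field. For example:

    # Why `del mock_instance.done` works: deletion makes hasattr fail.
    from mock import MagicMock  # unittest.mock on Python 3

    mock_instance = MagicMock()
    assert hasattr(mock_instance, 'done')       # mocks auto-create attributes
    del mock_instance.done
    assert not hasattr(mock_instance, 'done')   # deleted attrs raise AttributeError
    assert hasattr(mock_instance, 'rescore')    # other attrs still auto-create

The two deleted tests only exercised the old result-dict protocol ({'success': 'bogus'} and a missing 'success' key), which no longer exists now that rescore signals failure by raising.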