edx / edx-platform

Commit bcc3d165, authored May 02, 2017 by sanfordstudent, committed by GitHub May 02, 2017

Merge pull request #14929 from edx/neem/capa-refactor

    Refactor CAPA to use scorable XBlock mixin

Parents: d48429d1, ea0027f3

Showing 11 changed files with 264 additions and 300 deletions (+264, -300):
common/lib/capa/capa/capa_problem.py                               +15   -21
common/lib/xmodule/xmodule/capa_base.py                            +139  -110
common/lib/xmodule/xmodule/capa_module.py                          +2    -1
common/lib/xmodule/xmodule/tests/test_capa_module.py               +30   -29
common/lib/xmodule/xmodule/tests/test_delay_between_attempts.py    +3    -2
lms/djangoapps/grades/signals/handlers.py                          +32   -6
lms/djangoapps/grades/tests/integration/test_events.py             +4    -8
lms/djangoapps/instructor_task/api_helper.py                       +13   -6
lms/djangoapps/instructor_task/tasks_helper/module_state.py        +17   -50
lms/djangoapps/instructor_task/tests/test_integration.py           +3    -2
lms/djangoapps/instructor_task/tests/test_tasks.py                 +6    -65
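For orientation before the per-file diffs: the commit moves CAPA onto the ScorableXBlockMixin contract from xblock.scorable. A rough sketch of that contract follows; the method names and the Score namedtuple match the imports used in the diffs below, but the bodies are illustrative stand-ins, not the verbatim upstream source.

    # Sketch of the xblock.scorable contract this commit adopts (illustrative,
    # not the verbatim upstream source).
    from collections import namedtuple

    Score = namedtuple('Score', ['raw_earned', 'raw_possible'])


    class ScorableXBlockMixin(object):
        def has_submitted_answer(self):
            """True if the learner has already submitted an answer."""
            raise NotImplementedError

        def get_score(self):
            """Return the Score currently stored on the block."""
            raise NotImplementedError

        def set_score(self, score):
            """Persist a Score on the block, independent of problem internals."""
            raise NotImplementedError

        def calculate_score(self):
            """Recompute a Score from the current problem state."""
            raise NotImplementedError

        def rescore(self, only_if_higher):
            """Recalculate, store, and publish the score for existing answers."""
            raise NotImplementedError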
common/lib/capa/capa/capa_problem.py

@@ -304,26 +304,25 @@ class LoncapaProblem(object):
             maxscore += responder.get_max_score()
         return maxscore

-    def get_score(self):
+    def calculate_score(self, correct_map=None):
         """
         Compute score for this problem. The score is the number of points awarded.
         Returns a dictionary {'score': integer, from 0 to get_max_score(),
         'total': get_max_score()}.
+        Takes an optional correctness map for use in the rescore workflow.
         """
+        if correct_map is None:
+            correct_map = self.correct_map
         correct = 0
-        for key in self.correct_map:
+        for key in correct_map:
             try:
-                correct += self.correct_map.get_npoints(key)
+                correct += correct_map.get_npoints(key)
             except Exception:
-                log.error('key=%s, correct_map = %s', key, self.correct_map)
+                log.error('key=%s, correct_map = %s', key, correct_map)
                 raise

         if (not self.student_answers) or len(self.student_answers) == 0:
             return {'score': 0,
                     'total': self.get_max_score()}
-        else:
-            return {'score': correct,
-                    'total': self.get_max_score()}
+        return {'score': correct,
+                'total': self.get_max_score()}

     def update_score(self, score_msg, queuekey):
         """

@@ -397,7 +396,9 @@ class LoncapaProblem(object):
         # if answers include File objects, convert them to filenames.
         self.student_answers = convert_files_to_filenames(answers)

-        return self._grade_answers(answers)
+        new_cmap = self.get_grade_from_current_answers(answers)
+        self.correct_map = new_cmap
+        return self.correct_map

     def supports_rescoring(self):
         """

@@ -418,16 +419,10 @@ class LoncapaProblem(object):
         """
         return all('filesubmission' not in responder.allowed_inputfields for responder in self.responders.values())

-    def rescore_existing_answers(self):
-        """
-        Rescore student responses. Called by capa_module.rescore_problem.
-        """
-        return self._grade_answers(None)
-
-    def _grade_answers(self, student_answers):
+    def get_grade_from_current_answers(self, student_answers):
         """
-        Internal grading call used for checking new 'student_answers' and also
-        rescoring existing student_answers.
+        Gets the grade for the currently-saved problem state, but does not save it
+        to the block.

         For new student_answers being graded, `student_answers` is a dict of all the
         entries from request.POST, but with the first part of each key removed

@@ -462,7 +457,6 @@ class LoncapaProblem(object):
                 results = responder.evaluate_answers(self.student_answers, oldcmap)
                 newcmap.update(results)

-        self.correct_map = newcmap
         return newcmap

     def get_question_answers(self):
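Taken together, the capa_problem.py changes split grading into a pure computation (calculate_score over an explicit correctness map) and an explicit state update. A hypothetical caller of the new API might look like this; the problem object is assumed to be an already-constructed LoncapaProblem:

    # Hypothetical usage of the refactored LoncapaProblem API (names from the
    # diff above; the problem object itself is assumed to exist).
    def rescore_without_saving(problem):
        # Grade the saved answers without mutating problem.correct_map ...
        new_cmap = problem.get_grade_from_current_answers(None)
        # ... then compute a score from that map explicitly.
        return problem.calculate_score(correct_map=new_cmap)

    def score_of_saved_state(problem):
        # With no argument, calculate_score falls back to the stored map.
        return problem.calculate_score()  # {'score': ..., 'total': ...}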
common/lib/xmodule/xmodule/capa_base.py

@@ -24,6 +24,7 @@ from capa.inputtypes import Status
 from capa.responsetypes import StudentInputError, ResponseError, LoncapaProblemError
 from capa.util import convert_files_to_filenames, get_inner_html_from_xpath
 from xblock.fields import Boolean, Dict, Float, Integer, Scope, String, XMLString
+from xblock.scorable import ScorableXBlockMixin, Score
 from xmodule.capa_base_constants import RANDOMIZATION, SHOWANSWER, SHOW_CORRECTNESS
 from xmodule.exceptions import NotFoundError
 from .fields import Date, Timedelta

@@ -219,7 +220,7 @@ class CapaFields(object):
     )


-class CapaMixin(CapaFields):
+class CapaMixin(ScorableXBlockMixin, CapaFields):
     """
     Core logic for Capa Problem, which can be used by XModules or XBlocks.
     """

@@ -290,6 +291,8 @@ class CapaMixin(CapaFields):
             self.set_state_from_lcp()

+        self.set_score(self.score_from_lcp())
+
         assert self.seed is not None

     def choose_new_seed(self):

@@ -372,32 +375,28 @@ class CapaMixin(CapaFields):
         """
         self.last_submission_time = datetime.datetime.now(utc)

-    def get_score(self):
-        """
-        Access the problem's score
-        """
-        return self.lcp.get_score()
-
     def get_progress(self):
         """
-        For now, just return score / max_score
+        For now, just return weighted earned / weighted possible
         """
-        score_dict = self.get_score()
-        score = score_dict['score']
-        total = score_dict['total']
+        score = self.get_score()
+        raw_earned = score.raw_earned
+        raw_possible = score.raw_possible

-        if total > 0:
+        if raw_possible > 0:
             if self.weight is not None:
                 # Progress objects expect total > 0
                 if self.weight == 0:
                     return None

                 # scale score and total by weight/total:
-                score = score * self.weight / total
-                total = self.weight
+                weighted_earned = raw_earned * self.weight / raw_possible
+                weighted_possible = self.weight
+            else:
+                weighted_earned = raw_earned
+                weighted_possible = raw_possible
             try:
-                return Progress(score, total)
+                return Progress(weighted_earned, weighted_possible)
             except (TypeError, ValueError):
                 log.exception("Got bad progress")
                 return None

@@ -572,7 +571,7 @@ class CapaMixin(CapaFields):
         # Next, generate a fresh LoncapaProblem
         self.lcp = self.new_lcp(None)
         self.set_state_from_lcp()
+        self.set_score(self.score_from_lcp())

         # Prepend a scary warning to the student
         _ = self.runtime.service(self, "i18n").ugettext
         warning_msg = _("Warning: The problem has been reset to its initial state!")

@@ -879,8 +878,7 @@ class CapaMixin(CapaFields):
         """
         True iff full points
         """
-        score_dict = self.get_score()
-        return score_dict['score'] == score_dict['total']
+        return self.score.raw_earned == self.score.raw_possible

     def answer_available(self):
         """

@@ -949,6 +947,7 @@ class CapaMixin(CapaFields):
         score_msg = data['xqueue_body']
         self.lcp.update_score(score_msg, queuekey)
         self.set_state_from_lcp()
+        self.set_score(self.score_from_lcp())
         self.publish_grade()

         return dict()  # No AJAX return is needed

@@ -1132,18 +1131,17 @@ class CapaMixin(CapaFields):
         """
         Publishes the student's current grade to the system as an event
         """
-        score = self.lcp.get_score()
         self.runtime.publish(
             self,
             'grade',
             {
-                'value': score['score'],
-                'max_value': score['total'],
+                'value': self.score.raw_earned,
+                'max_value': self.score.raw_possible,
                 'only_if_higher': only_if_higher,
             }
         )

-        return {'grade': score['score'], 'max_grade': score['total']}
+        return {'grade': self.score.raw_earned, 'max_grade': self.score.raw_possible}

     # pylint: disable=too-many-statements
     def submit_problem(self, data, override_time=False):

@@ -1216,6 +1214,7 @@ class CapaMixin(CapaFields):
             self.attempts = self.attempts + 1
             self.lcp.done = True
             self.set_state_from_lcp()
+            self.set_score(self.score_from_lcp())
             self.set_last_submission_time()

         except (StudentInputError, ResponseError, LoncapaProblemError) as inst:

@@ -1227,6 +1226,7 @@ class CapaMixin(CapaFields):
             # Save the user's state before failing
             self.set_state_from_lcp()
+            self.set_score(self.score_from_lcp())

             # If the user is a staff member, include
             # the full exception, including traceback,

@@ -1245,6 +1245,7 @@ class CapaMixin(CapaFields):
         except Exception as err:
             # Save the user's state before failing
             self.set_state_from_lcp()
+            self.set_score(self.score_from_lcp())

             if self.runtime.DEBUG:
                 msg = u"Error checking problem: {}".format(err.message)

@@ -1457,93 +1458,6 @@ class CapaMixin(CapaFields):
         return input_metadata

-    def rescore_problem(self, only_if_higher):
-        """
-        Checks whether the existing answers to a problem are correct.
-
-        This is called when the correct answer to a problem has been changed,
-        and the grade should be re-evaluated.
-
-        If only_if_higher is True, the answer and grade are updated
-        only if the resulting score is higher than before.
-
-        Returns a dict with one key:
-            {'success' : 'correct' | 'incorrect' | AJAX alert msg string }
-
-        Raises NotFoundError if called on a problem that has not yet been
-        answered, or NotImplementedError if it's a problem that cannot be rescored.
-
-        Returns the error messages for exceptions occurring while performing
-        the rescoring, rather than throwing them.
-        """
-        event_info = {'state': self.lcp.get_state(), 'problem_id': self.location.to_deprecated_string()}
-
-        _ = self.runtime.service(self, "i18n").ugettext
-
-        if not self.lcp.supports_rescoring():
-            event_info['failure'] = 'unsupported'
-            self.track_function_unmask('problem_rescore_fail', event_info)
-            # pylint: disable=line-too-long
-            # Translators: 'rescoring' refers to the act of re-submitting a student's solution so it can get a new score.
-            raise NotImplementedError(_("Problem's definition does not support rescoring."))
-            # pylint: enable=line-too-long
-
-        if not self.done:
-            event_info['failure'] = 'unanswered'
-            self.track_function_unmask('problem_rescore_fail', event_info)
-            raise NotFoundError(_("Problem must be answered before it can be graded again."))
-
-        # get old score, for comparison:
-        orig_score = self.lcp.get_score()
-        event_info['orig_score'] = orig_score['score']
-        event_info['orig_total'] = orig_score['total']
-
-        try:
-            correct_map = self.lcp.rescore_existing_answers()
-        except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
-            log.warning("Input error in capa_module:problem_rescore", exc_info=True)
-            event_info['failure'] = 'input_error'
-            self.track_function_unmask('problem_rescore_fail', event_info)
-            return {'success': u"Error: {0}".format(inst.message)}
-
-        except Exception as err:
-            event_info['failure'] = 'unexpected'
-            self.track_function_unmask('problem_rescore_fail', event_info)
-            if self.runtime.DEBUG:
-                msg = u"Error checking problem: {0}".format(err.message)
-                msg += u'\nTraceback:\n' + traceback.format_exc()
-                return {'success': msg}
-            raise
-
-        # rescoring should have no effect on attempts, so don't
-        # need to increment here, or mark done. Just save.
-        self.set_state_from_lcp()
-
-        self.publish_grade(only_if_higher)
-
-        new_score = self.lcp.get_score()
-        event_info['new_score'] = new_score['score']
-        event_info['new_total'] = new_score['total']
-
-        # success = correct if ALL questions in this problem are correct
-        success = 'correct'
-        for answer_id in correct_map:
-            if not correct_map.is_correct(answer_id):
-                success = 'incorrect'
-
-        # NOTE: We are logging both full grading and queued-grading submissions. In the latter,
-        # 'success' will always be incorrect
-        event_info['correct_map'] = correct_map.get_dict()
-        event_info['success'] = success
-        event_info['attempts'] = self.attempts
-        self.track_function_unmask('problem_rescore', event_info)
-
-        return {
-            'success': success,
-            'new_raw_earned': new_score['score'],
-            'new_raw_possible': new_score['total'],
-        }
-
     def save_problem(self, data):
         """
         Save the passed in answers.

@@ -1584,6 +1498,7 @@ class CapaMixin(CapaFields):
         self.lcp.has_saved_answers = True
         self.set_state_from_lcp()
+        self.set_score(self.score_from_lcp())

         self.track_function_unmask('save_problem_success', event_info)
         msg = _("Your answers have been saved.")

@@ -1642,6 +1557,7 @@ class CapaMixin(CapaFields):
         # Pull in the new problem seed
         self.set_state_from_lcp()
+        self.set_score(self.score_from_lcp())

         # Grade may have changed, so publish new value
         self.publish_grade()

@@ -1653,3 +1569,116 @@ class CapaMixin(CapaFields):
             'success': True,
             'html': self.get_problem_html(encapsulate=False),
         }
+
+    # ScorableXBlockMixin methods
+
+    def rescore(self, only_if_higher=False):
+        """
+        Checks whether the existing answers to a problem are correct.
+
+        This is called when the correct answer to a problem has been changed,
+        and the grade should be re-evaluated.
+
+        If only_if_higher is True, the answer and grade are updated
+        only if the resulting score is higher than before.
+
+        Returns a dict with one key:
+            {'success' : 'correct' | 'incorrect' | AJAX alert msg string }
+
+        Raises NotFoundError if called on a problem that has not yet been
+        answered, or NotImplementedError if it's a problem that cannot be rescored.
+
+        Returns the error messages for exceptions occurring while performing
+        the rescoring, rather than throwing them.
+        """
+        event_info = {'state': self.lcp.get_state(), 'problem_id': self.location.to_deprecated_string()}
+
+        _ = self.runtime.service(self, "i18n").ugettext
+
+        if not self.lcp.supports_rescoring():
+            event_info['failure'] = 'unsupported'
+            self.track_function_unmask('problem_rescore_fail', event_info)
+            # pylint: disable=line-too-long
+            # Translators: 'rescoring' refers to the act of re-submitting a student's solution so it can get a new score.
+            raise NotImplementedError(_("Problem's definition does not support rescoring."))
+            # pylint: enable=line-too-long
+
+        if not self.done:
+            event_info['failure'] = 'unanswered'
+            self.track_function_unmask('problem_rescore_fail', event_info)
+            raise NotFoundError(_("Problem must be answered before it can be graded again."))
+
+        # get old score, for comparison:
+        orig_score = self.get_score()
+        event_info['orig_score'] = orig_score.raw_earned
+        event_info['orig_total'] = orig_score.raw_possible
+
+        try:
+            calculated_score = self.calculate_score()
+        except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
+            log.warning("Input error in capa_module:problem_rescore", exc_info=True)
+            event_info['failure'] = 'input_error'
+            self.track_function_unmask('problem_rescore_fail', event_info)
+            raise
+
+        except Exception:
+            event_info['failure'] = 'unexpected'
+            self.track_function_unmask('problem_rescore_fail', event_info)
+            raise
+
+        # rescoring should have no effect on attempts, so don't
+        # need to increment here, or mark done. Just save.
+        self.set_state_from_lcp()
+        self.set_score(calculated_score)
+        self.publish_grade(only_if_higher)
+
+        event_info['new_score'] = calculated_score.raw_earned
+        event_info['new_total'] = calculated_score.raw_possible
+
+        # success = correct if ALL questions in this problem are correct
+        success = 'correct'
+        for answer_id in self.lcp.correct_map:
+            if not self.lcp.correct_map.is_correct(answer_id):
+                success = 'incorrect'
+
+        # NOTE: We are logging both full grading and queued-grading submissions. In the latter,
+        # 'success' will always be incorrect
+        event_info['correct_map'] = self.lcp.correct_map.get_dict()
+        event_info['success'] = success
+        event_info['attempts'] = self.attempts
+        self.track_function_unmask('problem_rescore', event_info)
+
+    def has_submitted_answer(self):
+        return self.done
+
+    def set_score(self, score):
+        """
+        Sets the internal score for the problem. This is not derived directly
+        from the internal LCP in keeping with the ScorableXBlock spec.
+        """
+        self.score = score
+
+    def get_score(self):
+        """
+        Returns the score currently set on the block.
+        """
+        return self.score
+
+    def calculate_score(self):
+        """
+        Returns the score calculated from the current problem state.
+
+        Operates by creating a new correctness map based on the current
+        state of the LCP, and having the LCP generate a score from that.
+        """
+        new_correctness = self.lcp.get_grade_from_current_answers(None)
+        new_score = self.lcp.calculate_score(new_correctness)
+        return Score(raw_earned=new_score['score'], raw_possible=new_score['total'])
+
+    def score_from_lcp(self):
+        """
+        Returns the score associated with the correctness map
        currently stored by the LCP.
+        """
+        lcp_score = self.lcp.calculate_score()
+        return Score(raw_earned=lcp_score['score'], raw_possible=lcp_score['total'])
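The rewritten get_progress performs the same weighting arithmetic as before, just on Score attributes instead of dict keys. The calculation in isolation, with the weight semantics described in the diff:

    # Standalone sketch of the weighting arithmetic used by get_progress.
    def weighted(raw_earned, raw_possible, weight=None):
        if raw_possible <= 0:
            return None
        if weight is not None:
            if weight == 0:
                return None  # Progress objects expect a positive total
            # scale earned points so that the maximum equals the weight
            return (raw_earned * weight / raw_possible, weight)
        return (raw_earned, raw_possible)

    # e.g. 1 of 2 raw points on a problem weighted to 10 -> (5.0, 10)
    assert weighted(1.0, 2.0, weight=10) == (5.0, 10)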
common/lib/xmodule/xmodule/capa_module.py

@@ -318,6 +318,7 @@ class CapaDescriptor(CapaFields, RawDescriptor):
     hint_button = module_attr('hint_button')
     handle_problem_html_error = module_attr('handle_problem_html_error')
     handle_ungraded_response = module_attr('handle_ungraded_response')
+    has_submitted_answer = module_attr('has_submitted_answer')
     is_attempted = module_attr('is_attempted')
     is_correct = module_attr('is_correct')
     is_past_due = module_attr('is_past_due')

@@ -326,7 +327,7 @@ class CapaDescriptor(CapaFields, RawDescriptor):
     make_dict_of_responses = module_attr('make_dict_of_responses')
     new_lcp = module_attr('new_lcp')
     publish_grade = module_attr('publish_grade')
-    rescore_problem = module_attr('rescore_problem')
+    rescore = module_attr('rescore')
     reset_problem = module_attr('reset_problem')
     save_problem = module_attr('save_problem')
     set_state_from_lcp = module_attr('set_state_from_lcp')
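Each module_attr assignment above exposes a CapaMixin method on the descriptor by delegating to the runtime-bound module. A much-simplified sketch of the pattern (the real helper lives in xmodule.x_module and handles binding and error cases this sketch ignores):

    # Much-simplified sketch of the module_attr helper (the real one lives in
    # xmodule.x_module; _xmodule is assumed to be the runtime-bound XModule).
    def module_attr(attr):
        def proxy(self, *args, **kwargs):
            # Forward the call from the descriptor to the bound module.
            return getattr(self._xmodule, attr)(*args, **kwargs)
        return proxy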
common/lib/xmodule/xmodule/tests/test_capa_module.py

@@ -29,6 +29,7 @@ from xmodule.capa_module import CapaModule, CapaDescriptor, ComplexEncoder
 from opaque_keys.edx.locations import Location
 from xblock.field_data import DictFieldData
 from xblock.fields import ScopeIds
+from xblock.scorable import Score
 from . import get_test_system
 from pytz import UTC

@@ -130,9 +131,9 @@ class CapaFactory(object):
         if override_get_score:
             if correct:
                 # TODO: probably better to actually set the internal state properly, but...
-                module.get_score = lambda: {'score': 1, 'total': 1}
+                module.score = Score(raw_earned=1, raw_possible=1)
             else:
-                module.get_score = lambda: {'score': 0, 'total': 1}
+                module.score = Score(raw_earned=0, raw_possible=1)

         module.graded = 'False'
         return module

@@ -196,10 +197,10 @@ class CapaModuleTest(unittest.TestCase):
     def test_import(self):
         module = CapaFactory.create()
-        self.assertEqual(module.get_score()['score'], 0)
+        self.assertEqual(module.get_score().raw_earned, 0)

         other_module = CapaFactory.create()
-        self.assertEqual(module.get_score()['score'], 0)
+        self.assertEqual(module.get_score().raw_earned, 0)

         self.assertNotEqual(module.url_name, other_module.url_name,
                             "Factory should be creating unique names for each problem")

@@ -208,31 +209,33 @@ class CapaModuleTest(unittest.TestCase):
         Check that the factory creates correct and incorrect problems properly.
         """
         module = CapaFactory.create()
-        self.assertEqual(module.get_score()['score'], 0)
+        self.assertEqual(module.get_score().raw_earned, 0)

         other_module = CapaFactory.create(correct=True)
-        self.assertEqual(other_module.get_score()['score'], 1)
+        self.assertEqual(other_module.get_score().raw_earned, 1)

     def test_get_score(self):
         """
-        Do 1 test where the internals of get_score are properly set
-
-        @jbau Note: this obviously depends on a particular implementation of get_score, but I think this is actually
-        useful as unit-code coverage for this current implementation. I don't see a layer where LoncapaProblem
-        is tested directly
+        Tests the internals of get_score. In keeping with the ScorableXBlock spec,
+        Capa modules store their score independently of the LCP internals, so it must
+        be explicitly updated.
         """
         student_answers = {'1_2_1': 'abcd'}
         correct_map = CorrectMap(answer_id='1_2_1', correctness="correct", npoints=0.9)
         module = CapaFactory.create(correct=True, override_get_score=False)
         module.lcp.correct_map = correct_map
         module.lcp.student_answers = student_answers
-        self.assertEqual(module.get_score()['score'], 0.9)
+        self.assertEqual(module.get_score().raw_earned, 0.0)
+        module.set_score(module.score_from_lcp())
+        self.assertEqual(module.get_score().raw_earned, 0.9)

         other_correct_map = CorrectMap(answer_id='1_2_1', correctness="incorrect", npoints=0.1)
         other_module = CapaFactory.create(correct=False, override_get_score=False)
         other_module.lcp.correct_map = other_correct_map
         other_module.lcp.student_answers = student_answers
-        self.assertEqual(other_module.get_score()['score'], 0.1)
+        self.assertEqual(other_module.get_score().raw_earned, 0.0)
+        other_module.set_score(other_module.score_from_lcp())
+        self.assertEqual(other_module.get_score().raw_earned, 0.1)

     def test_showanswer_default(self):
         """

@@ -1007,14 +1010,15 @@ class CapaModuleTest(unittest.TestCase):
         # Simulate that all answers are marked correct, no matter
         # what the input is, by patching LoncapaResponse.evaluate_answers()
         with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
-            mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'correct')
-            result = module.rescore_problem(only_if_higher=False)
+            mock_evaluate_answers.return_value = CorrectMap(
+                answer_id=CapaFactory.answer_key(),
+                correctness='correct',
+                npoints=1,
+            )
+            module.rescore(only_if_higher=False)

             # Expect that the problem is marked correct
-            self.assertEqual(result['success'], 'correct')
-
-            # Expect that we get no HTML
-            self.assertNotIn('contents', result)
+            self.assertEqual(module.is_correct(), True)

             # Expect that the number of attempts is not incremented
             self.assertEqual(module.attempts, 1)

@@ -1028,10 +1032,10 @@ class CapaModuleTest(unittest.TestCase):
         # what the input is, by patching LoncapaResponse.evaluate_answers()
         with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
             mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'incorrect')
-            result = module.rescore_problem(only_if_higher=False)
+            module.rescore(only_if_higher=False)

             # Expect that the problem is marked incorrect
-            self.assertEqual(result['success'], 'incorrect')
+            self.assertEqual(module.is_correct(), False)

             # Expect that the number of attempts is not incremented
             self.assertEqual(module.attempts, 0)

@@ -1042,7 +1046,7 @@ class CapaModuleTest(unittest.TestCase):
         # Try to rescore the problem, and get exception
         with self.assertRaises(xmodule.exceptions.NotFoundError):
-            module.rescore_problem(only_if_higher=False)
+            module.rescore(only_if_higher=False)

     def test_rescore_problem_not_supported(self):
         module = CapaFactory.create(done=True)

@@ -1051,7 +1055,7 @@ class CapaModuleTest(unittest.TestCase):
         with patch('capa.capa_problem.LoncapaProblem.supports_rescoring') as mock_supports_rescoring:
             mock_supports_rescoring.return_value = False
             with self.assertRaises(NotImplementedError):
-                module.rescore_problem(only_if_higher=False)
+                module.rescore(only_if_higher=False)

     def _rescore_problem_error_helper(self, exception_class):
         """Helper to allow testing all errors that rescoring might return."""

@@ -1059,13 +1063,10 @@ class CapaModuleTest(unittest.TestCase):
         module = CapaFactory.create(attempts=1, done=True)

         # Simulate answering a problem that raises the exception
-        with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
+        with patch('capa.capa_problem.LoncapaProblem.get_grade_from_current_answers') as mock_rescore:
             mock_rescore.side_effect = exception_class(u'test error \u03a9')
-            result = module.rescore_problem(only_if_higher=False)
-
-        # Expect an AJAX alert message in 'success'
-        expected_msg = u'Error: test error \u03a9'
-        self.assertEqual(result['success'], expected_msg)
+            with self.assertRaises(exception_class):
+                module.rescore(only_if_higher=False)

         # Expect that the number of attempts is NOT incremented
         self.assertEqual(module.attempts, 1)
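The updated test_get_score depends on CorrectMap's npoints accounting; the mechanics in isolation (hypothetical answer id and point value):

    # Isolated sketch of the CorrectMap npoints accounting the test relies on
    # (hypothetical answer id and point value).
    from capa.correctmap import CorrectMap

    cmap = CorrectMap(answer_id='1_2_1', correctness='correct', npoints=0.9)
    assert cmap.is_correct('1_2_1')
    assert cmap.get_npoints('1_2_1') == 0.9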
common/lib/xmodule/xmodule/tests/test_delay_between_attempts.py

@@ -18,6 +18,7 @@ from xmodule.capa_module import CapaModule
 from opaque_keys.edx.locations import Location
 from xblock.field_data import DictFieldData
 from xblock.fields import ScopeIds
+from xblock.scorable import Score
 from . import get_test_system
 from pytz import UTC

@@ -111,9 +112,9 @@ class CapaFactoryWithDelay(object):
         if correct:
             # Could set the internal state formally, but here we just jam in the score.
-            module.get_score = lambda: {'score': 1, 'total': 1}
+            module.score = Score(raw_earned=1, raw_possible=1)
         else:
-            module.get_score = lambda: {'score': 0, 'total': 1}
+            module.score = Score(raw_earned=0, raw_possible=1)

         return module
lms/djangoapps/grades/signals/handlers.py

@@ -2,6 +2,7 @@
 Grades related signals.
 """
 from contextlib import contextmanager
+from crum import get_current_user
 from logging import getLogger

 from django.dispatch import receiver

@@ -32,6 +33,8 @@ from ..tasks import recalculate_subsection_grade_v3, RECALCULATE_GRADE_DELAY
 log = getLogger(__name__)

+# define values to be used in grading events
+GRADES_RESCORE_EVENT_TYPE = 'edx.grades.problem.rescored'
 PROBLEM_SUBMITTED_EVENT_TYPE = 'edx.grades.problem.submitted'

@@ -209,7 +212,7 @@ def enqueue_subsection_update(sender, **kwargs):  # pylint: disable=unused-argum
     Handles the PROBLEM_WEIGHTED_SCORE_CHANGED signal by
     enqueueing a subsection update operation to occur asynchronously.
     """
-    _emit_problem_submitted_event(kwargs)
+    _emit_event(kwargs)
     result = recalculate_subsection_grade_v3.apply_async(
         kwargs=dict(
             user_id=kwargs['user_id'],

@@ -241,12 +244,14 @@ def recalculate_course_grade(sender, course, course_structure, user, **kwargs):
     CourseGradeFactory().update(user, course=course, course_structure=course_structure)


-def _emit_problem_submitted_event(kwargs):
+def _emit_event(kwargs):
     """
-    Emits a problem submitted event only if
-    there is no current event transaction type,
-    i.e. we have not reached this point in the
-    code via a rescore or student state deletion.
+    Emits a problem submitted event only if there is no current event
+    transaction type, i.e. we have not reached this point in the code via a
+    rescore or student state deletion.
+
+    If the event transaction type has already been set and the transacation is
+    a rescore, emits a problem rescored event.
     """
     root_type = get_event_transaction_type()

@@ -267,3 +272,24 @@ def _emit_problem_submitted_event(kwargs):
                 'weighted_possible': kwargs.get('weighted_possible'),
             }
         )
+
+    if root_type == 'edx.grades.problem.rescored':
+        current_user = get_current_user()
+        if current_user is not None and hasattr(current_user, 'id'):
+            instructor_id = unicode(current_user.id)
+        else:
+            instructor_id = None
+        tracker.emit(
+            unicode(GRADES_RESCORE_EVENT_TYPE),
+            {
+                'course_id': unicode(kwargs['course_id']),
+                'user_id': unicode(kwargs['user_id']),
+                'problem_id': unicode(kwargs['usage_id']),
+                'new_weighted_earned': kwargs.get('weighted_earned'),
+                'new_weighted_possible': kwargs.get('weighted_possible'),
+                'only_if_higher': kwargs.get('only_if_higher'),
+                'instructor_id': instructor_id,
+                'event_transaction_id': unicode(get_event_transaction_id()),
+                'event_transaction_type': unicode(GRADES_RESCORE_EVENT_TYPE),
+            }
+        )
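The handler refactor routes both submission and rescore flows through one _emit_event entry point and branches on the root event-transaction type. Schematically (a simplified stand-in, with emit injected here rather than the real eventtracking tracker):

    # Simplified stand-in for the _emit_event branching (emit is injected;
    # the real code uses eventtracking's tracker and the payloads in the diff).
    def emit_grading_event(kwargs, root_type, emit):
        if root_type is None:
            # No enclosing transaction: a direct learner submission.
            emit('edx.grades.problem.submitted', kwargs)
        elif root_type == 'edx.grades.problem.rescored':
            # Enclosing transaction is a rescore: emit the rescored event.
            emit('edx.grades.problem.rescored', kwargs)
        # Other root types (e.g. student state deletion) emit nothing here.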
lms/djangoapps/grades/tests/integration/test_events.py

@@ -160,10 +160,9 @@ class GradesEventIntegrationTest(ProblemSubmissionTestMixin, SharedModuleStoreTe
         }
     )

-    @patch('lms.djangoapps.instructor_task.tasks_helper.module_state.tracker')
     @patch('lms.djangoapps.grades.signals.handlers.tracker')
     @patch('lms.djangoapps.grades.models.tracker')
-    def test_rescoring_events(self, models_tracker, handlers_tracker, instructor_task_tracker):
+    def test_rescoring_events(self, models_tracker, handlers_tracker):
         # submit answer
         self.submit_question_answer('p1', {'2_1': 'choice_choice_3'})
         models_tracker.reset_mock()

@@ -187,11 +186,8 @@ class GradesEventIntegrationTest(ProblemSubmissionTestMixin, SharedModuleStoreTe
         )
         # check logging to make sure id's are tracked correctly across
         # events
-        event_transaction_id = instructor_task_tracker.emit.mock_calls[0][1][1]['event_transaction_id']
-        self.assertEqual(instructor_task_tracker.get_tracker().context.call_args[0],
-                         ('edx.grades.problem.rescored', {'course_id': unicode(self.course.id),
-                                                          'org_id': unicode(self.course.org)}))
+        event_transaction_id = handlers_tracker.emit.mock_calls[0][1][1]['event_transaction_id']

         # make sure the id is propagated throughout the event flow
         for call in models_tracker.emit.mock_calls:
             self.assertEqual(event_transaction_id, call[1][1]['event_transaction_id'])

@@ -206,7 +202,7 @@ class GradesEventIntegrationTest(ProblemSubmissionTestMixin, SharedModuleStoreTe
         handlers_tracker.assert_not_called()

-        instructor_task_tracker.emit.assert_called_with(
+        handlers_tracker.emit.assert_called_with(
             unicode(RESCORE_TYPE),
             {
                 'course_id': unicode(self.course.id),
lms/djangoapps/instructor_task/api_helper.py

@@ -100,6 +100,17 @@ def _get_xmodule_instance_args(request, task_id):
     return xmodule_instance_args


+def _supports_rescore(descriptor):
+    """
+    Helper method to determine whether a given item supports rescoring.
+    In order to accommodate both XModules and XBlocks, we have to check
+    the descriptor itself then fall back on its module class.
+    """
+    return hasattr(descriptor, 'rescore') or (
+        hasattr(descriptor, 'module_class') and hasattr(descriptor.module_class, 'rescore')
+    )
+
+
 def _update_instructor_task(instructor_task, task_result):
     """
     Updates and possibly saves a InstructorTask entry based on a task Result.

@@ -246,10 +257,7 @@ def check_arguments_for_rescoring(usage_key):
     corresponding module doesn't support rescoring calls.
     """
     descriptor = modulestore().get_item(usage_key)
-    # TODO: Clean this up as part of TNL-6594 when CAPA uses the ScorableXBlockMixin
-    if (not hasattr(descriptor, 'module_class') or
-            not hasattr(descriptor.module_class, 'rescore_problem')) and not hasattr(descriptor, 'rescore'):
+    if not _supports_rescore(descriptor):
         msg = "Specified module does not support rescoring."
         raise NotImplementedError(msg)

@@ -264,8 +272,7 @@ def check_entrance_exam_problems_for_rescoring(exam_key):  # pylint: disable=inv
     any of the problem in entrance exam doesn't support re-scoring calls.
     """
     problems = get_problems_in_section(exam_key).values()
-    if any(not hasattr(problem, 'module_class') or
-           not hasattr(problem.module_class, 'rescore_problem') for problem in problems):
+    if any(not _supports_rescore(problem) for problem in problems):
         msg = _("Not all problems in entrance exam support re-scoring.")
         raise NotImplementedError(msg)
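Because _supports_rescore is a plain duck-typing check, it accepts both pure XBlocks (which carry rescore themselves) and old-style descriptors (whose module_class carries it). Hypothetical objects illustrating all three outcomes, with the same check inlined for self-containment:

    # Hypothetical objects illustrating the duck-typing check in _supports_rescore.
    def supports_rescore(descriptor):
        # Same check as the diff: the item itself, or its module class.
        return hasattr(descriptor, 'rescore') or (
            hasattr(descriptor, 'module_class') and hasattr(descriptor.module_class, 'rescore')
        )

    class PureXBlock(object):
        def rescore(self, only_if_higher):
            pass

    class OldStyleDescriptor(object):
        class module_class(object):
            def rescore(self, only_if_higher):
                pass

    assert supports_rescore(PureXBlock())
    assert supports_rescore(OldStyleDescriptor())
    assert not supports_rescore(object())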
lms/djangoapps/instructor_task/tasks_helper/module_state.py

@@ -10,6 +10,7 @@ from time import time
 from eventtracking import tracker
 from opaque_keys.edx.keys import UsageKey
 from xmodule.modulestore.django import modulestore
+from capa.responsetypes import StudentInputError, ResponseError, LoncapaProblemError
 from courseware.courses import get_course_by_id, get_problems_in_section
 from courseware.models import StudentModule
 from courseware.model_data import DjangoKeyValueStore, FieldDataCache

@@ -174,38 +175,28 @@ def rescore_problem_module_state(xmodule_instance_args, module_descriptor, stude
         TASK_LOG.warning(msg)
         return UPDATE_STATUS_FAILED

-    # TODO: (TNL-6594) Remove this switch once rescore_problem support
-    # once CAPA uses ScorableXBlockMixin.
-    for method in ['rescore', 'rescore_problem']:
-        rescore_method = getattr(instance, method, None)
-        if rescore_method is not None:
-            break
-    else:  # for-else: Neither method exists on the block.
+    if not hasattr(instance, 'rescore'):
         # This should not happen, since it should be already checked in the
         # caller, but check here to be sure.
         msg = "Specified problem does not support rescoring."
         raise UpdateProblemModuleStateError(msg)

-    # TODO: Remove the first part of this if-else with TNL-6594
     # We check here to see if the problem has any submissions. If it does not, we don't want to rescore it
-    if hasattr(instance, "done"):
-        if not instance.done:
-            return UPDATE_STATUS_SKIPPED
-    elif not instance.has_submitted_answer():
+    if not instance.has_submitted_answer():
         return UPDATE_STATUS_SKIPPED

     # Set the tracking info before this call, because it makes downstream
     # calls that create events.  We retrieve and store the id here because
     # the request cache will be erased during downstream calls.
-    event_transaction_id = create_new_event_transaction_id()
+    create_new_event_transaction_id()
     set_event_transaction_type(GRADES_RESCORE_EVENT_TYPE)

-    result = rescore_method(only_if_higher=task_input['only_if_higher'])
-    instance.save()
-    if result is None or result.get(u'success') in {u'correct', u'incorrect'}:
-        TASK_LOG.debug(
-            u"successfully processed rescore call for course %(course)s, problem %(loc)s "
+    # specific events from CAPA are not propagated up the stack. Do we want this?
+    try:
+        instance.rescore(only_if_higher=task_input['only_if_higher'])
+    except (LoncapaProblemError, StudentInputError, ResponseError):
+        TASK_LOG.warning(
+            u"error processing rescore call for course %(course)s, problem %(loc)s "
             u"and student %(student)s",
             dict(
                 course=course_id,

@@ -213,44 +204,20 @@ def rescore_problem_module_state(xmodule_instance_args, module_descriptor, stude
                 loc=usage_key,
                 student=student
             )
         )
+        return UPDATE_STATUS_FAILED

-        if result is not None:  # Only for CAPA. This will get moved to the grade handler.
-            new_weighted_earned, new_weighted_possible = weighted_score(
-                result['new_raw_earned'] if result else None,
-                result['new_raw_possible'] if result else None,
-                module_descriptor.weight,
-            )
-            # TODO: remove this context manager after completion of AN-6134
-            context = course_context_from_course_id(course_id)
-            with tracker.get_tracker().context(GRADES_RESCORE_EVENT_TYPE, context):
-                tracker.emit(
-                    unicode(GRADES_RESCORE_EVENT_TYPE),
-                    {
-                        'course_id': unicode(course_id),
-                        'user_id': unicode(student.id),
-                        'problem_id': unicode(usage_key),
-                        'new_weighted_earned': new_weighted_earned,
-                        'new_weighted_possible': new_weighted_possible,
-                        'only_if_higher': task_input['only_if_higher'],
-                        'instructor_id': unicode(xmodule_instance_args['request_info']['user_id']),
-                        'event_transaction_id': unicode(event_transaction_id),
-                        'event_transaction_type': unicode(GRADES_RESCORE_EVENT_TYPE),
-                    }
-                )
-        return UPDATE_STATUS_SUCCEEDED
-    else:
-        TASK_LOG.warning(
-            u"error processing rescore call for course %(course)s, problem %(loc)s "
-            u"and student %(student)s: %(msg)s",
-            dict(
-                msg=result.get('success', result),
-                course=course_id,
-                loc=usage_key,
-                student=student
-            )
-        )
-        return UPDATE_STATUS_FAILED
+    instance.save()
+    TASK_LOG.debug(
+        u"successfully processed rescore call for course %(course)s, problem %(loc)s "
+        u"and student %(student)s",
+        dict(
+            course=course_id,
+            loc=usage_key,
+            student=student
+        )
+    )
+    return UPDATE_STATUS_SUCCEEDED


 @outer_atomic
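After this change the task helper treats rescoring as control flow rather than result inspection: a missing rescore attribute is a programming error, no submission means skip, a known CAPA exception means failure, and anything else is success. A self-contained sketch of that shape (status constants and the error tuple stand in for the real names used in the diff):

    # Rough, self-contained shape of the new rescore flow (status constants
    # and exception types stand in for the real ones named in the diff).
    UPDATE_STATUS_SUCCEEDED, UPDATE_STATUS_SKIPPED, UPDATE_STATUS_FAILED = (
        'succeeded', 'skipped', 'failed')

    def rescore_flow(instance, only_if_higher, rescore_errors=(ValueError,)):
        if not hasattr(instance, 'rescore'):
            # Should have been checked by the caller already.
            raise NotImplementedError("Specified problem does not support rescoring.")
        if not instance.has_submitted_answer():
            return UPDATE_STATUS_SKIPPED
        try:
            instance.rescore(only_if_higher=only_if_higher)
        except rescore_errors:
            return UPDATE_STATUS_FAILED
        instance.save()
        return UPDATE_STATUS_SUCCEEDED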
lms/djangoapps/instructor_task/tests/test_integration.py

@@ -18,6 +18,7 @@ from django.contrib.auth.models import User
 from django.core.urlresolvers import reverse

 from openedx.core.djangoapps.util.testing import TestConditionalContent
+from openedx.core.djangolib.testing.utils import get_mock_request
 from capa.tests.response_xml_factory import (CodeResponseXMLFactory,
                                              CustomResponseXMLFactory)
 from xmodule.modulestore.tests.factories import ItemFactory

@@ -275,7 +276,7 @@ class TestRescoringTask(TestIntegrationTask):
         self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])

         expected_message = "bad things happened"
-        with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
+        with patch('capa.capa_problem.LoncapaProblem.get_grade_from_current_answers') as mock_rescore:
             mock_rescore.side_effect = ZeroDivisionError(expected_message)
             instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)
         self._assert_task_failure(instructor_task.id, 'rescore_problem', problem_url_name, expected_message)

@@ -293,7 +294,7 @@ class TestRescoringTask(TestIntegrationTask):
         # return an input error as if it were a numerical response, with an embedded unicode character:
         expected_message = u"Could not interpret '2/3\u03a9' as a number"
-        with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
+        with patch('capa.capa_problem.LoncapaProblem.get_grade_from_current_answers') as mock_rescore:
             mock_rescore.side_effect = StudentInputError(expected_message)
             instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)
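Both patched tests force a failure inside LoncapaProblem by giving the renamed method a side_effect; the pattern in isolation (run_rescore_task stands in for submitting the instructor task):

    # The side_effect pattern used by both tests, in isolation (run_rescore_task
    # stands in for submitting the instructor task).
    from mock import patch

    def provoke_rescore_failure(run_rescore_task):
        with patch('capa.capa_problem.LoncapaProblem.get_grade_from_current_answers') as mock_rescore:
            # Any rescore reaching the patched method now raises, so the task
            # records a failure instead of a new score.
            mock_rescore.side_effect = ZeroDivisionError("bad things happened")
            return run_rescore_task()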
lms/djangoapps/instructor_task/tests/test_tasks.py

@@ -307,30 +307,21 @@ class TestRescoreInstructorTask(TestInstructorTasks):
             action_name='rescored'
         )

-    @ddt.data(
-        ('rescore', None),
-        ('rescore_problem', {'success': 'correct', 'new_raw_earned': 1, 'new_raw_possible': 1})
-    )
-    @ddt.unpack
-    def test_rescoring_success(self, rescore_method, rescore_result):
+    def test_rescoring_success(self):
         """
         Tests rescores a problem in a course, for all students succeeds.
         """
         mock_instance = MagicMock()
-        other_method = ({'rescore', 'rescore_problem'} - {rescore_method}).pop()
-        getattr(mock_instance, rescore_method).return_value = rescore_result
-        delattr(mock_instance, other_method)
-        if rescore_method == 'rescore':
-            del mock_instance.done
-        else:
-            mock_instance.done = True
+        getattr(mock_instance, 'rescore').return_value = None
+        mock_instance.has_submitted_answer.return_value = True
+        del mock_instance.done  # old CAPA code used to use this value so we delete it here to be sure

         num_students = 10
         self._create_students_with_state(num_students)
         task_entry = self._create_input_entry()
         with patch('lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal') as mock_get_module:
             mock_get_module.return_value = mock_instance
             self._run_task_with_mock_celery(rescore_problem, task_entry.id, task_entry.task_id)

@@ -344,56 +335,6 @@ class TestRescoreInstructorTask(TestInstructorTasks):
             action_name='rescored'
         )

-    def test_rescoring_bad_result(self):
-        """
-        Tests and confirm that rescoring does not succeed if "success" key is not an expected value.
-        """
-        input_state = json.dumps({'done': True})
-        num_students = 10
-        self._create_students_with_state(num_students, input_state)
-        task_entry = self._create_input_entry()
-        mock_instance = Mock()
-        mock_instance.rescore_problem = Mock(return_value={'success': 'bogus'})
-        del mock_instance.rescore
-        with patch('lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal') as mock_get_module:
-            mock_get_module.return_value = mock_instance
-            self._run_task_with_mock_celery(rescore_problem, task_entry.id, task_entry.task_id)
-        self.assert_task_output(
-            output=self.get_task_output(task_entry.id),
-            total=num_students,
-            attempted=num_students,
-            succeeded=0,
-            skipped=0,
-            failed=num_students,
-            action_name='rescored'
-        )
-
-    def test_rescoring_missing_result(self):
-        """
-        Tests and confirm that rescoring does not succeed if "success" key is not returned.
-        """
-        input_state = json.dumps({'done': True})
-        num_students = 10
-        self._create_students_with_state(num_students, input_state)
-        task_entry = self._create_input_entry()
-        mock_instance = Mock()
-        mock_instance.rescore_problem = Mock(return_value={'bogus': 'value'})
-        del mock_instance.rescore
-        with patch('lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal') as mock_get_module:
-            mock_get_module.return_value = mock_instance
-            self._run_task_with_mock_celery(rescore_problem, task_entry.id, task_entry.task_id)
-        self.assert_task_output(
-            output=self.get_task_output(task_entry.id),
-            total=num_students,
-            attempted=num_students,
-            succeeded=0,
-            skipped=0,
-            failed=num_students,
-            action_name='rescored'
-        )
-

 @attr(shard=3)
 class TestResetAttemptsInstructorTask(TestInstructorTasks):
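One detail worth noting in test_rescoring_success: deleting an attribute on a Mock makes later access raise AttributeError, so hasattr(instance, 'done') is genuinely False and the old done-based code path cannot run. For example:

    # Deleting an attribute on a Mock/MagicMock makes hasattr() return False,
    # which is how the test keeps the old `done`-based branch from running.
    from mock import MagicMock

    mock_instance = MagicMock()
    mock_instance.rescore.return_value = None
    mock_instance.has_submitted_answer.return_value = True
    del mock_instance.done

    assert not hasattr(mock_instance, 'done')
    assert mock_instance.has_submitted_answer() is True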