edx / edx-platform

Commit 00abaffe, authored Apr 04, 2013 by Mark L. Chang

Merge remote-tracking branch 'origin/master' into feature/markchang/studio-analytics

Parents: db1c0bf0, 1b7e552d

Showing 6 changed files with 139 additions and 105 deletions:
common/lib/capa/capa/inputtypes.py                                    +3   -3
common/lib/capa/capa/tests/test_inputtypes.py                         +62  -1
common/lib/xmodule/xmodule/capa_module.py                             +9   -15
common/lib/xmodule/xmodule/tests/test_capa_module.py                  +38  -70
lms/djangoapps/courseware/management/commands/remove_input_state.py   +5   -0
lms/djangoapps/psychometrics/psychoanalyze.py                         +22  -16
common/lib/capa/capa/inputtypes.py

@@ -655,9 +655,9 @@ class MatlabInput(CodeInput):
         # Check if problem has been queued
         self.queuename = 'matlab'
         self.queue_msg = ''
         if 'queue_msg' in self.input_state and self.status in ['queued', 'incomplete', 'unsubmitted']:
             self.queue_msg = self.input_state['queue_msg']
-        if 'queued' in self.input_state and self.input_state['queuestate'] is not None:
+        if 'queuestate' in self.input_state and self.input_state['queuestate'] == 'queued':
             self.status = 'queued'
             self.queue_len = 1
             self.msg = self.plot_submitted_msg

@@ -702,7 +702,7 @@ class MatlabInput(CodeInput):
     def _extra_context(self):
         ''' Set up additional context variables'''
-        extra_context = {'queue_len': self.queue_len,
+        extra_context = {'queue_len': str(self.queue_len),
                          'queue_msg': self.queue_msg}
         return extra_context
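The change above keys the "queued" rendering path off an explicit 'queuestate' value instead of a mere non-None check, and hands queue_len to the template as a string. A minimal standalone sketch of the new check (illustrative only, not the real MatlabInput class):

    def is_plot_queued(input_state):
        # Treat the input as queued only when input_state explicitly records it.
        return 'queuestate' in input_state and input_state['queuestate'] == 'queued'

    print(is_plot_queued({'queuestate': 'queued'}))  # True
    print(is_plot_queued({'queuestate': None}))      # False
    print(is_plot_queued({}))                        # False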
common/lib/capa/capa/tests/test_inputtypes.py

@@ -361,7 +361,6 @@ class MatlabTest(unittest.TestCase):
                  'feedback': {'message': '3'}, }
         elt = etree.fromstring(self.xml)
-        input_class = lookup_tag('matlabinput')
         the_input = self.input_class(test_system, elt, state)
         context = the_input._get_render_context()

@@ -381,6 +380,31 @@ class MatlabTest(unittest.TestCase):
         self.assertEqual(context, expected)

+    def test_rendering_while_queued(self):
+        state = {'value': 'print "good evening"',
+                 'status': 'incomplete',
+                 'input_state': {'queuestate': 'queued'}, }
+        elt = etree.fromstring(self.xml)
+
+        the_input = self.input_class(test_system, elt, state)
+        context = the_input._get_render_context()
+        expected = {'id': 'prob_1_2',
+                    'value': 'print "good evening"',
+                    'status': 'queued',
+                    'msg': self.input_class.plot_submitted_msg,
+                    'mode': self.mode,
+                    'rows': self.rows,
+                    'cols': self.cols,
+                    'queue_msg': '',
+                    'linenumbers': 'true',
+                    'hidden': '',
+                    'tabsize': int(self.tabsize),
+                    'queue_len': '1', }
+        self.assertEqual(context, expected)
+
     def test_plot_data(self):
         get = {'submission': 'x = 1234;'}
         response = self.the_input.handle_ajax("plot", get)

@@ -391,6 +415,43 @@ class MatlabTest(unittest.TestCase):
         self.assertTrue(self.the_input.input_state['queuekey'] is not None)
         self.assertEqual(self.the_input.input_state['queuestate'], 'queued')

+    def test_ungraded_response_success(self):
+        queuekey = 'abcd'
+        input_state = {'queuekey': queuekey, 'queuestate': 'queued'}
+        state = {'value': 'print "good evening"',
+                 'status': 'incomplete',
+                 'input_state': input_state,
+                 'feedback': {'message': '3'}, }
+        elt = etree.fromstring(self.xml)
+
+        the_input = self.input_class(test_system, elt, state)
+        inner_msg = 'hello!'
+        queue_msg = json.dumps({'msg': inner_msg})
+
+        the_input.ungraded_response(queue_msg, queuekey)
+        self.assertTrue(input_state['queuekey'] is None)
+        self.assertTrue(input_state['queuestate'] is None)
+        self.assertEqual(input_state['queue_msg'], inner_msg)
+
+    def test_ungraded_response_key_mismatch(self):
+        queuekey = 'abcd'
+        input_state = {'queuekey': queuekey, 'queuestate': 'queued'}
+        state = {'value': 'print "good evening"',
+                 'status': 'incomplete',
+                 'input_state': input_state,
+                 'feedback': {'message': '3'}, }
+        elt = etree.fromstring(self.xml)
+
+        the_input = self.input_class(test_system, elt, state)
+        inner_msg = 'hello!'
+        queue_msg = json.dumps({'msg': inner_msg})
+
+        the_input.ungraded_response(queue_msg, 'abc')
+        self.assertEqual(input_state['queuekey'], queuekey)
+        self.assertEqual(input_state['queuestate'], 'queued')
+        self.assertFalse('queue_msg' in input_state)
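The two new ungraded_response tests pin down the contract for queue replies: when the reply's queuekey matches the stored one, the inner message is recorded and the queue fields are cleared; a mismatched key leaves input_state untouched. A rough standalone sketch of that behaviour (a hypothetical helper, not the real MatlabInput.ungraded_response):

    import json

    def apply_ungraded_response(input_state, queue_msg, queuekey):
        # Hypothetical helper mirroring what the tests assert, not the real method.
        if input_state.get('queuekey') != queuekey:
            return  # mismatched key: leave the stored state untouched
        input_state['queue_msg'] = json.loads(queue_msg)['msg']
        input_state['queuekey'] = None
        input_state['queuestate'] = None

    state = {'queuekey': 'abcd', 'queuestate': 'queued'}
    apply_ungraded_response(state, json.dumps({'msg': 'hello!'}), 'abcd')
    assert state == {'queuekey': None, 'queuestate': None, 'queue_msg': 'hello!'}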
common/lib/xmodule/xmodule/capa_module.py

@@ -108,11 +108,10 @@ class CapaModule(CapaFields, XModule):
     '''
     icon_class = 'problem'

     js = {'coffee': [resource_string(__name__, 'js/src/capa/display.coffee'),
                      resource_string(__name__, 'js/src/collapsible.coffee'),
                      resource_string(__name__, 'js/src/javascript_loader.coffee'),
                      ],
           'js': [resource_string(__name__, 'js/src/capa/imageinput.js'),
                  resource_string(__name__, 'js/src/capa/schematic.js')
                  ]}

@@ -367,11 +366,11 @@ class CapaModule(CapaFields, XModule):
         self.set_state_from_lcp()

         # Prepend a scary warning to the student
         warning = '<div class="capa_reset">'\
                   '<h2>Warning: The problem has been reset to its initial state!</h2>'\
                   'The problem\'s state was corrupted by an invalid submission. '\
                   'The submission consisted of:'\
                   '<ul>'
         for student_answer in student_answers.values():
             if student_answer != '':
                 warning += '<li>' + cgi.escape(student_answer) + '</li>'

@@ -388,7 +387,6 @@ class CapaModule(CapaFields, XModule):
         return html

     def get_problem_html(self, encapsulate=True):
         '''Return html for the problem. Adds check, reset, save buttons
         as necessary based on the problem config and state.'''

@@ -401,7 +399,6 @@ class CapaModule(CapaFields, XModule):
         except Exception, err:
             html = self.handle_problem_html_error(err)

         # The convention is to pass the name of the check button
         # if we want to show a check button, and False otherwise
         # This works because non-empty strings evaluate to True

@@ -454,7 +451,7 @@ class CapaModule(CapaFields, XModule):
                     'score_update': self.update_score,
                     'input_ajax': self.handle_input_ajax,
                     'ungraded_response': self.handle_ungraded_response
                     }

         if dispatch not in handlers:
             return 'Error'

@@ -472,7 +469,7 @@ class CapaModule(CapaFields, XModule):
         d.update({
             'progress_changed': after != before,
             'progress_status': Progress.to_js_status_str(after),
         })
         return json.dumps(d, cls=ComplexEncoder)

     def is_past_due(self):

@@ -535,7 +532,6 @@ class CapaModule(CapaFields, XModule):
         return False

     def update_score(self, get):
         """
         Delivers grading response (e.g. from asynchronous code checking) to

@@ -590,7 +586,6 @@ class CapaModule(CapaFields, XModule):
         self.set_state_from_lcp()

         return response

     def get_answer(self, get):
         '''
         For the "show answer" button.

@@ -700,7 +695,6 @@ class CapaModule(CapaFields, XModule):
                               'max_value': score['total'],
                               })

     def check_problem(self, get):
         ''' Checks whether answers to a problem are correct, and
             returns a map of correct/incorrect answers:

@@ -783,7 +777,7 @@ class CapaModule(CapaFields, XModule):
         self.system.track_function('save_problem_check', event_info)

         if hasattr(self.system, 'psychometrics_handler'):  # update PsychometricsData using callback
-            self.system.psychometrics_handler(self.get_instance_state())
+            self.system.psychometrics_handler(self.get_state_for_lcp())

         # render problem into HTML
         html = self.get_problem_html(encapsulate=False)
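Most of these hunks are blank-line and whitespace cleanup; the substantive change is that the psychometrics callback now receives self.get_state_for_lcp() instead of self.get_instance_state(). The handlers hunk also shows CapaModule's AJAX dispatch pattern, sketched below with illustrative names (a stand-in class, not the real module):

    import json

    class TinyDispatcher(object):
        # Illustrative stand-in for CapaModule.handle_ajax, not the real class.
        def update_score(self, get):
            return {'success': True}

        def handle_ajax(self, dispatch, get):
            handlers = {'score_update': self.update_score}
            if dispatch not in handlers:
                return 'Error'
            return json.dumps(handlers[dispatch](get))

    print(TinyDispatcher().handle_ajax('score_update', {}))  # {"success": true}
    print(TinyDispatcher().handle_ajax('bogus', {}))         # Error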
common/lib/xmodule/xmodule/tests/test_capa_module.py

@@ -35,6 +35,7 @@ class CapaFactory(object):
     """
     num = 0

+    @staticmethod
     def next_num():
         CapaFactory.num += 1

@@ -49,7 +50,7 @@ class CapaFactory(object):
     def answer_key():
         """ Return the key stored in the capa problem answer dict """
         return ("-".join(['i4x', 'edX', 'capa_test', 'problem',
                           'SampleProblem%d' % CapaFactory.num]) +
                 "_2_1")

     @staticmethod

@@ -120,7 +121,6 @@ class CapaFactory(object):
         return module


 class CapaModuleTest(unittest.TestCase):

     def setUp(self):

@@ -142,9 +142,6 @@ class CapaModuleTest(unittest.TestCase):
         self.assertNotEqual(module.url_name, other_module.url_name,
                             "Factory should be creating unique names for each problem")

     def test_correct(self):
         """
         Check that the factory creates correct and incorrect problems properly.

@@ -155,7 +152,6 @@ class CapaModuleTest(unittest.TestCase):
         other_module = CapaFactory.create(correct=True)
         self.assertEqual(other_module.get_score()['score'], 1)

     def test_showanswer_default(self):
         """
         Make sure the show answer logic does the right thing.

@@ -165,14 +161,12 @@ class CapaModuleTest(unittest.TestCase):
         problem = CapaFactory.create()
         self.assertFalse(problem.answer_available())

     def test_showanswer_attempted(self):
         problem = CapaFactory.create(showanswer='attempted')
         self.assertFalse(problem.answer_available())
         problem.attempts = 1
         self.assertTrue(problem.answer_available())

     def test_showanswer_closed(self):
         # can see after attempts used up, even with due date in the future

@@ -182,21 +176,19 @@ class CapaModuleTest(unittest.TestCase):
                                               due=self.tomorrow_str)
         self.assertTrue(used_all_attempts.answer_available())

         # can see after due date
         after_due_date = CapaFactory.create(showanswer='closed',
                                             max_attempts="1", attempts="0",
                                             due=self.yesterday_str)
         self.assertTrue(after_due_date.answer_available())

         # can't see because attempts left
         attempts_left_open = CapaFactory.create(showanswer='closed',
                                                 max_attempts="1", attempts="0",
                                                 due=self.tomorrow_str)
         self.assertFalse(attempts_left_open.answer_available())

         # Can't see because grace period hasn't expired

@@ -207,8 +199,6 @@ class CapaModuleTest(unittest.TestCase):
                                            graceperiod=self.two_day_delta_str)
         self.assertFalse(still_in_grace.answer_available())

     def test_showanswer_past_due(self):
         """
         With showanswer="past_due" should only show answer after the problem is closed

@@ -222,20 +212,18 @@ class CapaModuleTest(unittest.TestCase):
                                               due=self.tomorrow_str)
         self.assertFalse(used_all_attempts.answer_available())

         # can see after due date
         past_due_date = CapaFactory.create(showanswer='past_due',
                                            max_attempts="1", attempts="0",
                                            due=self.yesterday_str)
         self.assertTrue(past_due_date.answer_available())

         # can't see because attempts left
         attempts_left_open = CapaFactory.create(showanswer='past_due',
                                                 max_attempts="1", attempts="0",
                                                 due=self.tomorrow_str)
         self.assertFalse(attempts_left_open.answer_available())

         # Can't see because grace period hasn't expired, even though have no more

@@ -260,31 +248,28 @@ class CapaModuleTest(unittest.TestCase):
                                               due=self.tomorrow_str)
         self.assertTrue(used_all_attempts.answer_available())

         # can see after due date
         past_due_date = CapaFactory.create(showanswer='finished',
                                            max_attempts="1", attempts="0",
                                            due=self.yesterday_str)
         self.assertTrue(past_due_date.answer_available())

         # can't see because attempts left and wrong
         attempts_left_open = CapaFactory.create(showanswer='finished',
                                                 max_attempts="1", attempts="0",
                                                 due=self.tomorrow_str)
         self.assertFalse(attempts_left_open.answer_available())

         # _can_ see because attempts left and right
         correct_ans = CapaFactory.create(showanswer='finished',
                                          max_attempts="1", attempts="0",
                                          due=self.tomorrow_str,
                                          correct=True)
         self.assertTrue(correct_ans.answer_available())

         # Can see even though grace period hasn't expired, because have no more
         # attempts.
         still_in_grace = CapaFactory.create(showanswer='finished',

@@ -294,7 +279,6 @@ class CapaModuleTest(unittest.TestCase):
                                            graceperiod=self.two_day_delta_str)
         self.assertTrue(still_in_grace.answer_available())

     def test_closed(self):
         # Attempts < Max attempts --> NOT closed

@@ -322,7 +306,6 @@ class CapaModuleTest(unittest.TestCase):
                                     due=self.yesterday_str)
         self.assertTrue(module.closed())

     def test_parse_get_params(self):
         # We have to set up Django settings in order to use QueryDict

@@ -348,7 +331,6 @@ class CapaModuleTest(unittest.TestCase):
                             "Output dict should have key %s" % original_key)
             self.assertEqual(valid_get_dict[original_key], result[key])

         # Valid GET param dict with list keys
         valid_get_dict = self._querydict_from_dict({'input_2[]': ['test1', 'test2']})
         result = CapaModule.make_dict_of_responses(valid_get_dict)

@@ -366,12 +348,11 @@ class CapaModuleTest(unittest.TestCase):
         with self.assertRaises(ValueError):
             result = CapaModule.make_dict_of_responses(invalid_get_dict)

         # Two equivalent names (one list, one non-list)
         # One of the values would overwrite the other, so detect this
         # and raise an exception
         invalid_get_dict = self._querydict_from_dict({'input_1[]': 'test 1',
                                                       'input_1': 'test 2'})
         with self.assertRaises(ValueError):
             result = CapaModule.make_dict_of_responses(invalid_get_dict)

@@ -395,7 +376,6 @@ class CapaModuleTest(unittest.TestCase):
         return copyDict

     def test_check_problem_correct(self):
         module = CapaFactory.create(attempts=1)

@@ -403,6 +383,7 @@ class CapaModuleTest(unittest.TestCase):
         # Simulate that all answers are marked correct, no matter
         # what the input is, by patching CorrectMap.is_correct()
         # Also simulate rendering the HTML
+        # TODO: pep8 thinks the following line has invalid syntax
         with patch('capa.correctmap.CorrectMap.is_correct') as mock_is_correct, \
              patch('xmodule.capa_module.CapaModule.get_problem_html') as mock_html:
             mock_is_correct.return_value = True

@@ -439,7 +420,6 @@ class CapaModuleTest(unittest.TestCase):
         # Expect that the number of attempts is incremented by 1
         self.assertEqual(module.attempts, 1)

     def test_check_problem_closed(self):
         module = CapaFactory.create(attempts=3)

@@ -503,12 +483,11 @@ class CapaModuleTest(unittest.TestCase):
         # Expect that the number of attempts is NOT incremented
         self.assertEqual(module.attempts, 1)

     def test_check_problem_error(self):
         # Try each exception that capa_module should handle
         for exception_class in [StudentInputError, LoncapaProblemError,
                                 ResponseError]:

             # Create the module

@@ -532,9 +511,9 @@ class CapaModuleTest(unittest.TestCase):
             self.assertEqual(module.attempts, 1)

     def test_check_problem_error_with_staff_user(self):
         # Try each exception that capa module should handle
         for exception_class in [StudentInputError,
                                 LoncapaProblemError,
                                 ResponseError]:

@@ -560,7 +539,6 @@ class CapaModuleTest(unittest.TestCase):
         # Expect that the number of attempts is NOT incremented
         self.assertEqual(module.attempts, 1)

     def test_reset_problem(self):
         module = CapaFactory.create(done=True)
         module.new_lcp = Mock(wraps=module.new_lcp)

@@ -583,7 +561,6 @@ class CapaModuleTest(unittest.TestCase):
         # Expect that the problem was reset
         module.new_lcp.assert_called_once_with({'seed': None})

     def test_reset_problem_closed(self):
         module = CapaFactory.create()

@@ -598,7 +575,6 @@ class CapaModuleTest(unittest.TestCase):
         # Expect that the problem was NOT reset
         self.assertTrue('success' in result and not result['success'])

     def test_reset_problem_not_done(self):
         # Simulate that the problem is NOT done
         module = CapaFactory.create(done=False)

@@ -610,7 +586,6 @@ class CapaModuleTest(unittest.TestCase):
         # Expect that the problem was NOT reset
         self.assertTrue('success' in result and not result['success'])

     def test_save_problem(self):
         module = CapaFactory.create(done=False)

@@ -625,7 +600,6 @@ class CapaModuleTest(unittest.TestCase):
         # Expect that the result is success
         self.assertTrue('success' in result and result['success'])

     def test_save_problem_closed(self):
         module = CapaFactory.create(done=False)

@@ -640,7 +614,6 @@ class CapaModuleTest(unittest.TestCase):
         # Expect that the result is failure
         self.assertTrue('success' in result and not result['success'])

     def test_save_problem_submitted_with_randomize(self):
         module = CapaFactory.create(rerandomize='always', done=True)

@@ -651,7 +624,6 @@ class CapaModuleTest(unittest.TestCase):
         # Expect that we cannot save
         self.assertTrue('success' in result and not result['success'])

     def test_save_problem_submitted_no_randomize(self):
         module = CapaFactory.create(rerandomize='never', done=True)

@@ -724,7 +696,6 @@ class CapaModuleTest(unittest.TestCase):
         module = CapaFactory.create(rerandomize="never", done=True)
         self.assertTrue(module.should_show_check_button())

     def test_should_show_reset_button(self):
         attempts = random.randint(1, 10)

@@ -755,7 +726,6 @@ class CapaModuleTest(unittest.TestCase):
         module = CapaFactory.create(max_attempts=0, done=True)
         self.assertTrue(module.should_show_reset_button())

     def test_should_show_save_button(self):
         attempts = random.randint(1, 10)

@@ -823,7 +793,6 @@ class CapaModuleTest(unittest.TestCase):
         html = module.get_problem_html()

         # assert that we got here without exploding

     def test_get_problem_html(self):
         module = CapaFactory.create()

@@ -869,7 +838,6 @@ class CapaModuleTest(unittest.TestCase):
         # Assert that the encapsulated html contains the original html
         self.assertTrue(html in html_encapsulated)

     def test_get_problem_html_error(self):
         """
         In production, when an error occurs with the problem HTML

@@ -902,7 +870,6 @@ class CapaModuleTest(unittest.TestCase):
         # Expect that the module has created a new dummy problem with the error
         self.assertNotEqual(original_problem, module.lcp)

     def test_random_seed_no_change(self):
         # Run the test for each possible rerandomize value

@@ -920,10 +887,10 @@ class CapaModuleTest(unittest.TestCase):
             self.assertEqual(seed, 1)

             # Check the problem
             get_request_dict = {CapaFactory.input_key(): '3.14'}
             module.check_problem(get_request_dict)

             # Expect that the seed is the same
             self.assertEqual(seed, module.seed)

             # Save the problem

@@ -933,7 +900,7 @@ class CapaModuleTest(unittest.TestCase):
             self.assertEqual(seed, module.seed)

     def test_random_seed_with_reset(self):

         def _reset_and_get_seed(module):
             '''
             Reset the XModule and return the module's seed

@@ -956,7 +923,7 @@ class CapaModuleTest(unittest.TestCase):
             Returns True if *test_func* was successful
             (returned True) within *num_tries* attempts

             *test_func* must be a function
             of the form test_func() -> bool
             '''
             success = False

@@ -989,9 +956,10 @@ class CapaModuleTest(unittest.TestCase):
             # Since there's a small chance we might get the
             # same seed again, give it 5 chances
             # to generate a different seed
             success = _retry_and_check(5, lambda: _reset_and_get_seed(module) != seed)

+            # TODO: change this comparison to module.seed is not None?
             self.assertTrue(module.seed != None)
             msg = 'Could not get a new seed from reset after 5 tries'
             self.assertTrue(success, msg)
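The seed tests above retry a nondeterministic check through a local _retry_and_check helper plus a lambda. A self-contained sketch of that retry idiom (an assumed equivalent, not the test's actual helper):

    import random

    def retry_and_check(num_tries, test_func):
        # Re-run test_func up to num_tries times; report True as soon as it succeeds once.
        for _ in range(num_tries):
            if test_func():
                return True
        return False

    seed = 1
    print(retry_and_check(5, lambda: random.randint(1, 1000) != seed))  # almost always True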
lms/djangoapps/courseware/management/commands/remove_input_state.py

@@ -76,6 +76,11 @@ class Command(BaseCommand):
             for hist_module in hist_modules:
                 self.remove_studentmodulehistory_input_state(hist_module, save_changes)

+            if self.num_visited % 1000 == 0:
+                LOG.info(" Progress: updated {0} of {1} student modules"
+                         .format(self.num_changed, self.num_visited))
+                LOG.info(" Progress: updated {0} of {1} student history modules"
+                         .format(self.num_hist_changed, self.num_hist_visited))

     @transaction.autocommit
     def remove_studentmodule_input_state(self, module, save_changes):
         ''' Fix the grade assigned to a StudentModule'''
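The added block throttles progress logging to one line per 1000 records visited, using str.format. A minimal runnable sketch of the same idiom outside Django (the loop and counters are stand-ins, not the real management command):

    import logging

    logging.basicConfig(level=logging.INFO)
    LOG = logging.getLogger('remove_input_state_sketch')

    num_visited = 0
    num_changed = 0
    for record in range(3500):        # stand-in for iterating over StudentModule rows
        num_visited += 1
        num_changed += 1              # pretend every row needed cleanup
        if num_visited % 1000 == 0:
            LOG.info(" Progress: updated {0} of {1} student modules".format(num_changed, num_visited))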
lms/djangoapps/psychometrics/psychoanalyze.py

@@ -15,7 +15,6 @@ from scipy.optimize import curve_fit
 from django.conf import settings
 from django.db.models import Sum, Max
 from psychometrics.models import *
 from xmodule.modulestore import Location

 log = logging.getLogger("mitx.psychometrics")

@@ -246,13 +245,16 @@ def generate_plots_for_problem(problem):
         yset['ydat'] = ydat

         if len(ydat) > 3:  # try to fit to logistic function if enough data points
-            cfp = curve_fit(func_2pl, xdat, ydat, [1.0, max_attempts / 2.0])
-            yset['fitparam'] = cfp
-            yset['fitpts'] = func_2pl(np.array(xdat), *cfp[0])
-            yset['fiterr'] = [yd - yf for (yd, yf) in zip(ydat, yset['fitpts'])]
-            fitx = np.linspace(xdat[0], xdat[-1], 100)
-            yset['fitx'] = fitx
-            yset['fity'] = func_2pl(np.array(fitx), *cfp[0])
+            try:
+                cfp = curve_fit(func_2pl, xdat, ydat, [1.0, max_attempts / 2.0])
+                yset['fitparam'] = cfp
+                yset['fitpts'] = func_2pl(np.array(xdat), *cfp[0])
+                yset['fiterr'] = [yd - yf for (yd, yf) in zip(ydat, yset['fitpts'])]
+                fitx = np.linspace(xdat[0], xdat[-1], 100)
+                yset['fitx'] = fitx
+                yset['fity'] = func_2pl(np.array(fitx), *cfp[0])
+            except Exception as err:
+                log.debug('Error in psychoanalyze curve fitting: %s' % err)

         dataset['grade_%d' % grade] = yset

@@ -289,7 +291,7 @@ def generate_plots_for_problem(problem):
                       'info': '',
                       'data': jsdata,
                       'cmd': '[%s], %s' % (','.join(jsplots), axisopts),
                       })

     #log.debug('plots = %s' % plots)
     return msg, plots

@@ -302,12 +304,12 @@ def make_psychometrics_data_update_handler(course_id, user, module_state_key):
     Construct and return a procedure which may be called to update
     the PsychometricsData instance for the given StudentModule instance.
     """
-    sm = studentmodule.objects.get_or_create(course_id=course_id,
-                                             student=user,
-                                             module_state_key=module_state_key,
-                                             defaults={'state': '{}', 'module_type': 'problem'},
-                                             )
+    sm, status = StudentModule.objects.get_or_create(course_id=course_id,
+                                                     student=user,
+                                                     module_state_key=module_state_key,
+                                                     defaults={'state': '{}', 'module_type': 'problem'},
+                                                     )

     try:
         pmd = PsychometricData.objects.using(db).get(studentmodule=sm)

@@ -329,7 +331,11 @@ def make_psychometrics_data_update_handler(course_id, user, module_state_key):
             return

         pmd.done = done
-        pmd.attempts = state['attempts']
+        try:
+            pmd.attempts = state.get('attempts', 0)
+        except:
+            log.exception("no attempts for %s (state=%s)" % (sm, sm.state))

         try:
             checktimes = eval(pmd.checktimes)  # update log of attempt timestamps
         except:
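Two things change in this file: the logistic curve fit is wrapped in try/except so a failed fit only logs a debug message instead of aborting plot generation, and the StudentModule lookup now unpacks the (object, created) tuple that Django's get_or_create returns. A hedged sketch of the guarded fit; func_2pl here is assumed to be a two-parameter logistic, while the real definition lives elsewhere in psychoanalyze.py:

    import numpy as np
    from scipy.optimize import curve_fit

    def func_2pl(x, a, b):
        # Assumed two-parameter logistic, standing in for the real func_2pl.
        return 1.0 / (1.0 + np.exp(-a * (x - b)))

    xdat = np.arange(1.0, 8.0)            # attempt numbers 1..7
    ydat = func_2pl(xdat, 1.2, 3.0)       # synthetic "fraction correct" data

    try:
        cfp = curve_fit(func_2pl, xdat, ydat, [1.0, 4.0])  # [1.0, 4.0] is the initial guess
        fitpts = func_2pl(np.array(xdat), *cfp[0])         # cfp[0] holds the fitted (a, b)
    except Exception as err:
        print('Error in curve fitting: %s' % err)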