edx / edx-platform · Commits

Commit 33abe54e
Authored Feb 14, 2013 by Ned Batchelder
Parent: 0a6761c9

Work in progress to sandbox the uses of eval in LMS.

Showing 5 changed files with 147 additions and 56 deletions (+147 -56):
common/lib/capa/capa/capa_problem.py                     +33  -11
common/lib/capa/capa/responsetypes.py                    +27  -39
common/lib/codejail/codejail/safe_exec.py                 +5   -0
common/test/data/embedded_python/course/2013_Spring.xml  +51   -3
lms/djangoapps/courseware/tests/tests.py                 +31   -3
common/lib/capa/capa/capa_problem.py

@@ -22,7 +22,6 @@ import numpy
 import os
 import random
 import re
-import scipy
 import struct
 import sys

@@ -30,6 +29,7 @@ from lxml import etree
 from xml.sax.saxutils import unescape
 from copy import deepcopy
+<<<<<<< HEAD
 import chem
 import chem.miller
 import chem.chemcalc

@@ -38,8 +38,9 @@ import verifiers
 import verifiers.draganddrop
 import calc
+=======
+>>>>>>> Work in progress to sandbox the uses of eval in LMS.
 from .correctmap import CorrectMap
-import eia
 import inputtypes
 import customrender
 from .util import contextualize_text, convert_files_to_filenames

@@ -48,6 +49,8 @@ import xqueue_interface
 # to be replaced with auto-registering
 import responsetypes
+from codejail.safe_exec import safe_exec
+

 # dict of tagname, Response Class -- this should come from auto-registering
 response_tag_dict = dict([(x.response_tag, x) for x in responsetypes.__all__])

@@ -63,6 +66,7 @@ html_transforms = {'problem': {'tag': 'div'},
                    "math": {'tag': 'span'},
                    }
+<<<<<<< HEAD
 global_context = {'random': random,
                   'numpy': numpy,
                   'math': math,

@@ -73,6 +77,20 @@ global_context = {'random': random,
                   'chemtools': chem.chemtools,
                   'miller': chem.miller,
                   'draganddrop': verifiers.draganddrop}
+=======
+safe_exec_assumed_imports = [
+    "random",
+    "numpy",
+    "math",
+    "scipy",
+    "calc",
+    "eia",
+    ("chemcalc", "chem.chemcalc"),
+    ("chemtools", "chem.chemtools"),
+    ("miller", "chem.miller"),
+    ("draganddrop", "verifiers.draganddrop"),
+]
+>>>>>>> Work in progress to sandbox the uses of eval in LMS.

 # These should be removed from HTML output, including all subelements
 html_problem_semantics = ["codeparam", "responseparam", "answer", "script", "hintgroup", "openendedparam", "openendedrubric"]

@@ -144,7 +162,7 @@ class LoncapaProblem(object):
         self._process_includes()

         # construct script processor context (eg for customresponse problems)
-        self.context = self._extract_context(self.tree, seed=self.seed)
+        self.context = self._extract_context(self.tree)

         # Pre-parse the XML tree: modifies it to add ID's and perform some in-place
         # transformations. This also creates the dict (self.responders) of Response

@@ -451,7 +469,7 @@ class LoncapaProblem(object):
         return path

-    def _extract_context(self, tree, seed=struct.unpack('i', os.urandom(4))[0]):  # private
+    def _extract_context(self, tree):
         '''
         Extract content of <script>...</script> from the problem.xml file, and exec it in the
         context of this problem. Provides ability to randomize problems, and also set

@@ -460,14 +478,18 @@ class LoncapaProblem(object):
         Problem XML goes to Python execution context. Runs everything in script tags.
         '''
         random.seed(self.seed)
-        # save global context in here also
-        context = {'global_context': global_context}
-
-        # initialize context to have stuff in global_context
-        context.update(global_context)
+        # TODO: REMOVE THIS COMMENTED OUT CODE.
+        ## save global context in here also
+        #context = {'global_context': global_context}
+        #
+        ## initialize context to have stuff in global_context
+        #context.update(global_context)
+        #

         # put globals there also
-        context['__builtins__'] = globals()['__builtins__']
+        #context['__builtins__'] = globals()['__builtins__']
+        context = {}

         # pass instance of LoncapaProblem in
         context['the_lcp'] = self

@@ -501,7 +523,7 @@ class LoncapaProblem(object):
                 context['script_code'] += code
                 try:
                     # use "context" for global context; thus defs in code are global within code
-                    exec code in context, context
+                    safe_exec(code, context, future_division=True, assumed_imports=safe_exec_assumed_imports)
                 except Exception as err:
                     log.exception("Error while execing script code: " + code)
                     msg = "Error while executing script code: %s" % str(err).replace('<', '&lt;')
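Note: the net effect of the capa_problem.py changes is that problem `<script>` code no longer runs via a bare `exec` against a context pre-seeded with the `global_context` modules; it is handed to `safe_exec` with an empty context plus a list of module names the sandbox should make importable. A self-contained sketch of that flow, with plain `exec` standing in for the codejail sandbox (`safe_exec_standin` is an illustrative name, not from the commit, and the import list is abbreviated):

    # Stand-in for codejail's safe_exec: same process, but the script runs
    # against a namespace we control, with its imports supplied explicitly.
    def safe_exec_standin(code, context, assumed_imports=None):
        header = "".join("import %s\n" % m for m in (assumed_imports or []))
        exec(header + code, context)

    context = {'script_code': ''}
    script = "x = random.randint(0, 10) + int(math.pi)"
    safe_exec_standin(script, context, assumed_imports=["random", "math"])
    print(context['x'])  # the script's definitions and results land in `context`, as before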
common/lib/capa/capa/responsetypes.py

@@ -37,6 +37,8 @@ from lxml import etree
 from lxml.html.soupparser import fromstring as fromstring_bs     # uses Beautiful Soup!!! FIXME?
 import xqueue_interface
+from codejail.safe_exec import safe_exec
+

 log = logging.getLogger(__name__)

@@ -968,14 +970,20 @@ def sympy_check2():
         cfn = xml.get('cfn')
         if cfn:
             log.debug("cfn = %s" % cfn)
-            if cfn in self.context:
-                self.code = self.context[cfn]
-            else:
-                msg = "%s: can't find cfn %s in context" % (
-                    unicode(self), cfn)
-                msg += "\nSee XML source line %s" % getattr(self.xml, 'sourceline',
-                                                            '<unavailable>')
-                raise LoncapaProblemError(msg)
+
+            def make_check_function(script_code, cfn):
+                def check_function(expect, ans):
+                    code = (script_code + "\n" +
+                            "cfn_return = %s(expect, ans)\n" % cfn)
+                    globals_dict = {
+                        'expect': expect,
+                        'ans': ans,
+                    }
+                    safe_exec(code, globals_dict)
+                    return globals_dict['cfn_return']
+                return check_function
+
+            self.code = make_check_function(self.context['script_code'], cfn)

         if not self.code:
             if answer is None:

@@ -1074,6 +1082,7 @@ def sympy_check2():
         # exec the check function
         if isinstance(self.code, basestring):
             try:
+                raise Exception("exec 1")
                 exec self.code in self.context['global_context'], self.context
                 correct = self.context['correct']
                 messages = self.context['messages']

@@ -1083,32 +1092,15 @@ def sympy_check2():
                 self._handle_exec_exception(err)
         else:
-            # self.code is not a string; assume its a function
+            # self.code is not a string; it's a function we created earlier.
             # this is an interface to the Tutor2 check functions
             fn = self.code
             ret = None
             log.debug(" submission = %s" % submission)
             try:
                 answer_given = submission[0] if (len(idset) == 1) else submission
-
-                # handle variable number of arguments in check function, for backwards compatibility
-                # with various Tutor2 check functions
-                args = [self.expect, answer_given, student_answers, self.answer_ids[0]]
-                argspec = inspect.getargspec(fn)
-                nargs = len(argspec.args) - len(argspec.defaults or [])
-                kwargs = {}
-                for argname in argspec.args[nargs:]:
-                    kwargs[argname] = self.context[argname] if argname in self.context else None
-
-                log.debug('[customresponse] answer_given=%s' % answer_given)
-                log.debug('nargs=%d, args=%s, kwargs=%s' % (nargs, args, kwargs))
-
-                ret = fn(*args[:nargs], **kwargs)
+                ret = fn(self.expect, answer_given)
             except Exception as err:
                 self._handle_exec_exception(err)

@@ -1265,6 +1257,7 @@ class SymbolicResponse(CustomResponse):
     def setup_response(self):
         self.xml.set('cfn', 'symmath_check')
         code = "from symmath import *"
+        raise Exception("exec 2")
         exec code in self.context, self.context
         CustomResponse.setup_response(self)

@@ -1378,6 +1371,7 @@ class CodeResponse(LoncapaResponse):
         penv = {}
         penv['__builtins__'] = globals()['__builtins__']
         try:
+            raise Exception("exec 3")
             exec(code, penv, penv)
         except Exception as err:
             log.error(

@@ -1925,18 +1919,12 @@ class SchematicResponse(LoncapaResponse):
         self.code = answer.text

     def get_score(self, student_answers):
-        from capa_problem import global_context
-        submission = [json.loads(student_answers[k]) for k in sorted(self.answer_ids)]
+        #from capa_problem import global_context
+        submission = [
+            json.loads(student_answers[k])
+            for k in sorted(self.answer_ids)
+        ]
         self.context.update({'submission': submission})
-        try:
-            exec self.code in global_context, self.context
-        except Exception as err:
-            _, _, traceback_obj = sys.exc_info()
-            raise ResponseError, ResponseError(err.message), traceback_obj
+        safe_exec(self.code, {}, self.context)
         cmap = CorrectMap()
         cmap.set_dict(dict(zip(sorted(self.answer_ids), self.context['correct'])))
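Note: the new `make_check_function` closure changes the grading strategy. Instead of pulling a live function object out of `self.context`, it keeps the problem's script source and, at check time, re-executes that source followed by a one-line call, then reads `cfn_return` back out of the globals dict. The same pattern, with plain `exec` standing in for `safe_exec` (the `is_even` grader is a made-up example, not from the commit):

    def make_check_function(script_code, cfn):
        def check_function(expect, ans):
            # Re-run the problem's script, then call the named grader once.
            code = script_code + "\ncfn_return = %s(expect, ans)\n" % cfn
            globals_dict = {'expect': expect, 'ans': ans}
            exec(code, globals_dict)  # the commit routes this through safe_exec
            return globals_dict['cfn_return']
        return check_function

    script = "def is_even(expect, ans):\n    return int(ans) % 2 == 0\n"
    check = make_check_function(script, "is_even")
    print(check(None, "4"))  # True
    print(check(None, "7"))  # False

The `raise Exception("exec 1")` / `"exec 2"` / `"exec 3"` lines appear to be deliberate tripwires in this work-in-progress commit, flagging any code path that still reaches the old `exec` statements before it has been converted.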
common/lib/codejail/codejail/safe_exec.py

@@ -19,6 +19,11 @@ def jsonable_dict(d):
     return jd

 def safe_exec(code, globals_dict, locals_dict=None, future_division=False, assumed_imports=None):
+    """Execute code safely.
+
+    Returns None.  The code can modify globals in `global_dict`.
+
+    """
     if future_division:
         code = "from __future__ import division\n" + code
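Note: only the signature, docstring, and the `future_division` branch of `safe_exec` are visible in this diff. A sketch consistent with what is shown, treating `assumed_imports` entries as either a module name or a `(name, module)` alias pair as used in capa_problem.py above; the body below is an assumption for illustration, not codejail's real implementation:

    def safe_exec(code, globals_dict, locals_dict=None, future_division=False,
                  assumed_imports=None):
        # Build a header so "from __future__" stays the first statement.
        header = []
        if future_division:
            header.append("from __future__ import division")
        # Assumed reading: make each listed module importable by the code,
        # optionally under an alias.
        for entry in (assumed_imports or []):
            name, module = entry if isinstance(entry, tuple) else (entry, entry)
            header.append("import %s as %s" % (module, name))
        exec("\n".join(header) + "\n" + code, globals_dict, locals_dict)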
common/test/data/embedded_python/course/2013_Spring.xml

 <course>
-    <chapter url_name="Graded Chapter">
+    <chapter url_name="EmbeddedPython Chapter">
         <vertical url_name="Homework1">
-            <problem url_name="H1P1">
+            <problem url_name="schematic_problem">
                 <schematicresponse>
                     <center>
                         <schematic height="500" width="600" parts="g,n,s" analyses="dc,tran"
                             submit_analyses="{&quot;tran&quot;:[[&quot;Z&quot;,0.0000004,0.0000009,0.0000014,0.0000019,0.0000024,0.0000029,0.0000034,0.000039]]}"
                             initial_value="[[&quot;w&quot;,[112,96,128,96]],[&quot;w&quot;,[256,96,240,96]],[&quot;w&quot;,[192,96,240,96]],[&quot;s&quot;,[240,96,0],{&quot;color&quot;:&quot;cyan&quot;,&quot;offset&quot;:&quot;&quot;,&quot;plot offset&quot;:&quot;0&quot;,&quot;_json_&quot;:3},[&quot;Z&quot;]],[&quot;w&quot;,[32,224,192,224]],[&quot;w&quot;,[96,48,192,48]],[&quot;L&quot;,[256,96,3],{&quot;label&quot;:&quot;Z&quot;,&quot;_json_&quot;:6},[&quot;Z&quot;]],[&quot;r&quot;,[192,48,0],{&quot;name&quot;:&quot;Rpullup&quot;,&quot;r&quot;:&quot;10K&quot;,&quot;_json_&quot;:7},[&quot;1&quot;,&quot;Z&quot;]],[&quot;w&quot;,[32,144,32,192]],[&quot;w&quot;,[32,224,32,192]],[&quot;w&quot;,[48,192,32,192]],[&quot;w&quot;,[32,96,32,144]],[&quot;w&quot;,[48,144,32,144]],[&quot;w&quot;,[32,48,32,96]],[&quot;w&quot;,[48,96,32,96]],[&quot;w&quot;,[32,48,48,48]],[&quot;g&quot;,[32,224,0],{&quot;_json_&quot;:16},[&quot;0&quot;]],[&quot;v&quot;,[96,192,1],{&quot;name&quot;:&quot;VC&quot;,&quot;value&quot;:&quot;square(3,0,250K)&quot;,&quot;_json_&quot;:17},[&quot;C&quot;,&quot;0&quot;]],[&quot;v&quot;,[96,144,1],{&quot;name&quot;:&quot;VB&quot;,&quot;value&quot;:&quot;square(3,0,500K)&quot;,&quot;_json_&quot;:18},[&quot;B&quot;,&quot;0&quot;]],[&quot;v&quot;,[96,96,1],{&quot;name&quot;:&quot;VA&quot;,&quot;value&quot;:&quot;square(3,0,1000K)&quot;,&quot;_json_&quot;:19},[&quot;A&quot;,&quot;0&quot;]],[&quot;v&quot;,[96,48,1],{&quot;name&quot;:&quot;Vpwr&quot;,&quot;value&quot;:&quot;dc(3)&quot;,&quot;_json_&quot;:20},[&quot;1&quot;,&quot;0&quot;]],[&quot;L&quot;,[96,96,2],{&quot;label&quot;:&quot;A&quot;,&quot;_json_&quot;:21},[&quot;A&quot;]],[&quot;w&quot;,[96,96,104,96]],[&quot;L&quot;,[96,144,2],{&quot;label&quot;:&quot;B&quot;,&quot;_json_&quot;:23},[&quot;B&quot;]],[&quot;w&quot;,[96,144,104,144]],[&quot;L&quot;,[96,192,2],{&quot;label&quot;:&quot;C&quot;,&quot;_json_&quot;:25},[&quot;C&quot;]],[&quot;w&quot;,[96,192,104,192]],[&quot;w&quot;,[192,96,192,112]],[&quot;s&quot;,[112,96,0],{&quot;color&quot;:&quot;red&quot;,&quot;offset&quot;:&quot;15&quot;,&quot;plot offset&quot;:&quot;0&quot;,&quot;_json_&quot;:28},[&quot;A&quot;]],[&quot;w&quot;,[104,96,112,96]],[&quot;s&quot;,[112,144,0],{&quot;color&quot;:&quot;green&quot;,&quot;offset&quot;:&quot;10&quot;,&quot;plot offset&quot;:&quot;0&quot;,&quot;_json_&quot;:30},[&quot;B&quot;]],[&quot;w&quot;,[104,144,112,144]],[&quot;w&quot;,[128,144,112,144]],[&quot;s&quot;,[112,192,0],{&quot;color&quot;:&quot;blue&quot;,&quot;offset&quot;:&quot;5&quot;,&quot;plot offset&quot;:&quot;0&quot;,&quot;_json_&quot;:33},[&quot;C&quot;]],[&quot;w&quot;,[104,192,112,192]],[&quot;w&quot;,[128,192,112,192]],[&quot;view&quot;,0,0,2,&quot;5&quot;,&quot;10&quot;,&quot;10MEG&quot;,null,&quot;100&quot;,&quot;4us&quot;]]"
                         />
                     </center>
                     <answer type="loncapa/python">
 # for a schematic response, submission[i] is the json representation

@@ -44,6 +47,51 @@ correct = ['correct' if okay else 'incorrect']
             </problem>
+            <problem url_name="cfn_problem">
+                <text>
+                    <script type="text/python" system_path="python_lib">
+def test_csv(expect, ans):
+    # Take out all spaces in expected answer
+    expect = [i.strip(' ') for i in str(expect).split(',')]
+    # Take out all spaces in student solution
+    ans = [i.strip(' ') for i in str(ans).split(',')]
+
+    def strip_q(x):
+        # Strip quotes around strings if students have entered them
+        stripped_ans = []
+        for item in x:
+            if item[0] == "'" and item[-1] == "'":
+                item = item.strip("'")
+            elif item[0] == '"' and item[-1] == '"':
+                item = item.strip('"')
+            stripped_ans.append(item)
+        return stripped_ans
+
+    return strip_q(expect) == strip_q(ans)
+                    </script>
+                    <ol class="enumerate">
+                        <li>
+                            <pre>
+num = 0
+while num &lt;= 5:
+    print(num)
+    num += 1
+print("Outside of loop")
+print(num)
+                            </pre>
+                            <p>
+                                <customresponse cfn="test_csv" expect="0, 1, 2, 3, 4, 5, 'Outside of loop', 6">
+                                    <textline size="50" correct_answer="0, 1, 2, 3, 4, 5, 'Outside of loop', 6"/>
+                                </customresponse>
+                            </p>
+                        </li>
+                    </ol>
+                </text>
+            </problem>
         </vertical>
     </chapter>
 </course>
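Note: `test_csv`, defined in the `<script>` above, normalizes surrounding spaces and optional quote characters before comparing the comma-separated lists, so called directly it would behave like this:

    expect = "0, 1, 2, 3, 4, 5, 'Outside of loop', 6"
    print(test_csv(expect, '0,1,2,3,4,5,"Outside of loop",6'))  # True: spaces and quote style are normalized
    print(test_csv(expect, "0, 1, 2"))                          # False: lists differ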
lms/djangoapps/courseware/tests/tests.py

@@ -986,7 +986,7 @@ class TestSchematicResponse(TestSubmittingProblems):
         return resp

     def test_get_graded(self):
-        resp = self.submit_question_answer('H1P1',
+        resp = self.submit_question_answer('schematic_problem',
             [['transient', {'Z': [
                 [0.0000004, 2.8],
                 [0.0000009, 2.8],

@@ -1001,8 +1001,8 @@ class TestSchematicResponse(TestSubmittingProblems):
         respdata = json.loads(resp.content)
         self.assertEqual(respdata['success'], 'correct')

-        self.reset_question_answer('H1P1')
-        resp = self.submit_question_answer('H1P1',
+        self.reset_question_answer('schematic_problem')
+        resp = self.submit_question_answer('schematic_problem',
             [['transient', {'Z': [
                 [0.0000004, 2.8],
                 [0.0000009, 0.0],   # wrong.

@@ -1016,3 +1016,31 @@ class TestSchematicResponse(TestSubmittingProblems):
             )
         respdata = json.loads(resp.content)
         self.assertEqual(respdata['success'], 'incorrect')
+
+
+@override_settings(MODULESTORE=TEST_DATA_XML_MODULESTORE)
+class TestCustomResponseCfnFunction(TestSubmittingProblems):
+    """Check that cfn functions work properly."""
+
+    course_slug = "embedded_python"
+    course_when = "2013_Spring"
+
+    def submit_question_answer(self, problem_url_name, responses):
+        """Particular to the embedded_python/2013_Spring course."""
+        problem_location = self.problem_location(problem_url_name)
+        modx_url = self.modx_url(problem_location, 'problem_check')
+        resp = self.client.post(modx_url, {
+            'input_i4x-edX-embedded_python-problem-{0}_2_1'.format(problem_url_name): responses,
+        })
+        return resp
+
+    def test_get_graded(self):
+        resp = self.submit_question_answer('cfn_problem', "0, 1, 2, 3, 4, 5, 'Outside of loop', 6")
+        respdata = json.loads(resp.content)
+        self.assertEqual(respdata['success'], 'correct')
+
+        self.reset_question_answer('cfn_problem')
+        resp = self.submit_question_answer('cfn_problem', "xyzzy!")
+        respdata = json.loads(resp.content)
+        self.assertEqual(respdata['success'], 'incorrect')