edx / edx-platform
Commit 08c5ab3c authored Mar 22, 2013 by Jay Zoldak
pep8 fixes
parent 45029e70
Showing 1 changed file with 32 additions and 36 deletions
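The commit touches only formatting, and the paired old/new lines in the hunks below differ only in whitespace, so the edits are most likely continuation-line alignment and blank-line fixes of the kind pep8 reports as E127/E128 and E301/E302. A minimal before/after sketch (the build_problem call is borrowed from the tests below; the exact indentation chosen in the commit is an assumption):

    # Before: continuation line not aligned with the opening parenthesis (pep8 E128)
    problem = self.build_problem(choices=[False, True, True],
        choice_names=['foil_1', 'foil_2', 'foil_3'])

    # After: continuation line aligned with the opening delimiter
    problem = self.build_problem(choices=[False, True, True],
                                 choice_names=['foil_1', 'foil_2', 'foil_3'])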
common/lib/capa/capa/tests/test_responsetypes.py (+32, -36)
@@ -17,6 +17,7 @@ from capa.correctmap import CorrectMap
from capa.util import convert_files_to_filenames
from capa.xqueue_interface import dateformat


class ResponseTest(unittest.TestCase):
    """ Base class for tests of capa responses."""
@@ -46,6 +47,7 @@ class ResponseTest(unittest.TestCase):
        self.assertEqual(result, 'incorrect', msg="%s should be marked incorrect" % str(input_str))


class MultiChoiceResponseTest(ResponseTest):
    from response_xml_factory import MultipleChoiceResponseXMLFactory
    xml_factory_class = MultipleChoiceResponseXMLFactory
@@ -91,7 +93,7 @@ class TrueFalseResponseTest(ResponseTest):
    def test_named_true_false_grade(self):
        problem = self.build_problem(choices=[False, True, True],
                                     choice_names=['foil_1', 'foil_2', 'foil_3'])

        # Check the results
        # Mark correct if and only if ALL (and only) correct choices selected
@@ -107,6 +109,7 @@ class TrueFalseResponseTest(ResponseTest):
        self.assert_grade(problem, 'choice_foil_4', 'incorrect')
        self.assert_grade(problem, 'not_a_choice', 'incorrect')


class ImageResponseTest(ResponseTest):
    from response_xml_factory import ImageResponseXMLFactory
    xml_factory_class = ImageResponseXMLFactory
@@ -145,7 +148,7 @@ class ImageResponseTest(ResponseTest):
    def test_multiple_regions_grade(self):
        # Define multiple regions that the user can select
        region_str = "[[[10,10], [20,10], [20, 30]], [[100,100], [120,100], [120,150]]]"

        # Expect that only points inside the regions are marked correct
        problem = self.build_problem(regions=region_str)
@@ -155,7 +158,7 @@ class ImageResponseTest(ResponseTest):
    def test_region_and_rectangle_grade(self):
        rectangle_str = "(100,100)-(200,200)"
        region_str = "[[10,10], [20,10], [20, 30]]"

        # Expect that only points inside the rectangle or region are marked correct
        problem = self.build_problem(regions=region_str, rectangle=rectangle_str)
@@ -171,7 +174,7 @@ class SymbolicResponseTest(unittest.TestCase):
        test_lcp = lcp.LoncapaProblem(open(symbolicresponse_file).read(), '1', system=test_system)
        correct_answers = {'1_2_1': 'cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]',
                           '1_2_1_dynamath': '''
                            <math xmlns="http://www.w3.org/1998/Math/MathML">
                              <mstyle displaystyle="true">
                                <mrow>
                                  <mi>cos</mi>
@@ -239,8 +242,8 @@ class SymbolicResponseTest(unittest.TestCase):
                                  <mo>]</mo>
                                </mrow>
                              </mstyle>
                            </math>
                            ''',
                           }
        wrong_answers = {'1_2_1': '2',
                         '1_2_1_dynamath': '''
@@ -248,7 +251,7 @@ class SymbolicResponseTest(unittest.TestCase):
                            <mstyle displaystyle="true">
                              <mn>2</mn>
                            </mstyle>
                            </math>''',
                         }
        self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
        self.assertEquals(test_lcp.grade_answers(wrong_answers).get_correctness('1_2_1'), 'incorrect')
@@ -297,14 +300,13 @@ class FormulaResponseTest(ResponseTest):
    def test_hint(self):
        # Sample variables x and y in the range [-10, 10]
        sample_dict = {'x': (-10, 10), 'y': (-10, 10)}

        # Give a hint if the user leaves off the coefficient
        # or leaves out x
        hints = [('x + 3*y', 'y_coefficient', 'Check the coefficient of y'),
                 ('2*y', 'missing_x', 'Try including the variable x')]

        # The expected solution is numerically equivalent to x+2y
        problem = self.build_problem(sample_dict=sample_dict,
                                     num_samples=10,
@@ -324,7 +326,6 @@ class FormulaResponseTest(ResponseTest):
        self.assertEquals(correct_map.get_hint('1_2_1'), 'Try including the variable x')

    def test_script(self):
        # Calculate the answer using a script
        script = "calculated_ans = 'x+x'"
@@ -348,7 +349,6 @@ class StringResponseTest(ResponseTest):
    from response_xml_factory import StringResponseXMLFactory
    xml_factory_class = StringResponseXMLFactory

    def test_case_sensitive(self):
        problem = self.build_problem(answer="Second", case_sensitive=True)
@@ -400,6 +400,7 @@ class StringResponseTest(ResponseTest):
        correct_map = problem.grade_answers(input_dict)
        self.assertEquals(correct_map.get_hint('1_2_1'), "")


class CodeResponseTest(ResponseTest):
    from response_xml_factory import CodeResponseXMLFactory
    xml_factory_class = CodeResponseXMLFactory
@@ -442,7 +443,6 @@ class CodeResponseTest(ResponseTest):
        self.assertEquals(self.problem.is_queued(), True)

    def test_update_score(self):
        '''
        Test whether LoncapaProblem.update_score can deliver queued result to the right subproblem
@@ -495,7 +495,6 @@ class CodeResponseTest(ResponseTest):
        else:
            self.assertTrue(self.problem.correct_map.is_queued(test_id))  # Should be queued, message undelivered

    def test_recentmost_queuetime(self):
        '''
        Test whether the LoncapaProblem knows about the time of queue requests
@@ -538,6 +537,7 @@ class CodeResponseTest(ResponseTest):
        self.assertEquals(answers_converted['1_3_1'], ['answer1', 'answer2', 'answer3'])
        self.assertEquals(answers_converted['1_4_1'], [fp.name, fp.name])


class ChoiceResponseTest(ResponseTest):
    from response_xml_factory import ChoiceResponseXMLFactory
    xml_factory_class = ChoiceResponseXMLFactory
@@ -554,7 +554,6 @@ class ChoiceResponseTest(ResponseTest):
        # No choice 3 exists --> mark incorrect
        self.assert_grade(problem, 'choice_3', 'incorrect')

    def test_checkbox_group_grade(self):
        problem = self.build_problem(choice_type='checkbox', choices=[False, True, True])
@@ -587,8 +586,9 @@ class JavascriptResponseTest(ResponseTest):
                                     param_dict={'value': '4'})

        # Test that we get graded correctly
        self.assert_grade(problem, json.dumps({0: 4}), "correct")
        self.assert_grade(problem, json.dumps({0: 5}), "incorrect")


class NumericalResponseTest(ResponseTest):
    from response_xml_factory import NumericalResponseXMLFactory
@@ -602,7 +602,6 @@ class NumericalResponseTest(ResponseTest):
        incorrect_responses = ["", "3.9", "4.1", "0"]
        self.assert_multiple_grade(problem, correct_responses, incorrect_responses)

    def test_grade_decimal_tolerance(self):
        problem = self.build_problem(question_text="What is 2 + 2 approximately?",
                                     explanation="The answer is 4",
@@ -651,7 +650,6 @@ class NumericalResponseTest(ResponseTest):
        self.assert_multiple_grade(problem, correct_responses, incorrect_responses)


class CustomResponseTest(ResponseTest):
    from response_xml_factory import CustomResponseXMLFactory
    xml_factory_class = CustomResponseXMLFactory
@@ -692,7 +690,6 @@ class CustomResponseTest(ResponseTest):
        overall_msg = correctmap.get_overall_message()
        self.assertEqual(overall_msg, "Overall message")

    def test_function_code_single_input(self):
        # For function code, we pass in these arguments:
@@ -768,7 +765,6 @@ class CustomResponseTest(ResponseTest):
        correctness = correct_map.get_correctness('1_2_2')
        self.assertEqual(correctness, 'incorrect')

    def test_function_code_multiple_inputs(self):
        # If the <customresponse> has multiple inputs associated with it,
@@ -797,7 +793,7 @@ class CustomResponseTest(ResponseTest):
                                     cfn="check_func", num_inputs=3)

        # Grade the inputs (one input incorrect)
        input_dict = {'1_2_1': '-999', '1_2_2': '2', '1_2_3': '3'}
        correct_map = problem.grade_answers(input_dict)

        # Expect that we receive the overall message (for the whole response)
@@ -813,7 +809,6 @@ class CustomResponseTest(ResponseTest):
        self.assertEqual(correct_map.get_msg('1_2_2'), 'Feedback 2')
        self.assertEqual(correct_map.get_msg('1_2_3'), 'Feedback 3')

    def test_multiple_inputs_return_one_status(self):
        # When given multiple inputs, the 'answer_given' argument
        # to the check_func() is a list of inputs
@@ -838,7 +833,7 @@ class CustomResponseTest(ResponseTest):
                                     cfn="check_func", num_inputs=3)

        # Grade the inputs (one input incorrect)
        input_dict = {'1_2_1': '-999', '1_2_2': '2', '1_2_3': '3'}
        correct_map = problem.grade_answers(input_dict)

        # Everything marked incorrect
@@ -847,7 +842,7 @@ class CustomResponseTest(ResponseTest):
        self.assertEqual(correct_map.get_correctness('1_2_3'), 'incorrect')

        # Grade the inputs (everything correct)
        input_dict = {'1_2_1': '1', '1_2_2': '2', '1_2_3': '3'}
        correct_map = problem.grade_answers(input_dict)

        # Everything marked incorrect
@@ -902,13 +897,13 @@ class SchematicResponseTest(ResponseTest):
        # To test that the context is set up correctly,
        # we create a script that sets *correct* to true
        # if and only if we find the *submission* (list)
        script = "correct = ['correct' if 'test' in submission[0] else 'incorrect']"
        problem = self.build_problem(answer=script)

        # The actual dictionary would contain schematic information
        # sent from the JavaScript simulation
        submission_dict = {'test': 'test'}
        input_dict = {'1_2_1': json.dumps(submission_dict)}
        correct_map = problem.grade_answers(input_dict)

        # Expect that the problem is graded as true
@@ -916,6 +911,7 @@ class SchematicResponseTest(ResponseTest):
        # is what we expect)
        self.assertEqual(correct_map.get_correctness('1_2_1'), 'correct')


class AnnotationResponseTest(ResponseTest):
    from response_xml_factory import AnnotationResponseXMLFactory
    xml_factory_class = AnnotationResponseXMLFactory
@@ -924,18 +920,18 @@ class AnnotationResponseTest(ResponseTest):
        (correct, partially, incorrect) = ('correct', 'partially-correct', 'incorrect')
        answer_id = '1_2_1'
        options = (('x', correct), ('y', partially), ('z', incorrect))
        make_answer = lambda option_ids: {answer_id: json.dumps({'options': option_ids})}

        tests = [
            {'correctness': correct, 'points': 2, 'answers': make_answer([0])},
            {'correctness': partially, 'points': 1, 'answers': make_answer([1])},
            {'correctness': incorrect, 'points': 0, 'answers': make_answer([2])},
            {'correctness': incorrect, 'points': 0, 'answers': make_answer([0, 1, 2])},
            {'correctness': incorrect, 'points': 0, 'answers': make_answer([])},
            {'correctness': incorrect, 'points': 0, 'answers': make_answer('')},
            {'correctness': incorrect, 'points': 0, 'answers': make_answer(None)},
            {'correctness': incorrect, 'points': 0, 'answers': {answer_id: 'null'}},
        ]
        for (index, test) in enumerate(tests):