Skip to content
Projects
Groups
Snippets
Help
This project
Loading...
Sign in / Register
Toggle navigation
E
edx-platform
Overview
Overview
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
edx
edx-platform
Commits
bc6a085f
Commit
bc6a085f
authored
Feb 28, 2013
by
Arthur Barrett
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Converted the annotationinput tests to the new responsetype testing format.
parent
1da644fc
Hide whitespace changes
Inline
Side-by-side
Showing
3 changed files
with
74 additions
and
17 deletions
+74
-17
common/lib/capa/capa/tests/response_xml_factory.py
+36
-0
common/lib/capa/capa/tests/test_files/annotationresponse.xml
+0
-17
common/lib/capa/capa/tests/test_responsetypes.py
+38
-0
No files found.
common/lib/capa/capa/tests/response_xml_factory.py
View file @
bc6a085f
...
@@ -666,3 +666,39 @@ class StringResponseXMLFactory(ResponseXMLFactory):
...
@@ -666,3 +666,39 @@ class StringResponseXMLFactory(ResponseXMLFactory):
    def create_input_element(self, **kwargs):
        """ Create a <textline> input element for a string response.

        Delegates to ResponseXMLFactory.textline_input_xml; see that
        helper for the accepted kwargs.
        """
        return ResponseXMLFactory.textline_input_xml(**kwargs)
class AnnotationResponseXMLFactory(ResponseXMLFactory):
    """ Factory for creating <annotationresponse> XML trees """

    def create_response_element(self, **kwargs):
        """ Create a <annotationresponse> element """
        return etree.Element("annotationresponse")

    def create_input_element(self, **kwargs):
        """ Create a <annotationinput> element.

        Accepted kwargs (all optional; placeholder defaults are used
        when omitted):

            title: text of the <title> child
            text: text of the <text> child
            comment: text of the <comment> child
            comment_prompt: text of the <comment_prompt> child
            tag_prompt: text of the <tag_prompt> child
            options: list of (description, correctness) pairs, where
                correctness is one of 'correct', 'incorrect', or
                'partially-correct'

        Returns the constructed <annotationinput> etree element.
        """
        title = kwargs.get('title', 'super cool annotation')
        text = kwargs.get('text', 'texty text')
        comment = kwargs.get('comment', 'blah blah erudite comment blah blah')
        comment_prompt = kwargs.get('comment_prompt', 'type a commentary below')
        tag_prompt = kwargs.get('tag_prompt', 'select one tag')
        options = kwargs.get('options', [
            ('green', 'correct'),
            ('eggs', 'incorrect'),
            ('ham', 'partially-correct'),
        ])

        # Create the <annotationinput> element
        input_element = etree.Element("annotationinput")

        # BUG FIX: the original created these child elements but never
        # assigned their text, so the title/text/comment/... kwargs fetched
        # above were silently discarded and the children were always empty.
        etree.SubElement(input_element, 'title').text = title
        etree.SubElement(input_element, 'text').text = text
        etree.SubElement(input_element, 'comment').text = comment
        etree.SubElement(input_element, 'comment_prompt').text = comment_prompt
        etree.SubElement(input_element, 'tag_prompt').text = tag_prompt

        options_element = etree.SubElement(input_element, 'options')
        for (description, correctness) in options:
            # Each option carries its grading outcome in the 'choice'
            # attribute and its display text as the element body.
            option_element = etree.SubElement(options_element, 'option',
                                              {'choice': correctness})
            option_element.text = description
        return input_element
common/lib/capa/capa/tests/test_files/annotationresponse.xml
deleted
100644 → 0
View file @
1da644fc
<problem display_name="Exercise 1">
    <annotationresponse>
        <annotationinput>
            <title>the title</title>
            <text>the text</text>
            <comment>the comment</comment>
            <comment_prompt>Type a commentary below:</comment_prompt>
            <tag_prompt>Select one or more tags:</tag_prompt>
            <options>
                <!-- The 'choice' attribute marks how selecting this option
                     is scored: correct, incorrect, or partially-correct. -->
                <option choice="correct">green</option>
                <option choice="incorrect">eggs</option>
                <option choice="partially-correct">ham</option>
            </options>
        </annotationinput>
    </annotationresponse>
    <solution>Instructor text here...</solution>
</problem>
common/lib/capa/capa/tests/test_responsetypes.py
View file @
bc6a085f
...
@@ -772,3 +772,40 @@ class SchematicResponseTest(ResponseTest):
...
@@ -772,3 +772,40 @@ class SchematicResponseTest(ResponseTest):
# (That is, our script verifies that the context
# (That is, our script verifies that the context
# is what we expect)
# is what we expect)
self
.
assertEqual
(
correct_map
.
get_correctness
(
'1_2_1'
),
'correct'
)
self
.
assertEqual
(
correct_map
.
get_correctness
(
'1_2_1'
),
'correct'
)
class AnnotationResponseTest(ResponseTest):
    """Tests of grading for the <annotationresponse> response type."""
    from response_xml_factory import AnnotationResponseXMLFactory
    xml_factory_class = AnnotationResponseXMLFactory

    def test_grade(self):
        """Check correctness and points for a range of option selections.

        Exactly one option must be selected: the single correct option
        earns 2 points, the single partially-correct option earns 1, and
        anything else (wrong option, multiple options, empty, or malformed
        input) is marked incorrect with 0 points.
        """
        (correct, partially, incorrect) = ('correct', 'partially-correct', 'incorrect')
        answer_id = '1_2_1'
        options = (('x', correct), ('y', partially), ('z', incorrect))

        # Was a lambda bound to a name (PEP 8 E731); a def is clearer.
        # Student answers arrive as a JSON-encoded payload keyed by answer id.
        def make_answer(option_ids):
            return {answer_id: json.dumps({'options': option_ids})}

        tests = [
            {'correctness': correct, 'points': 2, 'answers': make_answer([0])},
            {'correctness': partially, 'points': 1, 'answers': make_answer([1])},
            {'correctness': incorrect, 'points': 0, 'answers': make_answer([2])},
            # Selecting more than one option is incorrect, even if the
            # correct option is among them.
            {'correctness': incorrect, 'points': 0, 'answers': make_answer([0, 1, 2])},
            # Degenerate / malformed submissions are all incorrect.
            {'correctness': incorrect, 'points': 0, 'answers': make_answer([])},
            {'correctness': incorrect, 'points': 0, 'answers': make_answer('')},
            {'correctness': incorrect, 'points': 0, 'answers': make_answer(None)},
            {'correctness': incorrect, 'points': 0, 'answers': {answer_id: 'null'}},
        ]

        # The original used enumerate() but never used the index.
        for test in tests:
            expected_correctness = test['correctness']
            expected_points = test['points']
            answers = test['answers']

            problem = self.build_problem(options=options)
            correct_map = problem.grade_answers(answers)
            actual_correctness = correct_map.get_correctness(answer_id)
            actual_points = correct_map.get_npoints(answer_id)

            self.assertEqual(expected_correctness, actual_correctness,
                             msg="%s should be marked %s" % (answer_id, expected_correctness))
            self.assertEqual(expected_points, actual_points,
                             msg="%s should have %d points" % (answer_id, expected_points))
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment