edx / edx-platform · Commits

Commit 00e2eed8
Authored Feb 07, 2014 by Nick Parlante

Revert "Update answer-pool within Jeffs 3x"

This reverts commit e639738e.

Parent: 41231634
Showing 4 changed files with 61 additions and 63 deletions

    CHANGELOG.rst                                          +1   -1
    common/lib/capa/capa/capa_problem.py                   +60  -53
    common/lib/capa/capa/tests/test_answer_pool.py         +0   -0
    common/lib/capa/capa/tests/test_targeted_feedback.py   +0   -9
CHANGELOG.rst

@@ -188,7 +188,7 @@ duplicated "\index".
 Studio: Support answer pools for multiple choice question choices, so authors can provide
 multiple incorrect and correct choices for a question and have 1 correct choice and n-1
 incorrect choices randomly selected and shuffled before being presented to the student.
-In XML: <choicegroup answer-pool="4"> enables an answer pool of 4 choices: 3
+In XML: <multiplechoiceresponse answer-pool="4"> enables an answer pool of 4 choices: 3
 correct choices and 1 incorrect choice. To provide multiple solution expanations, wrap
 all solution elements within a <solutionset>, and make sure to add an attribute called
 "explanation-id" to both the <solution> tag and its corresponding <choice> tag, and be
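For orientation, the authoring format this restored CHANGELOG entry describes looks roughly like the sketch below, written in the textwrap.dedent style the capa test files use. It is a hedged sketch assembled from the tag and attribute names that appear in this commit (answer-pool, explanation-id, <solutionset>); the choice labels and explanation ids are invented, not a verbatim fixture from the repository.

import textwrap

# Hypothetical problem XML: with answer-pool="4", one correct and three incorrect
# choices are drawn from the six provided, and only the <solution> whose
# explanation-id matches the drawn correct choice is kept.
xml_str = textwrap.dedent("""
    <problem>
      <multiplechoiceresponse answer-pool="4">
        <choicegroup type="MultipleChoice">
          <choice correct="false" explanation-id="feedback1">wrong-1</choice>
          <choice correct="false" explanation-id="feedback2">wrong-2</choice>
          <choice correct="false" explanation-id="feedback3">wrong-3</choice>
          <choice correct="false" explanation-id="feedback4">wrong-4</choice>
          <choice correct="true" explanation-id="feedbackC1">correct-1</choice>
          <choice correct="true" explanation-id="feedbackC2">correct-2</choice>
        </choicegroup>
      </multiplechoiceresponse>
      <solutionset>
        <solution explanation-id="feedbackC1"><p>Why correct-1 is right.</p></solution>
        <solution explanation-id="feedbackC2"><p>Why correct-2 is right.</p></solution>
      </solutionset>
    </problem>
""")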
common/lib/capa/capa/capa_problem.py

@@ -383,92 +383,103 @@ class LoncapaProblem(object):
             answer_ids.append(results.keys())
         return answer_ids

-    def sample_from_answer_pool(self, choices, rnd, num_pool):
+    def sample_from_answer_pool(self, choices, rnd, num_choices):
         """
         Takes in:
             1. list of choices
             2. random number generator
-            3. the requested size "answer-pool" number, in effect a max
+            3. max number of total choices to return

-        Returns a tuple with 2 items:
+        Returns a list with 2 items:
             1. the solution_id corresponding with the chosen correct answer
-            2. (subset) list of choice nodes with num-1 incorrect and 1 correct
+            2. (subset) list of choice nodes with 3 incorrect and 1 correct
         """
         correct_choices = []
         incorrect_choices = []
+        subset_choices = []
         for choice in choices:
             if choice.get('correct') == 'true':
                 correct_choices.append(choice)
-            else:
+            elif choice.get('correct') == 'false':
                 incorrect_choices.append(choice)
-                # TODO: check if we should require correct == "false"

-        # We throw an error if the problem is highly ill-formed.
-        # There must be at least one correct and one incorrect choice.
-        # TODO: perhaps this makes more sense for *all* problems, not just down in this corner.
-        if len(correct_choices) < 1 or len(incorrect_choices) < 1:
-            raise responsetypes.LoncapaProblemError("Choicegroup must include at last 1 correct and 1 incorrect choice")
+        # Always 1 correct and num_choices at least as large as this; if not, return list with no choices
+        num_correct = 1
+        if len(correct_choices) < num_correct or num_choices < num_correct:
+            return []

-        # Limit the number of incorrect choices to what we actually have
-        num_incorrect = num_pool - 1
+        # Ensure number of incorrect choices is no more than the number of incorrect choices to choose from
+        num_incorrect = num_choices - num_correct
         num_incorrect = min(num_incorrect, len(incorrect_choices))

         # Select the one correct choice
+        # Use rnd given to us to generate a random number (see details in tree_using_answer_pool method)
         index = rnd.randint(0, len(correct_choices) - 1)
         correct_choice = correct_choices[index]
+        subset_choices.append(correct_choice)
         solution_id = correct_choice.get('explanation-id')
-        # Put together the result, pushing most of the work onto rnd.shuffle()
-        subset_choices = [correct_choice]
-        rnd.shuffle(incorrect_choices)
-        subset_choices += incorrect_choices[:num_incorrect]
-        rnd.shuffle(subset_choices)
-
-        return (solution_id, subset_choices)
-
-    def do_answer_pool(self, tree):
+        # Add incorrect choices
+        to_add = num_incorrect
+        while to_add > 0:
+            index = rnd.randint(0, len(incorrect_choices) - 1)
+            choice = incorrect_choices[index]
+            subset_choices.append(choice)
+            incorrect_choices.remove(choice)
+            to_add = to_add - 1
+
+        # Randomize correct answer position
+        index = rnd.randint(0, num_incorrect)
+        if index != 0:
+            tmp = subset_choices[index]
+            subset_choices[index] = subset_choices[0]  # where we put the correct answer
+            subset_choices[0] = tmp
+
+        return [solution_id, subset_choices]
+
+    def tree_using_answer_pool(self, tree):
"""
Implements the answer-pool subsetting operation in-place on the tree.
Allows for problem questions with a pool of answers, from which answer options shown to the student
and randomly selected so that there is always 1 correct answer and n-1 incorrect answers,
where the author specifies n as the value of the attribute "answer-pool" within <choicegroup>
where the user specifies n as the value of the attribute "answer-pool" within <multiplechoiceresponse>
The <multiplechoiceresponse> tag must have an attribute 'answer-pool' with integer value of n
- if so, this method will modify the tree
- if not, this method will not modify the tree
The <choicegroup> tag must have an attribute 'answer-pool' giving the desired
pool size. If that attribute is zero or not present, no operation is performed.
Calling this a second time does nothing.
These problems are colloquially known as "Gradiance" problems.
"""
         # If called a second time, don't do anything, since it's in-place destructive
         if hasattr(self, 'answerpool_done'):
             return
+        query = '//multiplechoiceresponse[@answer-pool]'
+        # There are no questions with an answer pool
+        if not tree.xpath(query):
+            return
         self.answerpool_done = True

-        choicegroups = tree.xpath("//choicegroup[@answer-pool]")
         # Uses self.seed -- but want to randomize every time reaches this problem,
         # so problem's "randomization" should be set to "always"
         rnd = Random(self.seed)
-        for choicegroup in choicegroups:
-            num_str = choicegroup.get('answer-pool')
-            try:
-                num_choices = int(num_str)
-            except ValueError:
-                raise responsetypes.LoncapaProblemError("answer-pool value should be an integer")
-
-            # choices == 0 disables the feature
-            if num_choices == 0:
-                break
+        for mult_choice_response in tree.xpath(query):
+            # Determine number of choices to display; if invalid number of choices, skip over
+            num_choices = mult_choice_response.get('answer-pool')
+            if not num_choices.isdigit():
+                continue
+            num_choices = int(num_choices)
+            if num_choices < 1:
+                continue

-            choices_list = list(choicegroup.getchildren())
+            # Grab the first choicegroup (there should only be one within each <multiplechoiceresponse> tag)
+            choicegroup = mult_choice_response.xpath('./choicegroup[@type="MultipleChoice"]')[0]
+            choices_list = list(choicegroup.iter('choice'))

             # Remove all choices in the choices_list (we will add some back in later)
             for choice in choices_list:
                 choicegroup.remove(choice)
             # Sample from the answer pool to get the subset choices and solution id
-            (solution_id, subset_choices) = self.sample_from_answer_pool(choices_list, rnd, num_choices)
+            [solution_id, subset_choices] = self.sample_from_answer_pool(choices_list, rnd, num_choices)

             # Add back in randomly selected choices
             for choice in subset_choices:
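To see what the restored sample_from_answer_pool returns, here is a simplified standalone sketch of the same selection logic, using plain dicts in place of lxml <choice> nodes. The names and data are invented for illustration; this is not the method itself.

import random

def sample_pool(choices, rnd, num_choices):
    # Mirror of the restored selection logic: one correct choice placed at a
    # random position plus up to num_choices - 1 incorrect choices, no full shuffle.
    correct = [c for c in choices if c['correct'] == 'true']
    incorrect = [c for c in choices if c['correct'] == 'false']
    if not correct or num_choices < 1:
        return []
    num_incorrect = min(num_choices - 1, len(incorrect))
    picked_correct = correct[rnd.randint(0, len(correct) - 1)]
    subset = [picked_correct]
    pool = list(incorrect)
    for _ in range(num_incorrect):
        pick = pool[rnd.randint(0, len(pool) - 1)]
        subset.append(pick)
        pool.remove(pick)
    # Swap the correct answer out of position 0 into a random slot.
    index = rnd.randint(0, num_incorrect)
    subset[0], subset[index] = subset[index], subset[0]
    return [picked_correct.get('explanation-id'), subset]

choices = [
    {'correct': 'false', 'explanation-id': 'f1', 'text': 'wrong-1'},
    {'correct': 'false', 'explanation-id': 'f2', 'text': 'wrong-2'},
    {'correct': 'false', 'explanation-id': 'f3', 'text': 'wrong-3'},
    {'correct': 'true', 'explanation-id': 'fC', 'text': 'correct-1'},
]
solution_id, subset = sample_pool(choices, random.Random(42), 3)
print(solution_id, [c['text'] for c in subset])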
@@ -476,7 +487,7 @@ class LoncapaProblem(object):
             # Filter out solutions that don't correspond to the correct answer we selected to show
             # Note that this means that if the user simply provides a <solution> tag, nothing is filtered
-            solutionset = choicegroup.xpath('../following-sibling::solutionset')
+            solutionset = mult_choice_response.xpath('./following-sibling::solutionset')
             if len(solutionset) != 0:
                 solutionset = solutionset[0]
                 solutions = solutionset.xpath('./solution')
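The one-line change in this hunk just tracks the new anchor element: the removed code started from the nested <choicegroup> and stepped up one level ('../following-sibling::solutionset'), while the restored code starts from <multiplechoiceresponse> itself ('./following-sibling::solutionset'). A minimal lxml sketch of the two equivalent lookups, on a toy tree rather than a real capa problem:

from lxml import etree

# Toy tree with the same nesting the xpath expressions above walk; not a capa fixture.
root = etree.XML(
    "<problem>"
    "<multiplechoiceresponse><choicegroup type='MultipleChoice'/></multiplechoiceresponse>"
    "<solutionset><solution explanation-id='feedbackC'/></solutionset>"
    "</problem>"
)
mcr = root.find("multiplechoiceresponse")
cg = mcr.find("choicegroup")

# Restored form: anchor at <multiplechoiceresponse>, look at its following siblings.
assert mcr.xpath("./following-sibling::solutionset")[0].tag == "solutionset"
# Removed form: anchor at <choicegroup>, step up to its parent first.
assert cg.xpath("../following-sibling::solutionset")[0].tag == "solutionset"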
@@ -484,7 +495,7 @@ class LoncapaProblem(object):
                     if solution.get('explanation-id') != solution_id:
                         solutionset.remove(solution)

-    def do_targeted_feedback(self, tree):
+    def tree_using_targeted_feedback(self, tree):
         """
         Allows for problem questions to show targeted feedback, which are choice-level explanations.
         Targeted feedback is automatically visible after a student has submitted their answers.
@@ -500,11 +511,7 @@ the "Show Answer" setting to "Never" because now there's no need for a "Show Ans
         button because no solution will show up if you were to click the "Show Answer" button
         """
-        # If called a second time, don't do anything, since it's in-place destructive
-        if hasattr(self, 'targeted_done'):
-            return
-        self.targeted_done = True
         # Note that if there are no questions with targeted feedback, the body of the for loop is not executed
         for mult_choice_response in tree.xpath('//multiplechoiceresponse[@targeted-feedback]'):
             show_explanation = mult_choice_response.get('targeted-feedback') == 'alwaysShowCorrectChoiceExplanation'
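The four removed lines are the in-place-idempotence guard for the targeted-feedback rewrite (mirroring the answerpool_done flag earlier in the diff); the matching "calling it multiple times yields the same thing" assertions disappear from test_targeted_feedback.py below. As a generic illustration of the guard pattern only, not edx code (class and method names are invented):

class OneShotRewriter(object):
    # First call mutates the data in place and sets a flag; later calls return
    # immediately, so the destructive rewrite cannot be applied twice.
    def rewrite(self, tree):
        if hasattr(self, 'rewrite_done'):
            return
        self.rewrite_done = True
        tree.append('rewritten')  # stand-in for the real in-place edits

rewriter = OneShotRewriter()
doc = ['original']
rewriter.rewrite(doc)
rewriter.rewrite(doc)
assert doc == ['original', 'rewritten']  # second call was a no-op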
@@ -571,8 +578,8 @@ button because no solution will show up if you were to click the "Show Answer" b
         '''
         Main method called externally to get the HTML to be rendered for this capa Problem.
         '''
-        self.do_answer_pool(self.tree)
-        self.do_targeted_feedback(self.tree)
+        self.tree_using_answer_pool(self.tree)
+        self.tree_using_targeted_feedback(self.tree)
         html = contextualize_text(etree.tostring(self._extract_html(self.tree)), self.context)
         return html
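The "Uses self.seed" comment earlier in the diff is why both rewrites draw from Random(self.seed): for a fixed seed the selected subset is fully reproducible, and only a fresh seed (for example, when the problem's randomization setting is "always") changes what a student sees. A small illustration of that property, unrelated to the edx code itself:

import random

first = random.Random(1234).sample(range(10), 4)
second = random.Random(1234).sample(range(10), 4)
assert first == second  # same seed, same draw; a different seed gives a different subset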
common/lib/capa/capa/tests/test_answer_pool.py

This diff is collapsed.

common/lib/capa/capa/tests/test_targeted_feedback.py
@@ -142,9 +142,6 @@ class CapaTargetedFeedbackTest(unittest.TestCase):
         self.assertRegexpMatches(without_new_lines, r"<div>.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*</div>")
         self.assertNotRegexpMatches(without_new_lines, r"feedback1|feedback2|feedback3|feedbackC")
-        # Check that calling it multiple times yields the same thing
-        the_html2 = problem.get_html()
-        self.assertEquals(the_html, the_html2)

     def test_targeted_feedback_student_answer1(self):
         xml_str = textwrap.dedent("""
@@ -210,9 +207,6 @@ class CapaTargetedFeedbackTest(unittest.TestCase):
         self.assertRegexpMatches(without_new_lines, r"<targetedfeedback explanation-id=\"feedback3\">.*3rd WRONG solution")
         self.assertNotRegexpMatches(without_new_lines, r"feedback1|feedback2|feedbackC")
-        # Check that calling it multiple times yields the same thing
-        the_html2 = problem.get_html()
-        self.assertEquals(the_html, the_html2)

     def test_targeted_feedback_student_answer2(self):
         xml_str = textwrap.dedent("""
@@ -346,9 +340,6 @@ class CapaTargetedFeedbackTest(unittest.TestCase):
         self.assertRegexpMatches(without_new_lines, r"<targetedfeedback explanation-id=\"feedbackC\".*solution explanation")
         self.assertNotRegexpMatches(without_new_lines, r"<div>\{.*'1_solution_1'.*\}</div>")
         self.assertNotRegexpMatches(without_new_lines, r"feedback2|feedback3")
-        # Check that calling it multiple times yields the same thing
-        the_html2 = problem.get_html()
-        self.assertEquals(the_html, the_html2)

     def test_targeted_feedback_no_show_solution_explanation(self):
         xml_str = textwrap.dedent("""