Commit 7615a063 by Sarina Canelake

Merge pull request #310 from robertjmarks/master

Adds in changes for stat2.3x question type
parents e84a41b4 4d880db1
......@@ -81,3 +81,4 @@ Felix Sun <felixsun@mit.edu>
Adam Palay <adam@edx.org>
Ian Hoover <ihoover@edx.org>
Mukul Goyal <miki@edx.org>
Robert Marks <rmarks@edx.org>
......@@ -5,6 +5,7 @@ These are notable changes in edx-platform. This is a rolling list of changes,
in roughly chronological order, most recent first. Add your entries at or near
the top. Include a label indicating the component affected.
Common: Added *experimental* support for jsinput type.
Common: Added setting to specify Celery Broker vhost
......@@ -23,6 +24,8 @@ Studio: Added support for uploading and managing PDF textbooks
Common: Student information is now passed to the tracking log via POST instead of GET.
Blades: Added functionality and tests for new capa input type: choicetextresponse.
Common: Add tests for documentation generation to test suite
Blades: User answer now preserved (and changeable) after clicking "show answer" in choice problems
......@@ -45,7 +48,7 @@ history of background tasks for a given problem and student.
Blades: Small UX fix on capa multiple-choice problems. Make labels only
as wide as the text to reduce accidental choice selections.
Studio:
- use xblock field defaults to initialize all new instances' fields and
only use templates as override samples.
- create new instances via in memory create_xmodule and related methods rather
......
......@@ -460,10 +460,10 @@ class JSInput(InputTypeBase):
DO NOT USE! HAS NOT BEEN TESTED BEYOND 700X PROBLEMS, AND MAY CHANGE IN
BACKWARDS-INCOMPATIBLE WAYS.
Inputtype for general javascript inputs. Intended to be used with
customresponse.
Loads in a sandboxed iframe to help prevent css and js conflicts between
frame and top-level window.
iframe sandbox whitelist:
- allow-scripts
- allow-popups
......@@ -474,9 +474,9 @@ class JSInput(InputTypeBase):
window elements.
Example:
<jsinput html_file="/static/test.html"
gradefn="grade"
height="500"
width="400"/>
See the documentation in the /doc/public folder for more information.
......@@ -500,7 +500,7 @@ class JSInput(InputTypeBase):
Attribute('width', "400"), # iframe width
Attribute('height', "300")] # iframe height
def _extra_context(self):
context = {
......@@ -510,11 +510,12 @@ class JSInput(InputTypeBase):
return context
registry.register(JSInput)
#-----------------------------------------------------------------------------
class TextLine(InputTypeBase):
"""
A text line input. Can do math preview if "math"="1" is specified.
......@@ -1368,3 +1369,209 @@ class AnnotationInput(InputTypeBase):
return extra_context
registry.register(AnnotationInput)
class ChoiceTextGroup(InputTypeBase):
"""
Groups of radiobutton/checkboxes with text inputs.
Examples:
RadioButton problem
<problem>
<startouttext/>
A person rolls a standard die 100 times and records the results.
On the first roll they received a "1". Given this information
select the correct choice and fill in numbers to make it accurate.
<endouttext/>
<choicetextresponse>
<radiotextgroup>
<choice correct="false">The lowest number rolled was:
<decoy_input/> and the highest number rolled was:
<decoy_input/> .</choice>
<choice correct="true">The lowest number rolled was <numtolerance_input answer="1"/>
and there is not enough information to determine the highest number rolled.
</choice>
<choice correct="false">There is not enough information to determine the lowest
number rolled, and the highest number rolled was:
<decoy_input/> .
</choice>
</radiotextgroup>
</choicetextresponse>
</problem>
CheckboxProblem:
<problem>
<startouttext/>
A person randomly selects 100 times, with replacement, from the list of numbers \(\sqrt{2}\), 2, 3, 4, 5, 6
and records the results. The first number they pick is \(\sqrt{2}\). Given this information
select the correct choices and fill in numbers to make them accurate.
<endouttext/>
<choicetextresponse>
<checkboxtextgroup>
<choice correct="true">
The lowest number selected was <numtolerance_input answer="1.4142" tolerance="0.01"/>
</choice>
<choice correct="false">
The highest number selected was <decoy_input/> .
</choice>
<choice correct="true">There is not enough information given to determine the highest number
which was selected.
</choice>
<choice correct="false">There is not enough information given to determine the lowest number
selected.
</choice>
</checkboxtextgroup>
</choicetextresponse>
</problem>
In the preceding examples the <decoy_input/> is used to generate a textinput html element
in the problem's display. Since it is inside of an incorrect choice, no answer given
for it will be correct, and thus specifying an answer for it is not needed.
"""
template = "choicetext.html"
tags = ['radiotextgroup', 'checkboxtextgroup']
def setup(self):
"""
Performs setup for the initial rendering of the problem.
`self.html_input_type` determines whether this problem is displayed
with radio buttons or checkboxes.
If the initial value of `self.value` is '', change it to {} so that
the template has an empty dictionary to work with.
Sets `self.choices` to the return value of
`self.extract_choices`.
"""
self.text_input_values = {}
if self.tag == 'radiotextgroup':
self.html_input_type = "radio"
elif self.tag == 'checkboxtextgroup':
self.html_input_type = "checkbox"
else:
raise Exception("ChoiceGroup: unexpected tag {0}".format(self.tag))
if self.value == '':
# Make `value` an empty dictionary, if it currently has an empty
# value. This is necessary because the template expects a
# dictionary.
self.value = {}
self.choices = self.extract_choices(self.xml)
@classmethod
def get_attributes(cls):
"""
Returns a list of `Attribute` for this problem type
"""
return [
Attribute("show_correctness", "always"),
Attribute("submitted_message", "Answer received.")
]
def _extra_context(self):
"""
Returns a dictionary of extra content necessary for rendering this InputType.
`input_type` is either 'radio' or 'checkbox' indicating whether the choices for
this problem will have radiobuttons or checkboxes.
"""
return {
'input_type': self.html_input_type,
'choices': self.choices
}
@staticmethod
def extract_choices(element):
"""
Extracts choices from the xml for this problem type.
If we have xml that is as follows (choice names will have been assigned
by now)
<radiotextgroup>
<choice correct = "true" name ="1_2_1_choiceinput_0bc">
The number
<numtolerance_input name = "1_2_1_choiceinput0_numtolerance_input_0" answer="5"/>
Is the mean of the list.
</choice>
<choice correct = "false" name = "1_2_1_choiceinput_1bc>
False demonstration choice
</choice>
</radiotextgroup>
Choices are used for rendering the problem properly.
The function will set up choices as follows:
choices =[
("1_2_1_choiceinput_0bc",
[{'type': 'text', 'contents': "The number", 'tail_text': '',
'value': ''
},
{'type': 'textinput',
'contents': "1_2_1_choiceinput0_numtolerance_input_0",
'tail_text': 'Is the mean of the list',
'value': ''
}
]
),
("1_2_1_choiceinput_1bc",
[{'type': 'text', 'contents': "False demonstration choice",
'tail_text': '',
'value': ''
}
]
)
]
"""
choices = []
for choice in element:
if choice.tag != 'choice':
raise Exception(
"[capa.inputtypes.extract_choices] Expected a <choice>" +
"tag; got {0} instead".format(choice.tag)
)
components = []
choice_text = ''
if choice.text is not None:
choice_text += choice.text
# Initialize our dict for the next content
adder = {
'type': 'text',
'contents': choice_text,
'tail_text': '',
'value': ''
}
components.append(adder)
for elt in choice:
# for elements in the choice e.g. <text> <numtolerance_input>
adder = {
'type': 'text',
'contents': '',
'tail_text': '',
'value': ''
}
tag_type = elt.tag
# If the current `elt` is a <numtolerance_input>, set the
# `adder`'s type to 'textinput', and its 'contents' to
# the `elt`'s name.
# Treat decoy_inputs and numtolerance_inputs the same in order
# to prevent students from reading the HTML and figuring out
# which inputs are valid.
if tag_type in ('numtolerance_input', 'decoy_input'):
# We set this to textinput, so that we get a textinput html
# element.
adder['type'] = 'textinput'
adder['contents'] = elt.get('name')
else:
adder['contents'] = elt.text
# Add any tail text("is the mean" in the example)
adder['tail_text'] = elt.tail if elt.tail else ''
components.append(adder)
# Add the tuple for the current choice to the list of choices
choices.append((choice.get("name"), components))
return choices
registry.register(ChoiceTextGroup)
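The `choices` structure documented in `extract_choices` can be reproduced outside the platform. The snippet below is a minimal standalone sketch, not the production class (which needs a full capa environment): it re-implements the documented mapping with lxml so the resulting list can be inspected, using the XML and names from the docstring example.

from lxml import etree

# Illustrative sketch only: mirrors the documented extract_choices transformation.
xml = etree.fromstring("""
<radiotextgroup>
    <choice correct="true" name="1_2_1_choiceinput_0bc">The number
        <numtolerance_input name="1_2_1_choiceinput_0_numtolerance_input_0" answer="5"/>
        Is the mean of the list.
    </choice>
    <choice correct="false" name="1_2_1_choiceinput_1bc">False demonstration choice</choice>
</radiotextgroup>
""")

choices = []
for choice in xml:
    components = [{'type': 'text', 'contents': choice.text or '',
                   'tail_text': '', 'value': ''}]
    for elt in choice:
        is_input = elt.tag in ('numtolerance_input', 'decoy_input')
        components.append({
            'type': 'textinput' if is_input else 'text',
            'contents': elt.get('name') if is_input else (elt.text or ''),
            'tail_text': elt.tail or '',
            'value': '',
        })
    choices.append((choice.get('name'), components))

print(choices[0][0])             # 1_2_1_choiceinput_0bc
print(choices[0][1][1]['type'])  # textinput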
......@@ -2097,6 +2097,333 @@ class AnnotationResponse(LoncapaResponse):
return option_ids[0]
return None
class ChoiceTextResponse(LoncapaResponse):
"""
Allows for multiple choice responses with text inputs.
Desired semantics match those of NumericalResponse and
ChoiceResponse.
"""
response_tag = 'choicetextresponse'
max_inputfields = 1
allowed_inputfields = ['choicetextgroup',
'checkboxtextgroup',
'radiotextgroup'
]
def setup_response(self):
"""
Sets up three dictionaries for use later:
`correct_choices`: These are the correct binary choices (radio/checkbox).
`correct_inputs`: These are the numerical/string answers for required
inputs.
`answer_values`: This is a dict, keyed by the name of the binary choice
which contains the correct answers for the text inputs separated by
commas e.g. "1, 0.5"
`correct_choices` and `correct_inputs` are used for grading the problem
and `answer_values` is used for displaying correct answers.
"""
context = self.context
self.correct_choices = {}
self.assign_choice_names()
self.correct_inputs = {}
self.answer_values = {self.answer_id: []}
correct_xml = self.xml.xpath('//*[@id=$id]//choice[@correct="true"]',
id=self.xml.get('id'))
for node in correct_xml:
# For each correct choice, set the `parent_name` to the
# current choice's name
parent_name = node.get('name')
# Add the name of the correct binary choice to the
# correct choices list as a key. The value is not important.
self.correct_choices[parent_name] = {'answer': ''}
# Add the name of the parent to the list of correct answers
self.answer_values[self.answer_id].append(parent_name)
answer_list = []
# Loop over <numtolerance_input> elements inside of the correct choices
for child in node:
answer = child.get('answer', None)
if not answer:
# If the question creator does not specify an answer for a
# <numtolerance_input> inside of a correct choice, raise an error
raise LoncapaProblemError(
"Answer not provided for numtolerance_input"
)
# Contextualize the answer to allow script generated answers.
answer = contextualize_text(answer, context)
input_name = child.get('name')
# Contextualize the tolerance to value.
tolerance = contextualize_text(
child.get('tolerance', '0'),
context
)
# Add the answer and tolerance information for the current
# numtolerance_input to `correct_inputs`
self.correct_inputs[input_name] = {
'answer': answer,
'tolerance': tolerance
}
# Add the correct answer for this input to the list for show
answer_list.append(answer)
# Turn the list of numtolerance_input answers into a comma separated string.
self.answer_values[parent_name] = ', '.join(answer_list)
# Turn correct choices into a set. Allows faster grading.
self.correct_choices = set(self.correct_choices.keys())
def assign_choice_names(self):
"""
Initialize name attributes in <choice> and <numtolerance_input> tags
for this response.
Example:
Assuming for simplicity that `self.answer_id` = '1_2_1'
Before the function is called `self.xml` =
<radiotextgroup>
<choice correct = "true">
The number
<numtolerance_input answer="5"/>
Is the mean of the list.
</choice>
<choice correct = "false">
False demonstration choice
</choice>
</radiotextgroup>
After this is called the choices and numtolerance_inputs will have a name
attribute initialized and self.xml will be:
<radiotextgroup>
<choice correct = "true" name ="1_2_1_choiceinput_0bc">
The number
<numtolerance_input name = "1_2_1_choiceinput0_numtolerance_input_0"
answer="5"/>
Is the mean of the list.
</choice>
<choice correct = "false" name = "1_2_1_choiceinput_1bc>
False demonstration choice
</choice>
</radiotextgroup>
"""
for index, choice in enumerate(
self.xml.xpath('//*[@id=$id]//choice', id=self.xml.get('id'))
):
# Set the name attribute for <choices>
# "bc" is appended at the end to indicate that this is a
# binary choice as opposed to a numtolerance_input; this convention
# is used when grading the problem.
choice.set(
"name",
self.answer_id + "_choiceinput_" + str(index) + "bc"
)
# Set Name attributes for <numtolerance_input> elements
# Look for all <numtolerance_inputs> inside this choice.
numtolerance_inputs = choice.findall('numtolerance_input')
# Look for all <decoy_input> inside this choice
decoys = choice.findall('decoy_input')
# <decoy_input> would only be used in choices which do not contain
# <numtolerance_input>
inputs = numtolerance_inputs if numtolerance_inputs else decoys
# Give each input inside of the choice a name combining
# The ordinality of the choice, and the ordinality of the input
# within that choice e.g. 1_2_1_choiceinput_0_numtolerance_input_1
for ind, child in enumerate(inputs):
child.set(
"name",
self.answer_id + "_choiceinput_" + str(index) +
"_numtolerance_input_" + str(ind)
)
def get_score(self, student_answers):
"""
Returns a `CorrectMap` showing whether `student_answers` are correct.
`student_answers` contains keys for binary inputs (radio button,
checkbox) and numerical inputs. Keys ending with 'bc' are binary
choice inputs; otherwise they are text fields.
This method first separates the two
types of answers and then grades them in separate methods.
The student is only correct if they have both the binary inputs and
numerical inputs correct.
"""
answer_dict = student_answers.get(self.answer_id, "")
binary_choices, numtolerance_inputs = self._split_answers_dict(answer_dict)
# Check the binary choices first.
choices_correct = self._check_student_choices(binary_choices)
inputs_correct = self._check_student_inputs(numtolerance_inputs)
# Only return correct if both the binary choices
# and the numtolerance_inputs are correct.
correct = choices_correct and inputs_correct
return CorrectMap(
self.answer_id,
'correct' if correct else 'incorrect'
)
def get_answers(self):
"""
Returns a dictionary containing the names of binary choices as keys
and a string of answers to any numtolerance_inputs which they may have,
e.g. {choice_1bc : "answer1, answer2", choice_2bc : ""}
"""
return self.answer_values
def _split_answers_dict(self, a_dict):
"""
Returns two dicts:
`binary_choices` : dictionary {input_name: input_value} for
the binary choices which the student selected.
and
`numtolerance_choices` : a dictionary {input_name: input_value}
for the numtolerance_inputs inside of choices which were selected
Determines whether an input is inside of a selected binary choice by looking at
the beginning of its name.
For example, if a binary choice was named '1_2_1_choiceinput_0bc',
all of the numtolerance_inputs inside it would have a name that begins
with '1_2_1_choiceinput_0_numtolerance_input'.
Splits the name of the numtolerance_input at the occurrence of
'_numtolerance_input_' and appends 'bc' to the end to get the name
of the choice it is contained in.
Example:
`a_dict` = {
'1_2_1_choiceinput_0bc': '1_2_1_choiceinput_0bc',
'1_2_1_choiceinput_0_numtolerance_input_0': '1',
'1_2_1_choiceinput_0_numtolerance_input_1': '2',
'1_2_1_choiceinput_1_numtolerance_input_0': '3'
}
In this case, the binary choice is '1_2_1_choiceinput_0bc', and
the numtolerance_inputs associated with it are
'1_2_1_choiceinput_0_numtolerance_input_0', and
'1_2_1_choiceinput_0_numtolerance_input_1'.
so the two return dictionaries would be
`binary_choices` = {'1_2_1_choiceinput_0bc': '1_2_1_choiceinput_0bc'}
and
`numtolerance_choices` ={
'1_2_1_choiceinput_0_numtolerance_input_0': '1',
'1_2_1_choiceinput_0_numtolerance_input_1': '2'
}
The entry '1_2_1_choiceinput_1_numtolerance_input_0': '3' is discarded
because it was not inside of a selected binary choice, and no validation
should be performed on numtolerance_inputs inside of non-selected choices.
"""
# Initialize the two dictionaries that are returned
numtolerance_choices = {}
binary_choices = {}
# `selected_choices` is a list of binary choices which were "checked/selected"
# when the student submitted the problem.
# Keys in a_dict ending with 'bc' refer to binary choices.
selected_choices = [key for key in a_dict if key.endswith("bc")]
for key in selected_choices:
binary_choices[key] = a_dict[key]
# Convert the name of a numtolerance_input into the name of the binary
# choice that it is contained within, and append it to the list if
# the numtolerance_input's parent binary_choice is contained in
# `selected_choices`.
selected_numtolerance_inputs = [
key for key in a_dict if key.partition("_numtolerance_input_")[0] + "bc"
in selected_choices
]
for key in selected_numtolerance_inputs:
numtolerance_choices[key] = a_dict[key]
return (binary_choices, numtolerance_choices)
def _check_student_choices(self, choices):
"""
Compares student submitted checkbox/radiobutton answers against
the correct answers. Returns True or False.
True if all of the correct choices are selected and no incorrect
choices are selected.
"""
student_choices = set(choices)
required_selected = len(self.correct_choices - student_choices) == 0
no_extra_selected = len(student_choices - self.correct_choices) == 0
correct = required_selected and no_extra_selected
return correct
def _check_student_inputs(self, numtolerance_inputs):
"""
Compares student submitted numerical answers against the correct
answers and tolerances.
`numtolerance_inputs` is a dictionary {answer_name: answer_value}.
Performs numerical validation by calling
`compare_with_tolerance()` on all of `numtolerance_inputs`.
A call to `compare_with_tolerance` is made even on values for
decoy_inputs; this validates that they are numerical and raises
an error if the student entered a non-numerical expression.
Returns True if and only if all student inputs are correct.
"""
inputs_correct = True
for answer_name, answer_value in numtolerance_inputs.iteritems():
# If `self.correct_inputs` does not contain an entry for
# `answer_name`, this means that answer_name is a decoy
# input's value, and validation of its numericality is the
# only thing of interest from the later call to
# `compare_with_tolerance`.
params = self.correct_inputs.get(answer_name, {'answer': 0})
correct_ans = params['answer']
# Set the tolerance to '0' if it was not specified in the xml
tolerance = params.get('tolerance', '0')
# Make sure that the staff answer is a valid number
try:
correct_ans = complex(correct_ans)
except ValueError:
log.debug(
"Content error--answer" +
"'{0}' is not a valid complex number".format(correct_ans)
)
raise StudentInputError(
"The Staff answer could not be interpreted as a number."
)
# Compare the student answer to the staff answer, or to 0
# if all that matters is verifying that it is numerical.
try:
partial_correct = compare_with_tolerance(
evaluator(dict(), dict(), answer_value),
correct_ans,
tolerance
)
except:
# Use the traceback-preserving version of re-raising with a
# different type
_, _, trace = sys.exc_info()
raise StudentInputError(
"Could not interpret '{0}' as a number{1}".format(
cgi.escape(answer_value),
trace
)
)
# Ignore the results of the comparisons which were just for
# numerical validation.
if answer_name in self.correct_inputs and not partial_correct:
# If any input is not correct, set the return value to False
inputs_correct = False
return inputs_correct
#-----------------------------------------------------------------------------
# TEMPORARY: List of all response subclasses
......@@ -2116,4 +2443,5 @@ __all__ = [CodeResponse,
MultipleChoiceResponse,
TrueFalseResponse,
JavascriptResponse,
AnnotationResponse]
AnnotationResponse,
ChoiceTextResponse]
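To make the grading path above (`get_score` → `_split_answers_dict` → `_check_student_choices` / `_check_student_inputs`) concrete, here is a small standalone sketch built from the submission in the `_split_answers_dict` docstring. It mirrors, but is not, the `ChoiceTextResponse` code, and the "correct" set at the end is an assumed example.

# Illustrative sketch of splitting a submission and checking the binary choices.
a_dict = {
    '1_2_1_choiceinput_0bc': '1_2_1_choiceinput_0bc',
    '1_2_1_choiceinput_0_numtolerance_input_0': '1',
    '1_2_1_choiceinput_0_numtolerance_input_1': '2',
    '1_2_1_choiceinput_1_numtolerance_input_0': '3',
}

# Keys ending in 'bc' are the selected binary choices.
selected_choices = [key for key in a_dict if key.endswith('bc')]
binary_choices = {key: a_dict[key] for key in selected_choices}

# A numtolerance_input is kept only if its parent choice was selected.
numtolerance_choices = {
    key: a_dict[key] for key in a_dict
    if key.partition('_numtolerance_input_')[0] + 'bc' in selected_choices
}

assert binary_choices == {'1_2_1_choiceinput_0bc': '1_2_1_choiceinput_0bc'}
assert sorted(numtolerance_choices) == [
    '1_2_1_choiceinput_0_numtolerance_input_0',
    '1_2_1_choiceinput_0_numtolerance_input_1',
]

# The binary-choice check amounts to set equality with the correct choices.
correct_choices = {'1_2_1_choiceinput_0bc'}  # assumed example
assert set(binary_choices) == correct_choices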
<% element_checked = False %>
% for choice_id, _ in choices:
<%choice_id = choice_id %>
%if choice_id in value:
<% element_checked = True %>
%endif
%endfor
<section id="choicetextinput_${id}" class="choicetextinput">
<form class="choicetextgroup capa_inputtype" id="inputtype_${id}">
<div class="script_placeholder" data-src="/static/js/capa/choicetextinput.js"/>
<div class="indicator_container">
% if input_type == 'checkbox' or not element_checked:
% if status == 'unsubmitted':
<span class="unanswered" style="display:inline-block;" id="status_${id}"></span>
% elif status == 'correct':
<span class="correct" id="status_${id}"></span>
% elif status == 'incorrect':
<span class="incorrect" id="status_${id}"></span>
% elif status == 'incomplete':
<span class="incorrect" id="status_${id}"></span>
% endif
% endif
</div>
<fieldset>
% for choice_id, choice_description in choices:
<%choice_id= choice_id %>
<section id="forinput${choice_id}"
% if input_type == 'radio' and choice_id in value :
<%
if status == 'correct':
correctness = 'correct'
elif status == 'incorrect':
correctness = 'incorrect'
else:
correctness = None
%>
% if correctness:
class="choicetextgroup_${correctness}"
% endif
% endif
>
<input class="ctinput" type="${input_type}" name="choiceinput_${id}" id="${choice_id}" value="${choice_id}"
% if choice_id in value:
checked="true"
% endif
/>
% for content_node in choice_description:
% if content_node['type'] == 'text':
<span class="mock_label">
${content_node['contents']}
</span>
% else:
<% my_id = content_node.get('contents','') %>
<% my_val = value.get(my_id,'') %>
<input class="ctinput" type="text" name="${content_node['contents']}" id="${content_node['contents']}" value="${my_val|h} "/>
%endif
<span class="mock_label">
${content_node['tail_text']}
</span>
% endfor
<p id="answer_${choice_id}" class="answer"></p>
</section>
% endfor
<span id="answer_${id}"></span>
</fieldset>
<input class= "choicetextvalue" type="hidden" name="input_${id}{}" id="input_${id}" value="${value|h}" />
% if show_correctness == "never" and (value or status not in ['unsubmitted']):
<div class="capa_alert">${submitted_message}</div>
%endif
</form>
</section>
......@@ -779,3 +779,109 @@ class SymbolicResponseXMLFactory(ResponseXMLFactory):
def create_input_element(self, **kwargs):
return ResponseXMLFactory.textline_input_xml(**kwargs)
class ChoiceTextResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <choicetextresponse> xml """
def create_response_element(self, **kwargs):
""" Create a <choicetextresponse> element """
return etree.Element("choicetextresponse")
def create_input_element(self, **kwargs):
""" Create a <checkboxgroup> element.
choices can be specified in the following format:
[("true", [{"answer": "5", "tolerance": 0}]),
("false", [{"answer": "5", "tolerance": 0}])
]
This indicates that the first checkbox/radio is correct and that it
contains a numtolerance_input with an answer of 5 and a tolerance of 0.
It also indicates that there is a second, incorrect radio button
or checkbox with a numtolerance_input.
"""
choices = kwargs.get('choices', [("true", {})])
choice_inputs = []
# Ensure that the first element of choices is an ordered
# collection. It will start as a list, a tuple, or not a Container.
if type(choices[0]) not in [list, tuple]:
choices = [choices]
for choice in choices:
correctness, answers = choice
numtolerance_inputs = []
# If the current `choice` contains any ("answer": number)
# elements, turn those into numtolerance_inputs.
if answers:
# `answers` will be a list or tuple of answers or a single
# answer, representing the answers for numtolerance_inputs
# inside of this specific choice.
# Make sure that `answers` is an ordered collection for
# convenience.
if type(answers) not in [list, tuple]:
answers = [answers]
numtolerance_inputs = [
self._create_numtolerance_input_element(answer)
for answer in answers
]
choice_inputs.append(
self._create_choice_element(
correctness=correctness,
inputs=numtolerance_inputs
)
)
# Default type is 'radiotextgroup'
input_type = kwargs.get('type', 'radiotextgroup')
input_element = etree.Element(input_type)
for ind, choice in enumerate(choice_inputs):
# Give each choice text equal to its position (0, 1, 2...)
choice.text = "choice_{0}".format(ind)
input_element.append(choice)
return input_element
def _create_choice_element(self, **kwargs):
"""
Creates a choice element for a choicetext problem.
Defaults to a correct choice with no numtolerance_input
"""
text = kwargs.get('text', '')
correct = kwargs.get('correctness', "true")
inputs = kwargs.get('inputs', [])
choice_element = etree.Element("choice")
choice_element.set("correct", correct)
choice_element.text = text
for inp in inputs:
# Add all of the inputs as children of this choice
choice_element.append(inp)
return choice_element
def _create_numtolerance_input_element(self, params):
"""
Creates a <numtolerance_input/> or <decoy_input/> element with
optionally specified tolerance and answer.
"""
answer = params['answer'] if 'answer' in params else None
# If there is not an answer specified, then create a <decoy_input/>
# otherwise create a <numtolerance_input/> and set its tolerance
# and answer attributes.
if answer:
text_input = etree.Element("numtolerance_input")
text_input.set('answer', answer)
# If a tolerance was specified, use it; otherwise
# set the tolerance to "0".
text_input.set(
'tolerance',
params['tolerance'] if 'tolerance' in params else "0"
)
else:
text_input = etree.Element("decoy_input")
return text_input
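For reference, the `choices` format documented in `create_input_element` corresponds to XML of roughly the shape below. This sketch builds that shape by hand with lxml purely as an illustration of the documented output; it does not call the factory itself.

from lxml import etree

# Expected shape for choices=[("true", [{"answer": "5", "tolerance": 0}])]
# with type='radiotextgroup' (illustration only).
group = etree.Element('radiotextgroup')
choice = etree.SubElement(group, 'choice', correct='true')
choice.text = 'choice_0'
etree.SubElement(choice, 'numtolerance_input', answer='5', tolerance='0')

print(etree.tostring(group))
# b'<radiotextgroup><choice correct="true">choice_0<numtolerance_input answer="5" tolerance="0"/></choice></radiotextgroup>'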
......@@ -714,3 +714,170 @@ class DragAndDropTemplateTest(TemplateTestCase):
# escaping the HTML. We should be able to traverse the XML tree.
xpath = "//div[@class='drag_and_drop_problem_json']/p/b"
self.assert_has_text(xml, xpath, 'HTML')
class ChoiceTextGroupTemplateTest(TemplateTestCase):
"""Test mako template for `<choicetextgroup>` input"""
TEMPLATE_NAME = 'choicetext.html'
VALUE_DICT = {'1_choiceinput_0bc': '1_choiceinput_0bc', '1_choiceinput_0_textinput_0': '0',
'1_choiceinput_1_textinput_0': '0'}
EMPTY_DICT = {'1_choiceinput_0_textinput_0': '',
'1_choiceinput_1_textinput_0': ''}
BOTH_CHOICE_CHECKBOX = {'1_choiceinput_0bc': 'choiceinput_0',
'1_choiceinput_1bc': 'choiceinput_1',
'1_choiceinput_0_textinput_0': '0',
'1_choiceinput_1_textinput_0': '0'}
WRONG_CHOICE_CHECKBOX = {'1_choiceinput_1bc': 'choiceinput_1',
'1_choiceinput_0_textinput_0': '0',
'1_choiceinput_1_textinput_0': '0'}
def setUp(self):
choices = [('1_choiceinput_0bc',
[{'tail_text': '', 'type': 'text', 'value': '', 'contents': ''},
{'tail_text': '', 'type': 'textinput', 'value': '', 'contents': 'choiceinput_0_textinput_0'}]),
('1_choiceinput_1bc', [{'tail_text': '', 'type': 'text', 'value': '', 'contents': ''},
{'tail_text': '', 'type': 'textinput', 'value': '', 'contents': 'choiceinput_1_textinput_0'}])]
self.context = {'id': '1',
'choices': choices,
'status': 'correct',
'input_type': 'radio',
'value': self.VALUE_DICT}
super(ChoiceTextGroupTemplateTest, self).setUp()
def test_grouping_tag(self):
"""
Tests whether we are using a section or a label to wrap choice elements.
Section is used for checkbox, so inputting text does not deselect the choice.
"""
input_tags = ('radio', 'checkbox')
self.context['status'] = 'correct'
xpath = "//section[@id='forinput1_choiceinput_0bc']"
self.context['value'] = {}
for input_type in input_tags:
self.context['input_type'] = input_type
xml = self.render_to_xml(self.context)
self.assert_has_xpath(xml, xpath, self.context)
def test_problem_marked_correct(self):
"""Test conditions under which the entire problem
(not a particular option) is marked correct"""
self.context['status'] = 'correct'
self.context['input_type'] = 'checkbox'
self.context['value'] = self.VALUE_DICT
# Should mark the entire problem correct
xml = self.render_to_xml(self.context)
xpath = "//div[@class='indicator_container']/span[@class='correct']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark individual options
self.assert_no_xpath(xml, "//label[@class='choicetextgroup_incorrect']",
self.context)
self.assert_no_xpath(xml, "//label[@class='choicetextgroup_correct']",
self.context)
def test_problem_marked_incorrect(self):
"""Test all conditions under which the entire problem
(not a particular option) is marked incorrect"""
grouping_tags = {'radio': 'label', 'checkbox': 'section'}
conditions = [
{'status': 'incorrect', 'input_type': 'radio', 'value': {}},
{'status': 'incorrect', 'input_type': 'checkbox', 'value': self.WRONG_CHOICE_CHECKBOX},
{'status': 'incorrect', 'input_type': 'checkbox', 'value': self.BOTH_CHOICE_CHECKBOX},
{'status': 'incorrect', 'input_type': 'checkbox', 'value': self.VALUE_DICT},
{'status': 'incomplete', 'input_type': 'radio', 'value': {}},
{'status': 'incomplete', 'input_type': 'checkbox', 'value': self.WRONG_CHOICE_CHECKBOX},
{'status': 'incomplete', 'input_type': 'checkbox', 'value': self.BOTH_CHOICE_CHECKBOX},
{'status': 'incomplete', 'input_type': 'checkbox', 'value': self.VALUE_DICT}]
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//div[@class='indicator_container']/span[@class='incorrect']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark individual options
grouping_tag = grouping_tags[test_conditions['input_type']]
self.assert_no_xpath(xml,
"//{0}[@class='choicetextgroup_incorrect']".format(grouping_tag),
self.context)
self.assert_no_xpath(xml,
"//{0}[@class='choicetextgroup_correct']".format(grouping_tag),
self.context)
def test_problem_marked_unsubmitted(self):
"""Test all conditions under which the entire problem
(not a particular option) is marked unanswered"""
grouping_tags = {'radio': 'label', 'checkbox': 'section'}
conditions = [
{'status': 'unsubmitted', 'input_type': 'radio', 'value': {}},
{'status': 'unsubmitted', 'input_type': 'radio', 'value': self.EMPTY_DICT},
{'status': 'unsubmitted', 'input_type': 'checkbox', 'value': {}},
{'status': 'unsubmitted', 'input_type': 'checkbox', 'value': self.EMPTY_DICT},
{'status': 'unsubmitted', 'input_type': 'checkbox', 'value': self.VALUE_DICT},
{'status': 'unsubmitted', 'input_type': 'checkbox', 'value': self.BOTH_CHOICE_CHECKBOX}]
self.context['status'] = 'unanswered'
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//div[@class='indicator_container']/span[@class='unanswered']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark individual options
grouping_tag = grouping_tags[test_conditions['input_type']]
self.assert_no_xpath(xml,
"//{0}[@class='choicetextgroup_incorrect']".format(grouping_tag),
self.context)
self.assert_no_xpath(xml,
"//{0}[@class='choicetextgroup_correct']".format(grouping_tag),
self.context)
def test_option_marked_correct(self):
"""Test conditions under which a particular option
(not the entire problem) is marked correct."""
conditions = [
{'input_type': 'radio', 'value': self.VALUE_DICT}]
self.context['status'] = 'correct'
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//section[@id='forinput1_choiceinput_0bc' and\
@class='choicetextgroup_correct']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark the whole problem
xpath = "//div[@class='indicator_container']/span"
self.assert_no_xpath(xml, xpath, self.context)
def test_option_marked_incorrect(self):
"""Test conditions under which a particular option
(not the entire problem) is marked incorrect."""
conditions = [
{'input_type': 'radio', 'value': self.VALUE_DICT}]
self.context['status'] = 'incorrect'
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//section[@id='forinput1_choiceinput_0bc' and\
@class='choicetextgroup_incorrect']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark the whole problem
xpath = "//div[@class='indicator_container']/span"
self.assert_no_xpath(xml, xpath, self.context)
......@@ -860,3 +860,94 @@ class AnnotationInputTest(unittest.TestCase):
self.maxDiff = None
self.assertDictEqual(context, expected)
class TestChoiceText(unittest.TestCase):
"""
Tests for checkboxtextgroup inputs
"""
@staticmethod
def build_choice_element(node_type, contents, tail_text, value):
"""
Builds a content node for a choice.
"""
# When xml is being parsed, numtolerance_input and decoy_input tags map to textinput type
# in order to provide the template with correct rendering information.
if node_type in ('numtolerance_input', 'decoy_input'):
node_type = 'textinput'
choice = {'type': node_type, 'contents': contents, 'tail_text': tail_text, 'value': value}
return choice
def check_group(self, tag, choice_tag, expected_input_type):
"""
Build a radio or checkbox group, parse it and check the results against the
expected output.
`tag` should be 'checkboxtextgroup' or 'radiotextgroup'
`choice_tag` is either 'choice' for proper xml, or any other value to trigger an error.
`expected_input_type` is either 'radio' or 'checkbox'.
"""
xml_str = """
<{tag}>
<{choice_tag} correct="false" name="choiceinput_0">this is<numtolerance_input name="choiceinput_0_textinput_0"/>false</{choice_tag}>
<choice correct="true" name="choiceinput_1">Is a number<decoy_input name="choiceinput_1_textinput_0"/><text>!</text></choice>
</{tag}>
""".format(tag=tag, choice_tag=choice_tag)
element = etree.fromstring(xml_str)
state = {
'value': '{}',
'id': 'choicetext_input',
'status': 'answered'
}
first_input = self.build_choice_element('numtolerance_input', 'choiceinput_0_textinput_0', 'false', '')
second_input = self.build_choice_element('decoy_input', 'choiceinput_1_textinput_0', '', '')
first_choice_content = self.build_choice_element('text', 'this is', '', '')
second_choice_content = self.build_choice_element('text', 'Is a number', '', '')
second_choice_text = self.build_choice_element('text', "!", '', '')
choices = [
('choiceinput_0', [first_choice_content, first_input]),
('choiceinput_1', [second_choice_content, second_input, second_choice_text])
]
expected = {
'msg': '',
'input_type': expected_input_type,
'choices': choices,
'show_correctness': 'always',
'submitted_message': 'Answer received.'
}
expected.update(state)
the_input = lookup_tag(tag)(test_system(), element, state)
context = the_input._get_render_context()
self.assertEqual(context, expected)
def test_radiotextgroup(self):
"""
Test that a properly formatted radiotextgroup problem generates
expected outputs
"""
self.check_group('radiotextgroup', 'choice', 'radio')
def test_checkboxtextgroup(self):
"""
Test that a properly formatted checkboxtextgroup problem generates
expected output
"""
self.check_group('checkboxtextgroup', 'choice', 'checkbox')
def test_invalid_tag(self):
"""
Test to ensure that an unrecognized inputtype tag causes an error
"""
with self.assertRaises(Exception):
self.check_group('invalid', 'choice', 'checkbox')
def test_invalid_input_tag(self):
"""
Test to ensure having a tag other than <choice> inside of
a checkbox or radiotextgroup problem raises an error.
"""
with self.assertRaisesRegexp(Exception, "Error in xml"):
self.check_group('checkboxtextgroup', 'invalid', 'checkbox')
......@@ -1429,3 +1429,357 @@ class AnnotationResponseTest(ResponseTest):
msg="%s should be marked %s" % (answer_id, expected_correctness))
self.assertEqual(expected_points, actual_points,
msg="%s should have %d points" % (answer_id, expected_points))
class ChoiceTextResponseTest(ResponseTest):
"""
Class containing setup and tests for ChoiceText responsetype.
"""
from response_xml_factory import ChoiceTextResponseXMLFactory
xml_factory_class = ChoiceTextResponseXMLFactory
# `TEST_INPUTS` is a dictionary mapping from
# test_name to a representation of inputs for a test problem.
TEST_INPUTS = {
"1_choice_0_input_correct": [(True, [])],
"1_choice_0_input_incorrect": [(False, [])],
"1_choice_0_input_invalid_choice": [(False, []), (True, [])],
"1_choice_1_input_correct": [(True, ["123"])],
"1_input_script_correct": [(True, ["2"])],
"1_input_script_incorrect": [(True, ["3.25"])],
"1_choice_2_inputs_correct": [(True, ["123", "456"])],
"1_choice_2_inputs_tolerance": [(True, ["123 + .5", "456 + 9"])],
"1_choice_2_inputs_1_wrong": [(True, ["0", "456"])],
"1_choice_2_inputs_both_wrong": [(True, ["0", "0"])],
"1_choice_2_inputs_inputs_blank": [(True, ["", ""])],
"1_choice_2_inputs_empty": [(False, [])],
"1_choice_2_inputs_fail_tolerance": [(True, ["123 + 1.5", "456 + 9"])],
"1_choice_1_input_within_tolerance": [(True, ["122.5"])],
"1_choice_1_input_answer_incorrect": [(True, ["345"])],
"1_choice_1_input_choice_incorrect": [(False, ["123"])],
"2_choices_0_inputs_correct": [(False, []), (True, [])],
"2_choices_0_inputs_incorrect": [(True, []), (False, [])],
"2_choices_0_inputs_blank": [(False, []), (False, [])],
"2_choices_1_input_1_correct": [(False, []), (True, ["123"])],
"2_choices_1_input_1_incorrect": [(True, []), (False, ["123"])],
"2_choices_1_input_input_wrong": [(False, []), (True, ["321"])],
"2_choices_1_input_1_blank": [(False, []), (False, [])],
"2_choices_1_input_2_correct": [(True, []), (False, ["123"])],
"2_choices_1_input_2_incorrect": [(False, []), (True, ["123"])],
"2_choices_2_inputs_correct": [(True, ["123"]), (False, [])],
"2_choices_2_inputs_wrong_choice": [(False, ["123"]), (True, [])],
"2_choices_2_inputs_wrong_input": [(True, ["321"]), (False, [])]
}
# `TEST_SCENARIOS` is a dictionary of the form
# {Test_Name" : (Test_Problem_name, correctness)}
# correctness represents whether the problem should be graded as
# correct or incorrect when the test is run.
TEST_SCENARIOS = {
"1_choice_0_input_correct": ("1_choice_0_input", "correct"),
"1_choice_0_input_incorrect": ("1_choice_0_input", "incorrect"),
"1_choice_0_input_invalid_choice": ("1_choice_0_input", "incorrect"),
"1_input_script_correct": ("1_input_script", "correct"),
"1_input_script_incorrect": ("1_input_script", "incorrect"),
"1_choice_2_inputs_correct": ("1_choice_2_inputs", "correct"),
"1_choice_2_inputs_tolerance": ("1_choice_2_inputs", "correct"),
"1_choice_2_inputs_1_wrong": ("1_choice_2_inputs", "incorrect"),
"1_choice_2_inputs_both_wrong": ("1_choice_2_inputs", "incorrect"),
"1_choice_2_inputs_inputs_blank": ("1_choice_2_inputs", "incorrect"),
"1_choice_2_inputs_empty": ("1_choice_2_inputs", "incorrect"),
"1_choice_2_inputs_fail_tolerance": ("1_choice_2_inputs", "incorrect"),
"1_choice_1_input_correct": ("1_choice_1_input", "correct"),
"1_choice_1_input_within_tolerance": ("1_choice_1_input", "correct"),
"1_choice_1_input_answer_incorrect": ("1_choice_1_input", "incorrect"),
"1_choice_1_input_choice_incorrect": ("1_choice_1_input", "incorrect"),
"2_choices_0_inputs_correct": ("2_choices_0_inputs", "correct"),
"2_choices_0_inputs_incorrect": ("2_choices_0_inputs", "incorrect"),
"2_choices_0_inputs_blank": ("2_choices_0_inputs", "incorrect"),
"2_choices_1_input_1_correct": ("2_choices_1_input_1", "correct"),
"2_choices_1_input_1_incorrect": ("2_choices_1_input_1", "incorrect"),
"2_choices_1_input_input_wrong": ("2_choices_1_input_1", "incorrect"),
"2_choices_1_input_1_blank": ("2_choices_1_input_1", "incorrect"),
"2_choices_1_input_2_correct": ("2_choices_1_input_2", "correct"),
"2_choices_1_input_2_incorrect": ("2_choices_1_input_2", "incorrect"),
"2_choices_2_inputs_correct": ("2_choices_2_inputs", "correct"),
"2_choices_2_inputs_wrong_choice": ("2_choices_2_inputs", "incorrect"),
"2_choices_2_inputs_wrong_input": ("2_choices_2_inputs", "incorrect")
}
# Dictionary that maps from problem_name to arguments for
# _make_problem, that will create the problem.
TEST_PROBLEM_ARGS = {
"1_choice_0_input": {"choices": ("true", {}), "script": ''},
"1_choice_1_input": {
"choices": ("true", {"answer": "123", "tolerance": "1"}),
"script": ''
},
"1_input_script": {
"choices": ("true", {"answer": "$computed_response", "tolerance": "1"}),
"script": "computed_response = math.sqrt(4)"
},
"1_choice_2_inputs": {
"choices": [
(
"true", (
{"answer": "123", "tolerance": "1"},
{"answer": "456", "tolerance": "10"}
)
)
],
"script": ''
},
"2_choices_0_inputs": {
"choices": [("false", {}), ("true", {})],
"script": ''
},
"2_choices_1_input_1": {
"choices": [
("false", {}), ("true", {"answer": "123", "tolerance": "0"})
],
"script": ''
},
"2_choices_1_input_2": {
"choices": [("true", {}), ("false", {"answer": "123", "tolerance": "0"})],
"script": ''
},
"2_choices_2_inputs": {
"choices": [
("true", {"answer": "123", "tolerance": "0"}),
("false", {"answer": "999", "tolerance": "0"})
],
"script": ''
}
}
def _make_problem(self, choices, in_type='radiotextgroup', script=''):
"""
Convenience method to fill in default values for script and
type if needed, then call self.build_problem
"""
return self.build_problem(
choices=choices,
type=in_type,
script=script
)
def _make_answer_dict(self, choice_list):
"""
Convenience method to make generation of answers less tedious.
Pass in an iterable argument with elements of the form: [bool, [ans,]]
Will generate an answer dict for those options.
"""
answer_dict = {}
for index, choice_answers_pair in enumerate(choice_list):
# `choice` indicates whether this choice is selected
# `answers` contains a list of answers to the text inputs for the choice
choice, answers = choice_answers_pair
if choice:
# Radio/Checkbox inputs in choicetext problems follow
# a naming convention that gives them names ending with "bc"
choice_id = "1_2_1_choiceinput_{index}bc".format(index=index)
choice_value = "choiceinput_{index}".format(index=index)
answer_dict[choice_id] = choice_value
# Build the names for the numtolerance_inputs and add their answers
# to `answer_dict`.
for ind, answer in enumerate(answers):
# In `answer_id` `index` represents the ordinality of the
# choice and `ind` represents the ordinality of the
# numtolerance_input inside the parent choice.
answer_id = "1_2_1_choiceinput_{index}_numtolerance_input_{ind}".format(
index=index,
ind=ind
)
answer_dict[answer_id] = answer
return answer_dict
def test_invalid_xml(self):
"""
Test that build problem raises errors for invalid options
"""
with self.assertRaises(Exception):
self.build_problem(type="invalidtextgroup")
def test_valid_xml(self):
"""
Test that `build_problem` builds valid xml
"""
self.build_problem()
self.assertTrue(True)
def test_unchecked_input_not_validated(self):
"""
Test that a student can have a non numeric answer in an unselected
choice without causing an error to be raised when the problem is
checked.
"""
two_choice_two_input = self._make_problem(
[
("true", {"answer": "123", "tolerance": "1"}),
("false", {})
],
"checkboxtextgroup"
)
self.assert_grade(
two_choice_two_input,
self._make_answer_dict([(True, ["1"]), (False, ["Platypus"])]),
"incorrect"
)
def test_interpret_error(self):
"""
Test that student answers that cannot be interpreted as numbers
cause the response type to raise an error.
"""
two_choice_two_input = self._make_problem(
[
("true", {"answer": "123", "tolerance": "1"}),
("false", {})
],
"checkboxtextgroup"
)
with self.assertRaisesRegexp(StudentInputError, "Could not interpret"):
# Test that error is raised for input in selected correct choice.
self.assert_grade(
two_choice_two_input,
self._make_answer_dict([(True, ["Platypus"])]),
"correct"
)
with self.assertRaisesRegexp(StudentInputError, "Could not interpret"):
# Test that error is raised for input in selected incorrect choice.
self.assert_grade(
two_choice_two_input,
self._make_answer_dict([(True, ["1"]), (True, ["Platypus"])]),
"correct"
)
def test_staff_answer_error(self):
broken_problem = self._make_problem(
[("true", {"answer": "Platypus", "tolerance": "0"}),
("true", {"answer": "edX", "tolerance": "0"})
],
"checkboxtextgroup"
)
with self.assertRaisesRegexp(
StudentInputError,
"The Staff answer could not be interpreted as a number."
):
self.assert_grade(
broken_problem,
self._make_answer_dict(
[(True, ["1"]), (True, ["1"])]
),
"correct"
)
def test_radio_grades(self):
"""
Test that confirms correct operation of grading when the inputtag is
radiotextgroup.
"""
for name, inputs in self.TEST_INPUTS.iteritems():
# Turn submission into the form expected when grading this problem.
submission = self._make_answer_dict(inputs)
# Look up the problem_name, and whether this test problem
# and inputs should be graded as correct or incorrect.
problem_name, correctness = self.TEST_SCENARIOS[name]
# Load the args needed to build the problem for this test.
problem_args = self.TEST_PROBLEM_ARGS[problem_name]
test_choices = problem_args["choices"]
test_script = problem_args["script"]
# Build the actual problem for the test.
test_problem = self._make_problem(test_choices, 'radiotextgroup', test_script)
# Make sure the actual grade matches the expected grade.
self.assert_grade(
test_problem,
submission,
correctness,
msg="{0} should be {1}".format(
name,
correctness
)
)
def test_checkbox_grades(self):
"""
Test that confirms correct operation of grading when the inputtag is
checkboxtextgroup.
"""
# Dictionary from name of test_scenario to (problem_name, correctness)
# Correctness is used to test whether the problem was graded properly
scenarios = {
"2_choices_correct": ("checkbox_two_choices", "correct"),
"2_choices_incorrect": ("checkbox_two_choices", "incorrect"),
"2_choices_2_inputs_correct": (
"checkbox_2_choices_2_inputs",
"correct"
),
"2_choices_2_inputs_missing_choice": (
"checkbox_2_choices_2_inputs",
"incorrect"
),
"2_choices_2_inputs_wrong_input": (
"checkbox_2_choices_2_inputs",
"incorrect"
)
}
# Dictionary scenario_name: test_inputs
inputs = {
"2_choices_correct": [(True, []), (True, [])],
"2_choices_incorrect": [(True, []), (False, [])],
"2_choices_2_inputs_correct": [(True, ["123"]), (True, ["456"])],
"2_choices_2_inputs_missing_choice": [
(True, ["123"]), (False, ["456"])
],
"2_choices_2_inputs_wrong_input": [
(True, ["123"]), (True, ["654"])
]
}
# Two choice zero input problem with both choices being correct.
checkbox_two_choices = self._make_problem(
[("true", {}), ("true", {})], "checkboxtextgroup"
)
# Two choice two input problem with both choices correct.
checkbox_two_choices_two_inputs = self._make_problem(
[("true", {"answer": "123", "tolerance": "0"}),
("true", {"answer": "456", "tolerance": "0"})
],
"checkboxtextgroup"
)
# Dictionary problem_name: problem
problems = {
"checkbox_two_choices": checkbox_two_choices,
"checkbox_2_choices_2_inputs": checkbox_two_choices_two_inputs
}
for name, inputs in inputs.iteritems():
submission = self._make_answer_dict(inputs)
# Load the test problem's name and desired correctness
problem_name, correctness = scenarios[name]
# Load the problem
problem = problems[problem_name]
# Make sure the actual grade matches the expected grade
self.assert_grade(
problem,
submission,
correctness,
msg="{0} should be {1}".format(name, correctness)
)
......@@ -776,6 +776,13 @@ class CapaModule(CapaFields, XModule):
then the output dict would contain {'1': ['test'] }
(the value is a list).
Some other inputs, such as ChoiceTextInput, expect a dict of values in the returned
dict. If the key ends with '{}', then we will assume that the value is a JSON-encoded
dict and deserialize it.
For example, if the `data` dict contains {'input_1{}': '{"1_2_1": 1}'}
then the output dict would contain {'1': {"1_2_1": 1} }
(the value is a dictionary).
Raises an exception if:
-A key in the `data` dictionary does not contain at least one underscore
......@@ -802,11 +809,22 @@ class CapaModule(CapaFields, XModule):
# the same form input (e.g. checkbox inputs). The convention is that
# if the name ends with '[]' (which looks like an array), then the
# answer will be an array.
# if the name ends with '{}' (which looks like a dict),
# then the answer will be a dict.
is_list_key = name.endswith('[]')
name = name[:-2] if is_list_key else name
is_dict_key = name.endswith('{}')
name = name[:-2] if is_list_key or is_dict_key else name
if is_list_key:
val = data.getlist(key)
elif is_dict_key:
try:
val = json.loads(data[key])
# If the submission wasn't deserializable, raise an error.
except(KeyError, ValueError):
raise ValueError(
u"Invalid submission: {val} for {key}".format(val=data[key], key=key)
)
else:
val = data[key]
......
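A minimal standalone sketch of the key-suffix convention handled above: '[]' marks list-valued inputs and '{}' marks JSON-encoded dict-valued inputs (the choicetext case). It mirrors the parsing shown in the hunk but operates on plain values instead of the request data object, and the helper name is ours, not the platform's.

import json

def decode_value(name, raw):
    """Return (stripped_name, value) using the '[]'/'{}' suffix convention."""
    if name.endswith('[]'):
        # List-valued inputs (e.g. checkbox groups); raw is already a list here.
        return name[:-2], list(raw)
    if name.endswith('{}'):
        # Dict-valued inputs (e.g. choicetext); raw is a JSON-encoded dict.
        try:
            return name[:-2], json.loads(raw)
        except (TypeError, ValueError):
            raise ValueError(
                u"Invalid submission: {val} for {key}".format(val=raw, key=name)
            )
    return name, raw

print(decode_value('input_1_2_1{}', '{"1_2_1_choiceinput_0bc": "choiceinput_0"}'))
# ('input_1_2_1', {'1_2_1_choiceinput_0bc': 'choiceinput_0'})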
......@@ -929,4 +929,32 @@ section.problem {
}
}
}
.choicetextgroup{
input[type="text"]{
margin-bottom: 0.5em;
}
@extend .choicegroup;
label.choicetextgroup_correct, section.choicetextgroup_correct{
@extend label.choicegroup_correct;
input[type="text"] {
border-color: green;
}
}
label.choicetextgroup_incorrect, section.choicetextgroup_incorrect{
@extend label.choicegroup_incorrect;
}
label.choicetextgroup_show_correct, section.choicetextgroup_show_correct{
&:after{
content: url('../images/correct-icon.png');
margin-left:15px;
}
}
span.mock_label{
cursor : default;
}
}
}
......@@ -223,6 +223,58 @@ describe 'Problem', ->
expect($('label[for="input_1_1_3"]')).toHaveAttr 'correct_answer', 'true'
expect($('label[for="input_1_2_1"]')).not.toHaveAttr 'correct_answer', 'true'
describe 'radio text question', ->
radio_text_xml='''
<section class="problem">
<div><p></p><span><section id="choicetextinput_1_2_1" class="choicetextinput">
<form class="choicetextgroup capa_inputtype" id="inputtype_1_2_1">
<div class="indicator_container">
<span class="unanswered" style="display:inline-block;" id="status_1_2_1"></span>
</div>
<fieldset>
<section id="forinput1_2_1_choiceinput_0bc">
<input class="ctinput" type="radio" name="choiceinput_1_2_1" id="1_2_1_choiceinput_0bc" value="choiceinput_0"">
<input class="ctinput" type="text" name="choiceinput_0_textinput_0" id="1_2_1_choiceinput_0_textinput_0" value=" ">
<p id="answer_1_2_1_choiceinput_0bc" class="answer"></p>
</section>
<section id="forinput1_2_1_choiceinput_1bc">
<input class="ctinput" type="radio" name="choiceinput_1_2_1" id="1_2_1_choiceinput_1bc" value="choiceinput_1" >
<input class="ctinput" type="text" name="choiceinput_1_textinput_0" id="1_2_1_choiceinput_1_textinput_0" value=" " >
<p id="answer_1_2_1_choiceinput_1bc" class="answer"></p>
</section>
<section id="forinput1_2_1_choiceinput_2bc">
<input class="ctinput" type="radio" name="choiceinput_1_2_1" id="1_2_1_choiceinput_2bc" value="choiceinput_2" >
<input class="ctinput" type="text" name="choiceinput_2_textinput_0" id="1_2_1_choiceinput_2_textinput_0" value=" " >
<p id="answer_1_2_1_choiceinput_2bc" class="answer"></p>
</section></fieldset><input class="choicetextvalue" type="hidden" name="input_1_2_1" id="input_1_2_1"></form>
</section></span></div>
</section>
'''
beforeEach ->
# Append a radiotextresponse problem to the problem, so we can check its JavaScript functionality
@problem.el.prepend(radio_text_xml)
it 'sets the correct class on the section for the correct choice', ->
spyOn($, 'postWithPrefix').andCallFake (url, callback) ->
callback answers: "1_2_1": ["1_2_1_choiceinput_0bc"], "1_2_1_choiceinput_0bc": "3"
@problem.show()
expect($('#forinput1_2_1_choiceinput_0bc').attr('class')).toEqual(
'choicetextgroup_show_correct')
expect($('#answer_1_2_1_choiceinput_0bc').text()).toEqual('3')
expect($('#answer_1_2_1_choiceinput_1bc').text()).toEqual('')
expect($('#answer_1_2_1_choiceinput_2bc').text()).toEqual('')
it 'Should not disable input fields', ->
spyOn($, 'postWithPrefix').andCallFake (url, callback) ->
callback answers: "1_2_1": ["1_2_1_choiceinput_0bc"], "1_2_1_choiceinput_0bc": "3"
@problem.show()
expect($('input#1_2_1_choiceinput_0bc').attr('disabled')).not.toEqual('disabled')
expect($('input#1_2_1_choiceinput_1bc').attr('disabled')).not.toEqual('disabled')
expect($('input#1_2_1_choiceinput_2bc').attr('disabled')).not.toEqual('disabled')
expect($('input#1_2_1').attr('disabled')).not.toEqual('disabled')
describe 'when the answers are already shown', ->
beforeEach ->
@problem.el.addClass 'showed'
......
......@@ -403,6 +403,14 @@ class @Problem
answer = JSON.parse(answers[answer_id])
display.showAnswer(answer)
choicetextgroup: (element, display, answers) =>
element = $(element)
input_id = element.attr('id').replace(/inputtype_/,'')
answer = answers[input_id]
for choice in answer
element.find("section#forinput#{choice}").addClass 'choicetextgroup_show_correct'
inputtypeHideAnswerMethods:
choicegroup: (element, display) =>
element = $(element)
......@@ -410,3 +418,7 @@ class @Problem
javascriptinput: (element, display) =>
display.hideAnswer()
choicetextgroup: (element, display) =>
element = $(element)
element.find("section[id^='forinput']").removeClass('choicetextgroup_show_correct')
(function () {
var update = function () {
// Whenever a value changes create a new serialized version of this
// problem's inputs and set the hidden input fields value to equal it.
var parent = $(this).closest('.problems-wrapper');
// find the closest parent problems-wrapper and use that as the problem
// grab the input id from the input
// real_input is the hidden input field
var real_input = $('input.choicetextvalue', parent);
var all_inputs = $('.choicetextinput .ctinput', parent);
var user_inputs = {};
$(all_inputs).each(function (index, elt) {
var node = $(elt);
var name = node.attr('id');
var val = node.val();
var radio_value = node.attr('value');
var type = node.attr('type');
var is_checked = node.attr('checked');
if (type === "radio" || type === "checkbox") {
if (is_checked === "checked" || is_checked === "true") {
user_inputs[name] = radio_value;
}
} else {
user_inputs[name] = val;
}
});
var val_string = JSON.stringify(user_inputs);
//this is what gets submitted as the answer, we deserialize it later
real_input.val(val_string);
};
var check_parent = function (event) {
// This looks for the containing choice of a textinput
// and sets it to be checked.
var elt = $(event.target);
var parent_container = elt.closest('section[id^="forinput"]');
var choice = parent_container.find("input[type='checkbox'], input[type='radio']");
choice.attr("checked", "checked");
choice.change();
//need to check it then trigger the change event
};
var imitate_label = function (event) {
// This causes a section to check and uncheck
// a radiobutton/checkbox whenever a user clicks on it
// If the button/checkbox is disabled, nothing happens
var elt = $(event.target);
var parent_container = elt.closest('section[id^="forinput"]');
var choice = parent_container.find("input[type='checkbox'], input[type='radio']");
if (choice.attr("type") === "radio") {
choice.attr("checked", "checked");
} else {
if (choice.attr('checked')) {
choice.prop("checked", false);
} else {
choice.prop("checked", true);
}
}
choice.change();
update();
};
var choices = $('.mock_label');
var inputs = $('.choicetextinput .ctinput');
var text_inputs = $('.choicetextinput .ctinput[type="text"]');
// update on load
inputs.each(update);
// and on every change
// This allows text inside of choices to behave as if they were part of
// a label for the choice's button/checkbox
choices.click(imitate_label);
inputs.bind("change", update);
text_inputs.click(check_parent);
}).call(this);
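The round trip performed by the script above can be summarized in a short sketch: the browser serializes the visible inputs into the hidden `input_${id}{}` field as JSON, and the server-side code in the CapaModule hunk decodes it back into a dict keyed by input id. The names below follow the template's conventions and are illustrative only.

import json

# What the hidden field might hold after a student checks the first radio
# button and types "8" into its text input (illustrative names).
user_inputs = {
    '1_2_1_choiceinput_0bc': 'choiceinput_0',          # checked radio button
    '1_2_1_choiceinput_0_numtolerance_input_0': '8',   # its text input
}
hidden_field_value = json.dumps(user_inputs)  # POSTed as input_1_2_1{}
assert json.loads(hidden_field_value) == user_inputs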
......@@ -21,6 +21,8 @@ Feature: Answer problems
| formula |
| script |
| code |
| radio_text |
| checkbox_text |
Scenario: I can answer a problem incorrectly
Given External graders respond "incorrect"
......@@ -40,6 +42,8 @@ Feature: Answer problems
| formula |
| script |
| code |
| radio_text |
| checkbox_text |
Scenario: I can submit a blank answer
Given I am viewing a "<ProblemType>" problem
......@@ -57,6 +61,8 @@ Feature: Answer problems
| numerical |
| formula |
| script |
| radio_text |
| checkbox_text |
Scenario: I can reset a problem
......@@ -84,6 +90,10 @@ Feature: Answer problems
| formula | incorrect |
| script | correct |
| script | incorrect |
| radio_text | correct |
| radio_text | incorrect |
| checkbox_text | correct |
| checkbox_text | incorrect |
Scenario: I can answer a problem with one attempt correctly and not reset
......
......@@ -18,7 +18,7 @@ from capa.tests.response_xml_factory import OptionResponseXMLFactory, \
ChoiceResponseXMLFactory, MultipleChoiceResponseXMLFactory, \
StringResponseXMLFactory, NumericalResponseXMLFactory, \
FormulaResponseXMLFactory, CustomResponseXMLFactory, \
CodeResponseXMLFactory
CodeResponseXMLFactory, ChoiceTextResponseXMLFactory
from nose.tools import assert_true
......@@ -131,6 +131,32 @@ PROBLEM_DICT = {
'grader_payload': '{"grader": "ps1/Spring2013/test_grader.py"}', },
'correct': ['span.correct'],
'incorrect': ['span.incorrect'],
'unanswered': ['span.unanswered']},
'radio_text': {
'factory': ChoiceTextResponseXMLFactory(),
'kwargs': {
'question_text': 'The correct answer is Choice 0 and input 8',
'type': 'radiotextgroup',
'choices': [("true", {"answer": "8", "tolerance": "1"}),
("false", {"answer": "8", "tolerance": "1"})
]
},
'correct': ['section.choicetextgroup_correct'],
'incorrect': ['span.incorrect', 'section.choicetextgroup_incorrect'],
'unanswered': ['span.unanswered']},
'checkbox_text': {
'factory': ChoiceTextResponseXMLFactory(),
'kwargs': {
'question_text': 'The correct answer is Choice 0 and input 8',
'type': 'checkboxtextgroup',
'choices': [("true", {"answer": "8", "tolerance": "1"}),
("false", {"answer": "8", "tolerance": "1"})
]
},
'correct': ['span.correct'],
'incorrect': ['span.incorrect'],
'unanswered': ['span.unanswered']}
}
......@@ -196,6 +222,19 @@ def answer_problem(problem_type, correctness):
# (configured in the problem XML above)
pass
elif problem_type == 'radio_text' or problem_type == 'checkbox_text':
input_value = "8" if correctness == 'correct' else "5"
choice = "choiceinput_0bc" if correctness == 'correct' else "choiceinput_1bc"
world.css_check(inputfield(problem_type, choice=choice))
world.css_fill(
inputfield(
problem_type,
choice="choiceinput_0_numtolerance_input_0"
),
input_value
)
def problem_has_answer(problem_type, answer_class):
if problem_type == "drop down":
......@@ -244,6 +283,17 @@ def problem_has_answer(problem_type, answer_class):
expected = "x^2+2*x+y" if answer_class == 'correct' else 'x^2'
assert_textfield('formula', expected)
elif problem_type in ("radio_text", "checkbox_text"):
if answer_class == 'blank':
expected = ('', '')
assert_choicetext_values(problem_type, (), expected)
elif answer_class == 'incorrect':
expected = ('5', '')
assert_choicetext_values(problem_type, ["choiceinput_1bc"], expected)
else:
expected = ('8', '')
assert_choicetext_values(problem_type, ["choiceinput_0bc"], expected)
else:
# The other response types use random data,
# which would be difficult to check
......@@ -292,6 +342,12 @@ def inputfield(problem_type, choice=None, input_num=1):
sel = ("input#input_i4x-edx-model_course-problem-%s_2_%s" %
(problem_type.replace(" ", "_"), str(input_num)))
# this is necessary due to naming requirement for this problem type
if problem_type in ("radio_text", "checkbox_text"):
sel = "input#i4x-edx-model_course-problem-{0}_2_{1}".format(
problem_type.replace(" ", "_"), str(input_num)
)
if choice is not None:
base = "_choice_" if problem_type == "multiple choice" else "_"
sel = sel + base + str(choice)
......@@ -325,3 +381,29 @@ def assert_checked(problem_type, choices):
def assert_textfield(problem_type, expected_text, input_num=1):
element_value = world.css_value(inputfield(problem_type, input_num=input_num))
assert element_value == expected_text
def assert_choicetext_values(problem_type, choices, expected_values):
"""
Asserts that only the given choices are checked, and that the given
text fields have the desired values.
"""
# Names of the radio buttons or checkboxes
all_choices = ['choiceinput_0bc', 'choiceinput_1bc']
# Names of the numtolerance_inputs
all_inputs = [
"choiceinput_0_numtolerance_input_0",
"choiceinput_1_numtolerance_input_0"
]
for this_choice in all_choices:
element = world.css_find(inputfield(problem_type, choice=this_choice))
if this_choice in choices:
assert element.checked
else:
assert not element.checked
for (name, expected) in zip(all_inputs, expected_values):
element = world.css_find(inputfield(problem_type, name))
# Remove any trailing spaces that may have been added
assert element.value.strip() == expected