Commit 8b5473b6 by Will Daly

Wrote unit tests for NumericalResponse capa response type

parent ff35d3e6
......@@ -1048,19 +1048,24 @@ def sympy_check2():
# NOTE(review): this excerpt is a scraped diff with old and new lines interleaved;
# the code below reconstructs the commit's resulting (post-change) state.
# The `else:` at the bottom pairs with an `if` above this excerpt (presumably
# `if type(ret) == dict:` -- TODO confirm against the full file).
correct = ['correct'] * len(idset) if ret['ok'] else ['incorrect'] * len(idset)
msg = ret['msg']

def _cleanup_msg_html(msg_html):
    """
    Try to clean up checker-supplied message HTML.

    Wraps the fragment in <html>...</html>, normalizes the '&#60;' entity,
    pretty-prints it via BeautifulSoup/lxml, strips '&#13;' carriage-return
    entities, then removes the wrapper tags again.
    """
    cleaned = msg_html
    # try to clean up message html
    cleaned = '<html>' + cleaned + '</html>'
    cleaned = cleaned.replace('&#60;', '&lt;')
    cleaned = etree.tostring(fromstring_bs(cleaned, convertEntities=None),
                             pretty_print=True)
    cleaned = cleaned.replace('&#13;', '')
    # strip the wrapper <html> tags added above ((?ms) = re.M | re.DOTALL,
    # spelled inline for pre-2.7 compatibility)
    cleaned = re.sub('(?ms)<html>(.*)</html>', '\\1', cleaned)
    return cleaned

# A checker may return a single message or one message per answer id.
if type(msg) == str:
    messages[0] = _cleanup_msg_html(msg)
elif type(msg) == list:
    for i in range(0, len(msg)):
        messages[i] = _cleanup_msg_html(msg[i])
else:
    correct = ['correct'] * len(idset) if ret else ['incorrect'] * len(idset)
......
......@@ -426,3 +426,70 @@ class JavascriptResponseTest(unittest.TestCase):
self.assertEquals(test_lcp.grade_answers(incorrect_answers).get_correctness('1_2_1'), 'incorrect')
self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
from response_xml_factory import NumericalResponseXMLFactory
class NumericalResponseTest(unittest.TestCase):
    """Grading tests for the numericalresponse capa response type."""

    def setUp(self):
        self.xml_factory = NumericalResponseXMLFactory()

    def test_grade_exact(self):
        """With no tolerance, only numerically equal answers are correct."""
        xml = self.xml_factory.build_xml(question_text="What is 2 + 2?",
                                         explanation="The answer is 4",
                                         answer=4)
        correct_responses = ["4", "4.0", "4.00"]
        incorrect_responses = ["", "3.9", "4.1", "0"]
        self._test_grading(xml, correct_responses, incorrect_responses)

    def test_grade_decimal_tolerance(self):
        """An absolute tolerance of 0.1 accepts answers within +/- 0.1."""
        xml = self.xml_factory.build_xml(question_text="What is 2 + 2 approximately?",
                                         explanation="The answer is 4",
                                         answer=4,
                                         tolerance=0.1)
        correct_responses = ["4.0", "4.00", "4.09", "3.91"]
        incorrect_responses = ["", "4.11", "3.89", "0"]
        self._test_grading(xml, correct_responses, incorrect_responses)

    def test_grade_percent_tolerance(self):
        """A '10%' tolerance accepts answers within 10% of the expected value."""
        xml = self.xml_factory.build_xml(question_text="What is 2 + 2 approximately?",
                                         explanation="The answer is 4",
                                         answer=4,
                                         tolerance="10%")
        correct_responses = ["4.0", "4.3", "3.7", "4.30", "3.70"]
        incorrect_responses = ["", "4.5", "3.5", "0"]
        self._test_grading(xml, correct_responses, incorrect_responses)

    def test_grade_with_script(self):
        """The expected answer may come from a $variable computed in a <script>."""
        script_text = "computed_response = math.sqrt(4)"
        xml = self.xml_factory.build_xml(question_text="What is sqrt(4)?",
                                         explanation="The answer is 2",
                                         answer="$computed_response",
                                         script=script_text)
        correct_responses = ["2", "2.0"]
        incorrect_responses = ["", "2.01", "1.99", "0"]
        self._test_grading(xml, correct_responses, incorrect_responses)

    def test_grade_with_script_and_tolerance(self):
        """A script-computed answer combines with an absolute tolerance."""
        script_text = "computed_response = math.sqrt(4)"
        xml = self.xml_factory.build_xml(question_text="What is sqrt(4)?",
                                         explanation="The answer is 2",
                                         answer="$computed_response",
                                         tolerance="0.1",
                                         script=script_text)
        correct_responses = ["2", "2.0", "2.05", "1.95"]
        incorrect_responses = ["", "2.11", "1.89", "0"]
        self._test_grading(xml, correct_responses, incorrect_responses)

    def _test_grading(self, xml, correct_answers, incorrect_answers):
        """
        Assert that every string in *correct_answers* grades 'correct' and
        every string in *incorrect_answers* grades 'incorrect' for the
        problem built from *xml*.

        Each assertion carries the offending input in its failure message so
        a failing run identifies WHICH candidate answer was misgraded.
        """
        problem = lcp.LoncapaProblem(xml, '1', system=test_system)
        for input_str in correct_answers:
            result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1')
            self.assertEqual(result, 'correct',
                             msg="input %r graded %r, expected 'correct'" % (input_str, result))
        for input_str in incorrect_answers:
            result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1')
            self.assertEqual(result, 'incorrect',
                             msg="input %r graded %r, expected 'incorrect'" % (input_str, result))
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment