Commit d6715749 by Felix Sun

Addressed pull request comments.

Fixed broken tests.
parent e6db6e9e
@@ -917,7 +917,7 @@ class NumericalResponse(LoncapaResponse):
     # TODO: add check_hint_condition(self, hxml_set, student_answers)
-    def answer_compare(self, a, b):
+    def compare_answer(self, a, b):
         """
         Outside-facing function that lets us compare two numerical answers,
         with this problem's tolerance.
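The renamed method is the numerical responder's outward-facing comparison hook. The hunk only shows its signature and docstring; below is a minimal, self-contained sketch of the kind of check it describes, assuming both answers have already been evaluated to numbers and that the problem carries a relative tolerance (the helper name and default value are illustrative, not capa's actual internals):

    def roughly_equal(expected, student, tolerance=0.01):
        # Illustrative only: treat two numeric values as equal when they
        # differ by no more than `tolerance`, interpreted relatively.
        return abs(expected - student) <= tolerance * abs(expected)

    assert roughly_equal(42.0, 42.1)        # within 1% of 42
    assert not roughly_equal(42.0, 50.0)    # far outside the tolerance

Judging by the test further down (compare_answer('48', '8*6') is expected to be true), the real method also evaluates each string as an expression before comparing.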
@@ -1870,6 +1870,11 @@ class FormulaResponse(LoncapaResponse):
         return out
     def check_formula(self, expected, given, samples):
+        """
+        Given an expected answer string, a given (student-produced) answer
+        string, and a samples string, return whether the given answer is
+        "correct" or "incorrect".
+        """
         var_dict_list = self.randomize_variables(samples)
         student_result = self.hash_answers(given, var_dict_list)
         instructor_result = self.hash_answers(expected, var_dict_list)
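The added docstring states check_formula's contract; the surrounding lines show how it is met: variables are randomized from the samples string and both expressions are evaluated under the same assignments. Below is a rough, self-contained sketch of that sampling idea, using Python's own eval on trusted strings purely for illustration (capa evaluates expressions with its own safe parser; the function name, trial count, and tolerance here are hypothetical):

    import random

    def formulas_agree(expected, given, variables, trials=10, tolerance=1e-3):
        # Sample random values for every variable and require the two
        # expressions to agree numerically on every trial.
        for _ in range(trials):
            env = {name: random.uniform(1.0, 10.0) for name in variables}
            lhs = eval(expected, {'__builtins__': {}}, env)
            rhs = eval(given, {'__builtins__': {}}, env)
            if abs(lhs - rhs) > tolerance * max(abs(lhs), 1.0):
                return "incorrect"
        return "correct"

    print(formulas_agree("x*(x+1)", "x**2 + x", ["x"]))   # "correct"
    print(formulas_agree("x*(x+1)", "x**2 - x", ["x"]))   # "incorrect"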
@@ -1879,7 +1884,7 @@ class FormulaResponse(LoncapaResponse):
             return "incorrect"
         return "correct"
-    def answer_compare(self, a, b):
+    def compare_answer(self, a, b):
         """
         An external interface for comparing whether a and b are equal.
         """
...
@@ -929,12 +929,12 @@ class NumericalResponseTest(ResponseTest):
         with self.assertRaisesRegexp(StudentInputError, msg_regex):
             problem.grade_answers({'1_2_1': 'foobar'})
-    def test_answer_compare(self):
+    def test_compare_answer(self):
         """Tests the answer compare function."""
         problem = self.build_problem(answer="42")
         responder = problem.responders.values()[0]
-        self.assertTrue(responder.answer_compare('48', '8*6'))
-        self.assertFalse(responder.answer_compare('48', '9*5'))
+        self.assertTrue(responder.compare_answer('48', '8*6'))
+        self.assertFalse(responder.compare_answer('48', '9*5'))
     def test_validate_answer(self):
         """Tests the answer validation function."""
...
@@ -18,8 +18,6 @@ from xblock.core import Scope, String, Integer, Boolean, Dict, List
 from capa.responsetypes import FormulaResponse
-from calc import UndefinedVariable
 from django.utils.html import escape
 log = logging.getLogger(__name__)
@@ -84,10 +82,10 @@ class CrowdsourceHinterModule(CrowdsourceHinterFields, XModule):
             self.answer_to_str = self.formula_answer_to_str
         else:
             self.answer_to_str = self.numerical_answer_to_str
-        # answer_compare is expected to return whether its two inputs are close enough
+        # compare_answer is expected to return whether its two inputs are close enough
         # to be equal, or raise a StudentInputError if one of the inputs is malformatted.
         try:
-            self.answer_compare = responder.answer_compare
+            self.compare_answer = responder.compare_answer
             self.validate_answer = responder.validate_answer
         except AttributeError:
             # This response type is not supported!
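The rewritten comment doubles as the duck-typed contract the hinter depends on: compare_answer returns whether two answer strings are close enough to count as equal, or raises StudentInputError on malformed input; validate_answer screens input first; response types lacking the hooks surface as AttributeError. A small sketch of how a caller might lean on that contract (StudentInputError below is a stand-in for capa's exception of the same name, and the function is illustrative):

    class StudentInputError(Exception):
        """Stand-in for capa's StudentInputError."""

    def matching_hint_keys(answer, hints, compare_answer, validate_answer):
        # Return the hint keys that match `answer`, or None if the answer
        # is malformed or the comparison rejects it outright.
        if not validate_answer(answer):
            return None
        try:
            return [key for key in hints if compare_answer(key, answer)]
        except StudentInputError:
            return None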
@@ -144,7 +142,7 @@ class CrowdsourceHinterModule(CrowdsourceHinterFields, XModule):
         Look in self.hints, and find all answer keys that are "equal with tolerance"
         to the input answer.
         """
-        return [key for key in self.hints if self.answer_compare(key, answer)]
+        return [key for key in self.hints if self.compare_answer(key, answer)]
     def handle_ajax(self, dispatch, data):
         """
@@ -182,6 +180,7 @@ class CrowdsourceHinterModule(CrowdsourceHinterFields, XModule):
         - 'rand_hint_1' and 'rand_hint_2' are two random hints to the answer in `data`.
         - 'answer' is the parsed answer that was submitted.
         """
+        # First, validate our inputs.
         try:
             answer = self.answer_to_str(data)
         except (ValueError, AttributeError):
@@ -194,7 +193,8 @@ class CrowdsourceHinterModule(CrowdsourceHinterFields, XModule):
             return
         if answer not in self.user_submissions:
             self.user_submissions += [answer]
-        # Look for a hint to give.
+        # Next, find all of the hints that could possibly go with this answer.
         # Make a local copy of self.hints - this means we only need to do one json unpacking.
         # (This is because xblocks storage makes the following command a deep copy.)
         local_hints = self.hints
@@ -209,6 +209,8 @@ class CrowdsourceHinterModule(CrowdsourceHinterFields, XModule):
                 temp_dict[key] = value + [matching_answer]
             matching_hints.update(local_hints[matching_answer])
         # matching_hints now maps pk's to lists of [hint, votes, matching_answer]
+        # Finally, randomly choose a subset of matching_hints to actually show.
         if len(matching_hints) == 0:
             # No hints to give. Return.
             return
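The three comments added across these hunks ("First... Next... Finally...") read get_feedback as a three-stage pipeline. A condensed, hedged sketch of that flow with illustrative names (the real method also records user submissions, vote counts, and the pk bookkeeping shown above):

    import random

    def get_feedback_sketch(data, parse_answer, find_matching_hints, max_hints=2):
        # 1. Validate and parse the submitted answer.
        try:
            answer = parse_answer(data)
        except (ValueError, AttributeError):
            return None
        # 2. Collect every stored hint whose answer key matches this answer.
        matching_hints = find_matching_hints(answer)
        if not matching_hints:
            return None
        # 3. Randomly choose a small subset of those hints to actually show.
        shown = random.sample(list(matching_hints), min(max_hints, len(matching_hints)))
        return {'answer': answer, 'hints': shown}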
...
@@ -33,7 +33,6 @@ class @Hinter
     return string.replace(/[!"#$%&'()*+,.\/:;<=>?@\[\\\]^`{|}~]/g, '\\$&')
   bind: =>
-    window.update_schematics()
     @$('input.vote').click @vote
     @$('input.submit-hint').click @submit_hint
     @$('.custom-hint').click @clear_default_text
...
@@ -120,10 +120,10 @@ class CHModuleFactory(object):
             return False
         responder.validate_answer = validate_answer
-        def answer_compare(a, b):
+        def compare_answer(a, b):
             """ A fake answer comparer """
             return a == b
-        responder.answer_compare = answer_compare
+        responder.compare_answer = compare_answer
         capa_module.lcp.responders = {'responder0': responder}
         capa_module.displayable_items = lambda: [capa_module]
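The factory swaps the real responder hooks for a plain-equality stub, so in these tests two answers only "match" when the strings are identical. The same setup could be expressed with a mock object (an alternative sketch, not what the factory actually does; on Python 3 the import would come from unittest.mock):

    from mock import MagicMock

    responder = MagicMock()
    responder.validate_answer = lambda answer: True     # accept any input in this sketch
    responder.compare_answer = lambda a, b: a == b      # exact string equality only

    assert responder.compare_answer('42.5', '42.5')
    assert not responder.compare_answer('42.5', '42.50')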
@@ -359,9 +359,10 @@ class CrowdsourceHinterTest(unittest.TestCase):
         Someone has gotten the problem correct on the first try.
         Output should be empty.
         """
-        mock_module = CHModuleFactory.create(previous_answers=[])
+        mock_module = CHModuleFactory.create(previous_answers=[], user_submissions=[])
         json_in = {'problem_name': '42.5'}
         out = mock_module.get_feedback(json_in)
+        print out
         self.assertTrue(out is None)
     def test_getfeedback_1wronganswer_nohints(self):
...
@@ -15,12 +15,12 @@
 <%def name="get_feedback()">
     <%
-    def unspace(string):
+    def unspace(in_str):
         """
         HTML id's can't have spaces in them. This little function
         removes spaces.
         """
-        return ''.join(string.split())
+        return ''.join(in_str.split())
     # Make a list of all hints shown. (This is fed back to the site as pk_list.)
     # At the same time, determine whether any hints were shown at all.
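The parameter rename (string to in_str) presumably avoids reusing a name that collides with Python's standard string module; the helper's behaviour is unchanged. For reference, it collapses all whitespace so the result can be used as an HTML id:

    def unspace(in_str):
        """Strip all whitespace so the result can be used as an HTML id."""
        return ''.join(in_str.split())

    assert unspace("hint 42 a") == "hint42a"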
...