Commit 85ebdff7 by Diana Huang

Merge pull request #1905 from MITx/fix/sarina/new_formularesponse_defaults

Fix/sarina/new formularesponse defaults
parents cbf9acfe f95331dc
@@ -24,7 +24,9 @@ default_functions = {'sin': numpy.sin,
'arccos': numpy.arccos,
'arcsin': numpy.arcsin,
'arctan': numpy.arctan,
'abs': numpy.abs
'abs': numpy.abs,
'fact': math.factorial,
'factorial': math.factorial
}
default_variables = {'j': numpy.complex(0, 1),
'e': numpy.e,
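For orientation, here is a minimal sketch of how the two new aliases are expected to behave once evaluator() merges them into its function table (assuming calc.py is importable as capa.calc; this is an illustration, not part of the commit):

    from capa.calc import evaluator

    # Either spelling should now work in a learner's formula with no
    # per-problem setup, since both names map to math.factorial.
    print evaluator({}, {}, "fact(5)")        # expected: 120.0
    print evaluator({}, {}, "factorial(5)")   # expected: 120.0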
@@ -112,18 +114,18 @@ def evaluator(variables, functions, string, cs=False):
return float('nan')
ops = {"^": operator.pow,
"*": operator.mul,
"/": operator.truediv,
"+": operator.add,
"-": operator.sub,
}
"*": operator.mul,
"/": operator.truediv,
"+": operator.add,
"-": operator.sub,
}
# We eliminated extreme ones, since they're rarely used, and potentially
# confusing. They may also conflict with variables if we ever allow e.g.
# 5R instead of 5*R
suffixes = {'%': 0.01, 'k': 1e3, 'M': 1e6, 'G': 1e9,
'T': 1e12, # 'P':1e15,'E':1e18,'Z':1e21,'Y':1e24,
'c': 1e-2, 'm': 1e-3, 'u': 1e-6,
'n': 1e-9, 'p': 1e-12} # ,'f':1e-15,'a':1e-18,'z':1e-21,'y':1e-24}
'T': 1e12, # 'P':1e15,'E':1e18,'Z':1e21,'Y':1e24,
'c': 1e-2, 'm': 1e-3, 'u': 1e-6,
'n': 1e-9, 'p': 1e-12} # ,'f':1e-15,'a':1e-18,'z':1e-21,'y':1e-24}
def super_float(text):
''' Like float, but with si extensions. 1k goes to 1000'''
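The body of super_float is collapsed out of this hunk; the sketch below is only an assumption about how the suffixes table above is typically applied, included to make the hunk self-explanatory:

    def super_float(text):
        ''' Like float, but with SI extensions. 1k goes to 1000. '''
        if text and text[-1] in suffixes:
            # Strip the trailing SI suffix and scale by its multiplier,
            # e.g. "1k" -> 1.0 * 1e3, "5%" -> 5.0 * 0.01.
            return float(text[:-1]) * suffixes[text[-1]]
        return float(text)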
@@ -246,4 +248,9 @@ if __name__ == '__main__':
print evaluator({}, {}, "5+1*j")
print evaluator({}, {}, "j||1")
print evaluator({}, {}, "e^(j*pi)")
print evaluator({}, {}, "5+7 QWSEKO")
print evaluator({}, {}, "fact(5)")
print evaluator({}, {}, "factorial(5)")
try:
print evaluator({}, {}, "5+7 QWSEKO")
except UndefinedVariable:
print "Successfully caught undefined variable"
@@ -7,6 +7,7 @@ from datetime import datetime
import json
from nose.plugins.skip import SkipTest
import os
import random
import unittest
import textwrap
@@ -14,7 +15,7 @@ from . import test_system
import capa.capa_problem as lcp
from capa.responsetypes import LoncapaProblemError, \
StudentInputError, ResponseError
StudentInputError, ResponseError
from capa.correctmap import CorrectMap
from capa.util import convert_files_to_filenames
from capa.xqueue_interface import dateformat
@@ -33,10 +34,13 @@ class ResponseTest(unittest.TestCase):
xml = self.xml_factory.build_xml(**kwargs)
return lcp.LoncapaProblem(xml, '1', system=test_system)
def assert_grade(self, problem, submission, expected_correctness):
def assert_grade(self, problem, submission, expected_correctness, msg=None):
input_dict = {'1_2_1': submission}
correct_map = problem.grade_answers(input_dict)
self.assertEquals(correct_map.get_correctness('1_2_1'), expected_correctness)
if msg is None:
self.assertEquals(correct_map.get_correctness('1_2_1'), expected_correctness)
else:
self.assertEquals(correct_map.get_correctness('1_2_1'), expected_correctness, msg)
def assert_answer_format(self, problem):
answers = problem.get_question_answers()
@@ -357,6 +361,83 @@ class FormulaResponseTest(ResponseTest):
self.assert_grade(problem, '2*x', 'correct')
self.assert_grade(problem, '3*x', 'incorrect')
def test_parallel_resistors(self):
"""Test parallel resistors"""
sample_dict = {'R1': (10, 10), 'R2': (2, 2), 'R3': (5, 5), 'R4': (1, 1)}
# Test problem
problem = self.build_problem(sample_dict=sample_dict,
num_samples=10,
tolerance=0.01,
answer="R1||R2")
# Expect answer to be marked correct
input_formula = "R1||R2"
self.assert_grade(problem, input_formula, "correct")
# Expect random number to be marked incorrect
input_formula = "13"
self.assert_grade(problem, input_formula, "incorrect")
# Expect incorrect answer marked incorrect
input_formula = "R3||R4"
self.assert_grade(problem, input_formula, "incorrect")
def test_default_variables(self):
"""Test the default variables provided in common/lib/capa/capa/calc.py"""
# which are: j (complex number), e, pi, k, c, T, q
# Sample x in the range [-10,10]
sample_dict = {'x': (-10, 10)}
default_variables = [('j', 2, 3), ('e', 2, 3), ('pi', 2, 3), ('c', 2, 3), ('T', 2, 3),
('k', 2 * 10 ** 23, 3 * 10 ** 23), # note k = scipy.constants.k = 1.3806488e-23
('q', 2 * 10 ** 19, 3 * 10 ** 19)] # note q = scipy.constants.e = 1.602176565e-19
for (var, cscalar, iscalar) in default_variables:
# The expected solution is numerically equivalent to cscalar*x*var
correct = '{0}*x*{1}'.format(cscalar, var)
incorrect = '{0}*x*{1}'.format(iscalar, var)
problem = self.build_problem(sample_dict=sample_dict,
num_samples=10,
tolerance=0.01,
answer=correct)
# Expect that the inputs are graded correctly
self.assert_grade(problem, correct, 'correct',
msg="Failed on variable {0}; the given, correct answer was {1} but graded 'incorrect'".format(var, correct))
self.assert_grade(problem, incorrect, 'incorrect',
msg="Failed on variable {0}; the given, incorrect answer was {1} but graded 'correct'".format(var, incorrect))
def test_default_functions(self):
"""Test the default functions provided in common/lib/capa/capa/calc.py"""
# which are: sin, cos, tan, sqrt, log10, log2, ln,
# arccos, arcsin, arctan, abs,
# fact, factorial
w = random.randint(3, 10)
sample_dict = {'x': (-10, 10), # Sample x in the range [-10,10]
'y': (1, 10), # Sample y in the range [1,10] - sqrt and the logs need positive inputs
'z': (-1, 1), # Sample z in the range [-1,1] - arccos and arcsin need inputs in [-1,1]
'w': (w, w)} # Sample w is a random positive integer - factorial needs a positive integer input,
# and formularesponse only lets us specify a float range, so both endpoints are pinned to w
default_functions = [('sin', 2, 3, 'x'), ('cos', 2, 3, 'x'), ('tan', 2, 3, 'x'), ('sqrt', 2, 3, 'y'), ('log10', 2, 3, 'y'),
('log2', 2, 3, 'y'), ('ln', 2, 3, 'y'), ('arccos', 2, 3, 'z'), ('arcsin', 2, 3, 'z'), ('arctan', 2, 3, 'x'),
('abs', 2, 3, 'x'), ('fact', 2, 3, 'w'), ('factorial', 2, 3, 'w')]
for (func, cscalar, iscalar, var) in default_functions:
print 'func is: {0}'.format(func)
# The expected solution is numerically equivalent to cscalar*func(var)
correct = '{0}*{1}({2})'.format(cscalar, func, var)
incorrect = '{0}*{1}({2})'.format(iscalar, func, var)
problem = self.build_problem(sample_dict=sample_dict,
num_samples=10,
tolerance=0.01,
answer=correct)
# Expect that the inputs are graded correctly
self.assert_grade(problem, correct, 'correct',
msg="Failed on function {0}; the given, correct answer was {1} but graded 'incorrect'".format(func, correct))
self.assert_grade(problem, incorrect, 'incorrect',
msg="Failed on function {0}; the given, incorrect answer was {1} but graded 'correct'".format(func, incorrect))
class StringResponseTest(ResponseTest):
from response_xml_factory import StringResponseXMLFactory
@@ -904,14 +985,13 @@ class CustomResponseTest(ResponseTest):
with self.assertRaises(ResponseError):
problem.grade_answers({'1_2_1': '42'})
def test_module_imports_inline(self):
'''
Check that the correct modules are available to custom
response scripts
'''
for module_name in ['random', 'numpy', 'math', 'scipy',
for module_name in ['random', 'numpy', 'math', 'scipy',
'calc', 'eia', 'chemcalc', 'chemtools',
'miller', 'draganddrop']:
@@ -921,26 +1001,25 @@ class CustomResponseTest(ResponseTest):
script = textwrap.dedent('''
correct[0] = 'correct'
assert('%s' in globals())''' % module_name)
# Create the problem
problem = self.build_problem(answer=script)
# Expect that we can grade an answer without
# Expect that we can grade an answer without
# getting an exception
try:
problem.grade_answers({'1_2_1': '42'})
except ResponseError:
self.fail("Could not use name '%s' in custom response"
% module_name)
self.fail("Could not use name '{0}s' in custom response".format(module_name))
def test_module_imports_function(self):
'''
Check that the correct modules are available to custom
response scripts
'''
for module_name in ['random', 'numpy', 'math', 'scipy',
for module_name in ['random', 'numpy', 'math', 'scipy',
'calc', 'eia', 'chemcalc', 'chemtools',
'miller', 'draganddrop']:
@@ -951,18 +1030,17 @@ class CustomResponseTest(ResponseTest):
def check_func(expect, answer_given):
assert('%s' in globals())
return True''' % module_name)
# Create the problem
problem = self.build_problem(script=script, cfn="check_func")
# Expect that we can grade an answer without
# Expect that we can grade an answer without
# getting an exception
try:
problem.grade_answers({'1_2_1': '42'})
except ResponseError:
self.fail("Could not use name '%s' in custom response"
% module_name)
self.fail("Could not use name '{0}s' in custom response".format(module_name))
class SchematicResponseTest(ResponseTest):