Commit e6dfa8e8 by cahrens

Merge branch 'master' into feature/christina/metadata-ui

parents 6c06599d 46967a78
......@@ -61,6 +61,8 @@
<div class="wrapper wrapper-view">
<%include file="widgets/header.html" />
## remove this block after advanced settings notification is rewritten
<%block name="view_alerts"></%block>
<div id="page-alert"></div>
<%block name="content"></%block>
......@@ -72,9 +74,13 @@
<%include file="widgets/footer.html" />
<%include file="widgets/tender.html" />
## remove this block after advanced settings notification is rewritten
<%block name="view_notifications"></%block>
<div id="page-notification"></div>
</div>
## remove this block after advanced settings notification is rewritten
<%block name="view_prompts"></%block>
<div id="page-prompt"></div>
<%block name="jsextra"></%block>
</body>
......
# .coveragerc for common/lib/calc
[run]
data_file = reports/common/lib/calc/.coverage
source = common/lib/calc
branch = true
[report]
ignore_errors = True
[html]
title = Calc Python Test Coverage Report
directory = reports/common/lib/calc/cover
[xml]
output = reports/common/lib/calc/coverage.xml
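A hedged sketch of how an rcfile like this is consumed: coverage.py can be driven from its Python API with the same configuration (the rcfile path below is an assumption based on the source paths above).

import coverage

# load the [run]/[report]/[html]/[xml] sections shown above
cov = coverage.Coverage(config_file="common/lib/calc/.coveragerc")
cov.start()
# ... run the calc test suite here ...
cov.stop()
cov.save()
cov.html_report()  # writes reports/common/lib/calc/cover per the [html] section
cov.xml_report()   # writes reports/common/lib/calc/coverage.xml per [xml]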
......@@ -144,6 +144,8 @@ def evaluator(variables, functions, string, cs=False):
return x
def parallel(x): # Parallel resistors [ 1 2 ] => 2/3
# convert from pyparsing.ParseResults, which doesn't support '0 in x'
x = list(x)
if len(x) == 1:
return x[0]
if 0 in x:
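The hunk above is truncated; a plausible completion of parallel, assuming the usual physics (a zero resistor shorts the network, otherwise combine by reciprocal sum):

def parallel(x):  # Parallel resistors [ 1 2 ] => 2/3
    # convert from pyparsing.ParseResults, which doesn't support '0 in x'
    x = list(x)
    if len(x) == 1:
        return x[0]
    if 0 in x:
        return 0
    return 1 / sum(1.0 / item for item in x)

print(parallel([1, 2]))  # 0.666..., matching the "[ 1 2 ] => 2/3" comment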
......@@ -230,27 +232,3 @@ def evaluator(variables, functions, string, cs=False):
expr << Optional((plus | minus)) + term + ZeroOrMore((plus | minus) + term) # -5 + 4 - 3
expr = expr.setParseAction(sum_parse_action)
return (expr + stringEnd).parseString(string)[0]
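For readers unfamiliar with the pyparsing idiom used here, a minimal self-contained sketch of the same pattern (not the calc.py grammar, just its shape): a sum/difference rule whose parse action folds the matched tokens into one value.

from pyparsing import Literal, Optional, Word, ZeroOrMore, nums, stringEnd

plus = Literal('+')
minus = Literal('-')
number = Word(nums).setParseAction(lambda t: [float(t[0])])

def sum_parse_action(tokens):
    # tokens arrive flat, e.g. ['-', 5.0, '+', 4.0, '-', 3.0]
    total, sign = 0.0, 1.0
    for tok in tokens:
        if tok == '+':
            sign = 1.0
        elif tok == '-':
            sign = -1.0
        else:
            total += sign * tok
            sign = 1.0
    return [total]

expr = (Optional(plus | minus) + number +
        ZeroOrMore((plus | minus) + number)).setParseAction(sum_parse_action)

print((expr + stringEnd).parseString("-5 + 4 - 3")[0])  # -4.0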
if __name__ == '__main__':
variables = {'R1': 2.0, 'R3': 4.0}
functions = {'sin': numpy.sin, 'cos': numpy.cos}
print "X", evaluator(variables, functions, "10000||sin(7+5)-6k")
print "X", evaluator(variables, functions, "13")
print evaluator({'R1': 2.0, 'R3': 4.0}, {}, "13")
print evaluator({'e1': 1, 'e2': 1.0, 'R3': 7, 'V0': 5, 'R5': 15, 'I1': 1, 'R4': 6}, {}, "e2")
print evaluator({'a': 2.2997471478310274, 'k': 9, 'm': 8, 'x': 0.66009498411213041}, {}, "5")
print evaluator({}, {}, "-1")
print evaluator({}, {}, "-(7+5)")
print evaluator({}, {}, "-0.33")
print evaluator({}, {}, "-.33")
print evaluator({}, {}, "5+1*j")
print evaluator({}, {}, "j||1")
print evaluator({}, {}, "e^(j*pi)")
print evaluator({}, {}, "fact(5)")
print evaluator({}, {}, "factorial(5)")
try:
print evaluator({}, {}, "5+7 QWSEKO")
except UndefinedVariable:
print "Successfully caught undefined variable"
......@@ -10,7 +10,6 @@ import random
import unittest
import textwrap
import mock
import textwrap
from . import new_loncapa_problem, test_system
......@@ -223,7 +222,6 @@ class SymbolicResponseTest(ResponseTest):
for (input_str, input_mathml) in incorrect_inputs:
self._assert_symbolic_grade(problem, input_str, input_mathml, 'incorrect')
def test_complex_number_grade(self):
problem = self.build_problem(math_display=True,
expect="[[cos(theta),i*sin(theta)],[i*sin(theta),cos(theta)]]",
......@@ -323,7 +321,7 @@ class SymbolicResponseTest(ResponseTest):
dynamath_input,
expected_correctness):
input_dict = {'1_2_1': str(student_input),
'1_2_1_dynamath': str(dynamath_input) }
'1_2_1_dynamath': str(dynamath_input)}
correct_map = problem.grade_answers(input_dict)
......@@ -349,10 +347,18 @@ class OptionResponseTest(ResponseTest):
class FormulaResponseTest(ResponseTest):
"""
Test the FormulaResponse class
"""
from response_xml_factory import FormulaResponseXMLFactory
xml_factory_class = FormulaResponseXMLFactory
def test_grade(self):
"""
Test basic functionality of FormulaResponse
Specifically, whether it can recognize equivalent formulae
"""
# Sample variables x and y in the range [-10, 10]
sample_dict = {'x': (-10, 10), 'y': (-10, 10)}
......@@ -373,6 +379,9 @@ class FormulaResponseTest(ResponseTest):
self.assert_grade(problem, input_formula, "incorrect")
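The equivalence check this test exercises is numerical, not symbolic. A hedged sketch of the idea (eval stands in for capa's calc.evaluator; the real grader samples variables from sample_dict and compares within the problem's tolerance):

import random

def formulas_match(expected, given, ranges, num_samples=10, tolerance=0.01):
    # sample each variable uniformly from its range and compare the two
    # expressions numerically at every sample point
    for _ in range(num_samples):
        values = dict((var, random.uniform(lo, hi))
                      for var, (lo, hi) in ranges.items())
        v1 = eval(expected, {}, values)
        v2 = eval(given, {}, values)
        if abs(v1 - v2) > tolerance * abs(v1):
            return False
    return True

print(formulas_match("x + 2*y", "y + x + y", {'x': (-10, 10), 'y': (-10, 10)}))  # True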
def test_hint(self):
"""
Test the hint-giving functionality of FormulaResponse
"""
# Sample variables x and y in the range [-10, 10]
sample_dict = {'x': (-10, 10), 'y': (-10, 10)}
......@@ -401,6 +410,10 @@ class FormulaResponseTest(ResponseTest):
'Try including the variable x')
def test_script(self):
"""
Test whether a Python script can be used to generate answers
"""
# Calculate the answer using a script
script = "calculated_ans = 'x+x'"
......@@ -419,7 +432,9 @@ class FormulaResponseTest(ResponseTest):
self.assert_grade(problem, '3*x', 'incorrect')
def test_parallel_resistors(self):
"""Test parallel resistors"""
"""
Test parallel resistors
"""
sample_dict = {'R1': (10, 10), 'R2': (2, 2), 'R3': (5, 5), 'R4': (1, 1)}
# Test problem
......@@ -440,8 +455,11 @@ class FormulaResponseTest(ResponseTest):
self.assert_grade(problem, input_formula, "incorrect")
def test_default_variables(self):
"""Test the default variables provided in common/lib/capa/capa/calc.py"""
# which are: j (complex number), e, pi, k, c, T, q
"""
Test the default variables provided in calc.py
which are: j (complex number), e, pi, k, c, T, q
"""
# Sample x in the range [-10,10]
sample_dict = {'x': (-10, 10)}
......@@ -464,11 +482,14 @@ class FormulaResponseTest(ResponseTest):
msg="Failed on variable {0}; the given, incorrect answer was {1} but graded 'correct'".format(var, incorrect))
def test_default_functions(self):
"""Test the default functions provided in common/lib/capa/capa/calc.py"""
# which are: sin, cos, tan, sqrt, log10, log2, ln,
# arccos, arcsin, arctan, abs,
# fact, factorial
"""
Test the default functions provided in common/lib/capa/capa/calc.py
which are:
sin, cos, tan, sqrt, log10, log2, ln,
arccos, arcsin, arctan, abs,
fact, factorial
"""
w = random.randint(3, 10)
sample_dict = {'x': (-10, 10), # Sample x in the range [-10,10]
'y': (1, 10), # Sample y in the range [1,10] - logs, arccos need positive inputs
......@@ -496,8 +517,10 @@ class FormulaResponseTest(ResponseTest):
msg="Failed on function {0}; the given, incorrect answer was {1} but graded 'correct'".format(func, incorrect))
def test_grade_infinity(self):
# This resolves a bug where a problem with relative tolerance would
# pass with any arbitrarily large student answer.
"""
Test that a large input on a problem with relative tolerance isn't
erroneously marked as correct.
"""
sample_dict = {'x': (1, 2)}
......@@ -514,8 +537,9 @@ class FormulaResponseTest(ResponseTest):
self.assert_grade(problem, input_formula, "incorrect")
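Why an infinite answer defeats a naive relative-tolerance check, in a hedged sketch (this assumes the tolerance was measured against the student's value, which is one plausible reading of the bug):

expected, student, tolerance = 50.0, float('inf'), 0.01
# inf - 50 is inf, and 0.01 * inf is inf, so inf <= inf passes
naive_correct = abs(student - expected) <= tolerance * abs(student)
print(naive_correct)  # True: any arbitrarily large answer would grade correct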
def test_grade_nan(self):
# Attempt to produce a value which causes the student's answer to be
# evaluated to nan. See if this is resolved correctly.
"""
Test that expressions that evaluate to NaN are not marked as correct.
"""
sample_dict = {'x': (1, 2)}
......@@ -532,6 +556,18 @@ class FormulaResponseTest(ResponseTest):
input_formula = "x + 0*1e999"
self.assert_grade(problem, input_formula, "incorrect")
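The NaN case in miniature: 1e999 overflows to infinity as a float literal, and multiplying it by zero yields nan, which compares false to everything, so the grader must handle it explicitly rather than rely on a tolerance check.

x = 1.5
student = x + 0 * 1e999    # 1e999 -> inf; 0 * inf -> nan; x + nan -> nan
print(student)             # nan
print(student == student)  # False, the standard NaN self-comparison test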
def test_raises_zero_division_err(self):
"""
Test that division by zero raises a StudentInputError.
"""
sample_dict = {'x': (1, 2)}
problem = self.build_problem(sample_dict=sample_dict,
num_samples=10,
tolerance="1%",
answer="x") # Answer doesn't matter
input_dict = {'1_2_1': '1/0'}
self.assertRaises(StudentInputError, problem.grade_answers, input_dict)
class StringResponseTest(ResponseTest):
from response_xml_factory import StringResponseXMLFactory
......@@ -592,7 +628,7 @@ class StringResponseTest(ResponseTest):
problem = self.build_problem(
answer="Michigan",
hintfn="gimme_a_hint",
script = textwrap.dedent("""
script=textwrap.dedent("""
def gimme_a_hint(answer_ids, student_answers, new_cmap, old_cmap):
aid = answer_ids[0]
answer = student_answers[aid]
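The hunk cuts the script off; for context, a hedged sketch of how such a hint function might finish (set_hint_and_mode is the CorrectMap method capa hint functions typically call, though the hint text here is invented):

def gimme_a_hint(answer_ids, student_answers, new_cmap, old_cmap):
    aid = answer_ids[0]
    answer = student_answers[aid]
    if answer != 'Michigan':
        # attach a hint to this answer id; 'always' shows it unconditionally
        new_cmap.set_hint_and_mode(aid, 'The Great Lakes State', 'always')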
......@@ -898,6 +934,14 @@ class NumericalResponseTest(ResponseTest):
incorrect_responses = ["", "3.9", "4.1", "0", "5.01e1"]
self.assert_multiple_grade(problem, correct_responses, incorrect_responses)
def test_raises_zero_division_err(self):
"""See if division by zero is handled correctly"""
problem = self.build_problem(question_text="What is 5 * 10?",
explanation="The answer is 50",
answer="5e+1") # Answer doesn't matter
input_dict = {'1_2_1': '1/0'}
self.assertRaises(StudentInputError, problem.grade_answers, input_dict)
class CustomResponseTest(ResponseTest):
from response_xml_factory import CustomResponseXMLFactory
......
This source diff could not be displayed because it is too large.
34d1996e44f78168a73297217b3a0973c2ae90e1
\ No newline at end of file
......@@ -115,6 +115,11 @@ xmodule can be tested independently, with this:
rake test_common/lib/xmodule
Other module-level tests include:
* `rake test_common/lib/capa`
* `rake test_common/lib/calc`
To run a single Django test class:
rake test_lms[courseware.tests.tests:testViewAuth]
......
b154ce99fb5c8d413ba769e8cc0df94ed674c3f4
\ No newline at end of file
2b8c58b098bdb17f9ddcbb2098f94c50fdcedf60
\ No newline at end of file
7d8b9879f7e5b859910edba7249661eedd3fcf37
\ No newline at end of file
caf8b43337faa75cef5da5cd090010215a67b1bd
\ No newline at end of file
b4d043bb1ca4a8815d4a388a2c9d96038211417b
\ No newline at end of file
6718f0c6e851376b5478baff94e1f1f4449bd938
\ No newline at end of file
......@@ -10,6 +10,9 @@ end
# the ENV_TOKENS to the templating context.
def preprocess_with_mako(filename)
# simple command-line invocation of Mako engine
# cdodge: the .gsub() calls are used to translate true->True and false->False so that the
# generated Python is actually valid Python. This is just a short-term hack to unblock the
# release train until a real fix can be made by people who know this better
mako = "from mako.template import Template;" +
"print Template(filename=\"#{filename}\")" +
# Total hack. It works because a Python dict literal has
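Stripped of the Ruby string assembly, the one-liner being built here amounts to roughly the following Python (filename and tokens are placeholders, not values from the Rakefile):

from mako.template import Template

env_tokens = {"FEATURE_FLAG": True}  # hypothetical ENV_TOKENS content
# render the template with ENV_TOKENS in the templating context
print(Template(filename="config.yml.mako").render(env_tokens=env_tokens))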
......