Commit 8cc93cb5 by JonahStanley

Merge branch 'master' into jonahstanley/add-more-tests

parents 286fcece 46967a78
......@@ -61,6 +61,8 @@
<div class="wrapper wrapper-view">
<%include file="widgets/header.html" />
## remove this block after advanced settings notification is rewritten
<%block name="view_alerts"></%block>
<div id="page-alert"></div>
<%block name="content"></%block>
......@@ -72,9 +74,13 @@
<%include file="widgets/footer.html" />
<%include file="widgets/tender.html" />
## remove this block after advanced settings notification is rewritten
<%block name="view_notifications"></%block>
<div id="page-notification"></div>
</div>
## remove this block after advanced settings notification is rewritten
<%block name="view_prompts"></%block>
<div id="page-prompt"></div>
<%block name="jsextra"></%block>
</body>
......
The code in this directory is based on:
django-mako Copyright (c) 2008 Mikeal Rogers
and is redistributed here with modifications under the same Apache 2.0 license
as the original.
================================================================================
django-mako
================================================================================
......
......@@ -527,12 +527,12 @@ def _do_create_account(post_vars):
js = {'success': False}
# Figure out the cause of the integrity error
if len(User.objects.filter(username=post_vars['username'])) > 0:
js['value'] = "An account with this username already exists."
js['value'] = "An account with the Public Username '" + post_vars['username'] + "' already exists."
js['field'] = 'username'
return HttpResponse(json.dumps(js))
if len(User.objects.filter(email=post_vars['email'])) > 0:
js['value'] = "An account with this e-mail already exists."
js['value'] = "An account with the Email '" + post_vars['email'] + "' already exists."
js['field'] = 'email'
return HttpResponse(json.dumps(js))
......
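For reference, a rough sketch of the JSON body the duplicate-username branch above now returns (the username 'jdoe' is illustrative, not taken from the diff):

import json

js = {
    'success': False,
    'field': 'username',
    'value': "An account with the Public Username 'jdoe' already exists.",
}
body = json.dumps(js)   # this string is what gets wrapped in the HttpResponse above
assert '"field": "username"' in body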
# .coveragerc for common/lib/calc
[run]
data_file = reports/common/lib/calc/.coverage
source = common/lib/calc
branch = true
[report]
ignore_errors = True
[html]
title = Calc Python Test Coverage Report
directory = reports/common/lib/calc/cover
[xml]
output = reports/common/lib/calc/coverage.xml
......@@ -144,6 +144,8 @@ def evaluator(variables, functions, string, cs=False):
return x
def parallel(x): # Parallel resistors [ 1 2 ] => 2/3
# convert from pyparsing.ParseResults, which doesn't support '0 in x'
x = list(x)
if len(x) == 1:
return x[0]
if 0 in x:
......@@ -230,27 +232,3 @@ def evaluator(variables, functions, string, cs=False):
expr << Optional((plus | minus)) + term + ZeroOrMore((plus | minus) + term) # -5 + 4 - 3
expr = expr.setParseAction(sum_parse_action)
return (expr + stringEnd).parseString(string)[0]
if __name__ == '__main__':
variables = {'R1': 2.0, 'R3': 4.0}
functions = {'sin': numpy.sin, 'cos': numpy.cos}
print "X", evaluator(variables, functions, "10000||sin(7+5)-6k")
print "X", evaluator(variables, functions, "13")
print evaluator({'R1': 2.0, 'R3': 4.0}, {}, "13")
print evaluator({'e1': 1, 'e2': 1.0, 'R3': 7, 'V0': 5, 'R5': 15, 'I1': 1, 'R4': 6}, {}, "e2")
print evaluator({'a': 2.2997471478310274, 'k': 9, 'm': 8, 'x': 0.66009498411213041}, {}, "5")
print evaluator({}, {}, "-1")
print evaluator({}, {}, "-(7+5)")
print evaluator({}, {}, "-0.33")
print evaluator({}, {}, "-.33")
print evaluator({}, {}, "5+1*j")
print evaluator({}, {}, "j||1")
print evaluator({}, {}, "e^(j*pi)")
print evaluator({}, {}, "fact(5)")
print evaluator({}, {}, "factorial(5)")
try:
print evaluator({}, {}, "5+7 QWSEKO")
except UndefinedVariable:
print "Successfully caught undefined variable"
"""
Unit tests for calc.py
"""
import unittest
import numpy
import calc
class EvaluatorTest(unittest.TestCase):
"""
Run tests for calc.evaluator
Go through all functionalities as specifically as possible--
work from number input to functions and complex expressions
Also test custom variable substitutions (i.e.
`evaluator({'x':3.0},{}, '3*x')`
gives 9.0) and more.
"""
def test_number_input(self):
"""
Test different kinds of float inputs
"""
easy_eval = lambda x: calc.evaluator({}, {}, x)
self.assertEqual(easy_eval("13"), 13)
self.assertEqual(easy_eval("3.14"), 3.14)
self.assertEqual(easy_eval(".618033989"), 0.618033989)
self.assertEqual(easy_eval("-13"), -13)
self.assertEqual(easy_eval("-3.14"), -3.14)
self.assertEqual(easy_eval("-.618033989"), -0.618033989)
# See also test_exponential_answer and test_si_suffix
def test_exponential_answer(self):
"""
Test for correct interpretation of scientific notation
"""
answer = 50
correct_responses = ["50", "50.0", "5e1", "5e+1",
"50e0", "50.0e0", "500e-1"]
incorrect_responses = ["", "3.9", "4.1", "0", "5.01e1"]
for input_str in correct_responses:
result = calc.evaluator({}, {}, input_str)
fail_msg = "Expected '{0}' to equal {1}".format(
input_str, answer)
self.assertEqual(answer, result, msg=fail_msg)
for input_str in incorrect_responses:
result = calc.evaluator({}, {}, input_str)
fail_msg = "Expected '{0}' to not equal {1}".format(
input_str, answer)
self.assertNotEqual(answer, result, msg=fail_msg)
def test_si_suffix(self):
"""
Test calc.py's unique functionality of interpreting SI 'suffixes'.
For instance 'k' stands for 'kilo-', so '1k' should be 1,000
"""
test_mapping = [('4.2%', 0.042), ('2.25k', 2250), ('8.3M', 8300000),
('9.9G', 9.9e9), ('1.2T', 1.2e12), ('7.4c', 0.074),
('5.4m', 0.0054), ('8.7u', 0.0000087),
('5.6n', 5.6e-9), ('4.2p', 4.2e-12)]
for (expr, answer) in test_mapping:
tolerance = answer * 1e-6 # Make rel. tolerance, because of floats
fail_msg = "Failure in testing suffix '{0}': '{1}' was not {2}"
fail_msg = fail_msg.format(expr[-1], expr, answer)
self.assertAlmostEqual(calc.evaluator({}, {}, expr), answer,
delta=tolerance, msg=fail_msg)
def test_operator_sanity(self):
"""
Test for simple things like '5+2' and '5/2'
"""
var1 = 5.0
var2 = 2.0
operators = [('+', 7), ('-', 3), ('*', 10), ('/', 2.5), ('^', 25)]
for (operator, answer) in operators:
input_str = "{0} {1} {2}".format(var1, operator, var2)
result = calc.evaluator({}, {}, input_str)
fail_msg = "Failed on operator '{0}': '{1}' was not {2}".format(
operator, input_str, answer)
self.assertEqual(answer, result, msg=fail_msg)
def test_raises_zero_division_err(self):
"""
Ensure division by zero gives an error
"""
self.assertRaises(ZeroDivisionError, calc.evaluator,
{}, {}, '1/0')
self.assertRaises(ZeroDivisionError, calc.evaluator,
{}, {}, '1/0.0')
self.assertRaises(ZeroDivisionError, calc.evaluator,
{'x': 0.0}, {}, '1/x')
def test_parallel_resistors(self):
"""
Test the parallel resistor operator ||
The formula is given by
a || b || c ...
= 1 / (1/a + 1/b + 1/c + ...)
It is the resistance of a parallel circuit of resistors with resistance
a, b, c, etc. See if this evaluates correctly.
"""
self.assertEqual(calc.evaluator({}, {}, '1||1'), 0.5)
self.assertEqual(calc.evaluator({}, {}, '1||1||2'), 0.4)
self.assertEqual(calc.evaluator({}, {}, "j||1"), 0.5 + 0.5j)
def test_parallel_resistors_with_zero(self):
"""
Check the behavior of the || operator with 0
"""
self.assertTrue(numpy.isnan(calc.evaluator({}, {}, '0||1')))
self.assertTrue(numpy.isnan(calc.evaluator({}, {}, '0.0||1')))
self.assertTrue(numpy.isnan(calc.evaluator({'x': 0.0}, {}, 'x||1')))
def assert_function_values(self, fname, ins, outs, tolerance=1e-3):
"""
Helper function to test many values at once
Test the accuracy of evaluator's use of the function given by fname
Specifically, the equality of `fname(ins[i])` against outs[i].
This is used later to test a whole bunch of f(x) = y at a time
"""
for (arg, val) in zip(ins, outs):
input_str = "{0}({1})".format(fname, arg)
result = calc.evaluator({}, {}, input_str)
fail_msg = "Failed on function {0}: '{1}' was not {2}".format(
fname, input_str, val)
self.assertAlmostEqual(val, result, delta=tolerance, msg=fail_msg)
def test_trig_functions(self):
"""
Test the trig functions provided in calc.py
which are: sin, cos, tan, arccos, arcsin, arctan
"""
angles = ['-pi/4', '0', 'pi/6', 'pi/5', '5*pi/4', '9*pi/4', '1 + j']
sin_values = [-0.707, 0, 0.5, 0.588, -0.707, 0.707, 1.298 + 0.635j]
cos_values = [0.707, 1, 0.866, 0.809, -0.707, 0.707, 0.834 - 0.989j]
tan_values = [-1, 0, 0.577, 0.727, 1, 1, 0.272 + 1.084j]
# Cannot test tan(pi/2) b/c pi/2 is a float and not precise...
self.assert_function_values('sin', angles, sin_values)
self.assert_function_values('cos', angles, cos_values)
self.assert_function_values('tan', angles, tan_values)
# Include those where the real part is between -pi/2 and pi/2
arcsin_inputs = ['-0.707', '0', '0.5', '0.588', '1.298 + 0.635*j']
arcsin_angles = [-0.785, 0, 0.524, 0.629, 1 + 1j]
self.assert_function_values('arcsin', arcsin_inputs, arcsin_angles)
# Rather than throwing an exception, numpy.arcsin gives nan
# self.assertTrue(numpy.isnan(calc.evaluator({}, {}, 'arcsin(-1.1)')))
# self.assertTrue(numpy.isnan(calc.evaluator({}, {}, 'arcsin(1.1)')))
# Disabled for now because they are giving a runtime warning... :-/
# Include those where the real part is between 0 and pi
arccos_inputs = ['1', '0.866', '0.809', '0.834-0.989*j']
arccos_angles = [0, 0.524, 0.628, 1 + 1j]
self.assert_function_values('arccos', arccos_inputs, arccos_angles)
# self.assertTrue(numpy.isnan(calc.evaluator({}, {}, 'arccos(-1.1)')))
# self.assertTrue(numpy.isnan(calc.evaluator({}, {}, 'arccos(1.1)')))
# Has the same range as arcsin
arctan_inputs = ['-1', '0', '0.577', '0.727', '0.272 + 1.084*j']
arctan_angles = arcsin_angles
self.assert_function_values('arctan', arctan_inputs, arctan_angles)
def test_other_functions(self):
"""
Test the non-trig functions provided in calc.py
Specifically:
sqrt, log10, log2, ln, abs,
fact, factorial
"""
# Test sqrt
self.assert_function_values('sqrt',
[0, 1, 2, 1024], # -1
[0, 1, 1.414, 32]) # 1j
# sqrt(-1) is NAN not j (!!).
# Test logs
self.assert_function_values('log10',
[0.1, 1, 3.162, 1000000, '1+j'],
[-1, 0, 0.5, 6, 0.151 + 0.341j])
self.assert_function_values('log2',
[0.5, 1, 1.414, 1024, '1+j'],
[-1, 0, 0.5, 10, 0.5 + 1.133j])
self.assert_function_values('ln',
[0.368, 1, 1.649, 2.718, 42, '1+j'],
[-1, 0, 0.5, 1, 3.738, 0.347 + 0.785j])
# Test abs
self.assert_function_values('abs', [-1, 0, 1, 'j'], [1, 0, 1, 1])
# Test factorial
fact_inputs = [0, 1, 3, 7]
fact_values = [1, 1, 6, 5040]
self.assert_function_values('fact', fact_inputs, fact_values)
self.assert_function_values('factorial', fact_inputs, fact_values)
self.assertRaises(ValueError, calc.evaluator, {}, {}, "fact(-1)")
self.assertRaises(ValueError, calc.evaluator, {}, {}, "fact(0.5)")
self.assertRaises(ValueError, calc.evaluator, {}, {}, "factorial(-1)")
self.assertRaises(ValueError, calc.evaluator, {}, {}, "factorial(0.5)")
def test_constants(self):
"""
Test the default constants provided in calc.py
which are: j (complex number), e, pi, k, c, T, q
"""
# Of the form ('expr', python value, tolerance (or None for exact))
default_variables = [('j', 1j, None),
('e', 2.7183, 1e-3),
('pi', 3.1416, 1e-3),
# c = speed of light
('c', 2.998e8, 1e5),
# 0 deg C = T Kelvin
('T', 298.15, 0.01),
# Note k = scipy.constants.k = 1.3806488e-23
('k', 1.3806488e-23, 1e-26),
# Note q = scipy.constants.e = 1.602176565e-19
('q', 1.602176565e-19, 1e-22)]
for (variable, value, tolerance) in default_variables:
fail_msg = "Failed on constant '{0}', not within bounds".format(
variable)
result = calc.evaluator({}, {}, variable)
if tolerance is None:
self.assertEqual(value, result, msg=fail_msg)
else:
self.assertAlmostEqual(value, result,
delta=tolerance, msg=fail_msg)
def test_complex_expression(self):
"""
Calculate combinations of operators and default functions
"""
self.assertAlmostEqual(
calc.evaluator({}, {}, "(2^2+1.0)/sqrt(5e0)*5-1"),
10.180,
delta=1e-3)
self.assertAlmostEqual(
calc.evaluator({}, {}, "1+1/(1+1/(1+1/(1+1)))"),
1.6,
delta=1e-3)
self.assertAlmostEqual(
calc.evaluator({}, {}, "10||sin(7+5)"),
-0.567, delta=0.01)
self.assertAlmostEqual(calc.evaluator({}, {}, "sin(e)"),
0.41, delta=0.01)
self.assertAlmostEqual(calc.evaluator({}, {}, "k*T/q"),
0.025, delta=1e-3)
self.assertAlmostEqual(calc.evaluator({}, {}, "e^(j*pi)"),
-1, delta=1e-5)
def test_simple_vars(self):
"""
Substitution of variables into simple equations
"""
variables = {'x': 9.72, 'y': 7.91, 'loooooong': 6.4}
# Should not change value of constant
# even with different numbers of variables...
self.assertEqual(calc.evaluator({'x': 9.72}, {}, '13'), 13)
self.assertEqual(calc.evaluator({'x': 9.72, 'y': 7.91}, {}, '13'), 13)
self.assertEqual(calc.evaluator(variables, {}, '13'), 13)
# Easy evaluation
self.assertEqual(calc.evaluator(variables, {}, 'x'), 9.72)
self.assertEqual(calc.evaluator(variables, {}, 'y'), 7.91)
self.assertEqual(calc.evaluator(variables, {}, 'loooooong'), 6.4)
# Test a simple equation
self.assertAlmostEqual(calc.evaluator(variables, {}, '3*x-y'),
21.25, delta=0.01) # = 3 * 9.72 - 7.91
self.assertAlmostEqual(calc.evaluator(variables, {}, 'x*y'),
76.89, delta=0.01)
self.assertEqual(calc.evaluator({'x': 9.72, 'y': 7.91}, {}, "13"), 13)
self.assertEqual(calc.evaluator(variables, {}, "13"), 13)
self.assertEqual(
calc.evaluator({
'a': 2.2997471478310274, 'k': 9, 'm': 8,
'x': 0.66009498411213041},
{}, "5"),
5)
def test_variable_case_sensitivity(self):
"""
Test the case sensitivity flag and corresponding behavior
"""
self.assertEqual(
calc.evaluator({'R1': 2.0, 'R3': 4.0}, {}, "r1*r3"),
8.0)
variables = {'t': 1.0}
self.assertEqual(calc.evaluator(variables, {}, "t"), 1.0)
self.assertEqual(calc.evaluator(variables, {}, "T"), 1.0)
self.assertEqual(calc.evaluator(variables, {}, "t", cs=True), 1.0)
# Recall 'T' is a default constant, with value 298.15
self.assertAlmostEqual(calc.evaluator(variables, {}, "T", cs=True),
298, delta=0.2)
def test_simple_funcs(self):
"""
Substitution of custom functions
"""
variables = {'x': 4.712}
functions = {'id': lambda x: x}
self.assertEqual(calc.evaluator({}, functions, 'id(2.81)'), 2.81)
self.assertEqual(calc.evaluator({}, functions, 'id(2.81)'), 2.81)
self.assertEqual(calc.evaluator(variables, functions, 'id(x)'), 4.712)
functions.update({'f': numpy.sin})
self.assertAlmostEqual(calc.evaluator(variables, functions, 'f(x)'),
-1, delta=1e-3)
def test_function_case_sensitivity(self):
"""
Test the case sensitivity of functions
"""
functions = {'f': lambda x: x,
'F': lambda x: x + 1}
# Test case insensitive evaluation
# Both evaluations should call the same function
self.assertEqual(calc.evaluator({}, functions, 'f(6)'),
calc.evaluator({}, functions, 'F(6)'))
# Test case sensitive evaluation
self.assertNotEqual(calc.evaluator({}, functions, 'f(6)', cs=True),
calc.evaluator({}, functions, 'F(6)', cs=True))
def test_undefined_vars(self):
"""
Check to see if the evaluator catches undefined variables
"""
variables = {'R1': 2.0, 'R3': 4.0}
self.assertRaises(calc.UndefinedVariable, calc.evaluator,
{}, {}, "5+7 QWSEKO")
self.assertRaises(calc.UndefinedVariable, calc.evaluator,
{'r1': 5}, {}, "r1+r2")
self.assertRaises(calc.UndefinedVariable, calc.evaluator,
variables, {}, "r1*r3", cs=True)
......@@ -469,6 +469,7 @@ class LoncapaProblem(object):
random_seed=self.seed,
python_path=python_path,
cache=self.system.cache,
slug=self.problem_id,
)
except Exception as err:
log.exception("Error while execing script code: " + all_code)
......
......@@ -140,6 +140,8 @@ class LoncapaResponse(object):
self.context = context
self.system = system
self.id = xml.get('id')
for abox in inputfields:
if abox.tag not in self.allowed_inputfields:
msg = "%s: cannot have input field %s" % (
......@@ -286,7 +288,7 @@ class LoncapaResponse(object):
}
try:
safe_exec.safe_exec(code, globals_dict, python_path=self.context['python_path'])
safe_exec.safe_exec(code, globals_dict, python_path=self.context['python_path'], slug=self.id)
except Exception as err:
msg = 'Error %s in evaluating hint function %s' % (err, hintfn)
msg += "\nSee XML source line %s" % getattr(
......@@ -935,7 +937,6 @@ class CustomResponse(LoncapaResponse):
# if <customresponse> has an "expect" (or "answer") attribute then save
# that
self.expect = xml.get('expect') or xml.get('answer')
self.myid = xml.get('id')
log.debug('answer_ids=%s' % self.answer_ids)
......@@ -972,7 +973,7 @@ class CustomResponse(LoncapaResponse):
'ans': ans,
}
globals_dict.update(kwargs)
safe_exec.safe_exec(code, globals_dict, python_path=self.context['python_path'])
safe_exec.safe_exec(code, globals_dict, python_path=self.context['python_path'], slug=self.id)
return globals_dict['cfn_return']
return check_function
......@@ -981,7 +982,7 @@ class CustomResponse(LoncapaResponse):
if not self.code:
if answer is None:
log.error("[courseware.capa.responsetypes.customresponse] missing"
" code checking script! id=%s" % self.myid)
" code checking script! id=%s" % self.id)
self.code = ''
else:
answer_src = answer.get('src')
......@@ -1034,7 +1035,7 @@ class CustomResponse(LoncapaResponse):
# note that this doesn't help the "cfn" version - only the exec version
self.context.update({
# my ID
'response_id': self.myid,
'response_id': self.id,
# expected answer (if given as attribute)
'expect': self.expect,
......@@ -1089,7 +1090,7 @@ class CustomResponse(LoncapaResponse):
# exec the check function
if isinstance(self.code, basestring):
try:
safe_exec.safe_exec(self.code, self.context, cache=self.system.cache)
safe_exec.safe_exec(self.code, self.context, cache=self.system.cache, slug=self.id)
except Exception as err:
self._handle_exec_exception(err)
......@@ -1813,7 +1814,7 @@ class SchematicResponse(LoncapaResponse):
]
self.context.update({'submission': submission})
try:
safe_exec.safe_exec(self.code, self.context, cache=self.system.cache)
safe_exec.safe_exec(self.code, self.context, cache=self.system.cache, slug=self.id)
except Exception as err:
msg = 'Error %s in evaluating SchematicResponse' % err
raise ResponseError(msg)
......
......@@ -71,7 +71,7 @@ def update_hash(hasher, obj):
@statsd.timed('capa.safe_exec.time')
def safe_exec(code, globals_dict, random_seed=None, python_path=None, cache=None):
def safe_exec(code, globals_dict, random_seed=None, python_path=None, cache=None, slug=None):
"""
Execute python code safely.
......@@ -87,6 +87,9 @@ def safe_exec(code, globals_dict, random_seed=None, python_path=None, cache=None
to cache the execution, taking into account the code, the values of the globals,
and the random seed.
`slug` is an arbitrary string, a description that's meaningful to the
caller, that will be used in log messages.
"""
# Check the cache for a previous result.
if cache:
......@@ -112,7 +115,7 @@ def safe_exec(code, globals_dict, random_seed=None, python_path=None, cache=None
try:
codejail_safe_exec(
code_prolog + LAZY_IMPORTS + code, globals_dict,
python_path=python_path,
python_path=python_path, slug=slug,
)
except SafeExecException as e:
emsg = e.message
......
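A rough sketch of how a caller might now pass `slug` (the code string and slug value here are illustrative; in the diff the actual callers are the capa response types above):

globals_dict = {'a': 21}

safe_exec(
    "b = a * 2",            # code to run in the sandbox
    globals_dict,           # updated in place with the resulting globals
    random_seed=1,
    python_path=None,
    cache=None,
    slug="i4x://Org/Course/problem/p1",   # shows up in log messages on failure
)

assert globals_dict['b'] == 42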
"""Tests for the logic in input type mako templates."""
"""
Tests for the logic in input type mako templates.
"""
import unittest
import capa
import os.path
import json
from lxml import etree
from mako.template import Template as MakoTemplate
from mako import exceptions
class TemplateError(Exception):
"""Error occurred while rendering a Mako template"""
"""
Error occurred while rendering a Mako template.
"""
pass
class TemplateTestCase(unittest.TestCase):
"""Utilitites for testing templates"""
"""
Utilities for testing templates.
"""
# Subclasses override this to specify the file name of the template
# to be loaded from capa/templates.
......@@ -23,7 +30,9 @@ class TemplateTestCase(unittest.TestCase):
TEMPLATE_NAME = None
def setUp(self):
"""Load the template"""
"""
Load the template under test.
"""
capa_path = capa.__path__[0]
self.template_path = os.path.join(capa_path,
'templates',
......@@ -33,18 +42,31 @@ class TemplateTestCase(unittest.TestCase):
template_file.close()
def render_to_xml(self, context_dict):
"""Render the template using the `context_dict` dict.
Returns an `etree` XML element."""
"""
Render the template using the `context_dict` dict.
Returns an `etree` XML element.
"""
try:
xml_str = self.template.render_unicode(**context_dict)
except:
raise TemplateError(exceptions.text_error_template().render())
return etree.fromstring(xml_str)
# Attempt to construct an XML tree from the template
# This makes it easy to use XPath to make assertions, rather
# than dealing with a string.
# We modify the string slightly by wrapping it in <test>
# tags, to ensure it has one root element.
try:
xml = etree.fromstring("<test>" + xml_str + "</test>")
except Exception as exc:
raise TemplateError("Could not parse XML from '{0}': {1}".format(
xml_str, str(exc)))
else:
return xml
def assert_has_xpath(self, xml_root, xpath, context_dict, exact_num=1):
"""Asserts that the xml tree has an element satisfying `xpath`.
"""
Asserts that the xml tree has an element satisfying `xpath`.
`xml_root` is an etree XML element
`xpath` is an XPath string, such as `'/foo/bar'`
......@@ -57,7 +79,8 @@ class TemplateTestCase(unittest.TestCase):
self.assertEqual(len(xml_root.xpath(xpath)), exact_num, msg=message)
def assert_no_xpath(self, xml_root, xpath, context_dict):
"""Asserts that the xml tree does NOT have an element
"""
Asserts that the xml tree does NOT have an element
satisfying `xpath`.
`xml_root` is an etree XML element
......@@ -67,7 +90,8 @@ class TemplateTestCase(unittest.TestCase):
self.assert_has_xpath(xml_root, xpath, context_dict, exact_num=0)
def assert_has_text(self, xml_root, xpath, text, exact=True):
"""Find the element at `xpath` in `xml_root` and assert
"""
Find the element at `xpath` in `xml_root` and assert
that its text is `text`.
`xml_root` is an etree XML element
......@@ -88,7 +112,9 @@ class TemplateTestCase(unittest.TestCase):
class ChoiceGroupTemplateTest(TemplateTestCase):
"""Test mako template for `<choicegroup>` input"""
"""
Test mako template for `<choicegroup>` input.
"""
TEMPLATE_NAME = 'choicegroup.html'
......@@ -103,8 +129,10 @@ class ChoiceGroupTemplateTest(TemplateTestCase):
super(ChoiceGroupTemplateTest, self).setUp()
def test_problem_marked_correct(self):
"""Test conditions under which the entire problem
(not a particular option) is marked correct"""
"""
Test conditions under which the entire problem
(not a particular option) is marked correct.
"""
self.context['status'] = 'correct'
self.context['input_type'] = 'checkbox'
......@@ -123,8 +151,10 @@ class ChoiceGroupTemplateTest(TemplateTestCase):
self.context)
def test_problem_marked_incorrect(self):
"""Test all conditions under which the entire problem
(not a particular option) is marked incorrect"""
"""
Test all conditions under which the entire problem
(not a particular option) is marked incorrect.
"""
conditions = [
{'status': 'incorrect', 'input_type': 'radio', 'value': ''},
{'status': 'incorrect', 'input_type': 'checkbox', 'value': []},
......@@ -151,8 +181,10 @@ class ChoiceGroupTemplateTest(TemplateTestCase):
self.context)
def test_problem_marked_unsubmitted(self):
"""Test all conditions under which the entire problem
(not a particular option) is marked unanswered"""
"""
Test all conditions under which the entire problem
(not a particular option) is marked unanswered.
"""
conditions = [
{'status': 'unsubmitted', 'input_type': 'radio', 'value': ''},
{'status': 'unsubmitted', 'input_type': 'radio', 'value': []},
......@@ -181,8 +213,10 @@ class ChoiceGroupTemplateTest(TemplateTestCase):
self.context)
def test_option_marked_correct(self):
"""Test conditions under which a particular option
(not the entire problem) is marked correct."""
"""
Test conditions under which a particular option
(not the entire problem) is marked correct.
"""
conditions = [
{'input_type': 'radio', 'value': '2'},
{'input_type': 'radio', 'value': ['2']}]
......@@ -200,8 +234,10 @@ class ChoiceGroupTemplateTest(TemplateTestCase):
self.assert_no_xpath(xml, xpath, self.context)
def test_option_marked_incorrect(self):
"""Test conditions under which a particular option
(not the entire problem) is marked incorrect."""
"""
Test conditions under which a particular option
(not the entire problem) is marked incorrect.
"""
conditions = [
{'input_type': 'radio', 'value': '2'},
{'input_type': 'radio', 'value': ['2']}]
......@@ -219,7 +255,8 @@ class ChoiceGroupTemplateTest(TemplateTestCase):
self.assert_no_xpath(xml, xpath, self.context)
def test_never_show_correctness(self):
"""Test conditions under which we tell the template to
"""
Test conditions under which we tell the template to
NOT show correct/incorrect, but instead show a message.
This is used, for example, by the Justice course to ask
......@@ -268,8 +305,10 @@ class ChoiceGroupTemplateTest(TemplateTestCase):
self.context['submitted_message'])
def test_no_message_before_submission(self):
"""Ensure that we don't show the `submitted_message`
before submitting"""
"""
Ensure that we don't show the `submitted_message`
before submitting.
"""
conditions = [
{'input_type': 'radio', 'status': 'unsubmitted', 'value': ''},
......@@ -298,7 +337,9 @@ class ChoiceGroupTemplateTest(TemplateTestCase):
class TextlineTemplateTest(TemplateTestCase):
"""Test mako template for `<textline>` input"""
"""
Test mako template for `<textline>` input.
"""
TEMPLATE_NAME = 'textline.html'
......@@ -405,3 +446,271 @@ class TextlineTemplateTest(TemplateTestCase):
xpath = "//span[@class='message']"
self.assert_has_text(xml, xpath, self.context['msg'])
class AnnotationInputTemplateTest(TemplateTestCase):
"""
Test mako template for `<annotationinput>` input.
"""
TEMPLATE_NAME = 'annotationinput.html'
def setUp(self):
self.context = {'id': 2,
'value': '<p>Test value</p>',
'title': '<h1>This is a title</h1>',
'text': '<p><b>This</b> is a test.</p>',
'comment': '<p>This is a test comment</p>',
'comment_prompt': '<p>This is a test comment prompt</p>',
'comment_value': '<p>This is the value of a test comment</p>',
'tag_prompt': '<p>This is a tag prompt</p>',
'options': [],
'has_options_value': False,
'debug': False,
'status': 'unsubmitted',
'return_to_annotation': False,
'msg': '<p>This is a test message</p>', }
super(AnnotationInputTemplateTest, self).setUp()
def test_return_to_annotation(self):
"""
Test link for `Return to Annotation` appears if and only if
the flag is set.
"""
xpath = "//a[@class='annotation-return']"
# If return_to_annotation set, then show the link
self.context['return_to_annotation'] = True
xml = self.render_to_xml(self.context)
self.assert_has_xpath(xml, xpath, self.context)
# Otherwise, do not show the links
self.context['return_to_annotation'] = False
xml = self.render_to_xml(self.context)
self.assert_no_xpath(xml, xpath, self.context)
def test_option_selection(self):
"""
Test that selected options are selected.
"""
# Create options 0-4 and select option 2
self.context['options_value'] = [2]
self.context['options'] = [
{'id': id_num,
'choice': 'correct',
'description': '<p>Unescaped <b>HTML {0}</b></p>'.format(id_num)}
for id_num in range(0, 5)]
xml = self.render_to_xml(self.context)
# Expect that each option description is visible
# with unescaped HTML.
# Since the HTML is unescaped, we can traverse the XML tree
for id_num in range(0, 5):
xpath = "//span[@data-id='{0}']/p/b".format(id_num)
self.assert_has_text(xml, xpath, 'HTML {0}'.format(id_num), exact=False)
# Expect that the correct option is selected
xpath = "//span[contains(@class,'selected')]/p/b"
self.assert_has_text(xml, xpath, 'HTML 2', exact=False)
def test_submission_status(self):
"""
Test that the submission status displays correctly.
"""
# Test cases of `(input_status, expected_css_class)` tuples
test_cases = [('unsubmitted', 'unanswered'),
('incomplete', 'incorrect'),
('incorrect', 'incorrect')]
for (input_status, expected_css_class) in test_cases:
self.context['status'] = input_status
xml = self.render_to_xml(self.context)
xpath = "//span[@class='{0}']".format(expected_css_class)
self.assert_has_xpath(xml, xpath, self.context)
# If individual options are being marked, then expect
# just the option to be marked incorrect, not the whole problem
self.context['has_options_value'] = True
self.context['status'] = 'incorrect'
xpath = "//span[@class='incorrect']"
xml = self.render_to_xml(self.context)
self.assert_no_xpath(xml, xpath, self.context)
def test_display_html_comment(self):
"""
Test that HTML comment and comment prompt render.
"""
self.context['comment'] = "<p>Unescaped <b>comment HTML</b></p>"
self.context['comment_prompt'] = "<p>Prompt <b>prompt HTML</b></p>"
self.context['text'] = "<p>Unescaped <b>text</b></p>"
xml = self.render_to_xml(self.context)
# Because the HTML is unescaped, we should be able to
# descend to the <b> tag
xpath = "//div[@class='block']/p/b"
self.assert_has_text(xml, xpath, 'prompt HTML')
xpath = "//div[@class='block block-comment']/p/b"
self.assert_has_text(xml, xpath, 'comment HTML')
xpath = "//div[@class='block block-highlight']/p/b"
self.assert_has_text(xml, xpath, 'text')
def test_display_html_tag_prompt(self):
"""
Test that HTML tag prompts render.
"""
self.context['tag_prompt'] = "<p>Unescaped <b>HTML</b></p>"
xml = self.render_to_xml(self.context)
# Because the HTML is unescaped, we should be able to
# descend to the <b> tag
xpath = "//div[@class='block']/p/b"
self.assert_has_text(xml, xpath, 'HTML')
class MathStringTemplateTest(TemplateTestCase):
"""
Test mako template for `<mathstring>` input.
"""
TEMPLATE_NAME = 'mathstring.html'
def setUp(self):
self.context = {'isinline': False, 'mathstr': '', 'tail': ''}
super(MathStringTemplateTest, self).setUp()
def test_math_string_inline(self):
self.context['isinline'] = True
self.context['mathstr'] = 'y = ax^2 + bx + c'
xml = self.render_to_xml(self.context)
xpath = "//section[@class='math-string']/span[1]"
self.assert_has_text(xml, xpath,
'[mathjaxinline]y = ax^2 + bx + c[/mathjaxinline]')
def test_math_string_not_inline(self):
self.context['isinline'] = False
self.context['mathstr'] = 'y = ax^2 + bx + c'
xml = self.render_to_xml(self.context)
xpath = "//section[@class='math-string']/span[1]"
self.assert_has_text(xml, xpath,
'[mathjax]y = ax^2 + bx + c[/mathjax]')
def test_tail_html(self):
self.context['tail'] = "<p>This is some <b>tail</b> <em>HTML</em></p>"
xml = self.render_to_xml(self.context)
# HTML from `tail` should NOT be escaped.
# We should be able to traverse it as part of the XML tree
xpath = "//section[@class='math-string']/span[2]/p/b"
self.assert_has_text(xml, xpath, 'tail')
xpath = "//section[@class='math-string']/span[2]/p/em"
self.assert_has_text(xml, xpath, 'HTML')
class OptionInputTemplateTest(TemplateTestCase):
"""
Test mako template for `<optioninput>` input.
"""
TEMPLATE_NAME = 'optioninput.html'
def setUp(self):
self.context = {'id': 2, 'options': [], 'status': 'unsubmitted', 'value': 0}
super(OptionInputTemplateTest, self).setUp()
def test_select_options(self):
# Create options 0-4, and select option 2
self.context['options'] = [(id_num, '<b>Option {0}</b>'.format(id_num))
for id_num in range(0, 5)]
self.context['value'] = 2
xml = self.render_to_xml(self.context)
# Should have a dummy default
xpath = "//option[@value='option_2_dummy_default']"
self.assert_has_xpath(xml, xpath, self.context)
# Should have each of the options, with the correct description
# The description HTML should NOT be escaped
# (that's why we descend into the <b> tag)
for id_num in range(0, 5):
xpath = "//option[@value='{0}']/b".format(id_num)
self.assert_has_text(xml, xpath, 'Option {0}'.format(id_num))
# Should have the correct option selected
xpath = "//option[@selected='true']/b"
self.assert_has_text(xml, xpath, 'Option 2')
def test_status(self):
# Test cases, where each tuple represents
# `(input_status, expected_css_class)`
test_cases = [('unsubmitted', 'unanswered'),
('correct', 'correct'),
('incorrect', 'incorrect'),
('incomplete', 'incorrect')]
for (input_status, expected_css_class) in test_cases:
self.context['status'] = input_status
xml = self.render_to_xml(self.context)
xpath = "//span[@class='{0}']".format(expected_css_class)
self.assert_has_xpath(xml, xpath, self.context)
class DragAndDropTemplateTest(TemplateTestCase):
"""
Test mako template for `<draganddropinput>` input.
"""
TEMPLATE_NAME = 'drag_and_drop_input.html'
def setUp(self):
self.context = {'id': 2,
'drag_and_drop_json': '',
'value': 0,
'status': 'unsubmitted',
'msg': ''}
super(DragAndDropTemplateTest, self).setUp()
def test_status(self):
# Test cases, where each tuple represents
# `(input_status, expected_css_class, expected_text)`
test_cases = [('unsubmitted', 'unanswered', 'unanswered'),
('correct', 'correct', 'correct'),
('incorrect', 'incorrect', 'incorrect'),
('incomplete', 'incorrect', 'incomplete')]
for (input_status, expected_css_class, expected_text) in test_cases:
self.context['status'] = input_status
xml = self.render_to_xml(self.context)
# Expect a <div> with the status
xpath = "//div[@class='{0}']".format(expected_css_class)
self.assert_has_xpath(xml, xpath, self.context)
# Expect a <p> with the status
xpath = "//p[@class='status']"
self.assert_has_text(xml, xpath, expected_text, exact=False)
def test_drag_and_drop_json_html(self):
json_with_html = json.dumps({'test': '<p>Unescaped <b>HTML</b></p>'})
self.context['drag_and_drop_json'] = json_with_html
xml = self.render_to_xml(self.context)
# Assert that the JSON-encoded string was inserted without
# escaping the HTML. We should be able to traverse the XML tree.
xpath = "//div[@class='drag_and_drop_problem_json']/p/b"
self.assert_has_text(xml, xpath, 'HTML')
......@@ -10,7 +10,6 @@ import random
import unittest
import textwrap
import mock
import textwrap
from . import new_loncapa_problem, test_system
......@@ -190,7 +189,7 @@ class SymbolicResponseTest(ResponseTest):
def test_grade_single_input(self):
problem = self.build_problem(math_display=True,
expect="2*x+3*y")
expect="2*x+3*y")
# Correct answers
correct_inputs = [
......@@ -223,7 +222,6 @@ class SymbolicResponseTest(ResponseTest):
for (input_str, input_mathml) in incorrect_inputs:
self._assert_symbolic_grade(problem, input_str, input_mathml, 'incorrect')
def test_complex_number_grade(self):
problem = self.build_problem(math_display=True,
expect="[[cos(theta),i*sin(theta)],[i*sin(theta),cos(theta)]]",
......@@ -241,7 +239,7 @@ class SymbolicResponseTest(ResponseTest):
# Correct answer
with mock.patch.object(requests, 'post') as mock_post:
# Simulate what the LaTeX-to-MathML server would
# Simulate what the LaTeX-to-MathML server would
# send for the correct response input
mock_post.return_value.text = correct_snuggletex_response
......@@ -323,7 +321,7 @@ class SymbolicResponseTest(ResponseTest):
dynamath_input,
expected_correctness):
input_dict = {'1_2_1': str(student_input),
'1_2_1_dynamath': str(dynamath_input) }
'1_2_1_dynamath': str(dynamath_input)}
correct_map = problem.grade_answers(input_dict)
......@@ -349,10 +347,18 @@ class OptionResponseTest(ResponseTest):
class FormulaResponseTest(ResponseTest):
"""
Test the FormulaResponse class
"""
from response_xml_factory import FormulaResponseXMLFactory
xml_factory_class = FormulaResponseXMLFactory
def test_grade(self):
"""
Test basic functionality of FormulaResponse
Specifically, if it can understand equivalence of formulae
"""
# Sample variables x and y in the range [-10, 10]
sample_dict = {'x': (-10, 10), 'y': (-10, 10)}
......@@ -373,6 +379,9 @@ class FormulaResponseTest(ResponseTest):
self.assert_grade(problem, input_formula, "incorrect")
def test_hint(self):
"""
Test the hint-giving functionality of FormulaResponse
"""
# Sample variables x and y in the range [-10, 10]
sample_dict = {'x': (-10, 10), 'y': (-10, 10)}
......@@ -401,6 +410,10 @@ class FormulaResponseTest(ResponseTest):
'Try including the variable x')
def test_script(self):
"""
Test if python script can be used to generate answers
"""
# Calculate the answer using a script
script = "calculated_ans = 'x+x'"
......@@ -419,7 +432,9 @@ class FormulaResponseTest(ResponseTest):
self.assert_grade(problem, '3*x', 'incorrect')
def test_parallel_resistors(self):
"""Test parallel resistors"""
"""
Test parallel resistors
"""
sample_dict = {'R1': (10, 10), 'R2': (2, 2), 'R3': (5, 5), 'R4': (1, 1)}
# Test problem
......@@ -440,8 +455,11 @@ class FormulaResponseTest(ResponseTest):
self.assert_grade(problem, input_formula, "incorrect")
def test_default_variables(self):
"""Test the default variables provided in common/lib/capa/capa/calc.py"""
# which are: j (complex number), e, pi, k, c, T, q
"""
Test the default variables provided in calc.py
which are: j (complex number), e, pi, k, c, T, q
"""
# Sample x in the range [-10,10]
sample_dict = {'x': (-10, 10)}
......@@ -464,11 +482,14 @@ class FormulaResponseTest(ResponseTest):
msg="Failed on variable {0}; the given, incorrect answer was {1} but graded 'correct'".format(var, incorrect))
def test_default_functions(self):
"""Test the default functions provided in common/lib/capa/capa/calc.py"""
# which are: sin, cos, tan, sqrt, log10, log2, ln,
# arccos, arcsin, arctan, abs,
# fact, factorial
"""
Test the default functions provided in common/lib/capa/capa/calc.py
which are:
sin, cos, tan, sqrt, log10, log2, ln,
arccos, arcsin, arctan, abs,
fact, factorial
"""
w = random.randint(3, 10)
sample_dict = {'x': (-10, 10), # Sample x in the range [-10,10]
'y': (1, 10), # Sample y in the range [1,10] - logs, arccos need positive inputs
......@@ -496,8 +517,10 @@ class FormulaResponseTest(ResponseTest):
msg="Failed on function {0}; the given, incorrect answer was {1} but graded 'correct'".format(func, incorrect))
def test_grade_infinity(self):
# This resolves a bug where a problem with relative tolerance would
# pass with any arbitrarily large student answer.
"""
Test that a large input on a problem with relative tolerance isn't
erroneously marked as correct.
"""
sample_dict = {'x': (1, 2)}
......@@ -514,8 +537,9 @@ class FormulaResponseTest(ResponseTest):
self.assert_grade(problem, input_formula, "incorrect")
def test_grade_nan(self):
# Attempt to produce a value which causes the student's answer to be
# evaluated to nan. See if this is resolved correctly.
"""
Test that expressions that evaluate to NaN are not marked as correct.
"""
sample_dict = {'x': (1, 2)}
......@@ -532,6 +556,18 @@ class FormulaResponseTest(ResponseTest):
input_formula = "x + 0*1e999"
self.assert_grade(problem, input_formula, "incorrect")
def test_raises_zero_division_err(self):
"""
See if division by zero raises an error.
"""
sample_dict = {'x': (1, 2)}
problem = self.build_problem(sample_dict=sample_dict,
num_samples=10,
tolerance="1%",
answer="x") # Answer doesn't matter
input_dict = {'1_2_1': '1/0'}
self.assertRaises(StudentInputError, problem.grade_answers, input_dict)
class StringResponseTest(ResponseTest):
from response_xml_factory import StringResponseXMLFactory
......@@ -592,7 +628,7 @@ class StringResponseTest(ResponseTest):
problem = self.build_problem(
answer="Michigan",
hintfn="gimme_a_hint",
script = textwrap.dedent("""
script=textwrap.dedent("""
def gimme_a_hint(answer_ids, student_answers, new_cmap, old_cmap):
aid = answer_ids[0]
answer = student_answers[aid]
......@@ -898,6 +934,14 @@ class NumericalResponseTest(ResponseTest):
incorrect_responses = ["", "3.9", "4.1", "0", "5.01e1"]
self.assert_multiple_grade(problem, correct_responses, incorrect_responses)
def test_raises_zero_division_err(self):
"""See if division by zero is handled correctly"""
problem = self.build_problem(question_text="What is 5 * 10?",
explanation="The answer is 50",
answer="5e+1") # Answer doesn't matter
input_dict = {'1_2_1': '1/0'}
self.assertRaises(StudentInputError, problem.grade_answers, input_dict)
class CustomResponseTest(ResponseTest):
from response_xml_factory import CustomResponseXMLFactory
......@@ -947,8 +991,8 @@ class CustomResponseTest(ResponseTest):
#
# 'answer_given' is the answer the student gave (if there is just one input)
# or an ordered list of answers (if there are multiple inputs)
#
# The function should return a dict of the form
#
# The function should return a dict of the form
# { 'ok': BOOL, 'msg': STRING }
#
script = textwrap.dedent("""
......
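A minimal sketch of a check function satisfying the contract described in the comment above (the function name and messages are illustrative, not from the diff):

def check_func(expect, answer_given):
    # `answer_given` is a single answer or an ordered list of answers;
    # return the {'ok': ..., 'msg': ...} dict described above.
    is_ok = (answer_given == expect)
    return {'ok': is_ok,
            'msg': 'Correct!' if is_ok else 'Expected %s' % expect}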
"""
Modules that get shown to the users when an error has occurred while
loading or rendering other modules
"""
import hashlib
import logging
import json
......@@ -22,12 +27,19 @@ log = logging.getLogger(__name__)
class ErrorFields(object):
"""
XBlock fields used by the ErrorModules
"""
contents = String(scope=Scope.content)
error_msg = String(scope=Scope.content)
display_name = String(scope=Scope.settings)
class ErrorModule(ErrorFields, XModule):
"""
Module that gets shown to staff when there has been an error while
loading or rendering other modules
"""
def get_html(self):
'''Show an error to staff.
......@@ -42,6 +54,10 @@ class ErrorModule(ErrorFields, XModule):
class NonStaffErrorModule(ErrorFields, XModule):
"""
Module that gets shown to students when there has been an error while
loading or rendering other modules
"""
def get_html(self):
'''Show an error to a student.
TODO (vshnayder): proper style, divs, etc.
......@@ -61,7 +77,7 @@ class ErrorDescriptor(ErrorFields, JSONEditingDescriptor):
module_class = ErrorModule
@classmethod
def _construct(self, system, contents, error_msg, location):
def _construct(cls, system, contents, error_msg, location):
if location.name is None:
location = location._replace(
......@@ -80,7 +96,7 @@ class ErrorDescriptor(ErrorFields, JSONEditingDescriptor):
'contents': contents,
'display_name': 'Error: ' + location.name
}
return ErrorDescriptor(
return cls(
system,
location,
model_data,
......
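The change from a hard-coded `ErrorDescriptor(...)` to `cls(...)` lets subclasses reuse `_construct` and still get instances of themselves; a simplified standalone illustration (class names here are stand-ins, not the real descriptors):

class BaseDescriptor(object):
    def __init__(self, data):
        self.data = data

    @classmethod
    def _construct(cls, data):
        # `cls` is whichever class the method was invoked on,
        # so subclasses receive instances of the subclass.
        return cls(data)


class NonStaffDescriptor(BaseDescriptor):
    pass


assert isinstance(NonStaffDescriptor._construct("oops"), NonStaffDescriptor)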
......@@ -268,7 +268,7 @@ class MongoModuleStore(ModuleStoreBase):
query = {'_id.org': location.org,
'_id.course': location.course,
'_id.category': {'$in': ['course', 'chapter', 'sequential', 'vertical',
'wrapper', 'problemset', 'conditional']}
'wrapper', 'problemset', 'conditional', 'randomize']}
}
# we just want the Location, children, and inheritable metadata
record_filter = {'_id': 1, 'definition.children': 1}
......
---
metadata:
display_name: default
data_dir: a_made_up_name
display_name: Video Alpha 1
version: 1
data: |
<videoalpha youtube="0.75:JMD_ifUUfsU,1.0:OEoXaMPEzfM,1.25:AKqURZnYqpk,1.50:DYpADpL7jAY"/>
<videoalpha show_captions="true" sub="name_of_file" youtube="0.75:JMD_ifUUfsU,1.0:OEoXaMPEzfM,1.25:AKqURZnYqpk,1.50:DYpADpL7jAY" >
<source src="https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.mp4"/>
<source src="https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.webm"/>
<source src="https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.ogv"/>
</videoalpha>
children: []
"""
Tests for ErrorModule and NonStaffErrorModule
"""
import unittest
from xmodule.tests import test_system
import xmodule.error_module as error_module
class TestErrorModule(unittest.TestCase):
"""
Tests for ErrorModule and ErrorDescriptor
"""
def setUp(self):
self.system = test_system()
self.org = "org"
self.course = "course"
self.fake_xml = "<problem />"
self.broken_xml = "<problem>"
self.error_msg = "Error"
def test_error_module_create(self):
descriptor = error_module.ErrorDescriptor.from_xml(
self.fake_xml, self.system, self.org, self.course)
self.assertTrue(isinstance(descriptor, error_module.ErrorDescriptor))
def test_error_module_rendering(self):
descriptor = error_module.ErrorDescriptor.from_xml(
self.fake_xml, self.system, self.org, self.course, self.error_msg)
module = descriptor.xmodule(self.system)
rendered_html = module.get_html()
self.assertIn(self.error_msg, rendered_html)
self.assertIn(self.fake_xml, rendered_html)
class TestNonStaffErrorModule(TestErrorModule):
"""
Tests for NonStaffErrorModule and NonStaffErrorDescriptor
"""
def test_non_staff_error_module_create(self):
descriptor = error_module.NonStaffErrorDescriptor.from_xml(
self.fake_xml, self.system, self.org, self.course)
self.assertTrue(isinstance(descriptor, error_module.NonStaffErrorDescriptor))
def test_non_staff_error_module_rendering(self):
descriptor = error_module.NonStaffErrorDescriptor.from_xml(
self.fake_xml, self.system, self.org, self.course)
module = descriptor.xmodule(self.system)
rendered_html = module.get_html()
self.assertNotIn(self.error_msg, rendered_html)
self.assertNotIn(self.fake_xml, rendered_html)
......@@ -4,10 +4,12 @@
import json
import unittest
from lxml import etree
from xmodule.poll_module import PollDescriptor
from xmodule.conditional_module import ConditionalDescriptor
from xmodule.word_cloud_module import WordCloudDescriptor
from xmodule.videoalpha_module import VideoAlphaDescriptor
class PostData:
"""Class which emulate postdata."""
......@@ -117,3 +119,33 @@ class WordCloudModuleTest(LogicTest):
)
self.assertEqual(100.0, sum(i['percent'] for i in response['top_words']) )
class VideoAlphaModuleTest(LogicTest):
descriptor_class = VideoAlphaDescriptor
raw_model_data = {
'data': '<videoalpha />'
}
def test_get_timeframe_no_parameters(self):
xmltree = etree.fromstring('<videoalpha>test</videoalpha>')
output = self.xmodule._get_timeframe(xmltree)
self.assertEqual(output, ('', ''))
def test_get_timeframe_with_one_parameter(self):
xmltree = etree.fromstring(
'<videoalpha start_time="00:04:07">test</videoalpha>'
)
output = self.xmodule._get_timeframe(xmltree)
self.assertEqual(output, (247, ''))
def test_get_timeframe_with_two_parameters(self):
xmltree = etree.fromstring(
'''<videoalpha
start_time="00:04:07"
end_time="13:04:39"
>test</videoalpha>'''
)
output = self.xmodule._get_timeframe(xmltree)
self.assertEqual(output, (247, 47079))
......@@ -93,7 +93,7 @@ class VideoAlphaModule(VideoAlphaFields, XModule):
return result
def _get_timeframe(self, xmltree):
""" Converts 'from' and 'to' parameters in video tag to seconds.
""" Converts 'start_time' and 'end_time' parameters in video tag to seconds.
If there are no parameters, returns empty string. """
def parse_time(s):
......@@ -103,11 +103,13 @@ class VideoAlphaModule(VideoAlphaFields, XModule):
return ''
else:
x = time.strptime(s, '%H:%M:%S')
return datetime.timedelta(hours=x.tm_hour,
minutes=x.tm_min,
seconds=x.tm_sec).total_seconds()
return datetime.timedelta(
hours=x.tm_hour,
minutes=x.tm_min,
seconds=x.tm_sec
).total_seconds()
return parse_time(xmltree.get('from')), parse_time(xmltree.get('to'))
return parse_time(xmltree.get('start_time')), parse_time(xmltree.get('end_time'))
def handle_ajax(self, dispatch, get):
"""Handle ajax calls to this video.
......
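For reference, a self-contained sketch of the conversion the `parse_time` helper above performs, matching the 247 and 47079 values asserted in the tests (not the module code itself):

import time
import datetime

def parse_time(s):
    # 'HH:MM:SS' -> total seconds, mirroring the helper above
    x = time.strptime(s, '%H:%M:%S')
    return datetime.timedelta(hours=x.tm_hour,
                              minutes=x.tm_min,
                              seconds=x.tm_sec).total_seconds()

assert parse_time('00:04:07') == 247.0
assert parse_time('13:04:39') == 47079.0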
(source diff not shown: file too large to display)
34d1996e44f78168a73297217b3a0973c2ae90e1
\ No newline at end of file
......@@ -115,6 +115,11 @@ xmodule can be tested independently, with this:
rake test_common/lib/xmodule
Other module-level tests include:
* `rake test_common/lib/capa`
* `rake test_common/lib/calc`
To run a single django test class:
rake test_lms[courseware.tests.tests:testViewAuth]
......
lms/static/images/pinned.png: binary image replaced (518 Bytes -> 49.5 KB)
b154ce99fb5c8d413ba769e8cc0df94ed674c3f4
\ No newline at end of file
2b8c58b098bdb17f9ddcbb2098f94c50fdcedf60
\ No newline at end of file
7d8b9879f7e5b859910edba7249661eedd3fcf37
\ No newline at end of file
caf8b43337faa75cef5da5cd090010215a67b1bd
\ No newline at end of file
b4d043bb1ca4a8815d4a388a2c9d96038211417b
\ No newline at end of file
6718f0c6e851376b5478baff94e1f1f4449bd938
\ No newline at end of file
lms/static/images/unpinned.png: binary image replaced (498 Bytes -> 48.1 KB)
......@@ -33,8 +33,8 @@
// colophon
.colophon {
margin-right: flex-gutter(2);
width: flex-grid(6,12);
margin-right: flex-gutter();
width: flex-grid(8,12);
float: left;
.nav-colophon {
......@@ -71,7 +71,7 @@
p {
float: left;
width: 460px;
width: flex-grid(6,8);
margin-left: $baseline;
padding-left: $baseline;
font-size: em(13);
......@@ -91,7 +91,6 @@
text-align: right;
li {
margin-right: ($baseline/10);
display: inline-block;
&:last-child {
......@@ -154,9 +153,5 @@
.colophon-about img {
margin-top: ($baseline*1.5);
}
.colophon-about p {
width: 360px;
}
}
}
......@@ -14,7 +14,6 @@ header.global {
padding: 18px 10px 0px;
max-width: grid-width(12);
min-width: 760px;
width: flex-grid(12);
}
h1.logo {
......
......@@ -9,7 +9,7 @@
<div class="content">
<div class="log-in-form">
<h2>Log in to your courses</h2>
<form id="login_form" data-remote="true" method="post" action="/login">
<form id="login_form" data-remote="true" method="post" action="/login_ajax">
<div class="row">
<label>Email</label>
<input name="email" type="email" class="email-field" tabindex="1">
......
......@@ -22,10 +22,6 @@ urlpatterns = ('', # nopep8
url(r'^admin_dashboard$', 'dashboard.views.dashboard'),
# Adding to allow debugging issues when prod is mysteriously different from staging
# (specifically missing get parameters in certain cases)
url(r'^debug_request$', 'util.views.debug_request'),
url(r'^change_email$', 'student.views.change_email_request', name="change_email"),
url(r'^email_confirm/(?P<key>[^/]*)$', 'student.views.confirm_email_change'),
url(r'^change_name$', 'student.views.change_name_request', name="change_name"),
......@@ -334,6 +330,13 @@ if settings.DEBUG or settings.MITX_FEATURES.get('ENABLE_DJANGO_ADMIN_SITE'):
## Jasmine and admin
urlpatterns += (url(r'^admin/', include(admin.site.urls)),)
if settings.DEBUG:
# Originally added to allow debugging issues when prod is
# mysteriously different from staging (specifically missing get
# parameters in certain cases), but removing from prod because
# it's a security risk.
urlpatterns += (url(r'^debug_request$', 'util.views.debug_request'),)
if settings.MITX_FEATURES.get('AUTH_USE_OPENID'):
urlpatterns += (
url(r'^openid/login/$', 'django_openid_auth.views.login_begin', name='openid-login'),
......
......@@ -10,11 +10,14 @@ end
# the ENV_TOKENS to the templating context.
def preprocess_with_mako(filename)
# simple command-line invocation of Mako engine
# cdodge: the .gsub() calls are used to translate true->True and false->False to make the generated
# python actually valid python. This is just a short-term hack to unblock the release train
# until a real fix can be made by people who know this better
mako = "from mako.template import Template;" +
"print Template(filename=\"#{filename}\")" +
# Total hack. It works because a Python dict literal has
# the same format as a JSON object.
".render(env=#{ENV_TOKENS.to_json});"
".render(env=#{ENV_TOKENS.to_json.gsub("true","True").gsub("false","False")});"
# strip off the .mako extension
output_filename = filename.chomp(File.extname(filename))
......
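The reason for the .gsub() calls: the JSON produced by `ENV_TOKENS.to_json` uses lowercase `true`/`false`, which are not names Python knows about when that text is evaluated as a dict literal in the generated command line. A small Python illustration of the mismatch (the key name is illustrative):

# JSON booleans are not valid Python names...
try:
    eval('{"USE_FEATURE": true}')
except NameError:
    pass  # `true` is undefined in Python

# ...but the gsub'd text is a valid Python dict literal.
assert eval('{"USE_FEATURE": True}') == {"USE_FEATURE": True}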
......@@ -9,4 +9,4 @@
# Our libraries:
-e git+https://github.com/edx/XBlock.git@2144a25d#egg=XBlock
-e git+https://github.com/edx/codejail.git@72cf791#egg=codejail
-e git+https://github.com/edx/codejail.git@5fb5fa0#egg=codejail