Commit 8cc93cb5 by JonahStanley

Merge branch 'master' into jonahstanley/add-more-tests

parents 286fcece 46967a78
......@@ -61,6 +61,8 @@
<div class="wrapper wrapper-view">
<%include file="widgets/header.html" />
## remove this block after advanced settings notification is rewritten
<%block name="view_alerts"></%block>
<div id="page-alert"></div>
<%block name="content"></%block>
......@@ -72,9 +74,13 @@
<%include file="widgets/footer.html" />
<%include file="widgets/tender.html" />
## remove this block after advanced settings notification is rewritten
<%block name="view_notifications"></%block>
<div id="page-notification"></div>
</div>
## remove this block after advanced settings notification is rewritten
<%block name="view_prompts"></%block>
<div id="page-prompt"></div>
<%block name="jsextra"></%block>
</body>
......
The code in this directory is based on:
django-mako Copyright (c) 2008 Mikeal Rogers
and is redistributed here with modifications under the same Apache 2.0 license
as the original.
================================================================================
django-mako
================================================================================
......
......@@ -527,12 +527,12 @@ def _do_create_account(post_vars):
js = {'success': False}
# Figure out the cause of the integrity error
if len(User.objects.filter(username=post_vars['username'])) > 0:
js['value'] = "An account with this username already exists."
js['value'] = "An account with the Public Username '" + post_vars['username'] + "' already exists."
js['field'] = 'username'
return HttpResponse(json.dumps(js))
if len(User.objects.filter(email=post_vars['email'])) > 0:
js['value'] = "An account with this e-mail already exists."
js['value'] = "An account with the Email '" + post_vars['email'] + "' already exists."
js['field'] = 'email'
return HttpResponse(json.dumps(js))
......
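For context, the duplicate-account branches above just hand a small JSON payload back to the client. A minimal sketch of what `json.dumps(js)` produces for a duplicate username (the username value here is invented for illustration):

    # Illustrative only -- mirrors the js dict built in the hunk above.
    import json
    js = {'success': False,
          'value': "An account with the Public Username 'jdoe' already exists.",
          'field': 'username'}
    json.dumps(js)
    # => '{"success": false, "value": "An account with the Public Username ... already exists.", "field": "username"}'
    #    (key order may vary; the view wraps this string in an HttpResponse)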
# .coveragerc for common/lib/calc
[run]
data_file = reports/common/lib/calc/.coverage
source = common/lib/calc
branch = true
[report]
ignore_errors = True
[html]
title = Calc Python Test Coverage Report
directory = reports/common/lib/calc/cover
[xml]
output = reports/common/lib/calc/coverage.xml
......@@ -144,6 +144,8 @@ def evaluator(variables, functions, string, cs=False):
return x
def parallel(x): # Parallel resistors [ 1 2 ] => 2/3
# convert from pyparsing.ParseResults, which doesn't support '0 in x'
x = list(x)
if len(x) == 1:
return x[0]
if 0 in x:
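The `parallel` helper above combines resistances with the standard formula 1 / (1/R1 + 1/R2 + ...); the added `list(x)` exists because `pyparsing.ParseResults` does not support the `0 in x` membership test used just below. A standalone sketch of the same reduction (the zero-ohm branch's return value is an assumption, since the hunk is truncated):

    # Minimal sketch, independent of pyparsing; matches the comment "[ 1 2 ] => 2/3".
    def parallel(values):
        values = list(values)              # ParseResults -> plain list
        if len(values) == 1:
            return values[0]
        if 0 in values:                    # a zero-ohm branch shorts the network
            return 0
        return 1.0 / sum(1.0 / v for v in values)

    assert abs(parallel([1, 2]) - 2.0 / 3.0) < 1e-12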
......@@ -230,27 +232,3 @@ def evaluator(variables, functions, string, cs=False):
expr << Optional((plus | minus)) + term + ZeroOrMore((plus | minus) + term) # -5 + 4 - 3
expr = expr.setParseAction(sum_parse_action)
return (expr + stringEnd).parseString(string)[0]
if __name__ == '__main__':
variables = {'R1': 2.0, 'R3': 4.0}
functions = {'sin': numpy.sin, 'cos': numpy.cos}
print "X", evaluator(variables, functions, "10000||sin(7+5)-6k")
print "X", evaluator(variables, functions, "13")
print evaluator({'R1': 2.0, 'R3': 4.0}, {}, "13")
print evaluator({'e1': 1, 'e2': 1.0, 'R3': 7, 'V0': 5, 'R5': 15, 'I1': 1, 'R4': 6}, {}, "e2")
print evaluator({'a': 2.2997471478310274, 'k': 9, 'm': 8, 'x': 0.66009498411213041}, {}, "5")
print evaluator({}, {}, "-1")
print evaluator({}, {}, "-(7+5)")
print evaluator({}, {}, "-0.33")
print evaluator({}, {}, "-.33")
print evaluator({}, {}, "5+1*j")
print evaluator({}, {}, "j||1")
print evaluator({}, {}, "e^(j*pi)")
print evaluator({}, {}, "fact(5)")
print evaluator({}, {}, "factorial(5)")
try:
print evaluator({}, {}, "5+7 QWSEKO")
except UndefinedVariable:
print "Successfully caught undefined variable"
......@@ -469,6 +469,7 @@ class LoncapaProblem(object):
random_seed=self.seed,
python_path=python_path,
cache=self.system.cache,
slug=self.problem_id,
)
except Exception as err:
log.exception("Error while execing script code: " + all_code)
......
......@@ -140,6 +140,8 @@ class LoncapaResponse(object):
self.context = context
self.system = system
self.id = xml.get('id')
for abox in inputfields:
if abox.tag not in self.allowed_inputfields:
msg = "%s: cannot have input field %s" % (
......@@ -286,7 +288,7 @@ class LoncapaResponse(object):
}
try:
safe_exec.safe_exec(code, globals_dict, python_path=self.context['python_path'])
safe_exec.safe_exec(code, globals_dict, python_path=self.context['python_path'], slug=self.id)
except Exception as err:
msg = 'Error %s in evaluating hint function %s' % (err, hintfn)
msg += "\nSee XML source line %s" % getattr(
......@@ -935,7 +937,6 @@ class CustomResponse(LoncapaResponse):
# if <customresponse> has an "expect" (or "answer") attribute then save
# that
self.expect = xml.get('expect') or xml.get('answer')
self.myid = xml.get('id')
log.debug('answer_ids=%s' % self.answer_ids)
......@@ -972,7 +973,7 @@ class CustomResponse(LoncapaResponse):
'ans': ans,
}
globals_dict.update(kwargs)
safe_exec.safe_exec(code, globals_dict, python_path=self.context['python_path'])
safe_exec.safe_exec(code, globals_dict, python_path=self.context['python_path'], slug=self.id)
return globals_dict['cfn_return']
return check_function
......@@ -981,7 +982,7 @@ class CustomResponse(LoncapaResponse):
if not self.code:
if answer is None:
log.error("[courseware.capa.responsetypes.customresponse] missing"
" code checking script! id=%s" % self.myid)
" code checking script! id=%s" % self.id)
self.code = ''
else:
answer_src = answer.get('src')
......@@ -1034,7 +1035,7 @@ class CustomResponse(LoncapaResponse):
# note that this doesn't help the "cfn" version - only the exec version
self.context.update({
# my ID
'response_id': self.myid,
'response_id': self.id,
# expected answer (if given as attribute)
'expect': self.expect,
......@@ -1089,7 +1090,7 @@ class CustomResponse(LoncapaResponse):
# exec the check function
if isinstance(self.code, basestring):
try:
safe_exec.safe_exec(self.code, self.context, cache=self.system.cache)
safe_exec.safe_exec(self.code, self.context, cache=self.system.cache, slug=self.id)
except Exception as err:
self._handle_exec_exception(err)
......@@ -1813,7 +1814,7 @@ class SchematicResponse(LoncapaResponse):
]
self.context.update({'submission': submission})
try:
safe_exec.safe_exec(self.code, self.context, cache=self.system.cache)
safe_exec.safe_exec(self.code, self.context, cache=self.system.cache, slug=self.id)
except Exception as err:
msg = 'Error %s in evaluating SchematicResponse' % err
raise ResponseError(msg)
......
......@@ -71,7 +71,7 @@ def update_hash(hasher, obj):
@statsd.timed('capa.safe_exec.time')
def safe_exec(code, globals_dict, random_seed=None, python_path=None, cache=None):
def safe_exec(code, globals_dict, random_seed=None, python_path=None, cache=None, slug=None):
"""
Execute python code safely.
......@@ -87,6 +87,9 @@ def safe_exec(code, globals_dict, random_seed=None, python_path=None, cache=None
to cache the execution, taking into account the code, the values of the globals,
and the random seed.
`slug` is an arbitrary string, a description that's meaningful to the
caller, that will be used in log messages.
"""
# Check the cache for a previous result.
if cache:
......@@ -112,7 +115,7 @@ def safe_exec(code, globals_dict, random_seed=None, python_path=None, cache=None
try:
codejail_safe_exec(
code_prolog + LAZY_IMPORTS + code, globals_dict,
python_path=python_path,
python_path=python_path, slug=slug,
)
except SafeExecException as e:
emsg = e.message
......
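Callers such as `LoncapaProblem` and the response types above now thread an identifier through to `safe_exec`; per the docstring, the slug is only a description used in log messages. A minimal usage sketch, assuming just the signature shown in this hunk (the code string and slug value are invented for the example):

    # Illustrative call: values in globals_dict are visible to the sandboxed
    # code, and assignments it makes are copied back out (as the cfn_return
    # lookup in responsetypes.py relies on).
    g = {'seed': 1}
    safe_exec("answer = seed + 41", g, slug="i4x-demo-problem-1")
    assert g['answer'] == 42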
......@@ -10,7 +10,6 @@ import random
import unittest
import textwrap
import mock
import textwrap
from . import new_loncapa_problem, test_system
......@@ -190,7 +189,7 @@ class SymbolicResponseTest(ResponseTest):
def test_grade_single_input(self):
problem = self.build_problem(math_display=True,
expect="2*x+3*y")
expect="2*x+3*y")
# Correct answers
correct_inputs = [
......@@ -223,7 +222,6 @@ class SymbolicResponseTest(ResponseTest):
for (input_str, input_mathml) in incorrect_inputs:
self._assert_symbolic_grade(problem, input_str, input_mathml, 'incorrect')
def test_complex_number_grade(self):
problem = self.build_problem(math_display=True,
expect="[[cos(theta),i*sin(theta)],[i*sin(theta),cos(theta)]]",
......@@ -241,7 +239,7 @@ class SymbolicResponseTest(ResponseTest):
# Correct answer
with mock.patch.object(requests, 'post') as mock_post:
# Simulate what the LaTeX-to-MathML server would
# Simulate what the LaTeX-to-MathML server would
# send for the correct response input
mock_post.return_value.text = correct_snuggletex_response
......@@ -323,7 +321,7 @@ class SymbolicResponseTest(ResponseTest):
dynamath_input,
expected_correctness):
input_dict = {'1_2_1': str(student_input),
'1_2_1_dynamath': str(dynamath_input) }
'1_2_1_dynamath': str(dynamath_input)}
correct_map = problem.grade_answers(input_dict)
......@@ -349,10 +347,18 @@ class OptionResponseTest(ResponseTest):
class FormulaResponseTest(ResponseTest):
"""
Test the FormulaResponse class
"""
from response_xml_factory import FormulaResponseXMLFactory
xml_factory_class = FormulaResponseXMLFactory
def test_grade(self):
"""
Test basic functionality of FormulaResponse
Specifically, if it can understand equivalence of formulae
"""
# Sample variables x and y in the range [-10, 10]
sample_dict = {'x': (-10, 10), 'y': (-10, 10)}
......@@ -373,6 +379,9 @@ class FormulaResponseTest(ResponseTest):
self.assert_grade(problem, input_formula, "incorrect")
def test_hint(self):
"""
Test the hint-giving functionality of FormulaResponse
"""
# Sample variables x and y in the range [-10, 10]
sample_dict = {'x': (-10, 10), 'y': (-10, 10)}
......@@ -401,6 +410,10 @@ class FormulaResponseTest(ResponseTest):
'Try including the variable x')
def test_script(self):
"""
Test if a Python script can be used to generate answers
"""
# Calculate the answer using a script
script = "calculated_ans = 'x+x'"
......@@ -419,7 +432,9 @@ class FormulaResponseTest(ResponseTest):
self.assert_grade(problem, '3*x', 'incorrect')
def test_parallel_resistors(self):
"""Test parallel resistors"""
"""
Test parallel resistors
"""
sample_dict = {'R1': (10, 10), 'R2': (2, 2), 'R3': (5, 5), 'R4': (1, 1)}
# Test problem
......@@ -440,8 +455,11 @@ class FormulaResponseTest(ResponseTest):
self.assert_grade(problem, input_formula, "incorrect")
def test_default_variables(self):
"""Test the default variables provided in common/lib/capa/capa/calc.py"""
# which are: j (complex number), e, pi, k, c, T, q
"""
Test the default variables provided in calc.py
which are: j (complex number), e, pi, k, c, T, q
"""
# Sample x in the range [-10,10]
sample_dict = {'x': (-10, 10)}
......@@ -464,11 +482,14 @@ class FormulaResponseTest(ResponseTest):
msg="Failed on variable {0}; the given, incorrect answer was {1} but graded 'correct'".format(var, incorrect))
def test_default_functions(self):
"""Test the default functions provided in common/lib/capa/capa/calc.py"""
# which are: sin, cos, tan, sqrt, log10, log2, ln,
# arccos, arcsin, arctan, abs,
# fact, factorial
"""
Test the default functions provided in common/lib/capa/capa/calc.py
which are:
sin, cos, tan, sqrt, log10, log2, ln,
arccos, arcsin, arctan, abs,
fact, factorial
"""
w = random.randint(3, 10)
sample_dict = {'x': (-10, 10), # Sample x in the range [-10,10]
'y': (1, 10), # Sample y in the range [1,10] - logs, arccos need positive inputs
......@@ -496,8 +517,10 @@ class FormulaResponseTest(ResponseTest):
msg="Failed on function {0}; the given, incorrect answer was {1} but graded 'correct'".format(func, incorrect))
def test_grade_infinity(self):
# This resolves a bug where a problem with relative tolerance would
# pass with any arbitrarily large student answer.
"""
Test that a large input on a problem with relative tolerance isn't
erroneously marked as correct.
"""
sample_dict = {'x': (1, 2)}
......@@ -514,8 +537,9 @@ class FormulaResponseTest(ResponseTest):
self.assert_grade(problem, input_formula, "incorrect")
def test_grade_nan(self):
# Attempt to produce a value which causes the student's answer to be
# evaluated to nan. See if this is resolved correctly.
"""
Test that expressions that evaluate to NaN are not marked as correct.
"""
sample_dict = {'x': (1, 2)}
......@@ -532,6 +556,18 @@ class FormulaResponseTest(ResponseTest):
input_formula = "x + 0*1e999"
self.assert_grade(problem, input_formula, "incorrect")
def test_raises_zero_division_err(self):
"""
See if division by zero raises an error.
"""
sample_dict = {'x': (1, 2)}
problem = self.build_problem(sample_dict=sample_dict,
num_samples=10,
tolerance="1%",
answer="x") # Answer doesn't matter
input_dict = {'1_2_1': '1/0'}
self.assertRaises(StudentInputError, problem.grade_answers, input_dict)
class StringResponseTest(ResponseTest):
from response_xml_factory import StringResponseXMLFactory
......@@ -592,7 +628,7 @@ class StringResponseTest(ResponseTest):
problem = self.build_problem(
answer="Michigan",
hintfn="gimme_a_hint",
script = textwrap.dedent("""
script=textwrap.dedent("""
def gimme_a_hint(answer_ids, student_answers, new_cmap, old_cmap):
aid = answer_ids[0]
answer = student_answers[aid]
......@@ -898,6 +934,14 @@ class NumericalResponseTest(ResponseTest):
incorrect_responses = ["", "3.9", "4.1", "0", "5.01e1"]
self.assert_multiple_grade(problem, correct_responses, incorrect_responses)
def test_raises_zero_division_err(self):
"""See if division by zero is handled correctly"""
problem = self.build_problem(question_text="What is 5 * 10?",
explanation="The answer is 50",
answer="5e+1") # Answer doesn't matter
input_dict = {'1_2_1': '1/0'}
self.assertRaises(StudentInputError, problem.grade_answers, input_dict)
class CustomResponseTest(ResponseTest):
from response_xml_factory import CustomResponseXMLFactory
......@@ -947,8 +991,8 @@ class CustomResponseTest(ResponseTest):
#
# 'answer_given' is the answer the student gave (if there is just one input)
# or an ordered list of answers (if there are multiple inputs)
#
# The function should return a dict of the form
#
# The function should return a dict of the form
# { 'ok': BOOL, 'msg': STRING }
#
script = textwrap.dedent("""
......
"""
Modules that get shown to the users when an error has occurred while
loading or rendering other modules
"""
import hashlib
import logging
import json
......@@ -22,12 +27,19 @@ log = logging.getLogger(__name__)
class ErrorFields(object):
"""
XBlock fields used by the ErrorModules
"""
contents = String(scope=Scope.content)
error_msg = String(scope=Scope.content)
display_name = String(scope=Scope.settings)
class ErrorModule(ErrorFields, XModule):
"""
Module that gets shown to staff when there has been an error while
loading or rendering other modules
"""
def get_html(self):
'''Show an error to staff.
......@@ -42,6 +54,10 @@ class ErrorModule(ErrorFields, XModule):
class NonStaffErrorModule(ErrorFields, XModule):
"""
Module that gets shown to students when there has been an error while
loading or rendering other modules
"""
def get_html(self):
'''Show an error to a student.
TODO (vshnayder): proper style, divs, etc.
......@@ -61,7 +77,7 @@ class ErrorDescriptor(ErrorFields, JSONEditingDescriptor):
module_class = ErrorModule
@classmethod
def _construct(self, system, contents, error_msg, location):
def _construct(cls, system, contents, error_msg, location):
if location.name is None:
location = location._replace(
......@@ -80,7 +96,7 @@ class ErrorDescriptor(ErrorFields, JSONEditingDescriptor):
'contents': contents,
'display_name': 'Error: ' + location.name
}
return ErrorDescriptor(
return cls(
system,
location,
model_data,
......
......@@ -268,7 +268,7 @@ class MongoModuleStore(ModuleStoreBase):
query = {'_id.org': location.org,
'_id.course': location.course,
'_id.category': {'$in': ['course', 'chapter', 'sequential', 'vertical',
'wrapper', 'problemset', 'conditional']}
'wrapper', 'problemset', 'conditional', 'randomize']}
}
# we just want the Location, children, and inheritable metadata
record_filter = {'_id': 1, 'definition.children': 1}
......
---
metadata:
display_name: default
data_dir: a_made_up_name
display_name: Video Alpha 1
version: 1
data: |
<videoalpha youtube="0.75:JMD_ifUUfsU,1.0:OEoXaMPEzfM,1.25:AKqURZnYqpk,1.50:DYpADpL7jAY"/>
<videoalpha show_captions="true" sub="name_of_file" youtube="0.75:JMD_ifUUfsU,1.0:OEoXaMPEzfM,1.25:AKqURZnYqpk,1.50:DYpADpL7jAY" >
<source src="https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.mp4"/>
<source src="https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.webm"/>
<source src="https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.ogv"/>
</videoalpha>
children: []
"""
Tests for ErrorModule and NonStaffErrorModule
"""
import unittest
from xmodule.tests import test_system
import xmodule.error_module as error_module
class TestErrorModule(unittest.TestCase):
"""
Tests for ErrorModule and ErrorDescriptor
"""
def setUp(self):
self.system = test_system()
self.org = "org"
self.course = "course"
self.fake_xml = "<problem />"
self.broken_xml = "<problem>"
self.error_msg = "Error"
def test_error_module_create(self):
descriptor = error_module.ErrorDescriptor.from_xml(
self.fake_xml, self.system, self.org, self.course)
self.assertTrue(isinstance(descriptor, error_module.ErrorDescriptor))
def test_error_module_rendering(self):
descriptor = error_module.ErrorDescriptor.from_xml(
self.fake_xml, self.system, self.org, self.course, self.error_msg)
module = descriptor.xmodule(self.system)
rendered_html = module.get_html()
self.assertIn(self.error_msg, rendered_html)
self.assertIn(self.fake_xml, rendered_html)
class TestNonStaffErrorModule(TestErrorModule):
"""
Tests for NonStaffErrorModule and NonStaffErrorDescriptor
"""
def test_non_staff_error_module_create(self):
descriptor = error_module.NonStaffErrorDescriptor.from_xml(
self.fake_xml, self.system, self.org, self.course)
self.assertTrue(isinstance(descriptor, error_module.NonStaffErrorDescriptor))
def test_non_staff_error_module_rendering(self):
descriptor = error_module.NonStaffErrorDescriptor.from_xml(
self.fake_xml, self.system, self.org, self.course)
module = descriptor.xmodule(self.system)
rendered_html = module.get_html()
self.assertNotIn(self.error_msg, rendered_html)
self.assertNotIn(self.fake_xml, rendered_html)
......@@ -4,10 +4,12 @@
import json
import unittest
from lxml import etree
from xmodule.poll_module import PollDescriptor
from xmodule.conditional_module import ConditionalDescriptor
from xmodule.word_cloud_module import WordCloudDescriptor
from xmodule.videoalpha_module import VideoAlphaDescriptor
class PostData:
"""Class which emulate postdata."""
......@@ -117,3 +119,33 @@ class WordCloudModuleTest(LogicTest):
)
self.assertEqual(100.0, sum(i['percent'] for i in response['top_words']) )
class VideoAlphaModuleTest(LogicTest):
descriptor_class = VideoAlphaDescriptor
raw_model_data = {
'data': '<videoalpha />'
}
def test_get_timeframe_no_parameters(self):
xmltree = etree.fromstring('<videoalpha>test</videoalpha>')
output = self.xmodule._get_timeframe(xmltree)
self.assertEqual(output, ('', ''))
def test_get_timeframe_with_one_parameter(self):
xmltree = etree.fromstring(
'<videoalpha start_time="00:04:07">test</videoalpha>'
)
output = self.xmodule._get_timeframe(xmltree)
self.assertEqual(output, (247, ''))
def test_get_timeframe_with_two_parameters(self):
xmltree = etree.fromstring(
'''<videoalpha
start_time="00:04:07"
end_time="13:04:39"
>test</videoalpha>'''
)
output = self.xmodule._get_timeframe(xmltree)
self.assertEqual(output, (247, 47079))
......@@ -93,7 +93,7 @@ class VideoAlphaModule(VideoAlphaFields, XModule):
return result
def _get_timeframe(self, xmltree):
""" Converts 'from' and 'to' parameters in video tag to seconds.
""" Converts 'start_time' and 'end_time' parameters in video tag to seconds.
If there are no parameters, returns empty string. """
def parse_time(s):
......@@ -103,11 +103,13 @@ class VideoAlphaModule(VideoAlphaFields, XModule):
return ''
else:
x = time.strptime(s, '%H:%M:%S')
return datetime.timedelta(hours=x.tm_hour,
minutes=x.tm_min,
seconds=x.tm_sec).total_seconds()
return datetime.timedelta(
hours=x.tm_hour,
minutes=x.tm_min,
seconds=x.tm_sec
).total_seconds()
return parse_time(xmltree.get('from')), parse_time(xmltree.get('to'))
return parse_time(xmltree.get('start_time')), parse_time(xmltree.get('end_time'))
def handle_ajax(self, dispatch, get):
"""Handle ajax calls to this video.
......
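The renamed `start_time`/`end_time` attributes are converted by the `parse_time` closure shown above; a self-contained sketch of that conversion, consistent with the test expectations added earlier in this diff (247 seconds for "00:04:07", 47079 for "13:04:39"):

    # Standalone version of the time parsing; empty or missing values pass
    # through as '' so _get_timeframe can return ('', '').
    import time
    import datetime

    def parse_time(s):
        if not s:
            return ''
        x = time.strptime(s, '%H:%M:%S')
        return datetime.timedelta(hours=x.tm_hour,
                                  minutes=x.tm_min,
                                  seconds=x.tm_sec).total_seconds()

    assert parse_time('00:04:07') == 247.0
    assert parse_time('13:04:39') == 47079.0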
(One file's diff was too large to display.)
34d1996e44f78168a73297217b3a0973c2ae90e1
\ No newline at end of file
......@@ -115,6 +115,11 @@ xmodule can be tested independently, with this:
rake test_common/lib/xmodule
Other module-level tests include:
* `rake test_common/lib/capa`
* `rake test_common/lib/calc`
To run a single django test class:
rake test_lms[courseware.tests.tests:testViewAuth]
......
lms/static/images/pinned.png (binary image replaced: 518 Bytes → 49.5 KB)
b154ce99fb5c8d413ba769e8cc0df94ed674c3f4
\ No newline at end of file
2b8c58b098bdb17f9ddcbb2098f94c50fdcedf60
\ No newline at end of file
7d8b9879f7e5b859910edba7249661eedd3fcf37
\ No newline at end of file
caf8b43337faa75cef5da5cd090010215a67b1bd
\ No newline at end of file
b4d043bb1ca4a8815d4a388a2c9d96038211417b
\ No newline at end of file
6718f0c6e851376b5478baff94e1f1f4449bd938
\ No newline at end of file
lms/static/images/unpinned.png (binary image replaced: 498 Bytes → 48.1 KB)
......@@ -33,8 +33,8 @@
// colophon
.colophon {
margin-right: flex-gutter(2);
width: flex-grid(6,12);
margin-right: flex-gutter();
width: flex-grid(8,12);
float: left;
.nav-colophon {
......@@ -71,7 +71,7 @@
p {
float: left;
width: 460px;
width: flex-grid(6,8);
margin-left: $baseline;
padding-left: $baseline;
font-size: em(13);
......@@ -91,7 +91,6 @@
text-align: right;
li {
margin-right: ($baseline/10);
display: inline-block;
&:last-child {
......@@ -154,9 +153,5 @@
.colophon-about img {
margin-top: ($baseline*1.5);
}
.colophon-about p {
width: 360px;
}
}
}
......@@ -14,7 +14,6 @@ header.global {
padding: 18px 10px 0px;
max-width: grid-width(12);
min-width: 760px;
width: flex-grid(12);
}
h1.logo {
......
......@@ -9,7 +9,7 @@
<div class="content">
<div class="log-in-form">
<h2>Log in to your courses</h2>
<form id="login_form" data-remote="true" method="post" action="/login">
<form id="login_form" data-remote="true" method="post" action="/login_ajax">
<div class="row">
<label>Email</label>
<input name="email" type="email" class="email-field" tabindex="1">
......
......@@ -22,10 +22,6 @@ urlpatterns = ('', # nopep8
url(r'^admin_dashboard$', 'dashboard.views.dashboard'),
# Adding to allow debugging issues when prod is mysteriously different from staging
# (specifically missing get parameters in certain cases)
url(r'^debug_request$', 'util.views.debug_request'),
url(r'^change_email$', 'student.views.change_email_request', name="change_email"),
url(r'^email_confirm/(?P<key>[^/]*)$', 'student.views.confirm_email_change'),
url(r'^change_name$', 'student.views.change_name_request', name="change_name"),
......@@ -334,6 +330,13 @@ if settings.DEBUG or settings.MITX_FEATURES.get('ENABLE_DJANGO_ADMIN_SITE'):
## Jasmine and admin
urlpatterns += (url(r'^admin/', include(admin.site.urls)),)
if settings.DEBUG:
# Originally added to allow debugging issues when prod is
# mysteriously different from staging (specifically missing get
# parameters in certain cases), but removing from prod because
# it's a security risk.
urlpatterns += (url(r'^debug_request$', 'util.views.debug_request'),)
if settings.MITX_FEATURES.get('AUTH_USE_OPENID'):
urlpatterns += (
url(r'^openid/login/$', 'django_openid_auth.views.login_begin', name='openid-login'),
......
......@@ -10,11 +10,14 @@ end
# the ENV_TOKENS to the templating context.
def preprocess_with_mako(filename)
# simple command-line invocation of Mako engine
# cdodge: the .gsub() are used to translate true->True and false->False to make the generated
# python actually valid python. This is just a short term hack to unblock the release train
# until a real fix can be made by people who know this better
mako = "from mako.template import Template;" +
"print Template(filename=\"#{filename}\")" +
# Total hack. It works because a Python dict literal has
# the same format as a JSON object.
".render(env=#{ENV_TOKENS.to_json});"
".render(env=#{ENV_TOKENS.to_json.gsub("true","True").gsub("false","False")});"
# strip off the .mako extension
output_filename = filename.chomp(File.extname(filename))
......
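The `.gsub()` calls are needed because ENV_TOKENS is serialized to JSON and then spliced into a Python expression: a JSON object is almost a valid Python dict literal, except that its booleans are spelled `true`/`false`. A small Python illustration of the mismatch (example data invented):

    # Shows why the true->True / false->False translation above is required
    # before the generated Python command is executed.
    import json

    env_tokens = {"MITX_FEATURES": {"ENABLE_DJANGO_ADMIN_SITE": True}}
    as_json = json.dumps(env_tokens)                       # contains: true
    as_python = as_json.replace("true", "True").replace("false", "False")
    assert eval(as_python) == env_tokens                   # now valid Python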
......@@ -9,4 +9,4 @@
# Our libraries:
-e git+https://github.com/edx/XBlock.git@2144a25d#egg=XBlock
-e git+https://github.com/edx/codejail.git@72cf791#egg=codejail
-e git+https://github.com/edx/codejail.git@5fb5fa0#egg=codejail