Commit f856b69a by Ned Batchelder

Merge branch 'feature/ned/sandboxed-python'

parents ce56fa30 d0c4afb3
@@ -9,7 +9,7 @@
:2e#
.AppleDouble
database.sqlite
-private-requirements.txt
+requirements/private.txt
courseware/static/js/mathjax/*
flushdb.sh
build
...
@@ -16,7 +16,7 @@ from mitxmako.shortcuts import render_to_response, render_to_string
from urllib import urlencode
import zendesk

-import capa.calc
+import calc
import track.views
@@ -27,7 +27,7 @@ def calculate(request):
    ''' Calculator in footer of every page. '''
    equation = request.GET['equation']
    try:
-        result = capa.calc.evaluator({}, {}, equation)
+        result = calc.evaluator({}, {}, equation)
    except:
        event = {'error': map(str, sys.exc_info()),
                 'equation': equation}
...
from setuptools import setup
setup(
name="calc",
version="0.1",
py_modules=["calc"],
install_requires=[
"pyparsing==1.5.6",
"numpy",
"scipy"
],
)
@@ -13,33 +13,19 @@ Main module which shows problems (of "capa" type).
This is used by capa_module.
'''

-from __future__ import division
from datetime import datetime
import logging
import math
import numpy
-import os
+import os.path
-import random
import re
-import scipy
-import struct
import sys

from lxml import etree
from xml.sax.saxutils import unescape
from copy import deepcopy

-import chem
-import chem.miller
-import chem.chemcalc
-import chem.chemtools
-import verifiers
-import verifiers.draganddrop
-import calc
from .correctmap import CorrectMap
-import eia
import inputtypes
import customrender
from .util import contextualize_text, convert_files_to_filenames
@@ -47,6 +33,7 @@ import xqueue_interface
# to be replaced with auto-registering
import responsetypes
+import safe_exec

# dict of tagname, Response Class -- this should come from auto-registering
response_tag_dict = dict([(x.response_tag, x) for x in responsetypes.__all__])
@@ -63,17 +50,6 @@ html_transforms = {'problem': {'tag': 'div'},
                   "math": {'tag': 'span'},
                   }

-global_context = {'random': random,
-                  'numpy': numpy,
-                  'math': math,
-                  'scipy': scipy,
-                  'calc': calc,
-                  'eia': eia,
-                  'chemcalc': chem.chemcalc,
-                  'chemtools': chem.chemtools,
-                  'miller': chem.miller,
-                  'draganddrop': verifiers.draganddrop}
-
# These should be removed from HTML output, including all subelements
html_problem_semantics = ["codeparam", "responseparam", "answer", "script", "hintgroup", "openendedparam", "openendedrubric"]
@@ -96,7 +72,7 @@ class LoncapaProblem(object):
     - problem_text (string): xml defining the problem
     - id (string): identifier for this problem; often a filename (no spaces)
     - seed (int): random number generator seed (int)
     - state (dict): containing the following keys:
         - 'seed' - (int) random number generator seed
         - 'student_answers' - (dict) maps input id to the stored answer for that input
@@ -115,23 +91,20 @@ class LoncapaProblem(object):
        if self.system is None:
            raise Exception()

-        state = state if state else {}
+        state = state or {}

        # Set seed according to the following priority:
        #   1. Contained in problem's state
        #   2. Passed into capa_problem via constructor
-        #   3. Assign from the OS's random number generator
        self.seed = state.get('seed', seed)
-        if self.seed is None:
-            self.seed = struct.unpack('i', os.urandom(4))[0]
+        assert self.seed is not None, "Seed must be provided for LoncapaProblem."

        self.student_answers = state.get('student_answers', {})
        if 'correct_map' in state:
            self.correct_map.set_dict(state['correct_map'])
        self.done = state.get('done', False)
        self.input_state = state.get('input_state', {})

        # Convert startouttext and endouttext to proper <text></text>
        problem_text = re.sub("startouttext\s*/", "text", problem_text)
        problem_text = re.sub("endouttext\s*/", "/text", problem_text)
@@ -144,7 +117,7 @@ class LoncapaProblem(object):
        self._process_includes()

        # construct script processor context (eg for customresponse problems)
-        self.context = self._extract_context(self.tree, seed=self.seed)
+        self.context = self._extract_context(self.tree)

        # Pre-parse the XML tree: modifies it to add ID's and perform some in-place
        # transformations. This also creates the dict (self.responders) of Response
@@ -440,18 +413,23 @@ class LoncapaProblem(object):
        path = []

        for dir in raw_path:
            if not dir:
                continue

            # path is an absolute path or a path relative to the data dir
            dir = os.path.join(self.system.filestore.root_path, dir)
+            # Check that we are within the filestore tree.
+            reldir = os.path.relpath(dir, self.system.filestore.root_path)
+            if ".." in reldir:
+                log.warning("Ignoring Python directory outside of course: %r" % dir)
+                continue
+
            abs_dir = os.path.normpath(dir)
            path.append(abs_dir)

        return path

-    def _extract_context(self, tree, seed=struct.unpack('i', os.urandom(4))[0]):  # private
+    def _extract_context(self, tree):
        '''
        Extract content of <script>...</script> from the problem.xml file, and exec it in the
        context of this problem. Provides ability to randomize problems, and also set
@@ -459,55 +437,47 @@
        Problem XML goes to Python execution context. Runs everything in script tags.
        '''
-        random.seed(self.seed)
-        # save global context in here also
-        context = {'global_context': global_context}
-        # initialize context to have stuff in global_context
-        context.update(global_context)
-        # put globals there also
-        context['__builtins__'] = globals()['__builtins__']
-        # pass instance of LoncapaProblem in
-        context['the_lcp'] = self
-        context['script_code'] = ''
-        self._execute_scripts(tree.findall('.//script'), context)
-        return context
-
-    def _execute_scripts(self, scripts, context):
-        '''
-        Executes scripts in the given context.
-        '''
-        original_path = sys.path
-        for script in scripts:
-            sys.path = original_path + self._extract_system_path(script)
+        context = {}
+        context['seed'] = self.seed
+        all_code = ''
+        python_path = []
+
+        for script in tree.findall('.//script'):
            stype = script.get('type')
            if stype:
                if 'javascript' in stype:
                    continue    # skip javascript
                if 'perl' in stype:
                    continue    # skip perl
            # TODO: evaluate only python
-            code = script.text
+
+            for d in self._extract_system_path(script):
+                if d not in python_path and os.path.exists(d):
+                    python_path.append(d)
+
            XMLESC = {"&apos;": "'", "&quot;": '"'}
-            code = unescape(code, XMLESC)
-            # store code source in context
-            context['script_code'] += code
+            code = unescape(script.text, XMLESC)
+            all_code += code
+
+        if all_code:
            try:
-                # use "context" for global context; thus defs in code are global within code
-                exec code in context, context
+                safe_exec.safe_exec(
+                    all_code,
+                    context,
+                    random_seed=self.seed,
+                    python_path=python_path,
+                    cache=self.system.cache,
+                )
            except Exception as err:
-                log.exception("Error while execing script code: " + code)
+                log.exception("Error while execing script code: " + all_code)
                msg = "Error while executing script code: %s" % str(err).replace('<', '&lt;')
                raise responsetypes.LoncapaProblemError(msg)
-            finally:
-                sys.path = original_path
+
+        # store code source in context
+        context['script_code'] = all_code
+        return context
...
@@ -46,7 +46,7 @@ import sys
import pyparsing

from .registry import TagRegistry
-from capa.chem import chemcalc
+from chem import chemcalc
import xqueue_interface
from datetime import datetime
...
@@ -23,6 +23,7 @@ import random
import re
import requests
import subprocess
+import textwrap
import traceback
import xml.sax.saxutils as saxutils
@@ -30,17 +31,23 @@ from collections import namedtuple
from shapely.geometry import Point, MultiPoint

# specific library imports
-from .calc import evaluator, UndefinedVariable
-from .correctmap import CorrectMap
+from calc import evaluator, UndefinedVariable
+from . import correctmap
from datetime import datetime
from .util import *
from lxml import etree
from lxml.html.soupparser import fromstring as fromstring_bs     # uses Beautiful Soup!!! FIXME?
import capa.xqueue_interface as xqueue_interface
+import safe_exec

log = logging.getLogger(__name__)

+CorrectMap = correctmap.CorrectMap
+CORRECTMAP_PY = None

#-----------------------------------------------------------------------------
# Exceptions
@@ -252,20 +259,41 @@ class LoncapaResponse(object):
        # We may extend this in the future to add another argument which provides a
        # callback procedure to a social hint generation system.
-        if not hintfn in self.context:
-            msg = 'missing specified hint function %s in script context' % hintfn
-            msg += "\nSee XML source line %s" % getattr(
-                self.xml, 'sourceline', '<unavailable>')
-            raise LoncapaProblemError(msg)
+        global CORRECTMAP_PY
+        if CORRECTMAP_PY is None:
+            # We need the CorrectMap code for hint functions. No, this is not great.
+            CORRECTMAP_PY = inspect.getsource(correctmap)
+
+        code = (
+            CORRECTMAP_PY + "\n" +
+            self.context['script_code'] + "\n" +
+            textwrap.dedent("""
+                new_cmap = CorrectMap()
+                new_cmap.set_dict(new_cmap_dict)
+                old_cmap = CorrectMap()
+                old_cmap.set_dict(old_cmap_dict)
+                {hintfn}(answer_ids, student_answers, new_cmap, old_cmap)
+                new_cmap_dict.update(new_cmap.get_dict())
+                old_cmap_dict.update(old_cmap.get_dict())
+                """).format(hintfn=hintfn)
+        )
+        globals_dict = {
+            'answer_ids': self.answer_ids,
+            'student_answers': student_answers,
+            'new_cmap_dict': new_cmap.get_dict(),
+            'old_cmap_dict': old_cmap.get_dict(),
+        }

        try:
-            self.context[hintfn](
-                self.answer_ids, student_answers, new_cmap, old_cmap)
+            safe_exec.safe_exec(code, globals_dict)
        except Exception as err:
            msg = 'Error %s in evaluating hint function %s' % (err, hintfn)
            msg += "\nSee XML source line %s" % getattr(
                self.xml, 'sourceline', '<unavailable>')
            raise ResponseError(msg)
+
+        new_cmap.set_dict(globals_dict['new_cmap_dict'])
        return

    # hint specified by conditions and text dependent on conditions (a-la Loncapa design)
@@ -475,6 +503,10 @@ class JavascriptResponse(LoncapaResponse):
        return tmp_env

    def call_node(self, args):
+        # Node.js code is un-sandboxed. If the XModuleSystem says we aren't
+        # allowed to run unsafe code, then stop now.
+        if not self.system.can_execute_unsafe_code():
+            raise LoncapaProblemError("Execution of unsafe Javascript code is not allowed.")
+
        subprocess_args = ["node"]
        subprocess_args.extend(args)
@@ -488,7 +520,7 @@
        output = self.call_node([generator_file,
                                 self.generator,
                                 json.dumps(self.generator_dependencies),
-                                 json.dumps(str(self.context['the_lcp'].seed)),
+                                 json.dumps(str(self.context['seed'])),
                                 json.dumps(self.params)]).strip()

        return json.loads(output)
@@ -660,15 +692,6 @@ class ChoiceResponse(LoncapaResponse):

class MultipleChoiceResponse(LoncapaResponse):
    # TODO: handle direction and randomize
-    snippets = [{'snippet': '''<multiplechoiceresponse direction="vertical" randomize="yes">
-     <choicegroup type="MultipleChoice">
-        <choice location="random" correct="false"><span>`a+b`<br/></span></choice>
-        <choice location="random" correct="true"><span><math>a+b^2</math><br/></span></choice>
-        <choice location="random" correct="false"><math>a+b+c</math></choice>
-        <choice location="bottom" correct="false"><math>a+b+d</math></choice>
-     </choicegroup>
-    </multiplechoiceresponse>
-    '''}]

    response_tag = 'multiplechoiceresponse'
    max_inputfields = 1
@@ -754,14 +777,6 @@ class OptionResponse(LoncapaResponse):
    '''
    TODO: handle direction and randomize
    '''
-    snippets = [{'snippet': """<optionresponse direction="vertical" randomize="yes">
-        <optioninput options="('Up','Down')" correct="Up">
-          <text>The location of the sky</text>
-        </optioninput>
-        <optioninput options="('Up','Down')" correct="Down">
-          <text>The location of the earth</text>
-        </optioninput>
-    </optionresponse>"""}]

    response_tag = 'optionresponse'
    hint_tag = 'optionhint'
@@ -905,39 +920,6 @@ class CustomResponse(LoncapaResponse):
    Custom response. The python code to be run should be in <answer>...</answer>
    or in a <script>...</script>
    '''
-    snippets = [{'snippet': r"""<customresponse>
-    <text>
-    <br/>
-    Suppose that \(I(t)\) rises from \(0\) to \(I_S\) at a time \(t_0 \neq 0\)
-    In the space provided below write an algebraic expression for \(I(t)\).
-    <br/>
-    <textline size="5" correct_answer="IS*u(t-t0)" />
-    </text>
-    <answer type="loncapa/python">
-    correct=['correct']
-    try:
-        r = str(submission[0])
-    except ValueError:
-        correct[0] ='incorrect'
-        r = '0'
-    if not(r=="IS*u(t-t0)"):
-        correct[0] ='incorrect'
-    </answer>
-    </customresponse>"""},
-    {'snippet': """<script type="loncapa/python"><![CDATA[
-def sympy_check2():
-  messages[0] = '%s:%s' % (submission[0],fromjs[0].replace('<','&lt;'))
-  #messages[0] = str(answers)
-  correct[0] = 'correct'
-]]>
-</script>
-  <customresponse cfn="sympy_check2" type="cs" expect="2.27E-39" dojs="math" size="30" answer="2.27E-39">
-    <textline size="40" dojs="math" />
-    <responseparam description="Numerical Tolerance" type="tolerance" default="0.00001" name="tol"/>
-  </customresponse>"""}]

    response_tag = 'customresponse'
@@ -972,14 +954,29 @@ def sympy_check2():
        cfn = xml.get('cfn')
        if cfn:
            log.debug("cfn = %s" % cfn)
-            if cfn in self.context:
-                self.code = self.context[cfn]
-            else:
-                msg = "%s: can't find cfn %s in context" % (
-                    unicode(self), cfn)
-                msg += "\nSee XML source line %s" % getattr(self.xml, 'sourceline',
-                                                            '<unavailable>')
-                raise LoncapaProblemError(msg)
+
+            # This is a bit twisty. We used to grab the cfn function from
+            # the context, but now that we sandbox Python execution, we
+            # can't get functions from previous executions. So we make an
+            # actual function that will re-execute the original script,
+            # and invoke the function with the data needed.
+            def make_check_function(script_code, cfn):
+                def check_function(expect, ans, **kwargs):
+                    extra_args = "".join(", {0}={0}".format(k) for k in kwargs)
+                    code = (
+                        script_code + "\n" +
+                        "cfn_return = %s(expect, ans%s)\n" % (cfn, extra_args)
+                    )
+                    globals_dict = {
+                        'expect': expect,
+                        'ans': ans,
+                    }
+                    globals_dict.update(kwargs)
+                    safe_exec.safe_exec(code, globals_dict, cache=self.system.cache)
+                    return globals_dict['cfn_return']
+                return check_function
+
+            self.code = make_check_function(self.context['script_code'], cfn)
        if not self.code:
            if answer is None:
@@ -1036,9 +1033,6 @@ def sympy_check2():
        # put these in the context of the check function evaluator
        # note that this doesn't help the "cfn" version - only the exec version
        self.context.update({
-            # our subtree
-            'xml': self.xml,
-
            # my ID
            'response_id': self.myid,
@@ -1075,65 +1069,63 @@ def sympy_check2():
        # pass self.system.debug to cfn
        self.context['debug'] = self.system.DEBUG

+        # Run the check function
+        self.execute_check_function(idset, submission)
+
+        # build map giving "correct"ness of the answer(s)
+        correct = self.context['correct']
+        messages = self.context['messages']
+        overall_message = self.clean_message_html(self.context['overall_message'])
+        correct_map = CorrectMap()
+        correct_map.set_overall_message(overall_message)
+        for k in range(len(idset)):
+            npoints = self.maxpoints[idset[k]] if correct[k] == 'correct' else 0
+            correct_map.set(idset[k], correct[k], msg=messages[k],
+                            npoints=npoints)
+        return correct_map
+
+    def execute_check_function(self, idset, submission):
        # exec the check function
        if isinstance(self.code, basestring):
            try:
-                exec self.code in self.context['global_context'], self.context
-                correct = self.context['correct']
-                messages = self.context['messages']
-                overall_message = self.context['overall_message']
+                safe_exec.safe_exec(self.code, self.context, cache=self.system.cache)
            except Exception as err:
                self._handle_exec_exception(err)
        else:
-            # self.code is not a string; assume its a function
+            # self.code is not a string; it's a function we created earlier.
            # this is an interface to the Tutor2 check functions
            fn = self.code
-            ret = None
+            answer_given = submission[0] if (len(idset) == 1) else submission
+            kwnames = self.xml.get("cfn_extra_args", "").split()
+            kwargs = {n: self.context.get(n) for n in kwnames}
            log.debug(" submission = %s" % submission)
            try:
-                answer_given = submission[0] if (
-                    len(idset) == 1) else submission
-                # handle variable number of arguments in check function, for backwards compatibility
-                # with various Tutor2 check functions
-                args = [self.expect, answer_given,
-                        student_answers, self.answer_ids[0]]
-                argspec = inspect.getargspec(fn)
-                nargs = len(argspec.args) - len(argspec.defaults or [])
-                kwargs = {}
-                for argname in argspec.args[nargs:]:
-                    kwargs[argname] = self.context[
-                        argname] if argname in self.context else None
-                log.debug('[customresponse] answer_given=%s' % answer_given)
-                log.debug('nargs=%d, args=%s, kwargs=%s' % (
-                    nargs, args, kwargs))
-                ret = fn(*args[:nargs], **kwargs)
+                ret = fn(self.expect, answer_given, **kwargs)
            except Exception as err:
                self._handle_exec_exception(err)
+            log.debug(
+                "[courseware.capa.responsetypes.customresponse.get_score] ret = %s",
+                ret
+            )

-            if type(ret) == dict:
+            if isinstance(ret, dict):
                # One kind of dictionary the check function can return has the
                # form {'ok': BOOLEAN, 'msg': STRING}
                # If there are multiple inputs, they all get marked
                # to the same correct/incorrect value
                if 'ok' in ret:
-                    correct = ['correct'] * len(idset) if ret[
-                        'ok'] else ['incorrect'] * len(idset)
+                    correct = ['correct' if ret['ok'] else 'incorrect'] * len(idset)
                    msg = ret.get('msg', None)
                    msg = self.clean_message_html(msg)

                    # If there is only one input, apply the message to that input
                    # Otherwise, apply the message to the whole problem
                    if len(idset) > 1:
-                        overall_message = msg
+                        self.context['overall_message'] = msg
                    else:
-                        messages[0] = msg
+                        self.context['messages'][0] = msg

                # Another kind of dictionary the check function can return has
                # the form:
@@ -1155,6 +1147,8 @@ def sympy_check2():
                        msg = (self.clean_message_html(input_dict['msg'])
                               if 'msg' in input_dict else None)
                        messages.append(msg)
+                    self.context['messages'] = messages
+                    self.context['overall_message'] = overall_message

                # Otherwise, we do not recognize the dictionary
                # Raise an exception
@@ -1163,25 +1157,10 @@ def sympy_check2():
                    raise ResponseError(
                        "CustomResponse: check function returned an invalid dict")

-            # The check function can return a boolean value,
-            # indicating whether all inputs should be marked
-            # correct or incorrect
            else:
-                n = len(idset)
-                correct = ['correct'] * n if ret else ['incorrect'] * n
-
-        # build map giving "correct"ness of the answer(s)
-        correct_map = CorrectMap()
-        overall_message = self.clean_message_html(overall_message)
-        correct_map.set_overall_message(overall_message)
-        for k in range(len(idset)):
-            npoints = (self.maxpoints[idset[k]]
-                       if correct[k] == 'correct' else 0)
-            correct_map.set(idset[k], correct[k], msg=messages[k],
-                            npoints=npoints)
-        return correct_map
+                correct = ['correct' if ret else 'incorrect'] * len(idset)
+
+            self.context['correct'] = correct

    def clean_message_html(self, msg):
@@ -1253,24 +1232,38 @@ class SymbolicResponse(CustomResponse):
    """
    Symbolic math response checking, using symmath library.
    """
-    snippets = [{'snippet': r'''<problem>
-      <text>Compute \[ \exp\left(-i \frac{\theta}{2} \left[ \begin{matrix} 0 & 1 \\ 1 & 0 \end{matrix} \right] \right) \]
-      and give the resulting \(2\times 2\) matrix: <br/>
-        <symbolicresponse answer="">
-          <textline size="40" math="1" />
-        </symbolicresponse>
-      <br/>
-      Your input should be typed in as a list of lists, eg <tt>[[1,2],[3,4]]</tt>.
-      </text>
-    </problem>'''}]

    response_tag = 'symbolicresponse'
+    max_inputfields = 1

    def setup_response(self):
+        # Symbolic response always uses symmath_check()
+        # If the XML did not specify this, then set it now
+        # Otherwise, we get an error from the superclass
        self.xml.set('cfn', 'symmath_check')
-        code = "from symmath import *"
-        exec code in self.context, self.context
-        CustomResponse.setup_response(self)
+
+        # Let CustomResponse do its setup
+        super(SymbolicResponse, self).setup_response()
+
+    def execute_check_function(self, idset, submission):
+        from symmath import symmath_check
+        try:
+            # Since we have limited max_inputfields to 1,
+            # we can assume that there is only one submission
+            answer_given = submission[0]
+
+            ret = symmath_check(
+                self.expect, answer_given,
+                dynamath=self.context.get('dynamath'),
+                options=self.context.get('options'),
+                debug=self.context.get('debug'),
+            )
+        except Exception as err:
+            log.error("oops in symbolicresponse (cfn) error %s" % err)
+            log.error(traceback.format_exc())
+            raise Exception("oops in symbolicresponse (cfn) error %s" % err)
+
+        self.context['messages'][0] = self.clean_message_html(ret['msg'])
+        self.context['correct'] = ['correct' if ret['ok'] else 'incorrect'] * len(idset)

#-----------------------------------------------------------------------------
@@ -1325,10 +1318,8 @@ class CodeResponse(LoncapaResponse):
        # Check if XML uses the ExternalResponse format or the generic
        # CodeResponse format
        codeparam = self.xml.find('codeparam')
-        if codeparam is None:
-            self._parse_externalresponse_xml()
-        else:
-            self._parse_coderesponse_xml(codeparam)
+        assert codeparam is not None, "Unsupported old format! <coderesponse> without <codeparam>"
+        self._parse_coderesponse_xml(codeparam)

    def _parse_coderesponse_xml(self, codeparam):
        '''
@@ -1348,62 +1339,6 @@
        self.answer = find_with_default(codeparam, 'answer_display',
                                        'No answer provided.')
-    def _parse_externalresponse_xml(self):
-        '''
-        VS[compat]: Suppport for old ExternalResponse XML format. When successful, sets:
-            self.initial_display
-            self.answer (an answer to display to the student in the LMS)
-            self.payload
-        '''
-        answer = self.xml.find('answer')
-
-        if answer is not None:
-            answer_src = answer.get('src')
-            if answer_src is not None:
-                code = self.system.filesystem.open('src/' + answer_src).read()
-            else:
-                code = answer.text
-        else:  # no <answer> stanza; get code from <script>
-            code = self.context['script_code']
-            if not code:
-                msg = '%s: Missing answer script code for coderesponse' % unicode(
-                    self)
-                msg += "\nSee XML source line %s" % getattr(
-                    self.xml, 'sourceline', '<unavailable>')
-                raise LoncapaProblemError(msg)
-
-        tests = self.xml.get('tests')
-
-        # Extract 'answer' and 'initial_display' from XML. Note that the code to be exec'ed here is:
-        # (1) Internal edX code, i.e. NOT student submissions, and
-        # (2) The code should only define the strings 'initial_display', 'answer',
-        #     'preamble', 'test_program'
-        #     following the ExternalResponse XML format
-        penv = {}
-        penv['__builtins__'] = globals()['__builtins__']
-        try:
-            exec(code, penv, penv)
-        except Exception as err:
-            log.error(
-                'Error in CodeResponse %s: Error in problem reference code' % err)
-            raise Exception(err)
-        try:
-            self.answer = penv['answer']
-            self.initial_display = penv['initial_display']
-        except Exception as err:
-            log.error("Error in CodeResponse %s: Problem reference code does not define"
-                      " 'answer' and/or 'initial_display' in <answer>...</answer>" % err)
-            raise Exception(err)
-
-        # Finally, make the ExternalResponse input XML format conform to the generic
-        # exteral grader interface
-        # The XML tagging of grader_payload is pyxserver-specific
-        grader_payload = '<pyxserver>'
-        grader_payload += '<tests>' + tests + '</tests>\n'
-        grader_payload += '<processor>' + code + '</processor>'
-        grader_payload += '</pyxserver>'
-        self.payload = {'grader_payload': grader_payload}
-
    def get_score(self, student_answers):
        try:
            # Note that submission can be a file
@@ -1583,44 +1518,6 @@ class ExternalResponse(LoncapaResponse):
    Typically used by coding problems.
    '''
-    snippets = [{'snippet': '''<externalresponse tests="repeat:10,generate">
-    <textbox rows="10" cols="70" mode="python"/>
-    <answer><![CDATA[
-initial_display = """
-def inc(x):
-"""
-answer = """
-def inc(n):
-    return n+1
-"""
-preamble = """
-import sympy
-"""
-test_program = """
-import random
-
-def testInc(n = None):
-    if n is None:
-        n = random.randint(2, 20)
-    print 'Test is: inc(%d)'%n
-    return str(inc(n))
-
-def main():
-    f = os.fdopen(3,'w')
-    test = int(sys.argv[1])
-    rndlist = map(int,os.getenv('rndlist').split(','))
-    random.seed(rndlist[0])
-    if test == 1: f.write(testInc(0))
-    elif test == 2: f.write(testInc(1))
-    else: f.write(testInc())
-    f.close()
-main()
-"""
-]]>
-    </answer>
-</externalresponse>'''}]

    response_tag = 'externalresponse'
    allowed_inputfields = ['textline', 'textbox']
@@ -1766,23 +1663,6 @@ class FormulaResponse(LoncapaResponse):
    '''
    Checking of symbolic math response using numerical sampling.
    '''
-    snippets = [{'snippet': '''<problem>
-
-<script type="loncapa/python">
-I = "m*c^2"
-</script>
-
-<text>
-<br/>
-Give an equation for the relativistic energy of an object with mass m.
-</text>
-<formularesponse type="cs" samples="m,c@1,2:3,4#10" answer="$I">
-  <responseparam description="Numerical Tolerance" type="tolerance"
-                 default="0.00001" name="tol" />
-  <textline size="40" math="1" />
-</formularesponse>
-</problem>'''}]

    response_tag = 'formularesponse'
    hint_tag = 'formulahint'
@@ -1927,21 +1807,18 @@ class SchematicResponse(LoncapaResponse):
            self.code = answer.text

    def get_score(self, student_answers):
-        from capa_problem import global_context
-        submission = [json.loads(student_answers[
-                      k]) for k in sorted(self.answer_ids)]
+        #from capa_problem import global_context
+        submission = [
+            json.loads(student_answers[k]) for k in sorted(self.answer_ids)
+        ]
        self.context.update({'submission': submission})
        try:
-            exec self.code in global_context, self.context
+            safe_exec.safe_exec(self.code, self.context, cache=self.system.cache)
        except Exception as err:
-            _, _, traceback_obj = sys.exc_info()
-            raise ResponseError, ResponseError(err.message), traceback_obj
+            msg = 'Error %s in evaluating SchematicResponse' % err
+            raise ResponseError(msg)
        cmap = CorrectMap()
-        cmap.set_dict(dict(zip(sorted(
-            self.answer_ids), self.context['correct'])))
+        cmap.set_dict(dict(zip(sorted(self.answer_ids), self.context['correct'])))
        return cmap

    def get_answers(self):
@@ -1977,19 +1854,6 @@ class ImageResponse(LoncapaResponse):
    Returns:
        True, if click is inside any region or rectangle. Otherwise False.
    """
-    snippets = [{'snippet': '''<imageresponse>
-  <imageinput src="image1.jpg" width="200" height="100"
-              rectangle="(10,10)-(20,30)" />
-  <imageinput src="image2.jpg" width="210" height="130"
-              rectangle="(12,12)-(40,60)" />
-  <imageinput src="image3.jpg" width="210" height="130"
-              rectangle="(10,10)-(20,30);(12,12)-(40,60)" />
-  <imageinput src="image4.jpg" width="811" height="610"
-              rectangle="(10,10)-(20,30);(12,12)-(40,60)"
-              regions="[[[10,10], [20,30], [40, 10]], [[100,100], [120,130], [110,150]]]"/>
-  <imageinput src="image5.jpg" width="200" height="200"
-              regions="[[[10,10], [20,30], [40, 10]], [[100,100], [120,130], [110,150]]]"/>
-</imageresponse>'''}]

    response_tag = 'imageresponse'
    allowed_inputfields = ['imageinput']
...
Configuring Capa sandboxed execution
====================================
Capa problems can contain code authored by the course author. We need to
execute that code in a sandbox. We use CodeJail as the sandboxing facility,
but it needs to be configured specifically for Capa's use.
As a developer, you don't have to do anything to configure sandboxing if you
don't want to; everything will operate properly, you just won't have any
protection on that code.
If you want to configure sandboxing, you're going to follow the `README from
CodeJail`__, with a few Capa-specific tweaks.
__ https://github.com/edx/codejail/blob/master/README.rst
1. At the instruction to install packages into the sandbox, you'll
   need to install both `pre-sandbox-requirements.txt` and
   `sandbox-requirements.txt`::
$ sudo pip install -r pre-sandbox-requirements.txt
$ sudo pip install -r sandbox-requirements.txt
2. At the instruction to create the AppArmor profile, you'll need a line in
the profile for the sandbox packages. <EDXPLATFORM> is the full path to
your edx_platform repo::
<EDXPLATFORM>/common/lib/sandbox-packages/** r,
3. You can configure resource limits in settings.py. A CODE_JAIL setting is
   available; it is a dictionary. The "limits" key lets you adjust the limits for
   CPU time, real time, and memory use. Setting any of them to zero disables
   that limit::
# in settings.py...
CODE_JAIL = {
# Configurable limits.
'limits': {
# How many CPU seconds can jailed code use?
'CPU': 1,
# How many real-time seconds will a sandbox survive?
'REALTIME': 1,
# How much memory (in bytes) can a sandbox use?
'VMEM': 30000000,
},
}
That's it. Once you've finished the CodeJail configuration instructions,
your course-hosted Python code should be run securely.
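For orientation, here is a minimal sketch of how capa-side code calls the `safe_exec`
wrapper this change introduces (see `safe_exec.py` below); the code string, globals
dict, and seed are illustrative only, not part of the configuration::

    from capa.safe_exec import safe_exec

    globals_dict = {}
    # Course-authored code sees a seeded `random` plus the assumed imports.
    safe_exec(
        "total = sum(random.randint(1, 6) for _ in range(3))",
        globals_dict,
        random_seed=42,     # same seed, same rolls -- per-student reproducibility
    )
    print globals_dict['total']   # results come back as side effects on the globals dict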
"""Capa's specialized use of codejail.safe_exec."""
from .safe_exec import safe_exec, update_hash
"""A module proxy for delayed importing of modules.
From http://barnesc.blogspot.com/2006/06/automatic-python-imports-with-autoimp.html,
in the public domain.
"""
import sys
class LazyModule(object):
"""A lazy module proxy."""
def __init__(self, modname):
self.__dict__['__name__'] = modname
self._set_mod(None)
def _set_mod(self, mod):
if mod is not None:
self.__dict__ = mod.__dict__
self.__dict__['_lazymod_mod'] = mod
def _load_mod(self):
__import__(self.__name__)
self._set_mod(sys.modules[self.__name__])
def __getattr__(self, name):
if self.__dict__['_lazymod_mod'] is None:
self._load_mod()
mod = self.__dict__['_lazymod_mod']
if hasattr(mod, name):
return getattr(mod, name)
else:
try:
subname = '%s.%s' % (self.__name__, name)
__import__(subname)
submod = getattr(mod, name)
except ImportError:
raise AttributeError("'module' object has no attribute %r" % name)
            # LazyModule() takes only a module name; cache the submodule we just
            # imported so later lookups hit __dict__ directly.
            self.__dict__[name] = submod
return self.__dict__[name]
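A quick illustration of the laziness, assuming `fractions` has not already been
imported by something else (the import path mirrors the tests further down)::

    import sys
    from capa.safe_exec.lazymod import LazyModule

    fractions_proxy = LazyModule("fractions")   # nothing imported yet
    assert "fractions" not in sys.modules
    third = fractions_proxy.Fraction(1, 3)      # first attribute access triggers the real import
    assert "fractions" in sys.modules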
"""Capa's specialized use of codejail.safe_exec."""
from codejail.safe_exec import safe_exec as codejail_safe_exec
from codejail.safe_exec import json_safe, SafeExecException
from . import lazymod
from statsd import statsd
import hashlib
# Establish the Python environment for Capa.
# Capa assumes float-friendly division always.
# The name "random" is a properly-seeded stand-in for the random module.
CODE_PROLOG = """\
from __future__ import division
import random as random_module
import sys
random = random_module.Random(%r)
random.Random = random_module.Random
del random_module
sys.modules['random'] = random
"""
ASSUMED_IMPORTS=[
("numpy", "numpy"),
("math", "math"),
("scipy", "scipy"),
("calc", "calc"),
("eia", "eia"),
("chemcalc", "chem.chemcalc"),
("chemtools", "chem.chemtools"),
("miller", "chem.miller"),
("draganddrop", "verifiers.draganddrop"),
]
# We'll need the code from lazymod.py for use in safe_exec, so read it now.
lazymod_py_file = lazymod.__file__
if lazymod_py_file.endswith("c"):
lazymod_py_file = lazymod_py_file[:-1]
lazymod_py = open(lazymod_py_file).read()
LAZY_IMPORTS = [lazymod_py]
for name, modname in ASSUMED_IMPORTS:
LAZY_IMPORTS.append("{} = LazyModule('{}')\n".format(name, modname))
LAZY_IMPORTS = "".join(LAZY_IMPORTS)
def update_hash(hasher, obj):
"""
Update a `hashlib` hasher with a nested object.
To properly cache nested structures, we need to compute a hash from the
entire structure, canonicalizing at every level.
`hasher`'s `.update()` method is called a number of times, touching all of
`obj` in the process. Only primitive JSON-safe types are supported.
"""
hasher.update(str(type(obj)))
if isinstance(obj, (tuple, list)):
for e in obj:
update_hash(hasher, e)
elif isinstance(obj, dict):
for k in sorted(obj):
update_hash(hasher, k)
update_hash(hasher, obj[k])
else:
hasher.update(repr(obj))
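As an aside, the canonicalization this buys: dicts that compare equal hash
identically regardless of key order, while list order still matters. The
`digest` helper here is illustration only, using the `update_hash` just defined::

    import hashlib

    def digest(obj):
        md5er = hashlib.md5()
        update_hash(md5er, obj)
        return md5er.hexdigest()

    assert digest({'a': 1, 'b': 2}) == digest({'b': 2, 'a': 1})
    assert digest({'a': [1, 2]}) != digest({'a': [2, 1]})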
@statsd.timed('capa.safe_exec.time')
def safe_exec(code, globals_dict, random_seed=None, python_path=None, cache=None):
"""
Execute python code safely.
`code` is the Python code to execute. It has access to the globals in `globals_dict`,
and any changes it makes to those globals are visible in `globals_dict` when this
function returns.
    `random_seed` will be used to seed the `random` module available to the code.
`python_path` is a list of directories to add to the Python path before execution.
`cache` is an object with .get(key) and .set(key, value) methods. It will be used
to cache the execution, taking into account the code, the values of the globals,
and the random seed.
"""
# Check the cache for a previous result.
if cache:
safe_globals = json_safe(globals_dict)
md5er = hashlib.md5()
md5er.update(repr(code))
update_hash(md5er, safe_globals)
key = "safe_exec.%r.%s" % (random_seed, md5er.hexdigest())
cached = cache.get(key)
if cached is not None:
# We have a cached result. The result is a pair: the exception
# message, if any, else None; and the resulting globals dictionary.
emsg, cleaned_results = cached
globals_dict.update(cleaned_results)
if emsg:
raise SafeExecException(emsg)
return
# Create the complete code we'll run.
code_prolog = CODE_PROLOG % random_seed
# Run the code! Results are side effects in globals_dict.
try:
codejail_safe_exec(
code_prolog + LAZY_IMPORTS + code, globals_dict,
python_path=python_path,
)
except SafeExecException as e:
emsg = e.message
else:
emsg = None
# Put the result back in the cache. This is complicated by the fact that
# the globals dict might not be entirely serializable.
if cache:
cleaned_results = json_safe(globals_dict)
cache.set(key, (emsg, cleaned_results))
# If an exception happened, raise it now.
if emsg:
raise e
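Usage-wise, the `cache` argument is duck-typed: anything with `.get(key)` and
`.set(key, value)` will do, such as the dict-backed `DictCache` defined in the
tests below. A sketch of the round trip (fresh globals dicts so the cache key
matches on the second call)::

    cache = DictCache({})
    g1, g2 = {}, {}
    safe_exec("a = random.randint(0, 10)", g1, random_seed=1, cache=cache)  # miss: really executes
    safe_exec("a = random.randint(0, 10)", g2, random_seed=1, cache=cache)  # hit: replayed from the cache
    assert g1['a'] == g2['a']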
"""Test lazymod.py"""
import sys
import unittest
from capa.safe_exec.lazymod import LazyModule
class ModuleIsolation(object):
"""
Manage changes to sys.modules so that we can roll back imported modules.
Create this object, it will snapshot the currently imported modules. When
you call `clean_up()`, it will delete any module imported since its creation.
"""
def __init__(self):
# Save all the names of all the imported modules.
self.mods = set(sys.modules)
def clean_up(self):
# Get a list of modules that didn't exist when we were created
new_mods = [m for m in sys.modules if m not in self.mods]
# and delete them all so another import will run code for real again.
for m in new_mods:
del sys.modules[m]
class TestLazyMod(unittest.TestCase):
def setUp(self):
# Each test will remove modules that it imported.
self.addCleanup(ModuleIsolation().clean_up)
def test_simple(self):
# Import some stdlib module that has not been imported before
self.assertNotIn("colorsys", sys.modules)
colorsys = LazyModule("colorsys")
hsv = colorsys.rgb_to_hsv(.3, .4, .2)
self.assertEqual(hsv[0], 0.25)
def test_dotted(self):
self.assertNotIn("email.utils", sys.modules)
email_utils = LazyModule("email.utils")
self.assertEqual(email_utils.quote('"hi"'), r'\"hi\"')
"""Test safe_exec.py"""
import hashlib
import os.path
import random
import textwrap
import unittest
from capa.safe_exec import safe_exec, update_hash
from codejail.safe_exec import SafeExecException
class TestSafeExec(unittest.TestCase):
def test_set_values(self):
g = {}
safe_exec("a = 17", g)
self.assertEqual(g['a'], 17)
def test_division(self):
g = {}
# Future division: 1/2 is 0.5.
safe_exec("a = 1/2", g)
self.assertEqual(g['a'], 0.5)
def test_assumed_imports(self):
g = {}
# Math is always available.
safe_exec("a = int(math.pi)", g)
self.assertEqual(g['a'], 3)
def test_random_seeding(self):
g = {}
r = random.Random(17)
rnums = [r.randint(0, 999) for _ in xrange(100)]
# Without a seed, the results are unpredictable
safe_exec("rnums = [random.randint(0, 999) for _ in xrange(100)]", g)
self.assertNotEqual(g['rnums'], rnums)
# With a seed, the results are predictable
safe_exec("rnums = [random.randint(0, 999) for _ in xrange(100)]", g, random_seed=17)
self.assertEqual(g['rnums'], rnums)
def test_random_is_still_importable(self):
g = {}
r = random.Random(17)
rnums = [r.randint(0, 999) for _ in xrange(100)]
# With a seed, the results are predictable even from the random module
safe_exec(
"import random\n"
"rnums = [random.randint(0, 999) for _ in xrange(100)]\n",
g, random_seed=17)
self.assertEqual(g['rnums'], rnums)
def test_python_lib(self):
pylib = os.path.dirname(__file__) + "/test_files/pylib"
g = {}
safe_exec(
"import constant; a = constant.THE_CONST",
g, python_path=[pylib]
)
def test_raising_exceptions(self):
g = {}
with self.assertRaises(SafeExecException) as cm:
safe_exec("1/0", g)
self.assertIn("ZeroDivisionError", cm.exception.message)
class DictCache(object):
"""A cache implementation over a simple dict, for testing."""
def __init__(self, d):
self.cache = d
def get(self, key):
# Actual cache implementations have limits on key length
assert len(key) <= 250
return self.cache.get(key)
def set(self, key, value):
# Actual cache implementations have limits on key length
assert len(key) <= 250
self.cache[key] = value
class TestSafeExecCaching(unittest.TestCase):
"""Test that caching works on safe_exec."""
def test_cache_miss_then_hit(self):
g = {}
cache = {}
# Cache miss
safe_exec("a = int(math.pi)", g, cache=DictCache(cache))
self.assertEqual(g['a'], 3)
# A result has been cached
self.assertEqual(cache.values()[0], (None, {'a': 3}))
# Fiddle with the cache, then try it again.
cache[cache.keys()[0]] = (None, {'a': 17})
g = {}
safe_exec("a = int(math.pi)", g, cache=DictCache(cache))
self.assertEqual(g['a'], 17)
def test_cache_large_code_chunk(self):
# Caching used to die on memcache with more than 250 bytes of code.
# Check that it doesn't any more.
code = "a = 0\n" + ("a += 1\n" * 12345)
g = {}
cache = {}
safe_exec(code, g, cache=DictCache(cache))
self.assertEqual(g['a'], 12345)
def test_cache_exceptions(self):
# Used to be that running code that raised an exception didn't cache
# the result. Check that now it does.
code = "1/0"
g = {}
cache = {}
with self.assertRaises(SafeExecException):
safe_exec(code, g, cache=DictCache(cache))
# The exception should be in the cache now.
self.assertEqual(len(cache), 1)
cache_exc_msg, cache_globals = cache.values()[0]
self.assertIn("ZeroDivisionError", cache_exc_msg)
# Change the value stored in the cache, the result should change.
cache[cache.keys()[0]] = ("Hey there!", {})
with self.assertRaises(SafeExecException):
safe_exec(code, g, cache=DictCache(cache))
self.assertEqual(len(cache), 1)
cache_exc_msg, cache_globals = cache.values()[0]
self.assertEqual("Hey there!", cache_exc_msg)
# Change it again, now no exception!
cache[cache.keys()[0]] = (None, {'a': 17})
safe_exec(code, g, cache=DictCache(cache))
self.assertEqual(g['a'], 17)
def test_unicode_submission(self):
# Check that using non-ASCII unicode does not raise an encoding error.
# Try several non-ASCII unicode characters
for code in [129, 500, 2**8 - 1, 2**16 - 1]:
code_with_unichr = unicode("# ") + unichr(code)
try:
safe_exec(code_with_unichr, {}, cache=DictCache({}))
except UnicodeEncodeError:
self.fail("Tried executing code with non-ASCII unicode: {0}".format(code))
class TestUpdateHash(unittest.TestCase):
"""Test the safe_exec.update_hash function to be sure it canonicalizes properly."""
def hash_obj(self, obj):
"""Return the md5 hash that `update_hash` makes us."""
md5er = hashlib.md5()
update_hash(md5er, obj)
return md5er.hexdigest()
def equal_but_different_dicts(self):
"""
Make two equal dicts with different key order.
Simple literals won't do it. Filling one and then shrinking it will
make them different.
"""
d1 = {k:1 for k in "abcdefghijklmnopqrstuvwxyz"}
d2 = dict(d1)
for i in xrange(10000):
d2[i] = 1
for i in xrange(10000):
del d2[i]
# Check that our dicts are equal, but with different key order.
self.assertEqual(d1, d2)
self.assertNotEqual(d1.keys(), d2.keys())
return d1, d2
def test_simple_cases(self):
h1 = self.hash_obj(1)
h10 = self.hash_obj(10)
hs1 = self.hash_obj("1")
self.assertNotEqual(h1, h10)
self.assertNotEqual(h1, hs1)
def test_list_ordering(self):
h1 = self.hash_obj({'a': [1,2,3]})
h2 = self.hash_obj({'a': [3,2,1]})
self.assertNotEqual(h1, h2)
def test_dict_ordering(self):
d1, d2 = self.equal_but_different_dicts()
h1 = self.hash_obj(d1)
h2 = self.hash_obj(d2)
self.assertEqual(h1, h2)
def test_deep_ordering(self):
d1, d2 = self.equal_but_different_dicts()
o1 = {'a':[1, 2, [d1], 3, 4]}
o2 = {'a':[1, 2, [d2], 3, 4]}
h1 = self.hash_obj(o1)
h2 = self.hash_obj(o2)
self.assertEqual(h1, h2)
class TestRealProblems(unittest.TestCase):
def test_802x(self):
code = textwrap.dedent("""\
import math
import random
import numpy
e=1.602e-19 #C
me=9.1e-31 #kg
mp=1.672e-27 #kg
eps0=8.854e-12 #SI units
mu0=4e-7*math.pi #SI units
Rd1=random.randrange(1,30,1)
Rd2=random.randrange(30,50,1)
Rd3=random.randrange(50,70,1)
Rd4=random.randrange(70,100,1)
Rd5=random.randrange(100,120,1)
Vd1=random.randrange(1,20,1)
Vd2=random.randrange(20,40,1)
Vd3=random.randrange(40,60,1)
#R=[0,10,30,50,70,100] #Ohm
#V=[0,12,24,36] # Volt
R=[0,Rd1,Rd2,Rd3,Rd4,Rd5] #Ohms
V=[0,Vd1,Vd2,Vd3] #Volts
#here the currents IL and IR are defined as in figure ps3_p3_fig2
a=numpy.array([ [ R[1]+R[4]+R[5],R[4] ],[R[4], R[2]+R[3]+R[4] ] ])
b=numpy.array([V[1]-V[2],-V[3]-V[2]])
x=numpy.linalg.solve(a,b)
IL='%.2e' % x[0]
IR='%.2e' % x[1]
ILR='%.2e' % (x[0]+x[1])
def sign(x):
return abs(x)/x
RW="Rightwards"
LW="Leftwards"
UW="Upwards"
DW="Downwards"
I1='%.2e' % abs(x[0])
I1d=LW if sign(x[0])==1 else RW
I1not=LW if I1d==RW else RW
I2='%.2e' % abs(x[1])
I2d=RW if sign(x[1])==1 else LW
I2not=LW if I2d==RW else RW
I3='%.2e' % abs(x[1])
I3d=DW if sign(x[1])==1 else UW
I3not=DW if I3d==UW else UW
I4='%.2e' % abs(x[0]+x[1])
I4d=UW if sign(x[1]+x[0])==1 else DW
I4not=DW if I4d==UW else UW
I5='%.2e' % abs(x[0])
I5d=RW if sign(x[0])==1 else LW
I5not=LW if I5d==RW else RW
VAP=-x[0]*R[1]-(x[0]+x[1])*R[4]
VPN=-V[2]
VGD=+V[1]-x[0]*R[1]+V[3]+x[1]*R[2]
aVAP='%.2e' % VAP
aVPN='%.2e' % VPN
aVGD='%.2e' % VGD
""")
g = {}
safe_exec(code, g)
self.assertIn("aVAP", g)
import fs
import fs.osfs
-import os
+import os, os.path

+from capa.capa_problem import LoncapaProblem
from mock import Mock, MagicMock
import xml.sax.saxutils as saxutils
@@ -22,16 +22,28 @@ def calledback_url(dispatch = 'score_update'):

xqueue_interface = MagicMock()
xqueue_interface.send_to_queue.return_value = (0, 'Success!')

-test_system = Mock(
-    ajax_url='courses/course_id/modx/a_location',
-    track_function=Mock(),
-    get_module=Mock(),
-    render_template=tst_render_template,
-    replace_urls=Mock(),
-    user=Mock(),
-    filestore=fs.osfs.OSFS(os.path.join(TEST_DIR, "test_files")),
-    debug=True,
-    xqueue={'interface': xqueue_interface, 'construct_callback': calledback_url, 'default_queuename': 'testqueue', 'waittime': 10},
-    node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"),
-    anonymous_student_id='student'
-)
+def test_system():
+    """
+    Construct a mock ModuleSystem instance.
+    """
+    the_system = Mock(
+        ajax_url='courses/course_id/modx/a_location',
+        track_function=Mock(),
+        get_module=Mock(),
+        render_template=tst_render_template,
+        replace_urls=Mock(),
+        user=Mock(),
+        filestore=fs.osfs.OSFS(os.path.join(TEST_DIR, "test_files")),
+        debug=True,
+        xqueue={'interface': xqueue_interface, 'construct_callback': calledback_url, 'default_queuename': 'testqueue', 'waittime': 10},
+        node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"),
+        anonymous_student_id='student',
+        cache=None,
+        can_execute_unsafe_code=lambda: False,
+    )
+    return the_system
+
+
+def new_loncapa_problem(xml, system=None):
+    """Construct a `LoncapaProblem` suitable for unit tests."""
+    return LoncapaProblem(xml, id='1', seed=723, system=system or test_system())
@@ -221,6 +221,8 @@ class CustomResponseXMLFactory(ResponseXMLFactory):
        cfn = kwargs.get('cfn', None)
        expect = kwargs.get('expect', None)
        answer = kwargs.get('answer', None)
+        options = kwargs.get('options', None)
+        cfn_extra_args = kwargs.get('cfn_extra_args', None)

        # Create the response element
        response_element = etree.Element("customresponse")
@@ -235,6 +237,33 @@
            answer_element = etree.SubElement(response_element, "answer")
            answer_element.text = str(answer)

+        if options:
+            response_element.set('options', str(options))
+
+        if cfn_extra_args:
+            response_element.set('cfn_extra_args', str(cfn_extra_args))
+
+        return response_element
+
+    def create_input_element(self, **kwargs):
+        return ResponseXMLFactory.textline_input_xml(**kwargs)
+
+
+class SymbolicResponseXMLFactory(ResponseXMLFactory):
+    """ Factory for creating <symbolicresponse> XML trees """
+
+    def create_response_element(self, **kwargs):
+        cfn = kwargs.get('cfn', None)
+        answer = kwargs.get('answer', None)
+        options = kwargs.get('options', None)
+
+        response_element = etree.Element("symbolicresponse")
+
+        if cfn:
+            response_element.set('cfn', str(cfn))
+        if answer:
+            response_element.set('answer', str(answer))
+        if options:
+            response_element.set('options', str(options))
+
        return response_element

    def create_input_element(self, **kwargs):
@@ -638,12 +667,16 @@ class StringResponseXMLFactory(ResponseXMLFactory):
            Where *hint_prompt* is the string for which we show the hint,
            *hint_name* is an internal identifier for the hint,
            and *hint_text* is the text we show for the hint.
+
+        *hintfn*: The name of a function in the script to use for hints.
        """
        # Retrieve the **kwargs
        answer = kwargs.get("answer", None)
        case_sensitive = kwargs.get("case_sensitive", True)
        hint_list = kwargs.get('hints', None)
-        assert(answer)
+        hint_fn = kwargs.get('hintfn', None)
+        assert answer

        # Create the <stringresponse> element
        response_element = etree.Element("stringresponse")
@@ -655,18 +688,24 @@
        response_element.set("type", "cs" if case_sensitive else "ci")

        # Add the hints if specified
-        if hint_list:
+        if hint_list or hint_fn:
            hintgroup_element = etree.SubElement(response_element, "hintgroup")
-            for (hint_prompt, hint_name, hint_text) in hint_list:
-                stringhint_element = etree.SubElement(hintgroup_element, "stringhint")
-                stringhint_element.set("answer", str(hint_prompt))
-                stringhint_element.set("name", str(hint_name))
+
+            if hint_list:
+                assert not hint_fn
+                for (hint_prompt, hint_name, hint_text) in hint_list:
+                    stringhint_element = etree.SubElement(hintgroup_element, "stringhint")
+                    stringhint_element.set("answer", str(hint_prompt))
+                    stringhint_element.set("name", str(hint_name))

                    hintpart_element = etree.SubElement(hintgroup_element, "hintpart")
                    hintpart_element.set("on", str(hint_name))
+
+                    hint_text_element = etree.SubElement(hintpart_element, "text")
+                    hint_text_element.text = str(hint_text)

-                hint_text_element = etree.SubElement(hintpart_element, "text")
-                hint_text_element.text = str(hint_text)
+            if hint_fn:
+                assert not hint_list
+                hintgroup_element.set("hintfn", hint_fn)

        return response_element
@@ -705,3 +744,38 @@ class AnnotationResponseXMLFactory(ResponseXMLFactory):
            option_element.text = description
        return input_element
class SymbolicResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <symbolicresponse> xml """
def create_response_element(self, **kwargs):
""" Build the <symbolicresponse> XML element.
Uses **kwargs:
*expect*: The correct answer (a sympy string)
*options*: list of option strings to pass to symmath_check
(e.g. 'matrix', 'qbit', 'imaginary', 'numerical')"""
# Retrieve **kwargs
expect = kwargs.get('expect', '')
options = kwargs.get('options', [])
# Symmath check expects a string of options
options_str = ",".join(options)
# Construct the <symbolicresponse> element
response_element = etree.Element('symbolicresponse')
if expect:
response_element.set('expect', str(expect))
if options_str:
response_element.set('options', str(options_str))
return response_element
def create_input_element(self, **kwargs):
return ResponseXMLFactory.textline_input_xml(**kwargs)
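A hedged sketch of how a test might drive the new factory; the kwargs are the ones read in create_response_element above, and math_display is passed through to the textline input:

    from response_xml_factory import SymbolicResponseXMLFactory

    # Produces <symbolicresponse expect="..." options="matrix,imaginary">
    # wrapping a math-enabled <textline/> input.
    xml_str = SymbolicResponseXMLFactory().build_xml(
        math_display=True,
        expect="[[cos(theta),i*sin(theta)],[i*sin(theta),cos(theta)]]",
        options=["matrix", "imaginary"],
    )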
...@@ -26,7 +26,7 @@ class HelperTest(unittest.TestCase): ...@@ -26,7 +26,7 @@ class HelperTest(unittest.TestCase):
Make sure that our helper function works! Make sure that our helper function works!
''' '''
def check(self, d): def check(self, d):
xml = etree.XML(test_system.render_template('blah', d)) xml = etree.XML(test_system().render_template('blah', d))
self.assertEqual(d, extract_context(xml)) self.assertEqual(d, extract_context(xml))
def test_extract_context(self): def test_extract_context(self):
...@@ -46,11 +46,11 @@ class SolutionRenderTest(unittest.TestCase): ...@@ -46,11 +46,11 @@ class SolutionRenderTest(unittest.TestCase):
xml_str = """<solution id="solution_12">{s}</solution>""".format(s=solution) xml_str = """<solution id="solution_12">{s}</solution>""".format(s=solution)
element = etree.fromstring(xml_str) element = etree.fromstring(xml_str)
renderer = lookup_tag('solution')(test_system, element) renderer = lookup_tag('solution')(test_system(), element)
self.assertEqual(renderer.id, 'solution_12') self.assertEqual(renderer.id, 'solution_12')
# our test_system "renders" templates to a div with the repr of the context # Our test_system "renders" templates to a div with the repr of the context.
xml = renderer.get_html() xml = renderer.get_html()
context = extract_context(xml) context = extract_context(xml)
self.assertEqual(context, {'id': 'solution_12'}) self.assertEqual(context, {'id': 'solution_12'})
...@@ -65,7 +65,7 @@ class MathRenderTest(unittest.TestCase): ...@@ -65,7 +65,7 @@ class MathRenderTest(unittest.TestCase):
xml_str = """<math>{tex}</math>""".format(tex=latex_in) xml_str = """<math>{tex}</math>""".format(tex=latex_in)
element = etree.fromstring(xml_str) element = etree.fromstring(xml_str)
renderer = lookup_tag('math')(test_system, element) renderer = lookup_tag('math')(test_system(), element)
self.assertEqual(renderer.mathstr, mathjax_out) self.assertEqual(renderer.mathstr, mathjax_out)
......
<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE html
PUBLIC "-//W3C//DTD XHTML 1.1 plus MathML 2.0//EN" "http://www.w3.org/Math/DTD/mathml2/xhtml-math11-f.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
<head>
<meta content="application/xhtml+xml; charset=UTF-8" http-equiv="Content-Type" />
<meta content="SnuggleTeX" name="Generator" />
<meta content="SnuggleTeX Documentation" name="description" />
<meta content="David McKain" name="author" />
<meta content="The University of Edinburgh" name="publisher" />
<link href="/snuggletex-webapp-1.2.2/includes/core.css" rel="stylesheet" />
<link href="/snuggletex-webapp-1.2.2/includes/webapp.css" rel="stylesheet" />
<link href="/snuggletex-webapp-1.2.2/includes/snuggletex.css" rel="stylesheet" />
<link href="/snuggletex-webapp-1.2.2/includes/jquery-ui-1.7.2.custom.css"
rel="stylesheet" /><script src="/snuggletex-webapp-1.2.2/includes/jquery.js" type="text/javascript"></script><script src="/snuggletex-webapp-1.2.2/includes/jquery-ui-1.7.2.custom.js"
type="text/javascript"></script><script src="/snuggletex-webapp-1.2.2/includes/webapp.js" type="text/javascript"></script><title>SnuggleTeX - ASCIIMathML Enrichment Demo</title><script src="/snuggletex-webapp-1.2.2/includes/ASCIIMathML.js" type="text/javascript"></script><script src="/snuggletex-webapp-1.2.2/includes/ASCIIMathMLwidget.js"
type="text/javascript"></script></head>
<body id="asciiMathMLUpConversionDemo">
<table border="0" cellpadding="0" cellspacing="0" id="header" width="100%">
<tr>
<td align="left" id="logo" valign="top"><a class="headertext" href="http://www.ed.ac.uk"><img alt="The University of Edinburgh" height="84"
src="/snuggletex-webapp-1.2.2/includes/uoe_logo.jpg"
width="84" /></a></td>
<td align="left">
<h3>THE UNIVERSITY of EDINBURGH</h3>
<h1>SCHOOL OF PHYSICS AND ASTRONOMY</h1>
</td>
</tr>
</table>
<h1 id="location"><a href="/snuggletex-webapp-1.2.2">SnuggleTeX (1.2.2)</a></h1>
<div id="content">
<div id="skipnavigation"><a href="#maincontent">Skip Navigation</a></div>
<div id="navigation">
<div id="navinner">
<h2>About SnuggleTeX</h2>
<ul>
<li><a href="/snuggletex-webapp-1.2.2/documentation/overview-and-features.html">Overview &amp; Features</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/use-cases.html">Why Use SnuggleTeX?</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/license.html">License</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/release-notes.html">Release Notes</a></li>
</ul>
<h2>Demos &amp; Samples</h2>
<ul>
<li><a href="/snuggletex-webapp-1.2.2/MathInputDemo">Simple Math Input Demo</a></li>
<li><a href="/snuggletex-webapp-1.2.2/FullLaTeXInputDemo">Full LaTeX Input Demo</a></li>
<li><a href="/snuggletex-webapp-1.2.2/UpConversionDemo">MathML Semantic Enrichment Demo</a></li>
<li><a class="selected" href="/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo">ASCIIMathML Enrichment Demo</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/web-output-samples.html">Web Output Samples</a></li>
</ul>
<h2>User Guide</h2>
<ul>
<li><a href="/snuggletex-webapp-1.2.2/documentation/getting-snuggletex.html">Getting SnuggleTeX</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/software-requirements.html">Software Requirements</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/your-classpath.html">Setting up Your ClassPath</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/examples.html">Examples</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/basic-usage.html">Basic Usage</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/inputs.html">Parsing LaTeX Inputs</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/xml-or-dom-output.html">Creating XML String or DOM Outputs</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/web-output.html">Creating Web Pages</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/error-reporting.html">Error Reporting</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/supported-latex.html">Supported LaTeX</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/advanced-usage.html">Advanced Usage</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/semantic-enrichment.html">Semantic Enrichment</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/migrating-from-older-versions.html">Migrating from older versions</a></li>
<li><a href="http://snuggletex.sourceforge.net/maven/apidocs/index.html">API Documentation<span class="extlink"> </span></a></li>
<li><a href="http://snuggletex.sourceforge.net/maven/xref/index.html">Source Code Cross-Reference<span class="extlink"> </span></a></li>
</ul>
<h2>SnuggleTeX Project Links</h2>
<ul>
<li><a href="http://sourceforge.net/project/showfiles.php?group_id=221375">Download from SourceForge.net<span class="extlink"> </span></a></li>
<li><a href="http://sourceforge.net/projects/snuggletex/">SnuggleTeX on SourceForge.net<span class="extlink"> </span></a></li>
<li><a href="http://snuggletex.sourceforge.net/maven/">SnuggleTeX Maven Developer Reports<span class="extlink"> </span></a></li>
<li><a href="https://www.wiki.ed.ac.uk/display/Physics/SnuggleTeX">SnuggleTeX Wiki<span class="extlink"> </span></a></li>
</ul>
</div>
</div>
<div id="maincontent">
<div id="popup"></div>
<div id="maininner">
<h2>ASCIIMathML Enrichment Demo</h2>
<h3>Input</h3>
<p>
This demo is similar to the
<a href="/snuggletex-webapp-1.2.2/UpConversionDemo">MathML Semantic Enrichnment Demo</a>
but uses
<a href="http://www1.chapman.edu/~jipsen/asciimath.html">ASCIIMathML</a> as
an alternative input format, which provides real-time feedback as you
type but can often generate MathML with odd semantics in it.
SnuggleTeX includes some functionality that can convert this raw MathML into
something equivalent to its own MathML output, thereby allowing you to
<a href="/snuggletex-webapp-1.2.2/documentation/semantic-enrichment.html">semantically enrich</a> it in
certain simple cases, making ASCIIMathML a possibly viable input format
for simple semantic maths.
</p>
<p>
To try the demo, simply enter some ASCIIMathML into the box below.
You should see a real time preview of this while you type.
Then hit <tt>Go!</tt> to use SnuggleTeX to semantically enrich your
input.
</p>
<form action="/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo" class="input"
method="post">
<div class="inputBox">
ASCIIMath Input:
<input id="asciiMathInput" name="asciiMathInput" type="text" value="" /><input id="asciiMathML" name="asciiMathML" type="hidden" /><input type="submit" value="Go!" /></div>
</form>
<h3>Live Preview</h3>
<p>
This is a MathML rendering of your input, generated by ASCIIMathML as you type.
</p>
<div class="result">
<div id="preview"> </div>
</div>
<p>
This is the underlying MathML source generated by ASCIIMathML, again updated in real time.
</p>
<div class="result"><pre id="previewSource"> </pre></div><script type="text/javascript">
registerASCIIMathMLInputWidget('asciiMathInput', 'preview', 'asciiMathML', 'previewSource');
var inputChanged = false;
// Hide any existing output stuff in page on first change, as it will no longer be in sync
jQuery(document).ready(function() {
jQuery('#asciiMathInput').bind('keydown', function() {
if (!inputChanged) jQuery('.outputContainer').css('visibility', 'hidden');
inputChanged = true;
});
});
</script><div class="outputContainer">
<h3>Enhanced Presentation MathML</h3>
<p>
This shows the result of attempting to enrich the raw Presentation MathML
generated by ASCIIMathML:
</p><pre class="result">&lt;math xmlns="http://www.w3.org/1998/Math/MathML"&gt;
&lt;mrow&gt;
&lt;mrow&gt;
&lt;mrow&gt;
&lt;mi&gt;cos&lt;/mi&gt;
&lt;mo&gt;&amp;ApplyFunction;&lt;/mo&gt;
&lt;mfenced close=")" open="("&gt;
&lt;mi&gt;theta&lt;/mi&gt;
&lt;/mfenced&gt;
&lt;/mrow&gt;
&lt;mo&gt;&amp;sdot;&lt;/mo&gt;
&lt;mfenced close="]" open="["&gt;
&lt;mtable&gt;
&lt;mtr&gt;
&lt;mtd&gt;
&lt;mn&gt;1&lt;/mn&gt;
&lt;/mtd&gt;
&lt;mtd&gt;
&lt;mn&gt;0&lt;/mn&gt;
&lt;/mtd&gt;
&lt;/mtr&gt;
&lt;mtr&gt;
&lt;mtd&gt;
&lt;mn&gt;0&lt;/mn&gt;
&lt;/mtd&gt;
&lt;mtd&gt;
&lt;mn&gt;1&lt;/mn&gt;
&lt;/mtd&gt;
&lt;/mtr&gt;
&lt;/mtable&gt;
&lt;/mfenced&gt;
&lt;/mrow&gt;
&lt;mo&gt;+&lt;/mo&gt;
&lt;mrow&gt;
&lt;mi&gt;i&lt;/mi&gt;
&lt;mo&gt;&amp;sdot;&lt;/mo&gt;
&lt;mrow&gt;
&lt;mi&gt;sin&lt;/mi&gt;
&lt;mo&gt;&amp;ApplyFunction;&lt;/mo&gt;
&lt;mfenced close=")" open="("&gt;
&lt;mi&gt;theta&lt;/mi&gt;
&lt;/mfenced&gt;
&lt;/mrow&gt;
&lt;mo&gt;&amp;sdot;&lt;/mo&gt;
&lt;mfenced close="]" open="["&gt;
&lt;mtable&gt;
&lt;mtr&gt;
&lt;mtd&gt;
&lt;mn&gt;0&lt;/mn&gt;
&lt;/mtd&gt;
&lt;mtd&gt;
&lt;mn&gt;1&lt;/mn&gt;
&lt;/mtd&gt;
&lt;/mtr&gt;
&lt;mtr&gt;
&lt;mtd&gt;
&lt;mn&gt;1&lt;/mn&gt;
&lt;/mtd&gt;
&lt;mtd&gt;
&lt;mn&gt;0&lt;/mn&gt;
&lt;/mtd&gt;
&lt;/mtr&gt;
&lt;/mtable&gt;
&lt;/mfenced&gt;
&lt;/mrow&gt;
&lt;/mrow&gt;
&lt;/math&gt;</pre><h3>Content MathML</h3>
<p>
This shows the result of an attempted
<a href="documentation/content-mathml.html">conversion to Content MathML</a>:
</p><pre class="result">&lt;math xmlns="http://www.w3.org/1998/Math/MathML"&gt;
&lt;apply&gt;
&lt;plus/&gt;
&lt;apply&gt;
&lt;times/&gt;
&lt;apply&gt;
&lt;cos/&gt;
&lt;ci&gt;theta&lt;/ci&gt;
&lt;/apply&gt;
&lt;list&gt;
&lt;matrix&gt;
&lt;vector&gt;
&lt;cn&gt;1&lt;/cn&gt;
&lt;cn&gt;0&lt;/cn&gt;
&lt;/vector&gt;
&lt;vector&gt;
&lt;cn&gt;0&lt;/cn&gt;
&lt;cn&gt;1&lt;/cn&gt;
&lt;/vector&gt;
&lt;/matrix&gt;
&lt;/list&gt;
&lt;/apply&gt;
&lt;apply&gt;
&lt;times/&gt;
&lt;ci&gt;i&lt;/ci&gt;
&lt;apply&gt;
&lt;sin/&gt;
&lt;ci&gt;theta&lt;/ci&gt;
&lt;/apply&gt;
&lt;list&gt;
&lt;matrix&gt;
&lt;vector&gt;
&lt;cn&gt;0&lt;/cn&gt;
&lt;cn&gt;1&lt;/cn&gt;
&lt;/vector&gt;
&lt;vector&gt;
&lt;cn&gt;1&lt;/cn&gt;
&lt;cn&gt;0&lt;/cn&gt;
&lt;/vector&gt;
&lt;/matrix&gt;
&lt;/list&gt;
&lt;/apply&gt;
&lt;/apply&gt;
&lt;/math&gt;</pre><h3>Maxima Input Form</h3>
<p>
This shows the result of an attempted
<a href="documentation/maxima-input.html">conversion to Maxima Input syntax</a>:
</p>
<p>
The conversion from Content MathML to Maxima Input was not successful for
this input.
</p>
<table class="failures">
<thead>
<tr>
<th>Failure Code</th>
<th>Message</th>
<th>XPath</th>
<th>Context</th>
</tr>
</thead>
<tbody>
<tr>
<td><a href="/snuggletex-webapp-1.2.2/documentation/error-codes.html#UMFG00">UMFG00</a></td>
<td>Content MathML element matrix not supported</td>
<td>apply[1]/apply[1]/list[1]/matrix[1]</td>
<td><pre>&lt;matrix&gt;
&lt;vector&gt;
&lt;cn&gt;1&lt;/cn&gt;
&lt;cn&gt;0&lt;/cn&gt;
&lt;/vector&gt;
&lt;vector&gt;
&lt;cn&gt;0&lt;/cn&gt;
&lt;cn&gt;1&lt;/cn&gt;
&lt;/vector&gt;
&lt;/matrix&gt;</pre></td>
</tr>
<tr>
<td><a href="/snuggletex-webapp-1.2.2/documentation/error-codes.html#UMFG00">UMFG00</a></td>
<td>Content MathML element matrix not supported</td>
<td>apply[1]/apply[2]/list[1]/matrix[1]</td>
<td><pre>&lt;matrix&gt;
&lt;vector&gt;
&lt;cn&gt;0&lt;/cn&gt;
&lt;cn&gt;1&lt;/cn&gt;
&lt;/vector&gt;
&lt;vector&gt;
&lt;cn&gt;1&lt;/cn&gt;
&lt;cn&gt;0&lt;/cn&gt;
&lt;/vector&gt;
&lt;/matrix&gt;</pre></td>
</tr>
</tbody>
</table>
<h3>MathML Parallel Markup</h3>
<p>
This shows the enhanced Presentation MathML with other forms encapsulated
as annotations:
</p><pre class="result">&lt;math xmlns="http://www.w3.org/1998/Math/MathML"&gt;
&lt;semantics&gt;
&lt;mrow&gt;
&lt;mrow&gt;
&lt;mrow&gt;
&lt;mi&gt;cos&lt;/mi&gt;
&lt;mo&gt;&amp;ApplyFunction;&lt;/mo&gt;
&lt;mfenced close=")" open="("&gt;
&lt;mi&gt;theta&lt;/mi&gt;
&lt;/mfenced&gt;
&lt;/mrow&gt;
&lt;mo&gt;&amp;sdot;&lt;/mo&gt;
&lt;mfenced close="]" open="["&gt;
&lt;mtable&gt;
&lt;mtr&gt;
&lt;mtd&gt;
&lt;mn&gt;1&lt;/mn&gt;
&lt;/mtd&gt;
&lt;mtd&gt;
&lt;mn&gt;0&lt;/mn&gt;
&lt;/mtd&gt;
&lt;/mtr&gt;
&lt;mtr&gt;
&lt;mtd&gt;
&lt;mn&gt;0&lt;/mn&gt;
&lt;/mtd&gt;
&lt;mtd&gt;
&lt;mn&gt;1&lt;/mn&gt;
&lt;/mtd&gt;
&lt;/mtr&gt;
&lt;/mtable&gt;
&lt;/mfenced&gt;
&lt;/mrow&gt;
&lt;mo&gt;+&lt;/mo&gt;
&lt;mrow&gt;
&lt;mi&gt;i&lt;/mi&gt;
&lt;mo&gt;&amp;sdot;&lt;/mo&gt;
&lt;mrow&gt;
&lt;mi&gt;sin&lt;/mi&gt;
&lt;mo&gt;&amp;ApplyFunction;&lt;/mo&gt;
&lt;mfenced close=")" open="("&gt;
&lt;mi&gt;theta&lt;/mi&gt;
&lt;/mfenced&gt;
&lt;/mrow&gt;
&lt;mo&gt;&amp;sdot;&lt;/mo&gt;
&lt;mfenced close="]" open="["&gt;
&lt;mtable&gt;
&lt;mtr&gt;
&lt;mtd&gt;
&lt;mn&gt;0&lt;/mn&gt;
&lt;/mtd&gt;
&lt;mtd&gt;
&lt;mn&gt;1&lt;/mn&gt;
&lt;/mtd&gt;
&lt;/mtr&gt;
&lt;mtr&gt;
&lt;mtd&gt;
&lt;mn&gt;1&lt;/mn&gt;
&lt;/mtd&gt;
&lt;mtd&gt;
&lt;mn&gt;0&lt;/mn&gt;
&lt;/mtd&gt;
&lt;/mtr&gt;
&lt;/mtable&gt;
&lt;/mfenced&gt;
&lt;/mrow&gt;
&lt;/mrow&gt;
&lt;annotation-xml encoding="MathML-Content"&gt;
&lt;apply&gt;
&lt;plus/&gt;
&lt;apply&gt;
&lt;times/&gt;
&lt;apply&gt;
&lt;cos/&gt;
&lt;ci&gt;theta&lt;/ci&gt;
&lt;/apply&gt;
&lt;list&gt;
&lt;matrix&gt;
&lt;vector&gt;
&lt;cn&gt;1&lt;/cn&gt;
&lt;cn&gt;0&lt;/cn&gt;
&lt;/vector&gt;
&lt;vector&gt;
&lt;cn&gt;0&lt;/cn&gt;
&lt;cn&gt;1&lt;/cn&gt;
&lt;/vector&gt;
&lt;/matrix&gt;
&lt;/list&gt;
&lt;/apply&gt;
&lt;apply&gt;
&lt;times/&gt;
&lt;ci&gt;i&lt;/ci&gt;
&lt;apply&gt;
&lt;sin/&gt;
&lt;ci&gt;theta&lt;/ci&gt;
&lt;/apply&gt;
&lt;list&gt;
&lt;matrix&gt;
&lt;vector&gt;
&lt;cn&gt;0&lt;/cn&gt;
&lt;cn&gt;1&lt;/cn&gt;
&lt;/vector&gt;
&lt;vector&gt;
&lt;cn&gt;1&lt;/cn&gt;
&lt;cn&gt;0&lt;/cn&gt;
&lt;/vector&gt;
&lt;/matrix&gt;
&lt;/list&gt;
&lt;/apply&gt;
&lt;/apply&gt;
&lt;/annotation-xml&gt;
&lt;annotation encoding="ASCIIMathInput"/&gt;
&lt;annotation-xml encoding="Maxima-upconversion-failures"&gt;
&lt;s:fail xmlns:s="http://www.ph.ed.ac.uk/snuggletex" code="UMFG00"
message="Content MathML element matrix not supported"&gt;
&lt;s:arg&gt;matrix&lt;/s:arg&gt;
&lt;s:xpath&gt;apply[1]/apply[1]/list[1]/matrix[1]&lt;/s:xpath&gt;
&lt;s:context&gt;
&lt;matrix&gt;
&lt;vector&gt;
&lt;cn&gt;1&lt;/cn&gt;
&lt;cn&gt;0&lt;/cn&gt;
&lt;/vector&gt;
&lt;vector&gt;
&lt;cn&gt;0&lt;/cn&gt;
&lt;cn&gt;1&lt;/cn&gt;
&lt;/vector&gt;
&lt;/matrix&gt;
&lt;/s:context&gt;
&lt;/s:fail&gt;
&lt;s:fail xmlns:s="http://www.ph.ed.ac.uk/snuggletex" code="UMFG00"
message="Content MathML element matrix not supported"&gt;
&lt;s:arg&gt;matrix&lt;/s:arg&gt;
&lt;s:xpath&gt;apply[1]/apply[2]/list[1]/matrix[1]&lt;/s:xpath&gt;
&lt;s:context&gt;
&lt;matrix&gt;
&lt;vector&gt;
&lt;cn&gt;0&lt;/cn&gt;
&lt;cn&gt;1&lt;/cn&gt;
&lt;/vector&gt;
&lt;vector&gt;
&lt;cn&gt;1&lt;/cn&gt;
&lt;cn&gt;0&lt;/cn&gt;
&lt;/vector&gt;
&lt;/matrix&gt;
&lt;/s:context&gt;
&lt;/s:fail&gt;
&lt;/annotation-xml&gt;
&lt;/semantics&gt;
&lt;/math&gt;</pre></div>
</div>
</div>
</div>
<div id="copyright">
<p>
SnuggleTeX Release 1.2.2 —
<a href="/snuggletex-webapp-1.2.2/documentation/release-notes.html">Release Notes</a><br />
Copyright © 2009
<a href="http://www.ph.ed.ac.uk">The School of Physics and Astronomy</a>,
<a href="http://www.ed.ac.uk">The University of Edinburgh</a>.
<br />
For more information, contact
<a href="http://www.ph.ed.ac.uk/elearning/contacts/#dmckain">David McKain</a>.
</p>
<p>
The University of Edinburgh is a charitable body, registered in Scotland,
with registration number SC005336.
</p>
</div>
</body>
</html>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE html
PUBLIC "-//W3C//DTD XHTML 1.1 plus MathML 2.0//EN" "http://www.w3.org/Math/DTD/mathml2/xhtml-math11-f.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
<head>
<meta content="application/xhtml+xml; charset=UTF-8" http-equiv="Content-Type" />
<meta content="SnuggleTeX" name="Generator" />
<meta content="SnuggleTeX Documentation" name="description" />
<meta content="David McKain" name="author" />
<meta content="The University of Edinburgh" name="publisher" />
<link href="/snuggletex-webapp-1.2.2/includes/core.css" rel="stylesheet" />
<link href="/snuggletex-webapp-1.2.2/includes/webapp.css" rel="stylesheet" />
<link href="/snuggletex-webapp-1.2.2/includes/snuggletex.css" rel="stylesheet" />
<link href="/snuggletex-webapp-1.2.2/includes/jquery-ui-1.7.2.custom.css"
rel="stylesheet" /><script src="/snuggletex-webapp-1.2.2/includes/jquery.js" type="text/javascript"></script><script src="/snuggletex-webapp-1.2.2/includes/jquery-ui-1.7.2.custom.js"
type="text/javascript"></script><script src="/snuggletex-webapp-1.2.2/includes/webapp.js" type="text/javascript"></script><title>SnuggleTeX - ASCIIMathML Enrichment Demo</title><script src="/snuggletex-webapp-1.2.2/includes/ASCIIMathML.js" type="text/javascript"></script><script src="/snuggletex-webapp-1.2.2/includes/ASCIIMathMLwidget.js"
type="text/javascript"></script></head>
<body id="asciiMathMLUpConversionDemo">
<table border="0" cellpadding="0" cellspacing="0" id="header" width="100%">
<tr>
<td align="left" id="logo" valign="top"><a class="headertext" href="http://www.ed.ac.uk"><img alt="The University of Edinburgh" height="84"
src="/snuggletex-webapp-1.2.2/includes/uoe_logo.jpg"
width="84" /></a></td>
<td align="left">
<h3>THE UNIVERSITY of EDINBURGH</h3>
<h1>SCHOOL OF PHYSICS AND ASTRONOMY</h1>
</td>
</tr>
</table>
<h1 id="location"><a href="/snuggletex-webapp-1.2.2">SnuggleTeX (1.2.2)</a></h1>
<div id="content">
<div id="skipnavigation"><a href="#maincontent">Skip Navigation</a></div>
<div id="navigation">
<div id="navinner">
<h2>About SnuggleTeX</h2>
<ul>
<li><a href="/snuggletex-webapp-1.2.2/documentation/overview-and-features.html">Overview &amp; Features</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/use-cases.html">Why Use SnuggleTeX?</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/license.html">License</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/release-notes.html">Release Notes</a></li>
</ul>
<h2>Demos &amp; Samples</h2>
<ul>
<li><a href="/snuggletex-webapp-1.2.2/MathInputDemo">Simple Math Input Demo</a></li>
<li><a href="/snuggletex-webapp-1.2.2/FullLaTeXInputDemo">Full LaTeX Input Demo</a></li>
<li><a href="/snuggletex-webapp-1.2.2/UpConversionDemo">MathML Semantic Enrichment Demo</a></li>
<li><a class="selected" href="/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo">ASCIIMathML Enrichment Demo</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/web-output-samples.html">Web Output Samples</a></li>
</ul>
<h2>User Guide</h2>
<ul>
<li><a href="/snuggletex-webapp-1.2.2/documentation/getting-snuggletex.html">Getting SnuggleTeX</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/software-requirements.html">Software Requirements</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/your-classpath.html">Setting up Your ClassPath</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/examples.html">Examples</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/basic-usage.html">Basic Usage</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/inputs.html">Parsing LaTeX Inputs</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/xml-or-dom-output.html">Creating XML String or DOM Outputs</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/web-output.html">Creating Web Pages</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/error-reporting.html">Error Reporting</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/supported-latex.html">Supported LaTeX</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/advanced-usage.html">Advanced Usage</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/semantic-enrichment.html">Semantic Enrichment</a></li>
<li><a href="/snuggletex-webapp-1.2.2/documentation/migrating-from-older-versions.html">Migrating from older versions</a></li>
<li><a href="http://snuggletex.sourceforge.net/maven/apidocs/index.html">API Documentation<span class="extlink"> </span></a></li>
<li><a href="http://snuggletex.sourceforge.net/maven/xref/index.html">Source Code Cross-Reference<span class="extlink"> </span></a></li>
</ul>
<h2>SnuggleTeX Project Links</h2>
<ul>
<li><a href="http://sourceforge.net/project/showfiles.php?group_id=221375">Download from SourceForge.net<span class="extlink"> </span></a></li>
<li><a href="http://sourceforge.net/projects/snuggletex/">SnuggleTeX on SourceForge.net<span class="extlink"> </span></a></li>
<li><a href="http://snuggletex.sourceforge.net/maven/">SnuggleTeX Maven Developer Reports<span class="extlink"> </span></a></li>
<li><a href="https://www.wiki.ed.ac.uk/display/Physics/SnuggleTeX">SnuggleTeX Wiki<span class="extlink"> </span></a></li>
</ul>
</div>
</div>
<div id="maincontent">
<div id="popup"></div>
<div id="maininner">
<h2>ASCIIMathML Enrichment Demo</h2>
<h3>Input</h3>
<p>
This demo is similar to the
<a href="/snuggletex-webapp-1.2.2/UpConversionDemo">MathML Semantic Enrichnment Demo</a>
but uses
<a href="http://www1.chapman.edu/~jipsen/asciimath.html">ASCIIMathML</a> as
an alternative input format, which provides real-time feedback as you
type but can often generate MathML with odd semantics in it.
SnuggleTeX includes some functionality that can convert this raw MathML into
something equivalent to its own MathML output, thereby allowing you to
<a href="/snuggletex-webapp-1.2.2/documentation/semantic-enrichment.html">semantically enrich</a> it in
certain simple cases, making ASCIIMathML a possibly viable input format
for simple semantic maths.
</p>
<p>
To try the demo, simply enter some ASCIIMathML into the box below.
You should see a real time preview of this while you type.
Then hit <tt>Go!</tt> to use SnuggleTeX to semantically enrich your
input.
</p>
<form action="/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo" class="input"
method="post">
<div class="inputBox">
ASCIIMath Input:
<input id="asciiMathInput" name="asciiMathInput" type="text" value="" /><input id="asciiMathML" name="asciiMathML" type="hidden" /><input type="submit" value="Go!" /></div>
</form>
<h3>Live Preview</h3>
<p>
This is a MathML rendering of your input, generated by ASCIIMathML as you type.
</p>
<div class="result">
<div id="preview"> </div>
</div>
<p>
This is the underlying MathML source generated by ASCIIMathML, again updated in real time.
</p>
<div class="result"><pre id="previewSource"> </pre></div><script type="text/javascript">
registerASCIIMathMLInputWidget('asciiMathInput', 'preview', 'asciiMathML', 'previewSource');
var inputChanged = false;
// Hide any existing output stuff in page on first change, as it will no longer be in sync
jQuery(document).ready(function() {
jQuery('#asciiMathInput').bind('keydown', function() {
if (!inputChanged) jQuery('.outputContainer').css('visibility', 'hidden');
inputChanged = true;
});
});
</script><div class="outputContainer">
<h3>Enhanced Presentation MathML</h3>
<p>
This shows the result of attempting to enrich the raw Presentation MathML
generated by ASCIIMathML:
</p><pre class="result">&lt;math xmlns="http://www.w3.org/1998/Math/MathML"&gt;
&lt;mn&gt;2&lt;/mn&gt;
&lt;/math&gt;</pre><h3>Content MathML</h3>
<p>
This shows the result of an attempted
<a href="documentation/content-mathml.html">conversion to Content MathML</a>:
</p><pre class="result">&lt;math xmlns="http://www.w3.org/1998/Math/MathML"&gt;
&lt;cn&gt;2&lt;/cn&gt;
&lt;/math&gt;</pre><h3>Maxima Input Form</h3>
<p>
This shows the result of an attempted
<a href="documentation/maxima-input.html">conversion to Maxima Input syntax</a>:
</p><pre class="result">2</pre><h3>MathML Parallel Markup</h3>
<p>
This shows the enhanced Presentation MathML with other forms encapsulated
as annotations:
</p><pre class="result">&lt;math xmlns="http://www.w3.org/1998/Math/MathML"&gt;
&lt;semantics&gt;
&lt;mn&gt;2&lt;/mn&gt;
&lt;annotation-xml encoding="MathML-Content"&gt;
&lt;cn&gt;2&lt;/cn&gt;
&lt;/annotation-xml&gt;
&lt;annotation encoding="ASCIIMathInput"/&gt;
&lt;annotation encoding="Maxima"&gt;2&lt;/annotation&gt;
&lt;/semantics&gt;
&lt;/math&gt;</pre></div>
</div>
</div>
</div>
<div id="copyright">
<p>
SnuggleTeX Release 1.2.2 —
<a href="/snuggletex-webapp-1.2.2/documentation/release-notes.html">Release Notes</a><br />
Copyright © 2009
<a href="http://www.ph.ed.ac.uk">The School of Physics and Astronomy</a>,
<a href="http://www.ed.ac.uk">The University of Edinburgh</a>.
<br />
For more information, contact
<a href="http://www.ph.ed.ac.uk/elearning/contacts/#dmckain">David McKain</a>.
</p>
<p>
The University of Edinburgh is a charitable body, registered in Scotland,
with registration number SC005336.
</p>
</div>
</body>
</html>
\ No newline at end of file
...@@ -6,12 +6,15 @@ import json ...@@ -6,12 +6,15 @@ import json
import mock import mock
from capa.capa_problem import LoncapaProblem
from .response_xml_factory import StringResponseXMLFactory, CustomResponseXMLFactory from .response_xml_factory import StringResponseXMLFactory, CustomResponseXMLFactory
from . import test_system from . import test_system, new_loncapa_problem
class CapaHtmlRenderTest(unittest.TestCase): class CapaHtmlRenderTest(unittest.TestCase):
def setUp(self):
super(CapaHtmlRenderTest, self).setUp()
self.system = test_system()
def test_blank_problem(self): def test_blank_problem(self):
""" """
It's important that blank problems don't break, since that's It's important that blank problems don't break, since that's
...@@ -20,7 +23,7 @@ class CapaHtmlRenderTest(unittest.TestCase): ...@@ -20,7 +23,7 @@ class CapaHtmlRenderTest(unittest.TestCase):
xml_str = "<problem> </problem>" xml_str = "<problem> </problem>"
# Create the problem # Create the problem
problem = LoncapaProblem(xml_str, '1', system=test_system) problem = new_loncapa_problem(xml_str)
# Render the HTML # Render the HTML
rendered_html = etree.XML(problem.get_html()) rendered_html = etree.XML(problem.get_html())
...@@ -39,7 +42,7 @@ class CapaHtmlRenderTest(unittest.TestCase): ...@@ -39,7 +42,7 @@ class CapaHtmlRenderTest(unittest.TestCase):
""") """)
# Create the problem # Create the problem
problem = LoncapaProblem(xml_str, '1', system=test_system) problem = new_loncapa_problem(xml_str, system=self.system)
# Render the HTML # Render the HTML
rendered_html = etree.XML(problem.get_html()) rendered_html = etree.XML(problem.get_html())
...@@ -49,9 +52,6 @@ class CapaHtmlRenderTest(unittest.TestCase): ...@@ -49,9 +52,6 @@ class CapaHtmlRenderTest(unittest.TestCase):
self.assertEqual(test_element.tag, "test") self.assertEqual(test_element.tag, "test")
self.assertEqual(test_element.text, "Test include") self.assertEqual(test_element.text, "Test include")
def test_process_outtext(self): def test_process_outtext(self):
# Generate some XML with <startouttext /> and <endouttext /> # Generate some XML with <startouttext /> and <endouttext />
xml_str = textwrap.dedent(""" xml_str = textwrap.dedent("""
...@@ -61,7 +61,7 @@ class CapaHtmlRenderTest(unittest.TestCase): ...@@ -61,7 +61,7 @@ class CapaHtmlRenderTest(unittest.TestCase):
""") """)
# Create the problem # Create the problem
problem = LoncapaProblem(xml_str, '1', system=test_system) problem = new_loncapa_problem(xml_str)
# Render the HTML # Render the HTML
rendered_html = etree.XML(problem.get_html()) rendered_html = etree.XML(problem.get_html())
...@@ -80,7 +80,7 @@ class CapaHtmlRenderTest(unittest.TestCase): ...@@ -80,7 +80,7 @@ class CapaHtmlRenderTest(unittest.TestCase):
""") """)
# Create the problem # Create the problem
problem = LoncapaProblem(xml_str, '1', system=test_system) problem = new_loncapa_problem(xml_str)
# Render the HTML # Render the HTML
rendered_html = etree.XML(problem.get_html()) rendered_html = etree.XML(problem.get_html())
...@@ -98,7 +98,7 @@ class CapaHtmlRenderTest(unittest.TestCase): ...@@ -98,7 +98,7 @@ class CapaHtmlRenderTest(unittest.TestCase):
""") """)
# Create the problem # Create the problem
problem = LoncapaProblem(xml_str, '1', system=test_system) problem = new_loncapa_problem(xml_str)
# Render the HTML # Render the HTML
rendered_html = etree.XML(problem.get_html()) rendered_html = etree.XML(problem.get_html())
...@@ -117,11 +117,12 @@ class CapaHtmlRenderTest(unittest.TestCase): ...@@ -117,11 +117,12 @@ class CapaHtmlRenderTest(unittest.TestCase):
xml_str = StringResponseXMLFactory().build_xml(**kwargs) xml_str = StringResponseXMLFactory().build_xml(**kwargs)
# Mock out the template renderer # Mock out the template renderer
test_system.render_template = mock.Mock() the_system = test_system()
test_system.render_template.return_value = "<div>Input Template Render</div>" the_system.render_template = mock.Mock()
the_system.render_template.return_value = "<div>Input Template Render</div>"
# Create the problem and render the HTML # Create the problem and render the HTML
problem = LoncapaProblem(xml_str, '1', system=test_system) problem = new_loncapa_problem(xml_str, system=the_system)
rendered_html = etree.XML(problem.get_html()) rendered_html = etree.XML(problem.get_html())
# Expect problem has been turned into a <div> # Expect problem has been turned into a <div>
...@@ -166,7 +167,7 @@ class CapaHtmlRenderTest(unittest.TestCase): ...@@ -166,7 +167,7 @@ class CapaHtmlRenderTest(unittest.TestCase):
mock.call('textline.html', expected_textline_context), mock.call('textline.html', expected_textline_context),
mock.call('solutionspan.html', expected_solution_context)] mock.call('solutionspan.html', expected_solution_context)]
self.assertEqual(test_system.render_template.call_args_list, self.assertEqual(the_system.render_template.call_args_list,
expected_calls) expected_calls)
...@@ -184,7 +185,7 @@ class CapaHtmlRenderTest(unittest.TestCase): ...@@ -184,7 +185,7 @@ class CapaHtmlRenderTest(unittest.TestCase):
xml_str = CustomResponseXMLFactory().build_xml(**kwargs) xml_str = CustomResponseXMLFactory().build_xml(**kwargs)
# Create the problem and render the html # Create the problem and render the html
problem = LoncapaProblem(xml_str, '1', system=test_system) problem = new_loncapa_problem(xml_str)
# Grade the problem # Grade the problem
correctmap = problem.grade_answers({'1_2_1': 'test'}) correctmap = problem.grade_answers({'1_2_1': 'test'})
...@@ -219,7 +220,7 @@ class CapaHtmlRenderTest(unittest.TestCase): ...@@ -219,7 +220,7 @@ class CapaHtmlRenderTest(unittest.TestCase):
""") """)
# Create the problem and render the HTML # Create the problem and render the HTML
problem = LoncapaProblem(xml_str, '1', system=test_system) problem = new_loncapa_problem(xml_str)
rendered_html = etree.XML(problem.get_html()) rendered_html = etree.XML(problem.get_html())
# Expect that the variable $test has been replaced with its value # Expect that the variable $test has been replaced with its value
...@@ -227,7 +228,7 @@ class CapaHtmlRenderTest(unittest.TestCase): ...@@ -227,7 +228,7 @@ class CapaHtmlRenderTest(unittest.TestCase):
self.assertEqual(span_element.get('attr'), "TEST") self.assertEqual(span_element.get('attr'), "TEST")
def _create_test_file(self, path, content_str): def _create_test_file(self, path, content_str):
test_fp = test_system.filestore.open(path, "w") test_fp = self.system.filestore.open(path, "w")
test_fp.write(content_str) test_fp.write(content_str)
test_fp.close() test_fp.close()
......
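These tests now build problems through two helpers imported from the tests package: test_system(), which appears to be a factory returning a fresh mocked system per call, and new_loncapa_problem(). Their definitions are outside this diff; the following is only an assumed sketch of their shape, inferred from the call sites above:

    from capa.capa_problem import LoncapaProblem

    def new_loncapa_problem(xml, system=None):
        # Assumed helper: wrap the XML in a LoncapaProblem with id '1',
        # using a fresh test system unless one is supplied.
        return LoncapaProblem(xml, '1', system=system or test_system())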
...@@ -45,7 +45,7 @@ class OptionInputTest(unittest.TestCase): ...@@ -45,7 +45,7 @@ class OptionInputTest(unittest.TestCase):
state = {'value': 'Down', state = {'value': 'Down',
'id': 'sky_input', 'id': 'sky_input',
'status': 'answered'} 'status': 'answered'}
option_input = lookup_tag('optioninput')(test_system, element, state) option_input = lookup_tag('optioninput')(test_system(), element, state)
context = option_input._get_render_context() context = option_input._get_render_context()
...@@ -92,7 +92,7 @@ class ChoiceGroupTest(unittest.TestCase): ...@@ -92,7 +92,7 @@ class ChoiceGroupTest(unittest.TestCase):
'id': 'sky_input', 'id': 'sky_input',
'status': 'answered'} 'status': 'answered'}
the_input = lookup_tag(tag)(test_system, element, state) the_input = lookup_tag(tag)(test_system(), element, state)
context = the_input._get_render_context() context = the_input._get_render_context()
...@@ -142,7 +142,7 @@ class JavascriptInputTest(unittest.TestCase): ...@@ -142,7 +142,7 @@ class JavascriptInputTest(unittest.TestCase):
element = etree.fromstring(xml_str) element = etree.fromstring(xml_str)
state = {'value': '3', } state = {'value': '3', }
the_input = lookup_tag('javascriptinput')(test_system, element, state) the_input = lookup_tag('javascriptinput')(test_system(), element, state)
context = the_input._get_render_context() context = the_input._get_render_context()
...@@ -170,7 +170,7 @@ class TextLineTest(unittest.TestCase): ...@@ -170,7 +170,7 @@ class TextLineTest(unittest.TestCase):
element = etree.fromstring(xml_str) element = etree.fromstring(xml_str)
state = {'value': 'BumbleBee', } state = {'value': 'BumbleBee', }
the_input = lookup_tag('textline')(test_system, element, state) the_input = lookup_tag('textline')(test_system(), element, state)
context = the_input._get_render_context() context = the_input._get_render_context()
...@@ -198,7 +198,7 @@ class TextLineTest(unittest.TestCase): ...@@ -198,7 +198,7 @@ class TextLineTest(unittest.TestCase):
element = etree.fromstring(xml_str) element = etree.fromstring(xml_str)
state = {'value': 'BumbleBee', } state = {'value': 'BumbleBee', }
the_input = lookup_tag('textline')(test_system, element, state) the_input = lookup_tag('textline')(test_system(), element, state)
context = the_input._get_render_context() context = the_input._get_render_context()
...@@ -236,7 +236,7 @@ class TextLineTest(unittest.TestCase): ...@@ -236,7 +236,7 @@ class TextLineTest(unittest.TestCase):
element = etree.fromstring(xml_str) element = etree.fromstring(xml_str)
state = {'value': 'BumbleBee', } state = {'value': 'BumbleBee', }
the_input = lookup_tag('textline')(test_system, element, state) the_input = lookup_tag('textline')(test_system(), element, state)
context = the_input._get_render_context() context = the_input._get_render_context()
...@@ -274,7 +274,7 @@ class FileSubmissionTest(unittest.TestCase): ...@@ -274,7 +274,7 @@ class FileSubmissionTest(unittest.TestCase):
'status': 'incomplete', 'status': 'incomplete',
'feedback': {'message': '3'}, } 'feedback': {'message': '3'}, }
input_class = lookup_tag('filesubmission') input_class = lookup_tag('filesubmission')
the_input = input_class(test_system, element, state) the_input = input_class(test_system(), element, state)
context = the_input._get_render_context() context = the_input._get_render_context()
...@@ -319,7 +319,7 @@ class CodeInputTest(unittest.TestCase): ...@@ -319,7 +319,7 @@ class CodeInputTest(unittest.TestCase):
'feedback': {'message': '3'}, } 'feedback': {'message': '3'}, }
input_class = lookup_tag('codeinput') input_class = lookup_tag('codeinput')
the_input = input_class(test_system, element, state) the_input = input_class(test_system(), element, state)
context = the_input._get_render_context() context = the_input._get_render_context()
...@@ -368,7 +368,7 @@ class MatlabTest(unittest.TestCase): ...@@ -368,7 +368,7 @@ class MatlabTest(unittest.TestCase):
'feedback': {'message': '3'}, } 'feedback': {'message': '3'}, }
self.input_class = lookup_tag('matlabinput') self.input_class = lookup_tag('matlabinput')
self.the_input = self.input_class(test_system, elt, state) self.the_input = self.input_class(test_system(), elt, state)
def test_rendering(self): def test_rendering(self):
context = self.the_input._get_render_context() context = self.the_input._get_render_context()
...@@ -396,7 +396,7 @@ class MatlabTest(unittest.TestCase): ...@@ -396,7 +396,7 @@ class MatlabTest(unittest.TestCase):
'feedback': {'message': '3'}, } 'feedback': {'message': '3'}, }
elt = etree.fromstring(self.xml) elt = etree.fromstring(self.xml)
the_input = self.input_class(test_system, elt, state) the_input = self.input_class(test_system(), elt, state)
context = the_input._get_render_context() context = the_input._get_render_context()
expected = {'id': 'prob_1_2', expected = {'id': 'prob_1_2',
...@@ -423,7 +423,7 @@ class MatlabTest(unittest.TestCase): ...@@ -423,7 +423,7 @@ class MatlabTest(unittest.TestCase):
} }
elt = etree.fromstring(self.xml) elt = etree.fromstring(self.xml)
the_input = self.input_class(test_system, elt, state) the_input = self.input_class(test_system(), elt, state)
context = the_input._get_render_context() context = the_input._get_render_context()
expected = {'id': 'prob_1_2', expected = {'id': 'prob_1_2',
'value': 'print "good evening"', 'value': 'print "good evening"',
...@@ -448,7 +448,7 @@ class MatlabTest(unittest.TestCase): ...@@ -448,7 +448,7 @@ class MatlabTest(unittest.TestCase):
} }
elt = etree.fromstring(self.xml) elt = etree.fromstring(self.xml)
the_input = self.input_class(test_system, elt, state) the_input = self.input_class(test_system(), elt, state)
context = the_input._get_render_context() context = the_input._get_render_context()
expected = {'id': 'prob_1_2', expected = {'id': 'prob_1_2',
'value': 'print "good evening"', 'value': 'print "good evening"',
...@@ -470,7 +470,7 @@ class MatlabTest(unittest.TestCase): ...@@ -470,7 +470,7 @@ class MatlabTest(unittest.TestCase):
get = {'submission': 'x = 1234;'} get = {'submission': 'x = 1234;'}
response = self.the_input.handle_ajax("plot", get) response = self.the_input.handle_ajax("plot", get)
test_system.xqueue['interface'].send_to_queue.assert_called_with(header=ANY, body=ANY) test_system().xqueue['interface'].send_to_queue.assert_called_with(header=ANY, body=ANY)
self.assertTrue(response['success']) self.assertTrue(response['success'])
self.assertTrue(self.the_input.input_state['queuekey'] is not None) self.assertTrue(self.the_input.input_state['queuekey'] is not None)
...@@ -479,13 +479,12 @@ class MatlabTest(unittest.TestCase): ...@@ -479,13 +479,12 @@ class MatlabTest(unittest.TestCase):
def test_plot_data_failure(self): def test_plot_data_failure(self):
get = {'submission': 'x = 1234;'} get = {'submission': 'x = 1234;'}
error_message = 'Error message!' error_message = 'Error message!'
test_system.xqueue['interface'].send_to_queue.return_value = (1, error_message) test_system().xqueue['interface'].send_to_queue.return_value = (1, error_message)
response = self.the_input.handle_ajax("plot", get) response = self.the_input.handle_ajax("plot", get)
self.assertFalse(response['success']) self.assertFalse(response['success'])
self.assertEqual(response['message'], error_message) self.assertEqual(response['message'], error_message)
self.assertTrue('queuekey' not in self.the_input.input_state) self.assertTrue('queuekey' not in self.the_input.input_state)
self.assertTrue('queuestate' not in self.the_input.input_state) self.assertTrue('queuestate' not in self.the_input.input_state)
test_system.xqueue['interface'].send_to_queue.return_value = (0, 'Success!')
def test_ungraded_response_success(self): def test_ungraded_response_success(self):
queuekey = 'abcd' queuekey = 'abcd'
...@@ -496,7 +495,7 @@ class MatlabTest(unittest.TestCase): ...@@ -496,7 +495,7 @@ class MatlabTest(unittest.TestCase):
'feedback': {'message': '3'}, } 'feedback': {'message': '3'}, }
elt = etree.fromstring(self.xml) elt = etree.fromstring(self.xml)
the_input = self.input_class(test_system, elt, state) the_input = self.input_class(test_system(), elt, state)
inner_msg = 'hello!' inner_msg = 'hello!'
queue_msg = json.dumps({'msg': inner_msg}) queue_msg = json.dumps({'msg': inner_msg})
...@@ -514,7 +513,7 @@ class MatlabTest(unittest.TestCase): ...@@ -514,7 +513,7 @@ class MatlabTest(unittest.TestCase):
'feedback': {'message': '3'}, } 'feedback': {'message': '3'}, }
elt = etree.fromstring(self.xml) elt = etree.fromstring(self.xml)
the_input = self.input_class(test_system, elt, state) the_input = self.input_class(test_system(), elt, state)
inner_msg = 'hello!' inner_msg = 'hello!'
queue_msg = json.dumps({'msg': inner_msg}) queue_msg = json.dumps({'msg': inner_msg})
...@@ -553,7 +552,7 @@ class SchematicTest(unittest.TestCase): ...@@ -553,7 +552,7 @@ class SchematicTest(unittest.TestCase):
state = {'value': value, state = {'value': value,
'status': 'unsubmitted'} 'status': 'unsubmitted'}
the_input = lookup_tag('schematic')(test_system, element, state) the_input = lookup_tag('schematic')(test_system(), element, state)
context = the_input._get_render_context() context = the_input._get_render_context()
...@@ -592,7 +591,7 @@ class ImageInputTest(unittest.TestCase): ...@@ -592,7 +591,7 @@ class ImageInputTest(unittest.TestCase):
state = {'value': value, state = {'value': value,
'status': 'unsubmitted'} 'status': 'unsubmitted'}
the_input = lookup_tag('imageinput')(test_system, element, state) the_input = lookup_tag('imageinput')(test_system(), element, state)
context = the_input._get_render_context() context = the_input._get_render_context()
...@@ -643,7 +642,7 @@ class CrystallographyTest(unittest.TestCase): ...@@ -643,7 +642,7 @@ class CrystallographyTest(unittest.TestCase):
state = {'value': value, state = {'value': value,
'status': 'unsubmitted'} 'status': 'unsubmitted'}
the_input = lookup_tag('crystallography')(test_system, element, state) the_input = lookup_tag('crystallography')(test_system(), element, state)
context = the_input._get_render_context() context = the_input._get_render_context()
...@@ -681,7 +680,7 @@ class VseprTest(unittest.TestCase): ...@@ -681,7 +680,7 @@ class VseprTest(unittest.TestCase):
state = {'value': value, state = {'value': value,
'status': 'unsubmitted'} 'status': 'unsubmitted'}
the_input = lookup_tag('vsepr_input')(test_system, element, state) the_input = lookup_tag('vsepr_input')(test_system(), element, state)
context = the_input._get_render_context() context = the_input._get_render_context()
...@@ -708,7 +707,7 @@ class ChemicalEquationTest(unittest.TestCase): ...@@ -708,7 +707,7 @@ class ChemicalEquationTest(unittest.TestCase):
element = etree.fromstring(xml_str) element = etree.fromstring(xml_str)
state = {'value': 'H2OYeah', } state = {'value': 'H2OYeah', }
self.the_input = lookup_tag('chemicalequationinput')(test_system, element, state) self.the_input = lookup_tag('chemicalequationinput')(test_system(), element, state)
def test_rendering(self): def test_rendering(self):
''' Verify that the render context matches the expected render context''' ''' Verify that the render context matches the expected render context'''
...@@ -783,7 +782,7 @@ class DragAndDropTest(unittest.TestCase): ...@@ -783,7 +782,7 @@ class DragAndDropTest(unittest.TestCase):
] ]
} }
the_input = lookup_tag('drag_and_drop_input')(test_system, element, state) the_input = lookup_tag('drag_and_drop_input')(test_system(), element, state)
context = the_input._get_render_context() context = the_input._get_render_context()
expected = {'id': 'prob_1_2', expected = {'id': 'prob_1_2',
...@@ -832,7 +831,7 @@ class AnnotationInputTest(unittest.TestCase): ...@@ -832,7 +831,7 @@ class AnnotationInputTest(unittest.TestCase):
tag = 'annotationinput' tag = 'annotationinput'
the_input = lookup_tag(tag)(test_system, element, state) the_input = lookup_tag(tag)(test_system(), element, state)
context = the_input._get_render_context() context = the_input._get_render_context()
......
...@@ -2,7 +2,6 @@ ...@@ -2,7 +2,6 @@
Tests of responsetypes Tests of responsetypes
""" """
from datetime import datetime from datetime import datetime
import json import json
from nose.plugins.skip import SkipTest from nose.plugins.skip import SkipTest
...@@ -10,10 +9,11 @@ import os ...@@ -10,10 +9,11 @@ import os
import random import random
import unittest import unittest
import textwrap import textwrap
import mock
import textwrap
from . import test_system from . import new_loncapa_problem, test_system
import capa.capa_problem as lcp
from capa.responsetypes import LoncapaProblemError, \ from capa.responsetypes import LoncapaProblemError, \
StudentInputError, ResponseError StudentInputError, ResponseError
from capa.correctmap import CorrectMap from capa.correctmap import CorrectMap
...@@ -30,9 +30,9 @@ class ResponseTest(unittest.TestCase): ...@@ -30,9 +30,9 @@ class ResponseTest(unittest.TestCase):
if self.xml_factory_class: if self.xml_factory_class:
self.xml_factory = self.xml_factory_class() self.xml_factory = self.xml_factory_class()
def build_problem(self, **kwargs): def build_problem(self, system=None, **kwargs):
xml = self.xml_factory.build_xml(**kwargs) xml = self.xml_factory.build_xml(**kwargs)
return lcp.LoncapaProblem(xml, '1', system=test_system) return new_loncapa_problem(xml, system=system)
def assert_grade(self, problem, submission, expected_correctness, msg=None): def assert_grade(self, problem, submission, expected_correctness, msg=None):
input_dict = {'1_2_1': submission} input_dict = {'1_2_1': submission}
...@@ -184,94 +184,151 @@ class ImageResponseTest(ResponseTest): ...@@ -184,94 +184,151 @@ class ImageResponseTest(ResponseTest):
self.assert_answer_format(problem) self.assert_answer_format(problem)
class SymbolicResponseTest(unittest.TestCase): class SymbolicResponseTest(ResponseTest):
def test_sr_grade(self): from response_xml_factory import SymbolicResponseXMLFactory
raise SkipTest() # This test fails due to dependencies on a local copy of snuggletex-webapp. Until we have figured that out, we'll just skip this test xml_factory_class = SymbolicResponseXMLFactory
symbolicresponse_file = os.path.dirname(__file__) + "/test_files/symbolicresponse.xml"
test_lcp = lcp.LoncapaProblem(open(symbolicresponse_file).read(), '1', system=test_system) def test_grade_single_input(self):
correct_answers = {'1_2_1': 'cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]', problem = self.build_problem(math_display=True,
'1_2_1_dynamath': ''' expect="2*x+3*y")
<math xmlns="http://www.w3.org/1998/Math/MathML">
<mstyle displaystyle="true"> # Correct answers
<mrow> correct_inputs = [
<mi>cos</mi> ('2x+3y', textwrap.dedent("""
<mrow> <math xmlns="http://www.w3.org/1998/Math/MathML">
<mo>(</mo> <mstyle displaystyle="true">
<mi>&#x3B8;</mi> <mn>2</mn><mo>*</mo><mi>x</mi><mo>+</mo><mn>3</mn><mo>*</mo><mi>y</mi>
<mo>)</mo> </mstyle></math>""")),
</mrow>
</mrow> ('x+x+3y', textwrap.dedent("""
<mo>&#x22C5;</mo> <math xmlns="http://www.w3.org/1998/Math/MathML">
<mrow> <mstyle displaystyle="true">
<mo>[</mo> <mi>x</mi><mo>+</mo><mi>x</mi><mo>+</mo><mn>3</mn><mo>*</mo><mi>y</mi>
<mtable> </mstyle></math>""")),
<mtr> ]
<mtd>
<mn>1</mn> for (input_str, input_mathml) in correct_inputs:
</mtd> self._assert_symbolic_grade(problem, input_str, input_mathml, 'correct')
<mtd>
<mn>0</mn> # Incorrect answers
</mtd> incorrect_inputs = [
</mtr> ('0', ''),
<mtr> ('4x+3y', textwrap.dedent("""
<mtd> <math xmlns="http://www.w3.org/1998/Math/MathML">
<mn>0</mn> <mstyle displaystyle="true">
</mtd> <mn>4</mn><mo>*</mo><mi>x</mi><mo>+</mo><mn>3</mn><mo>*</mo><mi>y</mi>
<mtd> </mstyle></math>""")),
<mn>1</mn> ]
</mtd>
</mtr> for (input_str, input_mathml) in incorrect_inputs:
</mtable> self._assert_symbolic_grade(problem, input_str, input_mathml, 'incorrect')
<mo>]</mo>
</mrow>
<mo>+</mo> def test_complex_number_grade(self):
<mi>i</mi> problem = self.build_problem(math_display=True,
<mo>&#x22C5;</mo> expect="[[cos(theta),i*sin(theta)],[i*sin(theta),cos(theta)]]",
<mrow> options=["matrix", "imaginary"])
<mi>sin</mi>
<mrow> # For LaTeX-style inputs, symmath_check() will try to contact
<mo>(</mo> # a server to convert the input to MathML.
<mi>&#x3B8;</mi> # We mock out the server, simulating the response that it would give
<mo>)</mo> # for this input.
</mrow> import requests
</mrow> dirpath = os.path.dirname(__file__)
<mo>&#x22C5;</mo> correct_snuggletex_response = open(os.path.join(dirpath, "test_files/snuggletex_correct.html")).read().decode('utf8')
<mrow> wrong_snuggletex_response = open(os.path.join(dirpath, "test_files/snuggletex_wrong.html")).read().decode('utf8')
<mo>[</mo>
<mtable> # Correct answer
<mtr> with mock.patch.object(requests, 'post') as mock_post:
<mtd>
<mn>0</mn> # Simulate what the LaTeX-to-MathML server would
</mtd> # send for the correct response input
<mtd> mock_post.return_value.text = correct_snuggletex_response
<mn>1</mn>
</mtd> self._assert_symbolic_grade(problem,
</mtr> "cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]",
<mtr> textwrap.dedent("""
<mtd> <math xmlns="http://www.w3.org/1998/Math/MathML">
<mn>1</mn> <mstyle displaystyle="true">
</mtd> <mrow>
<mtd> <mi>cos</mi>
<mn>0</mn> <mrow><mo>(</mo><mi>&#x3B8;</mi><mo>)</mo></mrow>
</mtd> </mrow>
</mtr> <mo>&#x22C5;</mo>
</mtable> <mrow>
<mo>]</mo> <mo>[</mo>
</mrow> <mtable>
</mstyle> <mtr>
</math> <mtd><mn>1</mn></mtd><mtd><mn>0</mn></mtd>
''', </mtr>
} <mtr>
wrong_answers = {'1_2_1': '2', <mtd><mn>0</mn></mtd><mtd><mn>1</mn></mtd>
'1_2_1_dynamath': ''' </mtr>
<math xmlns="http://www.w3.org/1998/Math/MathML"> </mtable>
<mstyle displaystyle="true"> <mo>]</mo>
<mn>2</mn> </mrow>
</mstyle> <mo>+</mo>
</math>''', <mi>i</mi>
} <mo>&#x22C5;</mo>
self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') <mrow>
self.assertEquals(test_lcp.grade_answers(wrong_answers).get_correctness('1_2_1'), 'incorrect') <mi>sin</mi>
<mrow>
<mo>(</mo><mi>&#x3B8;</mi><mo>)</mo>
</mrow>
</mrow>
<mo>&#x22C5;</mo>
<mrow>
<mo>[</mo>
<mtable>
<mtr>
<mtd><mn>0</mn></mtd><mtd><mn>1</mn></mtd>
</mtr>
<mtr>
<mtd><mn>1</mn></mtd><mtd><mn>0</mn></mtd>
</mtr>
</mtable>
<mo>]</mo>
</mrow>
</mstyle>
</math>
"""),
'correct')
# Incorrect answer
with mock.patch.object(requests, 'post') as mock_post:
# Simulate what the LaTeX-to-MathML server would
# send for the incorrect response input
mock_post.return_value.text = wrong_snuggletex_response
self._assert_symbolic_grade(problem, "2",
textwrap.dedent("""
<math xmlns="http://www.w3.org/1998/Math/MathML">
<mstyle displaystyle="true"><mn>2</mn></mstyle>
</math>
"""),
'incorrect')
def test_multiple_inputs_exception(self):
# Should not allow multiple inputs, since we specify
# only one "expect" value
with self.assertRaises(Exception):
problem = self.build_problem(math_display=True,
expect="2*x+3*y",
num_inputs=3)
def _assert_symbolic_grade(self, problem,
student_input,
dynamath_input,
expected_correctness):
input_dict = {'1_2_1': str(student_input),
'1_2_1_dynamath': str(dynamath_input) }
correct_map = problem.grade_answers(input_dict)
self.assertEqual(correct_map.get_correctness('1_2_1'),
expected_correctness)
class OptionResponseTest(ResponseTest): class OptionResponseTest(ResponseTest):
...@@ -531,6 +588,22 @@ class StringResponseTest(ResponseTest): ...@@ -531,6 +588,22 @@ class StringResponseTest(ResponseTest):
correct_map = problem.grade_answers(input_dict) correct_map = problem.grade_answers(input_dict)
self.assertEquals(correct_map.get_hint('1_2_1'), "") self.assertEquals(correct_map.get_hint('1_2_1'), "")
def test_computed_hints(self):
problem = self.build_problem(
answer="Michigan",
hintfn="gimme_a_hint",
script = textwrap.dedent("""
def gimme_a_hint(answer_ids, student_answers, new_cmap, old_cmap):
aid = answer_ids[0]
answer = student_answers[aid]
new_cmap.set_hint_and_mode(aid, answer+"??", "always")
""")
)
input_dict = {'1_2_1': 'Hello'}
correct_map = problem.grade_answers(input_dict)
self.assertEquals(correct_map.get_hint('1_2_1'), "Hello??")
class CodeResponseTest(ResponseTest): class CodeResponseTest(ResponseTest):
from response_xml_factory import CodeResponseXMLFactory from response_xml_factory import CodeResponseXMLFactory
...@@ -710,16 +783,37 @@ class JavascriptResponseTest(ResponseTest): ...@@ -710,16 +783,37 @@ class JavascriptResponseTest(ResponseTest):
coffee_file_path = os.path.dirname(__file__) + "/test_files/js/*.coffee" coffee_file_path = os.path.dirname(__file__) + "/test_files/js/*.coffee"
os.system("node_modules/.bin/coffee -c %s" % (coffee_file_path)) os.system("node_modules/.bin/coffee -c %s" % (coffee_file_path))
problem = self.build_problem(generator_src="test_problem_generator.js", system = test_system()
grader_src="test_problem_grader.js", system.can_execute_unsafe_code = lambda: True
display_class="TestProblemDisplay", problem = self.build_problem(
display_src="test_problem_display.js", system=system,
param_dict={'value': '4'}) generator_src="test_problem_generator.js",
grader_src="test_problem_grader.js",
display_class="TestProblemDisplay",
display_src="test_problem_display.js",
param_dict={'value': '4'},
)
# Test that we get graded correctly # Test that we get graded correctly
self.assert_grade(problem, json.dumps({0: 4}), "correct") self.assert_grade(problem, json.dumps({0: 4}), "correct")
self.assert_grade(problem, json.dumps({0: 5}), "incorrect") self.assert_grade(problem, json.dumps({0: 5}), "incorrect")
def test_cant_execute_javascript(self):
# If the system says to disallow unsafe code execution, then making
# this problem will raise an exception.
system = test_system()
system.can_execute_unsafe_code = lambda: False
with self.assertRaises(LoncapaProblemError):
problem = self.build_problem(
system=system,
generator_src="test_problem_generator.js",
grader_src="test_problem_grader.js",
display_class="TestProblemDisplay",
display_src="test_problem_display.js",
param_dict={'value': '4'},
)
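Taken together, these two tests pin down the expected behavior: constructing a <javascriptresponse> problem must consult system.can_execute_unsafe_code() and fail when it returns False. A sketch of that gate, with the call site and message assumed rather than taken from this diff:

    # Assumed guard inside the JavaScript response type (illustrative only).
    if not system.can_execute_unsafe_code():
        raise LoncapaProblemError("This course may not run unsafe JavaScript code")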
class NumericalResponseTest(ResponseTest): class NumericalResponseTest(ResponseTest):
from response_xml_factory import NumericalResponseXMLFactory from response_xml_factory import NumericalResponseXMLFactory
...@@ -853,9 +947,8 @@ class CustomResponseTest(ResponseTest): ...@@ -853,9 +947,8 @@ class CustomResponseTest(ResponseTest):
# #
# 'answer_given' is the answer the student gave (if there is just one input) # 'answer_given' is the answer the student gave (if there is just one input)
# or an ordered list of answers (if there are multiple inputs) # or an ordered list of answers (if there are multiple inputs)
# #
# # The function should return a dict of the form
# The function should return a dict of the form
# { 'ok': BOOL, 'msg': STRING } # { 'ok': BOOL, 'msg': STRING }
# #
script = textwrap.dedent(""" script = textwrap.dedent("""
...@@ -964,6 +1057,35 @@ class CustomResponseTest(ResponseTest): ...@@ -964,6 +1057,35 @@ class CustomResponseTest(ResponseTest):
self.assertEqual(correct_map.get_msg('1_2_2'), 'Feedback 2') self.assertEqual(correct_map.get_msg('1_2_2'), 'Feedback 2')
self.assertEqual(correct_map.get_msg('1_2_3'), 'Feedback 3') self.assertEqual(correct_map.get_msg('1_2_3'), 'Feedback 3')
def test_function_code_with_extra_args(self):
script = textwrap.dedent("""\
def check_func(expect, answer_given, options, dynamath):
assert options == "xyzzy", "Options was %r" % options
return {'ok': answer_given == expect, 'msg': 'Message text'}
""")
problem = self.build_problem(script=script, cfn="check_func", expect="42", options="xyzzy", cfn_extra_args="options dynamath")
# Correct answer
input_dict = {'1_2_1': '42'}
correct_map = problem.grade_answers(input_dict)
correctness = correct_map.get_correctness('1_2_1')
msg = correct_map.get_msg('1_2_1')
self.assertEqual(correctness, 'correct')
self.assertEqual(msg, "Message text")
# Incorrect answer
input_dict = {'1_2_1': '0'}
correct_map = problem.grade_answers(input_dict)
correctness = correct_map.get_correctness('1_2_1')
msg = correct_map.get_msg('1_2_1')
self.assertEqual(correctness, 'incorrect')
self.assertEqual(msg, "Message text")
def test_multiple_inputs_return_one_status(self): def test_multiple_inputs_return_one_status(self):
# When given multiple inputs, the 'answer_given' argument # When given multiple inputs, the 'answer_given' argument
# to the check_func() is a list of inputs # to the check_func() is a list of inputs
......
from .calc import evaluator, UndefinedVariable from calc import evaluator, UndefinedVariable
from cmath import isinf from cmath import isinf
#----------------------------------------------------------------------------- #-----------------------------------------------------------------------------
......
...@@ -4,5 +4,5 @@ setup( ...@@ -4,5 +4,5 @@ setup(
name="capa", name="capa",
version="0.1", version="0.1",
packages=find_packages(exclude=["tests"]), packages=find_packages(exclude=["tests"]),
install_requires=['distribute==0.6.28', 'pyparsing==1.5.6'], install_requires=["distribute==0.6.28"],
) )
...@@ -736,4 +736,4 @@ def test6(): # imaginary numbers ...@@ -736,4 +736,4 @@ def test6(): # imaginary numbers
</mstyle> </mstyle>
</math> </math>
''' '''
return formula(xmlstr, options='imaginaryi') return formula(xmlstr, options='imaginary')
...@@ -324,4 +324,5 @@ def symmath_check(expect, ans, dynamath=None, options=None, debug=None, xml=None ...@@ -324,4 +324,5 @@ def symmath_check(expect, ans, dynamath=None, options=None, debug=None, xml=None
msg += "<p>Difference: %s</p>" % to_latex(diff) msg += "<p>Difference: %s</p>" % to_latex(diff)
msg += '<hr>' msg += '<hr>'
return {'ok': False, 'msg': msg, 'ex': fexpect, 'got': fsym} # Used to return more keys: 'ex': fexpect, 'got': fsym
return {'ok': False, 'msg': msg}
from setuptools import setup
setup(
name="chem",
version="0.1",
packages=["chem"],
install_requires=[
"pyparsing==1.5.6",
"numpy",
"scipy",
"nltk==2.0.4",
],
)
This directory is in the Python path for sandboxed Python execution.
from setuptools import setup
setup(
name="sandbox-packages",
version="0.1",
packages=[
"verifiers",
],
py_modules=[
"eia",
],
install_requires=[
],
)
...@@ -13,13 +13,10 @@ real time, next to the input box. ...@@ -13,13 +13,10 @@ real time, next to the input box.
<p>This is a correct answer which may be entered below: </p> <p>This is a correct answer which may be entered below: </p>
<p><tt>cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]</tt></p> <p><tt>cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]</tt></p>
<script>
from symmath import *
</script>
<text>Compute [mathjax] U = \exp\left( i \theta \left[ \begin{matrix} 0 &amp; 1 \\ 1 &amp; 0 \end{matrix} \right] \right) [/mathjax] <text>Compute [mathjax] U = \exp\left( i \theta \left[ \begin{matrix} 0 &amp; 1 \\ 1 &amp; 0 \end{matrix} \right] \right) [/mathjax]
and give the resulting \(2 \times 2\) matrix. <br/> and give the resulting \(2 \times 2\) matrix. <br/>
Your input should be typed in as a list of lists, eg <tt>[[1,2],[3,4]]</tt>. <br/> Your input should be typed in as a list of lists, eg <tt>[[1,2],[3,4]]</tt>. <br/>
[mathjax]U=[/mathjax] <symbolicresponse cfn="symmath_check" answer="[[cos(theta),I*sin(theta)],[I*sin(theta),cos(theta)]]" options="matrix,imaginaryi" id="filenamedogi0VpEBOWedxsymmathresponse_1" state="unsubmitted"> [mathjax]U=[/mathjax] <symbolicresponse cfn="symmath_check" answer="[[cos(theta),i*sin(theta)],[i*sin(theta),cos(theta)]]" options="matrix,imaginary" id="filenamedogi0VpEBOWedxsymmathresponse_1" state="unsubmitted">
<textline size="80" math="1" response_id="2" answer_id="1" id="filenamedogi0VpEBOWedxsymmathresponse_2_1"/> <textline size="80" math="1" response_id="2" answer_id="1" id="filenamedogi0VpEBOWedxsymmathresponse_2_1"/>
</symbolicresponse> </symbolicresponse>
<br/> <br/>
......
...@@ -3,7 +3,9 @@ import datetime ...@@ -3,7 +3,9 @@ import datetime
import hashlib import hashlib
import json import json
import logging import logging
import os
import traceback import traceback
import struct
import sys import sys
from pkg_resources import resource_string from pkg_resources import resource_string
...@@ -23,8 +25,10 @@ from xmodule.util.date_utils import time_to_datetime ...@@ -23,8 +25,10 @@ from xmodule.util.date_utils import time_to_datetime
log = logging.getLogger("mitx.courseware") log = logging.getLogger("mitx.courseware")
# Generated this many different variants of problems with rerandomize=per_student # Generate this many different variants of problems with rerandomize=per_student
NUM_RANDOMIZATION_BINS = 20 NUM_RANDOMIZATION_BINS = 20
# Never produce more than this many different seeds, no matter what.
MAX_RANDOMIZATION_BINS = 1000
def randomization_bin(seed, problem_id): def randomization_bin(seed, problem_id):
...@@ -109,11 +113,7 @@ class CapaModule(CapaFields, XModule): ...@@ -109,11 +113,7 @@ class CapaModule(CapaFields, XModule):
self.close_date = due_date self.close_date = due_date
if self.seed is None: if self.seed is None:
if self.rerandomize == 'never': self.choose_new_seed()
self.seed = 1
elif self.rerandomize == "per_student" and hasattr(self.system, 'seed'):
# see comment on randomization_bin
self.seed = randomization_bin(system.seed, self.location.url)
# Need the problem location in openendedresponse to send out. Adding # Need the problem location in openendedresponse to send out. Adding
# it to the system here seems like the least clunky way to get it # it to the system here seems like the least clunky way to get it
...@@ -157,6 +157,22 @@ class CapaModule(CapaFields, XModule): ...@@ -157,6 +157,22 @@ class CapaModule(CapaFields, XModule):
self.set_state_from_lcp() self.set_state_from_lcp()
assert self.seed is not None
def choose_new_seed(self):
"""Choose a new seed."""
if self.rerandomize == 'never':
self.seed = 1
elif self.rerandomize == "per_student" and hasattr(self.system, 'seed'):
# see comment on randomization_bin
self.seed = randomization_bin(self.system.seed, self.location.url)
else:
self.seed = struct.unpack('i', os.urandom(4))[0]
# So that sandboxed code execution can be cached, but still have an interesting
# number of possibilities, cap the number of different random seeds.
self.seed %= MAX_RANDOMIZATION_BINS
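As a quick check on the arithmetic above: Python's % with a positive modulus never returns a negative result, so even when os.urandom unpacks to a negative int the seed lands in [0, MAX_RANDOMIZATION_BINS). A minimal sketch, not part of the commit:
import os
import struct
MAX_RANDOMIZATION_BINS = 1000
raw = struct.unpack('i', '\xff\xff\xff\xff')[0]    # -1 as a signed 32-bit int
assert raw % MAX_RANDOMIZATION_BINS == 999         # modulo stays non-negative
seed = struct.unpack('i', os.urandom(4))[0] % MAX_RANDOMIZATION_BINS
assert 0 <= seed < MAX_RANDOMIZATION_BINS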
def new_lcp(self, state, text=None): def new_lcp(self, state, text=None):
if text is None: if text is None:
text = self.data text = self.data
...@@ -165,6 +181,7 @@ class CapaModule(CapaFields, XModule): ...@@ -165,6 +181,7 @@ class CapaModule(CapaFields, XModule):
problem_text=text, problem_text=text,
id=self.location.html_id(), id=self.location.html_id(),
state=state, state=state,
seed=self.seed,
system=self.system, system=self.system,
) )
...@@ -832,14 +849,11 @@ class CapaModule(CapaFields, XModule): ...@@ -832,14 +849,11 @@ class CapaModule(CapaFields, XModule):
'error': "Refresh the page and make an attempt before resetting."} 'error': "Refresh the page and make an attempt before resetting."}
if self.rerandomize in ["always", "onreset"]: if self.rerandomize in ["always", "onreset"]:
# reset random number generator seed (note the self.lcp.get_state() # Reset random number generator seed.
# in next line) self.choose_new_seed()
seed = None
else:
seed = self.lcp.seed
# Generate a new problem with either the previous seed or a new seed # Generate a new problem with either the previous seed or a new seed
self.lcp = self.new_lcp({'seed': seed}) self.lcp = self.new_lcp(None)
# Pull in the new problem seed # Pull in the new problem seed
self.set_state_from_lcp() self.set_state_from_lcp()
......
...@@ -14,7 +14,7 @@ import fs.osfs ...@@ -14,7 +14,7 @@ import fs.osfs
import numpy import numpy
import capa.calc as calc import calc
import xmodule import xmodule
from xmodule.x_module import ModuleSystem from xmodule.x_module import ModuleSystem
from mock import Mock from mock import Mock
...@@ -33,15 +33,14 @@ def test_system(): ...@@ -33,15 +33,14 @@ def test_system():
""" """
Construct a test ModuleSystem instance. Construct a test ModuleSystem instance.
By default, the render_template() method simply returns By default, the render_template() method simply returns the context it is
the context it is passed as a string. passed as a string. You can override this behavior by monkey patching::
You can override this behavior by monkey patching:
system = test_system() system = test_system()
system.render_template = my_render_func system.render_template = my_render_func
where `my_render_func` is a function of the form my_render_func(template, context).
where my_render_func is a function of the form
my_render_func(template, context)
""" """
return ModuleSystem( return ModuleSystem(
ajax_url='courses/course_id/modx/a_location', ajax_url='courses/course_id/modx/a_location',
...@@ -86,10 +85,12 @@ class ModelsTest(unittest.TestCase): ...@@ -86,10 +85,12 @@ class ModelsTest(unittest.TestCase):
self.assertTrue(abs(calc.evaluator(variables, functions, "e^(j*pi)") + 1) < 0.00001) self.assertTrue(abs(calc.evaluator(variables, functions, "e^(j*pi)") + 1) < 0.00001)
self.assertTrue(abs(calc.evaluator(variables, functions, "j||1") - 0.5 - 0.5j) < 0.00001) self.assertTrue(abs(calc.evaluator(variables, functions, "j||1") - 0.5 - 0.5j) < 0.00001)
variables['t'] = 1.0 variables['t'] = 1.0
# Use self.assertAlmostEqual here...
self.assertTrue(abs(calc.evaluator(variables, functions, "t") - 1.0) < 0.00001) self.assertTrue(abs(calc.evaluator(variables, functions, "t") - 1.0) < 0.00001)
self.assertTrue(abs(calc.evaluator(variables, functions, "T") - 1.0) < 0.00001) self.assertTrue(abs(calc.evaluator(variables, functions, "T") - 1.0) < 0.00001)
self.assertTrue(abs(calc.evaluator(variables, functions, "t", cs=True) - 1.0) < 0.00001) self.assertTrue(abs(calc.evaluator(variables, functions, "t", cs=True) - 1.0) < 0.00001)
self.assertTrue(abs(calc.evaluator(variables, functions, "T", cs=True) - 298) < 0.2) self.assertTrue(abs(calc.evaluator(variables, functions, "T", cs=True) - 298) < 0.2)
# Use self.assertRaises here...
exception_happened = False exception_happened = False
try: try:
calc.evaluator({}, {}, "5+7 QWSEKO") calc.evaluator({}, {}, "5+7 QWSEKO")
......
...@@ -550,6 +550,7 @@ class CapaModuleTest(unittest.TestCase): ...@@ -550,6 +550,7 @@ class CapaModuleTest(unittest.TestCase):
def test_reset_problem(self): def test_reset_problem(self):
module = CapaFactory.create(done=True) module = CapaFactory.create(done=True)
module.new_lcp = Mock(wraps=module.new_lcp) module.new_lcp = Mock(wraps=module.new_lcp)
module.choose_new_seed = Mock(wraps=module.choose_new_seed)
# Stub out HTML rendering # Stub out HTML rendering
with patch('xmodule.capa_module.CapaModule.get_problem_html') as mock_html: with patch('xmodule.capa_module.CapaModule.get_problem_html') as mock_html:
...@@ -567,7 +568,8 @@ class CapaModuleTest(unittest.TestCase): ...@@ -567,7 +568,8 @@ class CapaModuleTest(unittest.TestCase):
self.assertEqual(result['html'], "<div>Test HTML</div>") self.assertEqual(result['html'], "<div>Test HTML</div>")
# Expect that the problem was reset # Expect that the problem was reset
module.new_lcp.assert_called_once_with({'seed': None}) module.new_lcp.assert_called_once_with(None)
module.choose_new_seed.assert_called_once_with()
def test_reset_problem_closed(self): def test_reset_problem_closed(self):
module = CapaFactory.create() module = CapaFactory.create()
...@@ -1033,3 +1035,13 @@ class CapaModuleTest(unittest.TestCase): ...@@ -1033,3 +1035,13 @@ class CapaModuleTest(unittest.TestCase):
self.assertTrue(module.seed is not None) self.assertTrue(module.seed is not None)
msg = 'Could not get a new seed from reset after 5 tries' msg = 'Could not get a new seed from reset after 5 tries'
self.assertTrue(success, msg) self.assertTrue(success, msg)
def test_random_seed_bins(self):
# Assert that we are limiting the number of possible seeds.
# Check the conditions that generate random seeds
for rerandomize in ['always', 'per_student', 'true', 'onreset']:
# Get a bunch of seeds; they should all be in 0-999.
for i in range(200):
module = CapaFactory.create(rerandomize=rerandomize)
assert 0 <= module.seed < 1000
...@@ -134,6 +134,6 @@ class ModuleProgressTest(unittest.TestCase): ...@@ -134,6 +134,6 @@ class ModuleProgressTest(unittest.TestCase):
''' '''
def test_xmodule_default(self): def test_xmodule_default(self):
'''Make sure default get_progress exists, returns None''' '''Make sure default get_progress exists, returns None'''
xm = x_module.XModule(test_system, 'a://b/c/d/e', None, {}) xm = x_module.XModule(test_system(), 'a://b/c/d/e', None, {})
p = xm.get_progress() p = xm.get_progress()
self.assertEqual(p, None) self.assertEqual(p, None)
...@@ -14,7 +14,6 @@ START = '2013-01-01T01:00:00' ...@@ -14,7 +14,6 @@ START = '2013-01-01T01:00:00'
from .test_course_module import DummySystem as DummyImportSystem from .test_course_module import DummySystem as DummyImportSystem
from . import test_system
class RandomizeModuleTestCase(unittest.TestCase): class RandomizeModuleTestCase(unittest.TestCase):
......
...@@ -737,7 +737,10 @@ class ModuleSystem(object): ...@@ -737,7 +737,10 @@ class ModuleSystem(object):
anonymous_student_id='', anonymous_student_id='',
course_id=None, course_id=None,
open_ended_grading_interface=None, open_ended_grading_interface=None,
s3_interface=None): s3_interface=None,
cache=None,
can_execute_unsafe_code=None,
):
''' '''
Create a closure around the system environment. Create a closure around the system environment.
...@@ -779,6 +782,14 @@ class ModuleSystem(object): ...@@ -779,6 +782,14 @@ class ModuleSystem(object):
xblock_model_data - A dict-like object containing all the data available to this xblock_model_data - A dict-like object containing all the data available to this
xblock xblock
cache - A cache object with two methods:
.get(key) returns an object from the cache or None.
.set(key, value, timeout_secs=None) stores a value in the cache with a timeout.
can_execute_unsafe_code - A function returning a boolean, whether or
not to allow the execution of unsafe, unsandboxed code.
''' '''
self.ajax_url = ajax_url self.ajax_url = ajax_url
self.xqueue = xqueue self.xqueue = xqueue
...@@ -803,6 +814,9 @@ class ModuleSystem(object): ...@@ -803,6 +814,9 @@ class ModuleSystem(object):
self.open_ended_grading_interface = open_ended_grading_interface self.open_ended_grading_interface = open_ended_grading_interface
self.s3_interface = s3_interface self.s3_interface = s3_interface
self.cache = cache or DoNothingCache()
self.can_execute_unsafe_code = can_execute_unsafe_code or (lambda: False)
def get(self, attr): def get(self, attr):
''' provide uniform access to attributes (like etree).''' ''' provide uniform access to attributes (like etree).'''
return self.__dict__.get(attr) return self.__dict__.get(attr)
...@@ -816,3 +830,12 @@ class ModuleSystem(object): ...@@ -816,3 +830,12 @@ class ModuleSystem(object):
def __str__(self): def __str__(self):
return str(self.__dict__) return str(self.__dict__)
class DoNothingCache(object):
"""A duck-compatible object to use in ModuleSystem when there's no cache."""
def get(self, key):
return None
def set(self, key, value, timeout=None):
pass
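For comparison, a minimal dict-backed object satisfying the same duck-typed cache interface documented above (.get(key) returns the stored value or None; .set(key, value, timeout) stores a value). This is only an illustrative sketch; in production the LMS passes Django's cache instead.
class DictCache(object):
    """In-memory cache for illustration; timeouts are accepted but ignored."""
    def __init__(self):
        self._store = {}

    def get(self, key):
        return self._store.get(key)

    def set(self, key, value, timeout=None):
        self._store[key] = value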
<course org="edX" course="embedded_python" url_name="2013_Spring"/>
<course>
<chapter url_name="EmbeddedPythonChapter">
<vertical url_name="Homework1">
<problem url_name="schematic_problem">
<schematicresponse>
<center>
<schematic height="500" width="600" parts="g,n,s" analyses="dc,tran"
submit_analyses="{&quot;tran&quot;:[[&quot;Z&quot;,0.0000004,0.0000009,0.0000014,0.0000019,0.0000024,0.0000029,0.0000034,0.0000039]]}"
initial_value="[[&quot;w&quot;,[112,96,128,96]],[&quot;w&quot;,[256,96,240,96]],[&quot;w&quot;,[192,96,240,96]],[&quot;s&quot;,[240,96,0],{&quot;color&quot;:&quot;cyan&quot;,&quot;offset&quot;:&quot;&quot;,&quot;plot offset&quot;:&quot;0&quot;,&quot;_json_&quot;:3},[&quot;Z&quot;]],[&quot;w&quot;,[32,224,192,224]],[&quot;w&quot;,[96,48,192,48]],[&quot;L&quot;,[256,96,3],{&quot;label&quot;:&quot;Z&quot;,&quot;_json_&quot;:6},[&quot;Z&quot;]],[&quot;r&quot;,[192,48,0],{&quot;name&quot;:&quot;Rpullup&quot;,&quot;r&quot;:&quot;10K&quot;,&quot;_json_&quot;:7},[&quot;1&quot;,&quot;Z&quot;]],[&quot;w&quot;,[32,144,32,192]],[&quot;w&quot;,[32,224,32,192]],[&quot;w&quot;,[48,192,32,192]],[&quot;w&quot;,[32,96,32,144]],[&quot;w&quot;,[48,144,32,144]],[&quot;w&quot;,[32,48,32,96]],[&quot;w&quot;,[48,96,32,96]],[&quot;w&quot;,[32,48,48,48]],[&quot;g&quot;,[32,224,0],{&quot;_json_&quot;:16},[&quot;0&quot;]],[&quot;v&quot;,[96,192,1],{&quot;name&quot;:&quot;VC&quot;,&quot;value&quot;:&quot;square(3,0,250K)&quot;,&quot;_json_&quot;:17},[&quot;C&quot;,&quot;0&quot;]],[&quot;v&quot;,[96,144,1],{&quot;name&quot;:&quot;VB&quot;,&quot;value&quot;:&quot;square(3,0,500K)&quot;,&quot;_json_&quot;:18},[&quot;B&quot;,&quot;0&quot;]],[&quot;v&quot;,[96,96,1],{&quot;name&quot;:&quot;VA&quot;,&quot;value&quot;:&quot;square(3,0,1000K)&quot;,&quot;_json_&quot;:19},[&quot;A&quot;,&quot;0&quot;]],[&quot;v&quot;,[96,48,1],{&quot;name&quot;:&quot;Vpwr&quot;,&quot;value&quot;:&quot;dc(3)&quot;,&quot;_json_&quot;:20},[&quot;1&quot;,&quot;0&quot;]],[&quot;L&quot;,[96,96,2],{&quot;label&quot;:&quot;A&quot;,&quot;_json_&quot;:21},[&quot;A&quot;]],[&quot;w&quot;,[96,96,104,96]],[&quot;L&quot;,[96,144,2],{&quot;label&quot;:&quot;B&quot;,&quot;_json_&quot;:23},[&quot;B&quot;]],[&quot;w&quot;,[96,144,104,144]],[&quot;L&quot;,[96,192,2],{&quot;label&quot;:&quot;C&quot;,&quot;_json_&quot;:25},[&quot;C&quot;]],[&quot;w&quot;,[96,192,104,192]],[&quot;w&quot;,[192,96,192,112]],[&quot;s&quot;,[112,96,0],{&quot;color&quot;:&quot;red&quot;,&quot;offset&quot;:&quot;15&quot;,&quot;plot offset&quot;:&quot;0&quot;,&quot;_json_&quot;:28},[&quot;A&quot;]],[&quot;w&quot;,[104,96,112,96]],[&quot;s&quot;,[112,144,0],{&quot;color&quot;:&quot;green&quot;,&quot;offset&quot;:&quot;10&quot;,&quot;plot offset&quot;:&quot;0&quot;,&quot;_json_&quot;:30},[&quot;B&quot;]],[&quot;w&quot;,[104,144,112,144]],[&quot;w&quot;,[128,144,112,144]],[&quot;s&quot;,[112,192,0],{&quot;color&quot;:&quot;blue&quot;,&quot;offset&quot;:&quot;5&quot;,&quot;plot offset&quot;:&quot;0&quot;,&quot;_json_&quot;:33},[&quot;C&quot;]],[&quot;w&quot;,[104,192,112,192]],[&quot;w&quot;,[128,192,112,192]],[&quot;view&quot;,0,0,2,&quot;5&quot;,&quot;10&quot;,&quot;10MEG&quot;,null,&quot;100&quot;,&quot;4us&quot;]]"
/>
</center>
<answer type="loncapa/python">
# for a schematic response, submission[i] is the json representation
# of the diagram and analysis results for the i-th schematic tag
def get_tran(json,signal):
for element in json:
if element[0] == 'transient':
return element[1].get(signal,[])
return []
def get_value(at,output):
for (t,v) in output:
if at == t: return v
return None
output = get_tran(submission[0],'Z')
okay = True
# output should be 1, 1, 1, 1, 1, 0, 0, 0
if get_value(0.0000004,output) &lt; 2.7: okay = False;
if get_value(0.0000009,output) &lt; 2.7: okay = False;
if get_value(0.0000014,output) &lt; 2.7: okay = False;
if get_value(0.0000019,output) &lt; 2.7: okay = False;
if get_value(0.0000024,output) &lt; 2.7: okay = False;
if get_value(0.0000029,output) &gt; 0.25: okay = False;
if get_value(0.0000034,output) &gt; 0.25: okay = False;
if get_value(0.0000039,output) &gt; 0.25: okay = False;
correct = ['correct' if okay else 'incorrect']
</answer></schematicresponse>
</problem>
<problem url_name="cfn_problem">
<text>
<script type="text/python" system_path="python_lib">
def test_csv(expect, ans):
# Take out all spaces in expected answer
expect = [i.strip(' ') for i in str(expect).split(',')]
# Take out all spaces in student solution
ans = [i.strip(' ') for i in str(ans).split(',')]
def strip_q(x):
# Strip quotes around strings if students have entered them
stripped_ans = []
for item in x:
if item[0] == "'" and item[-1]=="'":
item = item.strip("'")
elif item[0] == '"' and item[-1] == '"':
item = item.strip('"')
stripped_ans.append(item)
return stripped_ans
return strip_q(expect) == strip_q(ans)
</script>
<ol class="enumerate">
<li>
<pre>
num = 0
while num &lt;= 5:
print(num)
num += 1
print("Outside of loop")
print(num)
</pre>
<p>
<customresponse cfn="test_csv" expect="0, 1, 2, 3, 4, 5, 'Outside of loop', 6">
<textline size="50" correct_answer="0, 1, 2, 3, 4, 5, 'Outside of loop', 6"/>
</customresponse>
</p>
</li>
</ol>
</text>
</problem>
<problem url_name="computed_answer">
<customresponse>
<textline size="5" correct_answer="Xyzzy"/>
<answer type="loncapa/python">
if submission[0] == "Xyzzy":
correct = ['correct']
else:
correct = ['incorrect']
</answer>
</customresponse>
</problem>
</vertical>
</chapter>
</course>
<course org="edX" course="embedded_python" url_name="2013_Spring"/>
...@@ -19,7 +19,7 @@ from symmath import * ...@@ -19,7 +19,7 @@ from symmath import *
<text>Compute [mathjax] U = \exp\left( i \theta \left[ \begin{matrix} 0 &amp; 1 \\ 1 &amp; 0 \end{matrix} \right] \right) [/mathjax] <text>Compute [mathjax] U = \exp\left( i \theta \left[ \begin{matrix} 0 &amp; 1 \\ 1 &amp; 0 \end{matrix} \right] \right) [/mathjax]
and give the resulting \(2 \times 2\) matrix. <br/> and give the resulting \(2 \times 2\) matrix. <br/>
Your input should be typed in as a list of lists, eg <tt>[[1,2],[3,4]]</tt>. <br/> Your input should be typed in as a list of lists, eg <tt>[[1,2],[3,4]]</tt>. <br/>
[mathjax]U=[/mathjax] <symbolicresponse cfn="symmath_check" answer="[[cos(theta),I*sin(theta)],[I*sin(theta),cos(theta)]]" options="matrix,imaginaryi" id="filenamedogi0VpEBOWedxsymmathresponse_1" state="unsubmitted"> [mathjax]U=[/mathjax] <symbolicresponse cfn="symmath_check" answer="[[cos(theta),i*sin(theta)],[i*sin(theta),cos(theta)]]" options="matrix,imaginary" id="filenamedogi0VpEBOWedxsymmathresponse_1" state="unsubmitted">
<textline size="80" math="1" response_id="2" answer_id="1" id="filenamedogi0VpEBOWedxsymmathresponse_2_1"/> <textline size="80" math="1" response_id="2" answer_id="1" id="filenamedogi0VpEBOWedxsymmathresponse_2_1"/>
</symbolicresponse> </symbolicresponse>
<br/> <br/>
......
import json import json
import logging import logging
import pyparsing import pyparsing
import re
import sys import sys
import static_replace import static_replace
...@@ -8,6 +9,7 @@ from functools import partial ...@@ -8,6 +9,7 @@ from functools import partial
from django.conf import settings from django.conf import settings
from django.contrib.auth.models import User from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.exceptions import PermissionDenied from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse from django.core.urlresolvers import reverse
from django.http import Http404 from django.http import Http404
...@@ -273,6 +275,14 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours ...@@ -273,6 +275,14 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours
statsd.increment("lms.courseware.question_answered", tags=tags) statsd.increment("lms.courseware.question_answered", tags=tags)
def can_execute_unsafe_code():
# To decide if we can run unsafe code, we check the course id against
# a list of regexes configured on the server.
for regex in settings.COURSES_WITH_UNSAFE_CODE:
if re.match(regex, course_id):
return True
return False
# TODO (cpennington): When modules are shared between courses, the static # TODO (cpennington): When modules are shared between courses, the static
# prefix is going to have to be specific to the module, not the directory # prefix is going to have to be specific to the module, not the directory
# that the xml was loaded from # that the xml was loaded from
...@@ -299,6 +309,8 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours ...@@ -299,6 +309,8 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours
course_id=course_id, course_id=course_id,
open_ended_grading_interface=open_ended_grading_interface, open_ended_grading_interface=open_ended_grading_interface,
s3_interface=s3_interface, s3_interface=s3_interface,
cache=cache,
can_execute_unsafe_code=can_execute_unsafe_code,
) )
# pass position specified in URL to module through ModuleSystem # pass position specified in URL to module through ModuleSystem
system.set('position', position) system.set('position', position)
......
# Load Testing
Scripts for load testing the courseware app,
mostly using [multimechanize](http://testutils.org/multi-mechanize/)
# Custom Response Load Test
## Optional Installations
* [memcached](http://pypi.python.org/pypi/python-memcached/): Install this
and make sure it is running, or the Capa problem will not cache results.
* [AppArmor](http://wiki.apparmor.net): Follow the instructions in
`common/lib/codejail/README` to set up the Python sandbox environment.
If you do not set up the sandbox, the CustomResponse code will simply run
unsandboxed, so you can still run the tests.
* [matplotlib](http://matplotlib.org): Multi-mechanize uses this to create graphs.
## Running the Tests
This test simulates student submissions for a custom response problem.
First, clear the cache:
/etc/init.d/memcached restart
Then, run the test:
multimech-run custom_response
You can configure the parameters in `customresponse/config.cfg`,
and you can change the CustomResponse script and student submissions
in `customresponse/test_scripts/v_user.py`.
## Components Under Test
Components under test:
* Python sandbox (see `common/lib/codejail`), which uses `AppArmor`
* Caching (see `common/lib/capa/capa/safe_exec/`), which uses `memcache` in production
Components NOT under test:
* Django views
* `XModule`
* gunicorn
This allows us to avoid creating courses in mongo, logging in, using CSRF tokens,
and other inconveniences. Instead, we create a capa problem (from the capa package),
pass it Django's memcache backend, and feed it student submissions.
Even though the test uses `capa.capa_problem.LoncapaProblem` directly,
the `capa` package should not depend on Django. For this reason, we put the
test in the `courseware` Django app.
[global]
run_time = 240
rampup = 30
results_ts_interval = 10
progress_bar = on
console_logging = off
xml_report = off
[user_group-1]
threads = 10
script = v_user.py
[user_group-2]
threads = 10
script = v_user.py
[user_group-3]
threads = 10
script = v_user.py
""" User script for load testing CustomResponse """
from capa.tests.response_xml_factory import CustomResponseXMLFactory
import capa.capa_problem as lcp
from xmodule.x_module import ModuleSystem
import mock
import fs.osfs
import random
import textwrap
# Use memcache running locally
CACHE_SETTINGS = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211'
},
}
# Configure settings so Django will let us import its cache wrapper
# Caching is the only part of Django being tested
from django.conf import settings
settings.configure(CACHES=CACHE_SETTINGS)
from django.core.cache import cache
# Script to install as the checker for the CustomResponse
TEST_SCRIPT = textwrap.dedent("""
def check_func(expect, answer_given):
return {'ok': answer_given == expect, 'msg': 'Message text'}
""")
# Submissions submitted by the student
TEST_SUBMISSIONS = [random.randint(-100, 100) for i in range(100)]
class TestContext(object):
""" One-time set up for the test that is shared across transactions.
Uses a Singleton design pattern."""
SINGLETON = None
NUM_UNIQUE_SEEDS = 20
@classmethod
def singleton(cls):
""" Return the singleton, creating one if it does not already exist."""
# If we haven't created the singleton yet, create it now
if cls.SINGLETON is None:
# Create a mock ModuleSystem, installing our cache
system = mock.MagicMock(ModuleSystem)
system.render_template = lambda template, context: "<div>%s</div>" % template
system.cache = cache
system.filestore = mock.MagicMock(fs.osfs.OSFS)
system.filestore.root_path = ""
system.DEBUG = True
# Create a custom response problem
xml_factory = CustomResponseXMLFactory()
xml = xml_factory.build_xml(script=TEST_SCRIPT, cfn="check_func", expect="42")
# Create and store the context
cls.SINGLETON = cls(system, xml)
# Return the singleton
return cls.SINGLETON
def __init__(self, system, xml):
""" Store context needed for the test across transactions """
self.system = system
self.xml = xml
# Construct a small pool of unique seeds
# To keep our implementation in line with the one capa actually uses,
# construct the problems, then use the seeds they generate
self.seeds = [lcp.LoncapaProblem(self.xml, 'problem_id', system=self.system).seed
for i in range(self.NUM_UNIQUE_SEEDS)]
def random_seed(self):
""" Return one of a small number of unique random seeds """
return random.choice(self.seeds)
def student_submission(self):
""" Return one of a small number of student submissions """
return random.choice(TEST_SUBMISSIONS)
class Transaction(object):
""" User script that submits a response to a CustomResponse problem """
def __init__(self):
""" Create the problem """
# Get the context (re-used across transactions)
self.context = TestContext.singleton()
# Create a new custom response problem
# using one of a small number of unique seeds
# We're assuming that the capa module is limiting the number
# of seeds (currently not the case for certain settings)
self.problem = lcp.LoncapaProblem(self.context.xml,
'1',
state=None,
seed=self.context.random_seed(),
system=self.context.system)
def run(self):
""" Submit a response to the CustomResponse problem """
answers = {'1_2_1': self.context.student_submission()}
self.problem.grade_answers(answers)
if __name__ == '__main__':
trans = Transaction()
trans.run()
...@@ -372,6 +372,7 @@ class TestCoursesLoadTestCase_XmlModulestore(PageLoaderTestCase): ...@@ -372,6 +372,7 @@ class TestCoursesLoadTestCase_XmlModulestore(PageLoaderTestCase):
'''Check that all pages in test courses load properly from XML''' '''Check that all pages in test courses load properly from XML'''
def setUp(self): def setUp(self):
super(TestCoursesLoadTestCase_XmlModulestore, self).setUp()
self.setup_viewtest_user() self.setup_viewtest_user()
xmodule.modulestore.django._MODULESTORES = {} xmodule.modulestore.django._MODULESTORES = {}
...@@ -390,6 +391,7 @@ class TestCoursesLoadTestCase_MongoModulestore(PageLoaderTestCase): ...@@ -390,6 +391,7 @@ class TestCoursesLoadTestCase_MongoModulestore(PageLoaderTestCase):
'''Check that all pages in test courses load properly from Mongo''' '''Check that all pages in test courses load properly from Mongo'''
def setUp(self): def setUp(self):
super(TestCoursesLoadTestCase_MongoModulestore, self).setUp()
self.setup_viewtest_user() self.setup_viewtest_user()
xmodule.modulestore.django._MODULESTORES = {} xmodule.modulestore.django._MODULESTORES = {}
modulestore().collection.drop() modulestore().collection.drop()
...@@ -487,9 +489,6 @@ class TestDraftModuleStore(TestCase): ...@@ -487,9 +489,6 @@ class TestDraftModuleStore(TestCase):
class TestViewAuth(LoginEnrollmentTestCase): class TestViewAuth(LoginEnrollmentTestCase):
"""Check that view authentication works properly""" """Check that view authentication works properly"""
# NOTE: setUpClass() runs before override_settings takes effect, so
# can't do imports there without manually hacking settings.
def setUp(self): def setUp(self):
xmodule.modulestore.django._MODULESTORES = {} xmodule.modulestore.django._MODULESTORES = {}
...@@ -810,43 +809,85 @@ class TestViewAuth(LoginEnrollmentTestCase): ...@@ -810,43 +809,85 @@ class TestViewAuth(LoginEnrollmentTestCase):
@override_settings(MODULESTORE=TEST_DATA_XML_MODULESTORE) @override_settings(MODULESTORE=TEST_DATA_XML_MODULESTORE)
class TestCourseGrader(LoginEnrollmentTestCase): class TestSubmittingProblems(LoginEnrollmentTestCase):
"""Check that a course gets graded properly""" """Check that a course gets graded properly"""
# NOTE: setUpClass() runs before override_settings takes effect, so # Subclasses should specify the course slug
# can't do imports there without manually hacking settings. course_slug = "UNKNOWN"
course_when = "UNKNOWN"
def setUp(self): def setUp(self):
xmodule.modulestore.django._MODULESTORES = {} xmodule.modulestore.django._MODULESTORES = {}
courses = modulestore().get_courses()
def find_course(course_id):
"""Assumes the course is present"""
return [c for c in courses if c.id == course_id][0]
self.graded_course = find_course("edX/graded/2012_Fall") course_name = "edX/%s/%s" % (self.course_slug, self.course_when)
self.course = modulestore().get_course(course_name)
assert self.course, "Couldn't load course %r" % course_name
# create a test student # create a test student
self.student = 'view@test.com' self.student = 'view@test.com'
self.password = 'foo' self.password = 'foo'
self.create_account('u1', self.student, self.password) self.create_account('u1', self.student, self.password)
self.activate_user(self.student) self.activate_user(self.student)
self.enroll(self.graded_course) self.enroll(self.course)
self.student_user = get_user(self.student) self.student_user = get_user(self.student)
self.factory = RequestFactory() self.factory = RequestFactory()
def problem_location(self, problem_url_name):
return "i4x://edX/{}/problem/{}".format(self.course_slug, problem_url_name)
def modx_url(self, problem_location, dispatch):
return reverse(
'modx_dispatch',
kwargs={
'course_id': self.course.id,
'location': problem_location,
'dispatch': dispatch,
}
)
def submit_question_answer(self, problem_url_name, responses):
"""
Submit answers to a question.
Responses is a dict mapping answer ids within the problem
to answers:
{'2_1': 'Correct', '2_2': 'Incorrect'}
"""
problem_location = self.problem_location(problem_url_name)
modx_url = self.modx_url(problem_location, 'problem_check')
answer_key_prefix = 'input_i4x-edX-{}-problem-{}_'.format(self.course_slug, problem_url_name)
resp = self.client.post(modx_url,
{ (answer_key_prefix + k): v for k,v in responses.items() }
)
return resp
def reset_question_answer(self, problem_url_name):
'''resets specified problem for current user'''
problem_location = self.problem_location(problem_url_name)
modx_url = self.modx_url(problem_location, 'problem_reset')
resp = self.client.post(modx_url)
return resp
class TestCourseGrader(TestSubmittingProblems):
"""Check that a course gets graded properly"""
course_slug = "graded"
course_when = "2012_Fall"
def get_grade_summary(self): def get_grade_summary(self):
'''calls grades.grade for current user and course''' '''calls grades.grade for current user and course'''
model_data_cache = ModelDataCache.cache_for_descriptor_descendents( model_data_cache = ModelDataCache.cache_for_descriptor_descendents(
self.graded_course.id, self.student_user, self.graded_course) self.course.id, self.student_user, self.course)
fake_request = self.factory.get(reverse('progress', fake_request = self.factory.get(reverse('progress',
kwargs={'course_id': self.graded_course.id})) kwargs={'course_id': self.course.id}))
return grades.grade(self.student_user, fake_request, return grades.grade(self.student_user, fake_request,
self.graded_course, model_data_cache) self.course, model_data_cache)
def get_homework_scores(self): def get_homework_scores(self):
'''get scores for homeworks''' '''get scores for homeworks'''
...@@ -855,14 +896,14 @@ class TestCourseGrader(LoginEnrollmentTestCase): ...@@ -855,14 +896,14 @@ class TestCourseGrader(LoginEnrollmentTestCase):
def get_progress_summary(self): def get_progress_summary(self):
'''return progress summary structure for current user and course''' '''return progress summary structure for current user and course'''
model_data_cache = ModelDataCache.cache_for_descriptor_descendents( model_data_cache = ModelDataCache.cache_for_descriptor_descendents(
self.graded_course.id, self.student_user, self.graded_course) self.course.id, self.student_user, self.course)
fake_request = self.factory.get(reverse('progress', fake_request = self.factory.get(reverse('progress',
kwargs={'course_id': self.graded_course.id})) kwargs={'course_id': self.course.id}))
progress_summary = grades.progress_summary(self.student_user, progress_summary = grades.progress_summary(self.student_user,
fake_request, fake_request,
self.graded_course, self.course,
model_data_cache) model_data_cache)
return progress_summary return progress_summary
...@@ -871,46 +912,6 @@ class TestCourseGrader(LoginEnrollmentTestCase): ...@@ -871,46 +912,6 @@ class TestCourseGrader(LoginEnrollmentTestCase):
grade_summary = self.get_grade_summary() grade_summary = self.get_grade_summary()
self.assertEqual(grade_summary['percent'], percent) self.assertEqual(grade_summary['percent'], percent)
def submit_question_answer(self, problem_url_name, responses):
"""
The field names of a problem are hard to determine. This method only works
for the problems used in the edX/graded course, which has fields named in the
following form:
input_i4x-edX-graded-problem-H1P3_2_1
input_i4x-edX-graded-problem-H1P3_2_2
"""
problem_location = "i4x://edX/graded/problem/%s" % problem_url_name
modx_url = reverse('modx_dispatch',
kwargs={'course_id': self.graded_course.id,
'location': problem_location,
'dispatch': 'problem_check', })
resp = self.client.post(modx_url, {
'input_i4x-edX-graded-problem-%s_2_1' % problem_url_name: responses[0],
'input_i4x-edX-graded-problem-%s_2_2' % problem_url_name: responses[1],
})
print "modx_url", modx_url, "responses", responses
print "resp", resp
return resp
def problem_location(self, problem_url_name):
'''Get location string for problem, assuming hardcoded course_id'''
return "i4x://edX/graded/problem/{0}".format(problem_url_name)
def reset_question_answer(self, problem_url_name):
'''resets specified problem for current user'''
problem_location = self.problem_location(problem_url_name)
modx_url = reverse('modx_dispatch',
kwargs={'course_id': self.graded_course.id,
'location': problem_location,
'dispatch': 'problem_reset', })
resp = self.client.post(modx_url)
return resp
def test_get_graded(self): def test_get_graded(self):
#### Check that the grader shows we have 0% in the course #### Check that the grader shows we have 0% in the course
self.check_grade_percent(0) self.check_grade_percent(0)
...@@ -928,27 +929,27 @@ class TestCourseGrader(LoginEnrollmentTestCase): ...@@ -928,27 +929,27 @@ class TestCourseGrader(LoginEnrollmentTestCase):
return [s.earned for s in hw_section['scores']] return [s.earned for s in hw_section['scores']]
# Only get half of the first problem correct # Only get half of the first problem correct
self.submit_question_answer('H1P1', ['Correct', 'Incorrect']) self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Incorrect'})
self.check_grade_percent(0.06) self.check_grade_percent(0.06)
self.assertEqual(earned_hw_scores(), [1.0, 0, 0]) # Order matters self.assertEqual(earned_hw_scores(), [1.0, 0, 0]) # Order matters
self.assertEqual(score_for_hw('Homework1'), [1.0, 0.0]) self.assertEqual(score_for_hw('Homework1'), [1.0, 0.0])
# Get both parts of the first problem correct # Get both parts of the first problem correct
self.reset_question_answer('H1P1') self.reset_question_answer('H1P1')
self.submit_question_answer('H1P1', ['Correct', 'Correct']) self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Correct'})
self.check_grade_percent(0.13) self.check_grade_percent(0.13)
self.assertEqual(earned_hw_scores(), [2.0, 0, 0]) self.assertEqual(earned_hw_scores(), [2.0, 0, 0])
self.assertEqual(score_for_hw('Homework1'), [2.0, 0.0]) self.assertEqual(score_for_hw('Homework1'), [2.0, 0.0])
# This problem is shown in an ABTest # This problem is shown in an ABTest
self.submit_question_answer('H1P2', ['Correct', 'Correct']) self.submit_question_answer('H1P2', {'2_1': 'Correct', '2_2': 'Correct'})
self.check_grade_percent(0.25) self.check_grade_percent(0.25)
self.assertEqual(earned_hw_scores(), [4.0, 0.0, 0]) self.assertEqual(earned_hw_scores(), [4.0, 0.0, 0])
self.assertEqual(score_for_hw('Homework1'), [2.0, 2.0]) self.assertEqual(score_for_hw('Homework1'), [2.0, 2.0])
# This problem is hidden in an ABTest. # This problem is hidden in an ABTest.
# Getting it correct doesn't change total grade # Getting it correct doesn't change total grade
self.submit_question_answer('H1P3', ['Correct', 'Correct']) self.submit_question_answer('H1P3', {'2_1': 'Correct', '2_2': 'Correct'})
self.check_grade_percent(0.25) self.check_grade_percent(0.25)
self.assertEqual(score_for_hw('Homework1'), [2.0, 2.0]) self.assertEqual(score_for_hw('Homework1'), [2.0, 2.0])
...@@ -957,19 +958,85 @@ class TestCourseGrader(LoginEnrollmentTestCase): ...@@ -957,19 +958,85 @@ class TestCourseGrader(LoginEnrollmentTestCase):
# This problem is also weighted to be 4 points (instead of default of 2) # This problem is also weighted to be 4 points (instead of default of 2)
# If the problem was unweighted the percent would have been 0.38 so we # If the problem was unweighted the percent would have been 0.38 so we
# know it works. # know it works.
self.submit_question_answer('H2P1', ['Correct', 'Correct']) self.submit_question_answer('H2P1', {'2_1': 'Correct', '2_2': 'Correct'})
self.check_grade_percent(0.42) self.check_grade_percent(0.42)
self.assertEqual(earned_hw_scores(), [4.0, 4.0, 0]) self.assertEqual(earned_hw_scores(), [4.0, 4.0, 0])
# Third homework # Third homework
self.submit_question_answer('H3P1', ['Correct', 'Correct']) self.submit_question_answer('H3P1', {'2_1': 'Correct', '2_2': 'Correct'})
self.check_grade_percent(0.42) # Score didn't change self.check_grade_percent(0.42) # Score didn't change
self.assertEqual(earned_hw_scores(), [4.0, 4.0, 2.0]) self.assertEqual(earned_hw_scores(), [4.0, 4.0, 2.0])
self.submit_question_answer('H3P2', ['Correct', 'Correct']) self.submit_question_answer('H3P2', {'2_1': 'Correct', '2_2': 'Correct'})
self.check_grade_percent(0.5) # Now homework2 dropped. Score changes self.check_grade_percent(0.5) # Now homework2 dropped. Score changes
self.assertEqual(earned_hw_scores(), [4.0, 4.0, 4.0]) self.assertEqual(earned_hw_scores(), [4.0, 4.0, 4.0])
# Now we answer the final question (worth half of the grade) # Now we answer the final question (worth half of the grade)
self.submit_question_answer('FinalQuestion', ['Correct', 'Correct']) self.submit_question_answer('FinalQuestion', {'2_1': 'Correct', '2_2': 'Correct'})
self.check_grade_percent(1.0) # Hooray! We got 100% self.check_grade_percent(1.0) # Hooray! We got 100%
@override_settings(MODULESTORE=TEST_DATA_XML_MODULESTORE)
class TestSchematicResponse(TestSubmittingProblems):
"""Check that we can submit a schematic response, and it answers properly."""
course_slug = "embedded_python"
course_when = "2013_Spring"
def test_schematic(self):
resp = self.submit_question_answer('schematic_problem',
{ '2_1': json.dumps(
[['transient', {'Z': [
[0.0000004, 2.8],
[0.0000009, 2.8],
[0.0000014, 2.8],
[0.0000019, 2.8],
[0.0000024, 2.8],
[0.0000029, 0.2],
[0.0000034, 0.2],
[0.0000039, 0.2]
]}]]
)
})
respdata = json.loads(resp.content)
self.assertEqual(respdata['success'], 'correct')
self.reset_question_answer('schematic_problem')
resp = self.submit_question_answer('schematic_problem',
{ '2_1': json.dumps(
[['transient', {'Z': [
[0.0000004, 2.8],
[0.0000009, 0.0], # wrong.
[0.0000014, 2.8],
[0.0000019, 2.8],
[0.0000024, 2.8],
[0.0000029, 0.2],
[0.0000034, 0.2],
[0.0000039, 0.2]
]}]]
)
})
respdata = json.loads(resp.content)
self.assertEqual(respdata['success'], 'incorrect')
def test_check_function(self):
resp = self.submit_question_answer('cfn_problem', {'2_1': "0, 1, 2, 3, 4, 5, 'Outside of loop', 6"})
respdata = json.loads(resp.content)
self.assertEqual(respdata['success'], 'correct')
self.reset_question_answer('cfn_problem')
resp = self.submit_question_answer('cfn_problem', {'2_1': "xyzzy!"})
respdata = json.loads(resp.content)
self.assertEqual(respdata['success'], 'incorrect')
def test_computed_answer(self):
resp = self.submit_question_answer('computed_answer', {'2_1': "Xyzzy"})
respdata = json.loads(resp.content)
self.assertEqual(respdata['success'], 'correct')
self.reset_question_answer('computed_answer')
resp = self.submit_question_answer('computed_answer', {'2_1': "NO!"})
respdata = json.loads(resp.content)
self.assertEqual(respdata['success'], 'incorrect')
from django.db import models
# Create your models here.
"""Views for debugging and diagnostics"""
import pprint
import traceback
from django.http import Http404
from django.contrib.auth.decorators import login_required
from django_future.csrf import ensure_csrf_cookie, csrf_exempt
from mitxmako.shortcuts import render_to_response
from codejail.safe_exec import safe_exec
@login_required
@ensure_csrf_cookie
def run_python(request):
"""A page to allow testing the Python sandbox on a production server."""
if not request.user.is_staff:
raise Http404
c = {}
c['code'] = ''
c['results'] = None
if request.method == 'POST':
py_code = c['code'] = request.POST.get('code')
g = {}
try:
safe_exec(py_code, g)
except Exception as e:
c['results'] = traceback.format_exc()
else:
c['results'] = pprint.pformat(g)
return render_to_response("debug/run_python_form.html", c)
...@@ -92,6 +92,16 @@ CERT_QUEUE = ENV_TOKENS.get("CERT_QUEUE", 'test-pull') ...@@ -92,6 +92,16 @@ CERT_QUEUE = ENV_TOKENS.get("CERT_QUEUE", 'test-pull')
ZENDESK_URL = ENV_TOKENS.get("ZENDESK_URL") ZENDESK_URL = ENV_TOKENS.get("ZENDESK_URL")
FEEDBACK_SUBMISSION_EMAIL = ENV_TOKENS.get("FEEDBACK_SUBMISSION_EMAIL") FEEDBACK_SUBMISSION_EMAIL = ENV_TOKENS.get("FEEDBACK_SUBMISSION_EMAIL")
for name, value in ENV_TOKENS.get("CODE_JAIL", {}).items():
oldvalue = CODE_JAIL.get(name)
if isinstance(oldvalue, dict):
for subname, subvalue in value.items():
oldvalue[subname] = subvalue
else:
CODE_JAIL[name] = value
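To make the merge semantics above concrete, here is an illustrative run with hypothetical values (the sandbox path and the REALTIME limit name are assumptions, not taken from the commit): dict-valued entries such as 'limits' are updated key by key, while scalar entries are overwritten outright.
CODE_JAIL = {'python_bin': None, 'user': 'sandbox', 'limits': {'CPU': 1}}
ENV_TOKENS = {'CODE_JAIL': {'python_bin': '/usr/bin/python-sandbox',
                            'limits': {'REALTIME': 3}}}

for name, value in ENV_TOKENS.get("CODE_JAIL", {}).items():
    oldvalue = CODE_JAIL.get(name)
    if isinstance(oldvalue, dict):
        for subname, subvalue in value.items():
            oldvalue[subname] = subvalue
    else:
        CODE_JAIL[name] = value

# CODE_JAIL is now:
# {'python_bin': '/usr/bin/python-sandbox', 'user': 'sandbox',
#  'limits': {'CPU': 1, 'REALTIME': 3}}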
COURSES_WITH_UNSAFE_CODE = ENV_TOKENS.get("COURSES_WITH_UNSAFE_CODE", [])
############################## SECURE AUTH ITEMS ############### ############################## SECURE AUTH ITEMS ###############
# Secret things: passwords, access keys, etc. # Secret things: passwords, access keys, etc.
with open(ENV_ROOT / CONFIG_PREFIX + "auth.json") as auth_file: with open(ENV_ROOT / CONFIG_PREFIX + "auth.json") as auth_file:
......
...@@ -97,6 +97,10 @@ MITX_FEATURES = { ...@@ -97,6 +97,10 @@ MITX_FEATURES = {
# Provide a UI to allow users to submit feedback from the LMS # Provide a UI to allow users to submit feedback from the LMS
'ENABLE_FEEDBACK_SUBMISSION': False, 'ENABLE_FEEDBACK_SUBMISSION': False,
# Turn on a page that lets staff enter Python code to be run in the
# sandbox, for testing whether it's enabled properly.
'ENABLE_DEBUG_RUN_PYTHON': False,
} }
# Used for A/B testing # Used for A/B testing
...@@ -246,6 +250,31 @@ MODULESTORE = { ...@@ -246,6 +250,31 @@ MODULESTORE = {
} }
CONTENTSTORE = None CONTENTSTORE = None
#################### Python sandbox ############################################
CODE_JAIL = {
# Path to a sandboxed Python executable. None means don't bother.
'python_bin': None,
# User to run as in the sandbox.
'user': 'sandbox',
# Configurable limits.
'limits': {
# How many CPU seconds can jailed code use?
'CPU': 1,
},
}
# Some courses are allowed to run unsafe code. This is a list of regexes; one
# of them must match the course id for that course to run unsafe code.
#
# For example:
#
# COURSES_WITH_UNSAFE_CODE = [
# r"Harvard/XY123.1/.*"
# ]
COURSES_WITH_UNSAFE_CODE = []
############################ SIGNAL HANDLERS ################################ ############################ SIGNAL HANDLERS ################################
# This is imported to register the exception signal handling that logs exceptions # This is imported to register the exception signal handling that logs exceptions
import monitoring.exceptions # noqa import monitoring.exceptions # noqa
...@@ -398,6 +427,7 @@ MIDDLEWARE_CLASSES = ( ...@@ -398,6 +427,7 @@ MIDDLEWARE_CLASSES = (
# 'debug_toolbar.middleware.DebugToolbarMiddleware', # 'debug_toolbar.middleware.DebugToolbarMiddleware',
'django_comment_client.utils.ViewNameMiddleware', 'django_comment_client.utils.ViewNameMiddleware',
'codejail.django_integration.ConfigureCodeJailMiddleware',
) )
############################### Pipeline ####################################### ############################### Pipeline #######################################
...@@ -601,6 +631,7 @@ INSTALLED_APPS = ( ...@@ -601,6 +631,7 @@ INSTALLED_APPS = (
# For testing # For testing
'django.contrib.admin', # only used in DEBUG mode 'django.contrib.admin', # only used in DEBUG mode
'debug',
# Discussion forums # Discussion forums
'django_comment_client', 'django_comment_client',
......
<html><body>
<div>
<p>Python:</p>
<form method='post'>
<input type="hidden" name="csrfmiddlewaretoken" value="${ csrf_token }">
<div>
<textarea name='code' rows='20' cols='80'>${code|h}</textarea>
</div>
<input type='submit' value='Run it!'/>
</form>
</div>
%if results:
<div>
<p>Results:</p>
<pre>
${results|h}
</pre>
</div>
%endif
...@@ -363,6 +363,11 @@ urlpatterns += ( ...@@ -363,6 +363,11 @@ urlpatterns += (
url(r'^comm/foldit_ops', 'foldit.views.foldit_ops', name="foldit_ops"), url(r'^comm/foldit_ops', 'foldit.views.foldit_ops', name="foldit_ops"),
) )
if settings.MITX_FEATURES.get('ENABLE_DEBUG_RUN_PYTHON'):
urlpatterns += (
url(r'^debug/run_python', 'debug.views.run_python'),
)
urlpatterns = patterns(*urlpatterns) urlpatterns = patterns(*urlpatterns)
if settings.DEBUG: if settings.DEBUG:
......
# Packages to install in the Python sandbox for secured execution.
scipy==0.11.0
lxml==3.0.1
-e common/lib/calc
-e common/lib/chem
-e common/lib/sandbox-packages
...@@ -9,3 +9,4 @@ ...@@ -9,3 +9,4 @@
# Our libraries: # Our libraries:
-e git+https://github.com/edx/XBlock.git@483e0cb1#egg=XBlock -e git+https://github.com/edx/XBlock.git@483e0cb1#egg=XBlock
-e git+https://github.com/edx/codejail.git@07494f1#egg=codejail
# Python libraries to install that are local to the mitx repo # Python libraries to install that are local to the mitx repo
-e common/lib/calc
-e common/lib/capa -e common/lib/capa
-e common/lib/chem
-e common/lib/xmodule -e common/lib/xmodule
-e . -e .
#!/usr/bin/env python #!/usr/bin/env python
from django.core import management
import argparse import argparse
import os import os
...@@ -42,21 +41,34 @@ def main(argv): ...@@ -42,21 +41,34 @@ def main(argv):
test_py_path = find_full_path(test_py_path) test_py_path = find_full_path(test_py_path)
test_spec = "%s:%s.%s" % (test_py_path, test_class, test_method) test_spec = "%s:%s.%s" % (test_py_path, test_class, test_method)
settings = None
if test_py_path.startswith('cms'): if test_py_path.startswith('cms'):
settings = 'cms.envs.test' settings = 'cms.envs.test'
elif test_py_path.startswith('lms'): elif test_py_path.startswith('lms'):
settings = 'lms.envs.test' settings = 'lms.envs.test'
else:
raise Exception("Couldn't determine settings to use!")
django_args = ["django-admin.py", "test", "--pythonpath=."] if settings:
django_args.append("--settings=%s" % settings) # Run as a django test suite
if args.nocapture: from django.core import management
django_args.append("-s")
django_args.append(test_spec) django_args = ["django-admin.py", "test", "--pythonpath=."]
django_args.append("--settings=%s" % settings)
if args.nocapture:
django_args.append("-s")
django_args.append(test_spec)
print " ".join(django_args)
management.execute_from_command_line(django_args)
else:
# Run as a nose test suite
import nose.core
nose_args = ["nosetests"]
if args.nocapture:
nose_args.append("-s")
nose_args.append(test_spec)
print " ".join(nose_args)
nose.core.main(argv=nose_args)
print " ".join(django_args)
management.execute_from_command_line(django_args)
if __name__ == "__main__": if __name__ == "__main__":
main(sys.argv[1:]) main(sys.argv[1:])