Commit fd5c7c62 by ichuang

Merge pull request #862 from MITx/feature/victor/capa-ajax

Feature/victor/capa ajax

I'm happy with all the changes to inputtypes, modulo the minor comments.
I didn't read chemcalc.py closely, but the tests look good.
parents f5f1e858 d78e041c
@@ -30,6 +30,8 @@ import sys
from lxml import etree
from xml.sax.saxutils import unescape
+import chem
+import chem.chemcalc
import calc
from correctmap import CorrectMap
import eia
@@ -53,7 +55,8 @@ entry_types = ['textline',
               'radiogroup',
               'checkboxgroup',
               'filesubmission',
-              'javascriptinput',]
+              'javascriptinput',
+              'chemicalequationinput']

# extra things displayed after "show answers" is pressed
solution_types = ['solution']
@@ -72,7 +75,8 @@ global_context = {'random': random,
                  'math': math,
                  'scipy': scipy,
                  'calc': calc,
-                 'eia': eia}
+                 'eia': eia,
+                 'chemcalc': chem.chemcalc}

# These should be removed from HTML output, including all subelements
html_problem_semantics = ["codeparam", "responseparam", "answer", "script", "hintgroup"]
@@ -436,7 +440,7 @@ class LoncapaProblem(object):
        sys.path = original_path + self._extract_system_path(script)

        stype = script.get('type')
        if stype:
            if 'javascript' in stype:
                continue    # skip javascript
@@ -478,8 +482,8 @@ class LoncapaProblem(object):
        problemid = problemtree.get('id')    # my ID

-       if problemtree.tag in inputtypes.get_input_xml_tags():
+       if problemtree.tag in inputtypes.registered_input_tags():
+           # If this is an inputtype subtree, let it render itself.
            status = "unsubmitted"
            msg = ''
            hint = ''
@@ -496,20 +500,17 @@ class LoncapaProblem(object):
                value = self.student_answers[problemid]

            # do the rendering
-           render_object = inputtypes.SimpleInput(system=self.system,
-                                                  xml=problemtree,
-                                                  state={'value': value,
-                                                         'status': status,
-                                                         'id': problemtree.get('id'),
-                                                         'feedback': {'message': msg,
-                                                                      'hint': hint,
-                                                                      'hintmode': hintmode,
-                                                                      }
-                                                         },
-                                                  use='capa_input')
-           # function(problemtree, value, status, msg)
-           # render the special response (textline, schematic,...)
-           return render_object.get_html()
+           state = {'value': value,
+                    'status': status,
+                    'id': problemtree.get('id'),
+                    'feedback': {'message': msg,
+                                 'hint': hint,
+                                 'hintmode': hintmode,}}
+
+           input_type_cls = inputtypes.get_class_for_tag(problemtree.tag)
+           the_input = input_type_cls(self.system, problemtree, state)
+           return the_input.get_html()

        # let each Response render itself
        if problemtree in self.responders:
@@ -756,15 +756,26 @@ class NumericalResponse(LoncapaResponse):
    def get_score(self, student_answers):
        '''Grade a numeric response '''
        student_answer = student_answers[self.answer_id]
+
+        try:
+            correct_ans = complex(self.correct_answer)
+        except ValueError:
+            log.debug("Content error--answer '{0}' is not a valid complex number".format(self.correct_answer))
+            raise StudentInputError("There was a problem with the staff answer to this problem")
+
        try:
            correct = compare_with_tolerance(evaluator(dict(), dict(), student_answer),
-                                            complex(self.correct_answer), self.tolerance)
+                                            correct_ans, self.tolerance)
        # We should catch this explicitly.
        # I think this is just pyparsing.ParseException, calc.UndefinedVariable:
        # But we'd need to confirm
        except:
-           raise StudentInputError("Invalid input: could not interpret '%s' as a number" %
-                                   cgi.escape(student_answer))
+           # Use the traceback-preserving version of re-raising with a different type
+           import sys
+           type, value, traceback = sys.exc_info()
+           raise StudentInputError, ("Invalid input: could not interpret '%s' as a number" %
+                                     cgi.escape(student_answer)), traceback

        if correct:
            return CorrectMap(self.answer_id, 'correct')
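# Aside (not part of the diff): the three-argument raise above is the
# Python 2 idiom for swapping the exception type while keeping the original
# traceback, so the real failure point still shows up in the stack trace.
# A minimal standalone sketch with illustrative names:
import sys

class InputError(Exception):
    pass

def parse_number(s):
    try:
        return complex(s)
    except ValueError:
        _type, _value, tb = sys.exc_info()
        # The third argument re-uses the original traceback object.
        raise InputError, ("could not interpret '%s' as a number" % s), tb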
@@ -856,7 +867,7 @@ def sympy_check2():
          </customresponse>"""}]

    response_tag = 'customresponse'
-   allowed_inputfields = ['textline', 'textbox']
+   allowed_inputfields = ['textline', 'textbox', 'chemicalequationinput']

    def setup_response(self):
        xml = self.xml
<section id="chemicalequationinput_${id}" class="chemicalequationinput">
<div class="script_placeholder" data-src="${previewer}"/>
% if status == 'unsubmitted':
<div class="unanswered" id="status_${id}">
% elif status == 'correct':
<div class="correct" id="status_${id}">
% elif status == 'incorrect':
<div class="incorrect" id="status_${id}">
% elif status == 'incomplete':
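## Deliberate: "incomplete" is displayed with the "incorrect" styling.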
<div class="incorrect" id="status_${id}">
% endif
<input type="text" name="input_${id}" id="input_${id}" value="${value|h}"
% if size:
size="${size}"
% endif
/>
<p class="status">
% if status == 'unsubmitted':
unanswered
% elif status == 'correct':
correct
% elif status == 'incorrect':
incorrect
% elif status == 'incomplete':
incomplete
% endif
</p>
<div class="equation">
</div>
<p id="answer_${id}" class="answer"></p>
% if status in ['unsubmitted', 'correct', 'incorrect', 'incomplete']:
</div>
% endif
</section>
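## Context variables this template consumes (summary only; the names are
## exactly those used above):
##   id        -- the input id, used to build input_${id} / status_${id}
##   value     -- the current student answer, html-escaped via ${value|h}
##   status    -- one of 'unsubmitted', 'correct', 'incorrect', 'incomplete'
##   size      -- optional size attribute for the text input
##   previewer -- url of the previewer script loaded via the placeholder div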
import fs
import fs.osfs
import os

from mock import Mock

TEST_DIR = os.path.dirname(os.path.realpath(__file__))

test_system = Mock(
    ajax_url='courses/course_id/modx/a_location',
    track_function=Mock(),
    get_module=Mock(),
    render_template=Mock(),
    replace_urls=Mock(),
    user=Mock(),
    filestore=fs.osfs.OSFS(os.path.join(TEST_DIR, "test_files")),
    debug=True,
    xqueue={'interface': None, 'callback_url': '/', 'default_queuename': 'testqueue', 'waittime': 10},
    node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"),
    anonymous_student_id='student'
)
"""
Tests of input types (and actually responsetypes too)
"""
from datetime import datetime
import json
from mock import Mock
from nose.plugins.skip import SkipTest
import os
import unittest
from . import test_system
from capa import inputtypes
from lxml import etree
def tst_render_template(template, context):
"""
A test version of render to template. Renders to the repr of the context, completely ignoring the template name.
"""
return repr(context)
system = Mock(render_template=tst_render_template)
class OptionInputTest(unittest.TestCase):
'''
Make sure option inputs work
'''
def test_rendering_new(self):
xml = """<optioninput options="('Up','Down')" id="sky_input" correct="Up"/>"""
element = etree.fromstring(xml)
value = 'Down'
status = 'answered'
context = inputtypes._optioninput(element, value, status, test_system.render_template)
print 'context: ', context
expected = {'value': 'Down',
'options': [('Up', 'Up'), ('Down', 'Down')],
'state': 'answered',
'msg': '',
'inline': '',
'id': 'sky_input'}
self.assertEqual(context, expected)
def test_rendering(self):
xml_str = """<optioninput options="('Up','Down')" id="sky_input" correct="Up"/>"""
element = etree.fromstring(xml_str)
state = {'value': 'Down',
'id': 'sky_input',
'status': 'answered'}
option_input = inputtypes.OptionInput(system, element, state)
context = option_input._get_render_context()
expected = {'value': 'Down',
'options': [('Up', 'Up'), ('Down', 'Down')],
'state': 'answered',
'msg': '',
'inline': '',
'id': 'sky_input'}
self.assertEqual(context, expected)
@@ -133,6 +133,11 @@ class CapaModule(XModule):
        if self.rerandomize == 'never':
            self.seed = 1
        elif self.rerandomize == "per_student" and hasattr(self.system, 'id'):
+           # TODO: This line is badly broken:
+           # (1) We're passing the student ID to the xmodule.
+           # (2) There aren't bins of students -- we only want 10 or 20 randomizations,
+           #     and want to assign students to those bins, and may not want cohorts.
+           #     So e.g. hash(student_id, problem_id) % num_bins.
+           #     (Analytics really needs a small number of bins.)
            self.seed = system.id
        else:
            self.seed = None
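# A sketch of the binned-seed idea from the TODO above -- not part of this
# commit; the hashing scheme and bin count are illustrative assumptions:
import hashlib

NUM_BINS = 20

def seed_for(student_id, problem_id, num_bins=NUM_BINS):
    """Assign a student to one of a small number of randomization bins, so
    analytics only has to reason about num_bins variants of each problem."""
    digest = hashlib.md5("%s:%s" % (student_id, problem_id)).hexdigest()
    return int(digest, 16) % num_bins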
"""Module progress tests"""
import unittest
from xmodule.progress import Progress
from xmodule import x_module
from . import i4xs
class ProgressTest(unittest.TestCase):
''' Test that basic Progress objects work. A Progress represents a
fraction between 0 and 1.
'''
not_started = Progress(0, 17)
part_done = Progress(2, 6)
half_done = Progress(3, 6)
also_half_done = Progress(1, 2)
done = Progress(7, 7)
def test_create_object(self):
# These should work:
p = Progress(0, 2)
p = Progress(1, 2)
p = Progress(2, 2)
p = Progress(2.5, 5.0)
p = Progress(3.7, 12.3333)
# These shouldn't
self.assertRaises(ValueError, Progress, 0, 0)
self.assertRaises(ValueError, Progress, 2, 0)
self.assertRaises(ValueError, Progress, 1, -2)
self.assertRaises(TypeError, Progress, 0, "all")
# check complex numbers just for the heck of it :)
self.assertRaises(TypeError, Progress, 2j, 3)
def test_clamp(self):
self.assertEqual((2, 2), Progress(3, 2).frac())
self.assertEqual((0, 2), Progress(-2, 2).frac())
def test_frac(self):
p = Progress(1, 2)
(a, b) = p.frac()
self.assertEqual(a, 1)
self.assertEqual(b, 2)
def test_percent(self):
self.assertEqual(self.not_started.percent(), 0)
self.assertAlmostEqual(self.part_done.percent(), 33.33333333333333)
self.assertEqual(self.half_done.percent(), 50)
self.assertEqual(self.done.percent(), 100)
self.assertEqual(self.half_done.percent(), self.also_half_done.percent())
def test_started(self):
self.assertFalse(self.not_started.started())
self.assertTrue(self.part_done.started())
self.assertTrue(self.half_done.started())
self.assertTrue(self.done.started())
def test_inprogress(self):
# only true if working on it
self.assertFalse(self.done.inprogress())
self.assertFalse(self.not_started.inprogress())
self.assertTrue(self.part_done.inprogress())
self.assertTrue(self.half_done.inprogress())
def test_done(self):
self.assertTrue(self.done.done())
self.assertFalse(self.half_done.done())
self.assertFalse(self.not_started.done())
def test_str(self):
self.assertEqual(str(self.not_started), "0/17")
self.assertEqual(str(self.part_done), "2/6")
self.assertEqual(str(self.done), "7/7")
def test_ternary_str(self):
self.assertEqual(self.not_started.ternary_str(), "none")
self.assertEqual(self.half_done.ternary_str(), "in_progress")
self.assertEqual(self.done.ternary_str(), "done")
def test_to_js_status(self):
'''Test the Progress.to_js_status_str() method'''
self.assertEqual(Progress.to_js_status_str(self.not_started), "none")
self.assertEqual(Progress.to_js_status_str(self.half_done), "in_progress")
self.assertEqual(Progress.to_js_status_str(self.done), "done")
self.assertEqual(Progress.to_js_status_str(None), "NA")
def test_to_js_detail_str(self):
'''Test the Progress.to_js_detail_str() method'''
f = Progress.to_js_detail_str
for p in (self.not_started, self.half_done, self.done):
self.assertEqual(f(p), str(p))
# But None should be encoded as NA
self.assertEqual(f(None), "NA")
def test_add(self):
'''Test the Progress.add_counts() method'''
p = Progress(0, 2)
p2 = Progress(1, 3)
p3 = Progress(2, 5)
pNone = None
add = lambda a, b: Progress.add_counts(a, b).frac()
self.assertEqual(add(p, p), (0, 4))
self.assertEqual(add(p, p2), (1, 5))
self.assertEqual(add(p2, p3), (3, 8))
self.assertEqual(add(p2, pNone), p2.frac())
self.assertEqual(add(pNone, p2), p2.frac())
def test_equality(self):
'''Test that comparing Progress objects for equality
works correctly.'''
p = Progress(1, 2)
p2 = Progress(2, 4)
p3 = Progress(1, 2)
self.assertTrue(p == p3)
self.assertFalse(p == p2)
# Check != while we're at it
self.assertTrue(p != p2)
self.assertFalse(p != p3)
class ModuleProgressTest(unittest.TestCase):
''' Test that get_progress() does the right thing for the different modules
'''
def test_xmodule_default(self):
'''Make sure default get_progress exists, returns None'''
xm = x_module.XModule(i4xs, 'a://b/c/d/e', None, {})
p = xm.get_progress()
self.assertEqual(p, None)
These files really should be in the capa module, but we don't have a way to load js from there at the moment. (TODO)
(function () {
    // On each keystroke in a chemical equation input, ask the server to
    // render a preview of the formula and show it in the adjacent div.
    $('.chemicalequationinput input').bind("input", function(eventObject) {
        // Look up the preview div belonging to this particular input, so
        // multiple inputs on one page don't clobber each other's previews.
        var preview_div = $(this).closest('.chemicalequationinput').find('.equation');
        $.get("/preview/chemcalc/", {"formula": this.value}, function(response) {
            if (response.error) {
                preview_div.html("<span class='error'>" + response.error + "</span>");
            } else {
                preview_div.html(response.preview);
            }
        });
    });
}).call(this);
import hashlib
import json
import logging
+import pyparsing
import sys

from django.conf import settings
@@ -13,6 +14,7 @@ from django.views.decorators.csrf import csrf_exempt
from requests.auth import HTTPBasicAuth

from capa.xqueue_interface import XQueueInterface
+from capa.chem import chemcalc
from courseware.access import has_access
from mitxmako.shortcuts import render_to_string
from models import StudentModule, StudentModuleCache
@@ -471,3 +473,42 @@ def modx_dispatch(request, dispatch, location, course_id):
    # Return whatever the module wanted to return to the client/caller
    return HttpResponse(ajax_return)
def preview_chemcalc(request):
    """
    Render an html preview of a chemical formula or equation. The fact that
    this is here is a bit of a hack. See the note in lms/urls.py about why
    it's here. (Victor is to blame.)

    request should be a GET, with a key 'formula' and value 'some formula string'.

    Returns a json dictionary:
    {
        'preview': 'the-preview-html' or '',
        'error': 'the-error' or ''
    }
    """
    if request.method != "GET":
        raise Http404

    result = {'preview': '',
              'error': ''}

    formula = request.GET.get('formula')
    if formula is None:
        result['error'] = "No formula specified."
        return HttpResponse(json.dumps(result))

    try:
        result['preview'] = chemcalc.render_to_html(formula)
    except pyparsing.ParseException as p:
        result['error'] = "Couldn't parse formula: {0}".format(p)
    except Exception:
        # this is unexpected, so log
        log.warning("Error while previewing chemical formula", exc_info=True)
        result['error'] = "Error while rendering preview"

    return HttpResponse(json.dumps(result))
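# A quick sketch of exercising the endpoint with Django's test client (not
# part of the commit; the formula value is just an example):
from django.test.client import Client

client = Client()
resp = client.get('/preview/chemcalc/', {'formula': 'H2O + CO2'})
# resp.content is a JSON string, e.g. '{"preview": "<rendered html>", "error": ""}'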
@@ -141,6 +141,16 @@ if settings.COURSEWARE_ENABLED:
    url(r'^courses/(?P<course_id>[^/]+/[^/]+/[^/]+)/modx/(?P<location>.*?)/(?P<dispatch>[^/]*)$',
        'courseware.module_render.modx_dispatch',
        name='modx_dispatch'),

+   # TODO (vshnayder): This is a hack. It creates a direct connection from
+   # the LMS to capa functionality, and really wants to go through the
+   # input types system so that previews can be context-specific.
+   # Unfortunately, we don't have time to think through the right way to do
+   # that (and implement it), and it's not a terrible thing to provide a
+   # generic chemical-equation rendering service.
+   url(r'^preview/chemcalc', 'courseware.module_render.preview_chemcalc',
+       name='preview_chemcalc'),

    url(r'^courses/(?P<course_id>[^/]+/[^/]+/[^/]+)/xqueue/(?P<userid>[^/]*)/(?P<id>.*?)/(?P<dispatch>[^/]*)$',
        'courseware.module_render.xqueue_callback',
        name='xqueue_callback'),
@@ -49,3 +49,4 @@ networkx
 pygraphviz
 -r repo-requirements.txt
 pil
+nltk