Commit 8dd32b2a by chrisndodge

Merge pull request #312 from edx/fix/cdodge/add-sandbox-whitelisting-to-cms

add a 'can_execute_unsafe_code' callback method (ala LMS) to allow for w...
parents 221db93f a224c1e8
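
For context, the change gives the CMS preview runtime the same per-course sandbox whitelist mechanism the LMS already uses: the runtime is handed a zero-argument callback, and consumers call it to decide whether a course's code may run outside the sandbox. A minimal sketch of that call pattern (the render_code helper and its return strings are illustrative only, not code from this pull request):

# Sketch only: how a consumer of the runtime might branch on the callback.
def render_code(can_execute_unsafe_code, code):
    """Pretend renderer: trusts `code` only when the callback says the course is whitelisted."""
    if can_execute_unsafe_code():
        return "trusted: %s" % code     # whitelisted course, sandbox skipped
    return "sandboxed: %s" % code       # default path

# The callback closes over the course id, matching the lambda used in the diffs below.
print(render_code(lambda: True, "print(1)"))    # -> trusted: print(1)
print(render_code(lambda: False, "print(1)"))   # -> sandboxed: print(1)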
@@ -344,6 +344,28 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
        err_cnt = perform_xlint('common/test/data', ['full'])
        self.assertGreater(err_cnt, 0)

+    @override_settings(COURSES_WITH_UNSAFE_CODE=['edX/full/.*'])
+    def test_module_preview_in_whitelist(self):
+        '''
+        Tests the ajax callback to render an XModule
+        '''
+        direct_store = modulestore('direct')
+        import_from_xml(direct_store, 'common/test/data/', ['full'])
+
+        html_module_location = Location(['i4x', 'edX', 'full', 'html', 'html_90', None])
+        url = reverse('preview_component', kwargs={'location': html_module_location.url()})
+        resp = self.client.get(url)
+        self.assertEqual(resp.status_code, 200)
+        self.assertIn('Inline content', resp.content)
+
+        # also try a custom response which will trigger the 'is this course in whitelist' logic
+        problem_module_location = Location(['i4x', 'edX', 'full', 'problem', 'H1P1_Energy', None])
+        url = reverse('preview_component', kwargs={'location': problem_module_location.url()})
+        resp = self.client.get(url)
+        self.assertEqual(resp.status_code, 200)
+
    def test_delete(self):
        direct_store = modulestore('direct')
        import_from_xml(direct_store, 'common/test/data/', ['full'])
@@ -17,10 +17,13 @@ from xmodule.modulestore.mongo import MongoUsage
from xmodule.x_module import ModuleSystem
from xblock.runtime import DbModel
+from util.sandboxing import can_execute_unsafe_code

import static_replace

from .session_kv_store import SessionKeyValueStore
from .requests import render_from_lms
from .access import has_access
from ..utils import get_course_for_item

__all__ = ['preview_dispatch', 'preview_component']

@@ -93,6 +96,8 @@ def preview_module_system(request, preview_id, descriptor):
        MongoUsage(preview_id, descriptor.location.url()),
    )

+    course_id = get_course_for_item(descriptor.location).location.course_id
+
    return ModuleSystem(
        ajax_url=reverse('preview_dispatch', args=[preview_id, descriptor.location.url(), '']).rstrip('/'),
        # TODO (cpennington): Do we want to track how instructors are using the preview problems?

@@ -104,6 +109,7 @@ def preview_module_system(request, preview_id, descriptor):
        replace_urls=partial(static_replace.replace_static_urls, data_directory=None, course_namespace=descriptor.location),
        user=request.user,
        xblock_model_data=preview_model_data,
+        can_execute_unsafe_code=(lambda: can_execute_unsafe_code(course_id)),
    )
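
ModuleSystem only ever calls the capability with no arguments, which is why the course id is captured in a lambda here; a functools.partial would be an equivalent spelling. A small standalone sketch (the whitelist check is hard-coded rather than imported, for illustration only):

from functools import partial

def can_execute_unsafe_code(course_id):
    # Stand-in for util.sandboxing.can_execute_unsafe_code, hard-coded for this sketch.
    return course_id.startswith('edX/full/')

course_id = 'edX/full/2012_Fall'
as_lambda = lambda: can_execute_unsafe_code(course_id)     # spelling used in the diff
as_partial = partial(can_execute_unsafe_code, course_id)   # equivalent zero-argument callable
assert as_lambda() is True and as_partial() is True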
@@ -105,6 +105,8 @@ ADMINS = ENV_TOKENS.get('ADMINS', ADMINS)
SERVER_EMAIL = ENV_TOKENS.get('SERVER_EMAIL', SERVER_EMAIL)
MKTG_URLS = ENV_TOKENS.get('MKTG_URLS', MKTG_URLS)

+COURSES_WITH_UNSAFE_CODE = ENV_TOKENS.get("COURSES_WITH_UNSAFE_CODE", [])
+
#Timezone overrides
TIME_ZONE = ENV_TOKENS.get('TIME_ZONE', TIME_ZONE)
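
Assuming ENV_TOKENS is populated from the deployment's JSON environment file (that file is not part of this diff), the whitelist is just a list of regex strings and defaults to empty. A hypothetical shape:

# Hypothetical ENV_TOKENS content; real values are deployment-specific.
ENV_TOKENS = {
    "COURSES_WITH_UNSAFE_CODE": [
        "edX/full/.*",   # allow every run of the edX/full course to skip the sandbox
    ],
}

# Mirrors the settings line added above; falls back to an empty whitelist.
COURSES_WITH_UNSAFE_CODE = ENV_TOKENS.get("COURSES_WITH_UNSAFE_CODE", [])
assert COURSES_WITH_UNSAFE_CODE == ["edX/full/.*"]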
import re

from django.conf import settings


def can_execute_unsafe_code(course_id):
    """
    Determine if this course is allowed to run unsafe code.

    For use from the ModuleStore. Checks the `course_id` against a list of whitelisted
    regexes.

    Returns a boolean, true if the course can run outside the sandbox.
    """
    # To decide if we can run unsafe code, we check the course id against
    # a list of regexes configured on the server.
    for regex in settings.COURSES_WITH_UNSAFE_CODE:
        if re.match(regex, course_id):
            return True
    return False
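
Note that re.match anchors at the start of the course id, so a pattern like 'edX/full/.*' covers every run of that course without matching ids that merely contain the substring somewhere else. A standalone illustration of the same loop, with the whitelist as a local list instead of a Django setting:

import re

COURSES_WITH_UNSAFE_CODE = ['edX/full/.*']

def matches_whitelist(course_id):
    # Same check as can_execute_unsafe_code, minus the settings lookup.
    return any(re.match(regex, course_id) for regex in COURSES_WITH_UNSAFE_CODE)

assert matches_whitelist('edX/full/2012_Fall')            # whitelisted course run
assert not matches_whitelist('edX/notful/2012_Fall')      # different course, stays sandboxed
assert not matches_whitelist('MITx/edX/full/2012_Fall')   # a mid-string match is not enough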
"""
Tests for sandboxing.py in util app
"""
from django.test import TestCase
from util.sandboxing import can_execute_unsafe_code
from django.test.utils import override_settings
class SandboxingTest(TestCase):
"""
Test sandbox whitelisting
"""
@override_settings(COURSES_WITH_UNSAFE_CODE=['edX/full/.*'])
def test_sandbox_exclusion(self):
"""
Test to make sure that a non-match returns false
"""
self.assertFalse(can_execute_unsafe_code('edX/notful/empty'))
@override_settings(COURSES_WITH_UNSAFE_CODE=['edX/full/.*'])
def test_sandbox_inclusion(self):
"""
Test to make sure that a match works across course runs
"""
self.assertTrue(can_execute_unsafe_code('edX/full/2012_Fall'))
self.assertTrue(can_execute_unsafe_code('edX/full/2013_Spring'))
@@ -37,7 +37,7 @@ from courseware.access import has_access
from courseware.masquerade import setup_masquerade
from courseware.model_data import LmsKeyValueStore, LmsUsage, ModelDataCache
from courseware.models import StudentModule
+from util.sandboxing import can_execute_unsafe_code

log = logging.getLogger(__name__)

@@ -313,14 +313,6 @@ def get_module_for_descriptor_internal(user, descriptor, model_data_cache, cours
            statsd.increment("lms.courseware.question_answered", tags=tags)

-    def can_execute_unsafe_code():
-        # To decide if we can run unsafe code, we check the course id against
-        # a list of regexes configured on the server.
-        for regex in settings.COURSES_WITH_UNSAFE_CODE:
-            if re.match(regex, course_id):
-                return True
-        return False
-
    # TODO (cpennington): When modules are shared between courses, the static
    # prefix is going to have to be specific to the module, not the directory
    # that the xml was loaded from

@@ -348,7 +340,7 @@ def get_module_for_descriptor_internal(user, descriptor, model_data_cache, cours
        open_ended_grading_interface=open_ended_grading_interface,
        s3_interface=s3_interface,
        cache=cache,
-        can_execute_unsafe_code=can_execute_unsafe_code,
+        can_execute_unsafe_code=(lambda: can_execute_unsafe_code(course_id)),
    )

    # pass position specified in URL to module through ModuleSystem
    system.set('position', position)
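
The LMS side now drops its local copy of the whitelist check and reuses util.sandboxing, and the check stays wrapped in a lambda so the regex scan runs only if an XModule actually asks for the capability. A small sketch of eager versus deferred evaluation (whitelist inlined for the example):

import re

COURSES_WITH_UNSAFE_CODE = ['edX/full/.*']

def can_execute_unsafe_code(course_id):
    # Shared helper, as in util.sandboxing, reading a local list for this sketch.
    return any(re.match(regex, course_id) for regex in COURSES_WITH_UNSAFE_CODE)

course_id = 'edX/full/2013_Spring'

eager = can_execute_unsafe_code(course_id)              # evaluated immediately, a plain bool
deferred = lambda: can_execute_unsafe_code(course_id)   # evaluated only when the runtime calls it

assert eager is True
assert deferred() is True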