Commit 23364d8b by Jay Zoldak

Merge branch 'master' into feature/zoldak/refactor-factories

parents 5eba299d 84281198
@@ -142,8 +142,11 @@ CMS.Views.ClassInfoUpdateView = Backbone.View.extend({
     onDelete: function(event) {
         event.preventDefault();
-        // TODO ask for confirmation
-        // remove the dom element and delete the model
+        if (!confirm('Are you sure you want to delete this update? This action cannot be undone.')) {
+            return;
+        }
         var targetModel = this.eventModel(event);
         this.modelDom(event).remove();
         var cacheThis = this;
......
@@ -15,6 +15,24 @@ from .models import CourseUserGroup
 log = logging.getLogger(__name__)
 
+# tl;dr: global state is bad. capa reseeds random every time a problem is loaded. Even
+# if and when that's fixed, it's a good idea to have a local generator to avoid any other
+# code that messes with the global random module.
+_local_random = None
+
+
+def local_random():
+    """
+    Get the local random number generator. In a function so that we don't run
+    random.Random() at import time.
+    """
+    # ironic, isn't it?
+    global _local_random
+    if _local_random is None:
+        _local_random = random.Random()
+    return _local_random
+
+
 def is_course_cohorted(course_id):
     """
     Given a course id, return a boolean for whether or not the course is

@@ -129,13 +147,7 @@ def get_cohort(user, course_id):
        return None
 
    # Put user in a random group, creating it if needed
-    choice = random.randrange(0, n)
-    group_name = choices[choice]
-    # Victor: we are seeing very strange behavior on prod, where almost all users
-    # end up in the same group. Log at INFO to try to figure out what's going on.
-    log.info("DEBUG: adding user {0} to cohort {1}. choice={2}".format(
-        user, group_name,choice))
+    group_name = local_random().choice(choices)
    group, created = CourseUserGroup.objects.get_or_create(
        course_id=course_id,
......
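The comment block in the hunk above is the whole argument for local_random(): a dedicated random.Random() instance keeps its own state, so capa reseeding the global random module cannot bias cohort assignment toward one group. A minimal sketch of that isolation (illustrative only, not part of the commit):

import random

_local_random = random.Random()

random.seed(0)                                # some other code reseeds the global RNG
print(random.choice(['a', 'b', 'c']))         # deterministic after seed(0)
print(_local_random.choice(['a', 'b', 'c']))  # unaffected by the global reseed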
@@ -9,6 +9,7 @@ from bs4 import BeautifulSoup
 import time
 import re
 import os.path
+from selenium.common.exceptions import WebDriverException
 from logging import getLogger
 logger = getLogger(__name__)

@@ -214,3 +215,15 @@ def save_the_course_content(path='/tmp'):
     f = open('%s/%s' % (path, filename), 'w')
     f.write(output)
     f.close
+
+
+@world.absorb
+def css_click(css_selector):
+    try:
+        world.browser.find_by_css(css_selector).click()
+    except WebDriverException:
+        # Occasionally, MathJax or other JavaScript can cover up
+        # an element temporarily.
+        # If this happens, wait a second, then try again
+        time.sleep(1)
+        world.browser.find_by_css(css_selector).click()
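Because css_click is registered with lettuce's @world.absorb, later step definitions can call it as world.css_click and get the retry-on-WebDriverException behaviour for free. The check-problem step updated later in this same commit is the simplest usage, repeated here as a sketch:

from lettuce import world, step

@step(u'I check a problem')
def check_problem(step):
    world.css_click("input.check")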
@@ -8,7 +8,7 @@ from collections import namedtuple
 from fs.osfs import OSFS
 from itertools import repeat
 from path import path
-from datetime import datetime, timedelta
+from datetime import datetime
 from importlib import import_module
 from xmodule.errortracker import null_error_tracker, exc_info_to_str

@@ -493,10 +493,12 @@ class MongoModuleStore(ModuleStoreBase):
         try:
             source_item = self.collection.find_one(location_to_query(source))
             source_item['_id'] = Location(location).dict()
-            self.collection.insert(source_item,
+            self.collection.insert(
+                source_item,
                 # Must include this to avoid the django debug toolbar (which defines the deprecated "safe=False")
                 # from overriding our default value set in the init method.
-                safe=self.collection.safe)
+                safe=self.collection.safe
+            )
             item = self._load_items([source_item])[0]
 
             # VS[compat] cdodge: This is a hack because static_tabs also have references from the course module, so
......
-Feature: Answer choice problems
+Feature: Answer problems
     As a student in an edX course
     In order to test my understanding of the material
-    I want to answer choice based problems
+    I want to answer problems
 
     Scenario: I can answer a problem correctly
-        Given I am viewing a "<ProblemType>" problem
+        Given External graders respond "correct"
+        And I am viewing a "<ProblemType>" problem
         When I answer a "<ProblemType>" problem "correctly"
         Then My "<ProblemType>" answer is marked "correct"

@@ -17,9 +18,11 @@ Feature: Answer choice problems
         | numerical |
         | formula |
         | script |
+        | code |
 
     Scenario: I can answer a problem incorrectly
-        Given I am viewing a "<ProblemType>" problem
+        Given External graders respond "incorrect"
+        And I am viewing a "<ProblemType>" problem
         When I answer a "<ProblemType>" problem "incorrectly"
         Then My "<ProblemType>" answer is marked "incorrect"

@@ -32,6 +35,7 @@ Feature: Answer choice problems
         | numerical |
         | formula |
         | script |
+        | code |
 
     Scenario: I can submit a blank answer
         Given I am viewing a "<ProblemType>" problem
......
 from lettuce import world, step
 from lettuce.django import django_url
-from selenium.webdriver.support.ui import Select
 import random
 import textwrap
-import time
 from common import i_am_registered_for_the_course, TEST_SECTION_NAME, section_location
 from capa.tests.response_xml_factory import OptionResponseXMLFactory, \
     ChoiceResponseXMLFactory, MultipleChoiceResponseXMLFactory, \
     StringResponseXMLFactory, NumericalResponseXMLFactory, \
-    FormulaResponseXMLFactory, CustomResponseXMLFactory
+    FormulaResponseXMLFactory, CustomResponseXMLFactory, \
+    CodeResponseXMLFactory
 
 # Factories from capa.tests.response_xml_factory that we will use
 # to generate the problem XML, with the keyword args used to configure

@@ -77,7 +78,13 @@ PROBLEM_FACTORY_DICT = {
                     a2=0
                 return (a1+a2)==int(expect)
                 """)}},
-}
+    'code': {
+        'factory': CodeResponseXMLFactory(),
+        'kwargs': {
+            'question_text': 'Submit code to an external grader',
+            'initial_display': 'print "Hello world!"',
+            'grader_payload': '{"grader": "ps1/Spring2013/test_grader.py"}', }},
+}
 
 
 def add_problem_to_course(course, problem_type):

@@ -115,6 +122,19 @@ def view_problem(step, problem_type):
     world.browser.visit(url)
 
 
+@step(u'External graders respond "([^"]*)"')
+def set_external_grader_response(step, correctness):
+    assert(correctness in ['correct', 'incorrect'])
+
+    response_dict = {'correct': True if correctness == 'correct' else False,
+                     'score': 1 if correctness == 'correct' else 0,
+                     'msg': 'Your problem was graded %s' % correctness}
+
+    # Set the fake xqueue server to always respond
+    # correct/incorrect when asked to grade a problem
+    world.xqueue_server.set_grade_response(response_dict)
+
+
 @step(u'I answer a "([^"]*)" problem "([^"]*)ly"')
 def answer_problem(step, problem_type, correctness):
     """ Mark a given problem type correct or incorrect, then submit it.

@@ -168,18 +188,29 @@ def answer_problem(step, problem_type, correctness):
         inputfield('script', input_num=1).fill(str(first_addend))
         inputfield('script', input_num=2).fill(str(second_addend))
+    elif problem_type == 'code':
+        # The fake xqueue server is configured to respond
+        # correct / incorrect no matter what we submit.
+        # Furthermore, since the inline code response uses
+        # JavaScript to make the code display nicely, it's difficult
+        # to programmatically input text
+        # (there's no <textarea> we can just fill text into)
+        # For this reason, we submit the initial code in the response
+        # (configured in the problem XML above)
+        pass
 
     # Submit the problem
     check_problem(step)
 
 
 @step(u'I check a problem')
 def check_problem(step):
-    world.browser.find_by_css("input.check").click()
+    world.css_click("input.check")
 
 
 @step(u'I reset the problem')
 def reset_problem(step):
-    world.browser.find_by_css('input.reset').click()
+    world.css_click('input.reset')
 
 
 @step(u'My "([^"]*)" answer is marked "([^"]*)"')
......
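For readers tracing the data flow: the 'External graders respond' step above boils the correctness string down to a small dict, and MockXQueueServer.set_grade_response() (added further down in this commit) wraps the msg field in <div> tags before the server POSTs it back to the LMS callback. A sketch of that mapping (grade_response_for is a hypothetical helper, illustrative only):

def grade_response_for(correctness):
    # Mirrors set_external_grader_response() above
    assert correctness in ('correct', 'incorrect')
    return {'correct': correctness == 'correct',
            'score': 1 if correctness == 'correct' else 0,
            'msg': 'Your problem was graded %s' % correctness}

print(grade_response_for('incorrect'))
# {'correct': False, 'score': 0, 'msg': 'Your problem was graded incorrect'}
# After set_grade_response(), msg becomes '<div>Your problem was graded incorrect</div>'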
from courseware.mock_xqueue_server.mock_xqueue_server import MockXQueueServer
from lettuce import before, after, world
from django.conf import settings
import threading
@before.all
def setup_mock_xqueue_server():
    # Retrieve the local port from settings
    server_port = settings.XQUEUE_PORT

    # Create the mock server instance
    server = MockXQueueServer(server_port)

    # Start the server running in a separate daemon thread
    # Because the thread is a daemon, it will terminate
    # when the main thread terminates.
    server_thread = threading.Thread(target=server.serve_forever)
    server_thread.daemon = True
    server_thread.start()

    # Store the server instance in lettuce's world
    # so that other steps can access it
    # (and we can shut it down later)
    world.xqueue_server = server


@after.all
def teardown_mock_xqueue_server(total):
    # Stop the xqueue server and free up the port
    world.xqueue_server.shutdown()
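The daemon flag in the hook above is what lets the lettuce process exit cleanly even though serve_forever() never returns. A minimal, self-contained illustration of that behaviour (not part of the commit):

import threading
import time

def serve():
    while True:       # stands in for server.serve_forever()
        time.sleep(1)

worker = threading.Thread(target=serve)
worker.daemon = True  # without this line, the interpreter would never exit
worker.start()
# The main thread can now finish; the daemon thread dies with the process.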
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import json
import urllib
import urlparse
import threading
from logging import getLogger
logger = getLogger(__name__)
class MockXQueueRequestHandler(BaseHTTPRequestHandler):
    '''
    A handler for XQueue POST requests.
    '''

    protocol = "HTTP/1.0"

    def do_HEAD(self):
        self._send_head()

    def do_POST(self):
        '''
        Handle a POST request from the client

        Sends back an immediate success/failure response.
        It then POSTS back to the client
        with grading results, as configured in MockXQueueServer.
        '''
        self._send_head()

        # Retrieve the POST data
        post_dict = self._post_dict()

        # Log the request
        logger.debug("XQueue received POST request %s to path %s" %
                     (str(post_dict), self.path))

        # Respond only to grading requests
        if self._is_grade_request():
            try:
                xqueue_header = json.loads(post_dict['xqueue_header'])
                xqueue_body = json.loads(post_dict['xqueue_body'])
                callback_url = xqueue_header['lms_callback_url']

            except KeyError:
                # If the message doesn't have a header or body,
                # then it's malformed.
                # Respond with failure
                error_msg = "XQueue received invalid grade request"
                self._send_immediate_response(False, message=error_msg)

            except ValueError:
                # If we could not decode the body or header,
                # respond with failure
                error_msg = "XQueue could not decode grade request"
                self._send_immediate_response(False, message=error_msg)

            else:
                # Send an immediate response of success
                # The grade request is formed correctly
                self._send_immediate_response(True)

                # Wait a bit before POSTing back to the callback url with the
                # grade result configured by the server
                # Otherwise, the problem will not realize it's
                # queued and it will keep waiting for a response
                # indefinitely
                delayed_grade_func = lambda: self._send_grade_response(callback_url,
                                                                       xqueue_header)

                timer = threading.Timer(2, delayed_grade_func)
                timer.start()

        # If we get a request that's not to the grading submission
        # URL, return an error
        else:
            error_message = "Invalid request URL"
            self._send_immediate_response(False, message=error_message)

    def _send_head(self):
        '''
        Send the response code and MIME headers
        '''
        if self._is_grade_request():
            self.send_response(200)
        else:
            self.send_response(500)

        self.send_header('Content-type', 'text/plain')
        self.end_headers()

    def _post_dict(self):
        '''
        Retrieve the POST parameters from the client as a dictionary
        '''
        try:
            length = int(self.headers.getheader('content-length'))
            post_dict = urlparse.parse_qs(self.rfile.read(length))

            # The POST dict will contain a list of values
            # for each key.
            # None of our parameters are lists, however,
            # so we map [val] --> val
            # If the list contains multiple entries,
            # we pick the first one
            post_dict = dict(map(lambda (key, list_val): (key, list_val[0]),
                                 post_dict.items()))

        except:
            # We return an empty dict here, on the assumption
            # that when we later check that the request has
            # the correct fields, it won't find them,
            # and will therefore send an error response
            return {}

        return post_dict

    def _send_immediate_response(self, success, message=""):
        '''
        Send an immediate success/failure message
        back to the client
        '''
        # Send the response indicating success/failure
        response_str = json.dumps({'return_code': 0 if success else 1,
                                   'content': message})

        # Log the response
        logger.debug("XQueue: sent response %s" % response_str)

        self.wfile.write(response_str)

    def _send_grade_response(self, postback_url, xqueue_header):
        '''
        POST the grade response back to the client
        using the response provided by the server configuration
        '''
        response_dict = {'xqueue_header': json.dumps(xqueue_header),
                         'xqueue_body': json.dumps(self.server.grade_response())}

        # Log the response
        logger.debug("XQueue: sent grading response %s" % str(response_dict))

        MockXQueueRequestHandler.post_to_url(postback_url, response_dict)

    def _is_grade_request(self):
        return 'xqueue/submit' in self.path

    @staticmethod
    def post_to_url(url, param_dict):
        '''
        POST *param_dict* to *url*
        We make this a separate function so we can easily patch
        it during testing.
        '''
        urllib.urlopen(url, urllib.urlencode(param_dict))


class MockXQueueServer(HTTPServer):
    '''
    A mock XQueue grading server that responds
    to POST requests to localhost.
    '''

    def __init__(self, port_num,
                 grade_response_dict={'correct': True, 'score': 1, 'msg': ''}):
        '''
        Initialize the mock XQueue server instance.

        *port_num* is the localhost port to listen to

        *grade_response_dict* is a dictionary that will be JSON-serialized
            and sent in response to XQueue grading requests.
        '''
        self.set_grade_response(grade_response_dict)

        handler = MockXQueueRequestHandler
        address = ('', port_num)
        HTTPServer.__init__(self, address, handler)

    def shutdown(self):
        '''
        Stop the server and free up the port
        '''
        # First call superclass shutdown()
        HTTPServer.shutdown(self)

        # We also need to manually close the socket
        self.socket.close()

    def grade_response(self):
        return self._grade_response

    def set_grade_response(self, grade_response_dict):
        # Check that the grade response has the right keys
        assert('correct' in grade_response_dict and
               'score' in grade_response_dict and
               'msg' in grade_response_dict)

        # Wrap the message in <div> tags to ensure that it is valid XML
        grade_response_dict['msg'] = "<div>%s</div>" % grade_response_dict['msg']

        # Save the response dictionary
        self._grade_response = grade_response_dict
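A quick interactive sketch of the round trip this class implements (illustrative only; the port and callback URL are arbitrary, and post_to_url is stubbed out the same way the unit test below patches it): a well-formed grade request gets an immediate return_code 0 reply, and roughly two seconds later the configured grade would be POSTed to lms_callback_url.

import json
import threading
import urllib

from mock_xqueue_server import MockXQueueServer, MockXQueueRequestHandler

# Stub out the delayed callback POST so nothing needs to listen on the callback URL
MockXQueueRequestHandler.post_to_url = staticmethod(lambda url, params: None)

server = MockXQueueServer(8031)  # arbitrary free port for this sketch
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()

request = {'xqueue_header': json.dumps({'lms_callback_url': 'http://127.0.0.1:8000/cb',
                                        'lms_key': 'key',
                                        'queue_name': 'q'}),
           'xqueue_body': json.dumps({'student_response': 'print "Hello world!"',
                                      'grader_payload': '{}',
                                      'student_info': '{}'})}

reply = urllib.urlopen('http://127.0.0.1:8031/xqueue/submit',
                       urllib.urlencode(request))
print(json.loads(reply.read()))  # {u'return_code': 0, u'content': u''}

server.shutdown()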
import mock
import unittest
import threading
import json
import urllib
import urlparse
import time
from mock_xqueue_server import MockXQueueServer, MockXQueueRequestHandler
class MockXQueueServerTest(unittest.TestCase):
    '''
    A mock version of the XQueue server that listens on a local
    port and responds with pre-defined grade messages.

    Used for lettuce BDD tests in lms/courseware/features/problems.feature
    and lms/courseware/features/problems.py

    This is temporary and will be removed when XQueue is
    rewritten using celery.
    '''

    def setUp(self):
        # Create the server
        server_port = 8034
        self.server_url = 'http://127.0.0.1:%d' % server_port
        self.server = MockXQueueServer(server_port,
                                       {'correct': True, 'score': 1, 'msg': ''})

        # Start the server in a separate daemon thread
        server_thread = threading.Thread(target=self.server.serve_forever)
        server_thread.daemon = True
        server_thread.start()

    def tearDown(self):
        # Stop the server, freeing up the port
        self.server.shutdown()

    def test_grade_request(self):
        # Patch post_to_url() so we can intercept
        # outgoing POST requests from the server
        MockXQueueRequestHandler.post_to_url = mock.Mock()

        # Send a grade request
        callback_url = 'http://127.0.0.1:8000/test_callback'

        grade_header = json.dumps({'lms_callback_url': callback_url,
                                   'lms_key': 'test_queuekey',
                                   'queue_name': 'test_queue'})

        grade_body = json.dumps({'student_info': 'test',
                                 'grader_payload': 'test',
                                 'student_response': 'test'})

        grade_request = {'xqueue_header': grade_header,
                         'xqueue_body': grade_body}

        response_handle = urllib.urlopen(self.server_url + '/xqueue/submit',
                                         urllib.urlencode(grade_request))

        response_dict = json.loads(response_handle.read())

        # Expect that the response is success
        self.assertEqual(response_dict['return_code'], 0)

        # Wait a bit before checking that the server posted back
        time.sleep(3)

        # Expect that the server tries to post back the grading info
        xqueue_body = json.dumps({'correct': True, 'score': 1,
                                  'msg': '<div></div>'})
        expected_callback_dict = {'xqueue_header': grade_header,
                                  'xqueue_body': xqueue_body}
        MockXQueueRequestHandler.post_to_url.assert_called_with(callback_url,
                                                                expected_callback_dict)
@@ -48,6 +48,18 @@ DATABASES = {
     }
 }
 
+# Set up XQueue information so that the lms will send
+# requests to a mock XQueue server running locally
+XQUEUE_PORT = 8027
+XQUEUE_INTERFACE = {
+    "url": "http://127.0.0.1:%d" % XQUEUE_PORT,
+    "django_auth": {
+        "username": "lms",
+        "password": "***REMOVED***"
+    },
+    "basic_auth": ('anant', 'agarwal'),
+}
+
 # Do not display the YouTube videos in the browser while running the
 # acceptance tests. This makes them faster and more reliable
 MITX_FEATURES['STUB_VIDEO_FOR_TESTING'] = True
......
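A small sanity-check sketch tying the two pieces of this commit together (illustrative only, assuming the acceptance settings module is the active DJANGO_SETTINGS_MODULE): the terrain hook builds the mock server from XQUEUE_PORT, so XQUEUE_INTERFACE['url'] must point at the same port for LMS submissions to reach it.

from django.conf import settings

# Both values are defined in the acceptance settings shown above.
assert settings.XQUEUE_INTERFACE['url'] == 'http://127.0.0.1:%d' % settings.XQUEUE_PORT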
@@ -73,41 +73,6 @@
           </article>
         -->
 
-        <article id="associate-legal-counsel" class="job">
-          <div class="inner-wrapper">
-            <h3><strong>ASSOCIATE LEGAL COUNSEL</strong></h3>
-            <p>We are seeking a talented lawyer with the ability to operate independently in a fast-paced environment and work proactively with all members of the edX team. You must have thorough knowledge of intellectual property law, contracts and licensing. </p>
-            <p><strong>Key Responsibilities: </strong></p>
-            <ul>
-              <li>Drive the negotiating, reviewing, drafting and overseeing of a wide range of transactional arrangements, including collaborations related to the provision of online education, inbound and outbound licensing of intellectual property, strategic partnerships, nondisclosure agreements, and services agreements.</li>
-              <li>Provide counseling on the legal implications/considerations of business and technical strategies and projects, with special emphasis on regulations related to higher education, data security and privacy.</li>
-              <li>Provide advice and support company-wide on a variety of legal issues in a timely and effective manner.</li>
-              <li>Assist on other matters as needed.</li>
-            </ul>
-            <p><strong>Requirements:</strong></p>
-            <ul>
-              <li>JD from an accredited law school</li>
-              <li>Massachusetts bar admission required</li>
-              <li>2-3 years of transactional experience at a major law firm and/or as an in-house counselor</li>
-              <li>Substantial IP licensing experience</li>
-              <li>Knowledge of copyright, trademark and patent law</li>
-              <li>Experience with open source content and open source software preferred</li>
-              <li>Outstanding communications skills (written and oral)</li>
-              <li>Experience with drafting and legal review of Internet privacy policies and terms of use.</li>
-              <li>Understanding of how to balance legal risks with business objectives</li>
-              <li>Ability to develop an innovative approach to legal issues in support of strategic business initiatives</li>
-              <li>An internal business and customer focused proactive attitude with ability to prioritize effectively</li>
-              <li>Experience with higher education preferred but not required</li>
-            </ul>
-            <p>If you are interested in this position, please send an email to <a href="mailto:jobs@edx.org">jobs@edx.org</a>.</p>
-          </div>
-        </article>
-
         <article id="director-of-education-services" class="job">
           <div class="inner-wrapper">
             <h3><strong>DIRECTOR OF EDUCATIONAL SERVICES</strong></h3>

@@ -441,7 +406,6 @@
           <section class="jobs-sidebar">
             <h2>Positions</h2>
             <nav>
-              <a href="#associate-legal-counsel">Associate Legal Counsel</a>
              <a href="#director-of-education-services">Director of Education Services</a>
              <a href="#manager-of-training-services">Manager of Training Services</a>
              <a href="#instructional-designer">Instructional Designer</a>
......
Django==1.3.1
flup==1.0.3.dev-20110405
lxml==2.3.4
Mako==0.7.0
Markdown==2.1.1
markdown2==1.4.2
python-memcached==1.48
numpy==1.6.1
Pygments==1.5
boto==2.3.0
django-storages==1.1.4
django-masquerade==0.1.5
fs==0.4.0
django-jasmine==0.3.2
path.py==2.2.2
requests==0.12.1
BeautifulSoup==3.2.1
BeautifulSoup4==4.1.1
newrelic==1.3.0.289
ipython==0.12.1
django-pipeline==1.2.12
django-staticfiles==1.2.1
glob2==0.3
sympy==0.7.1
pymongo==2.2.1
rednose==0.3.3
mock==0.8.0
GitPython==0.3.2.RC1
PyYAML==3.10
feedparser==5.1.2
MySQL-python==1.2.3
matplotlib==1.1.0
scipy==0.10.1
akismet==0.2.0
Coffin==0.3.6
django-celery==2.2.7
django-countries==1.0.5
django-followit==0.0.3
django-keyedcache==1.4-6
django-kombu==0.9.2
django-mako==0.1.5pre
django-recaptcha-works==0.3.4
django-robots==0.8.1
django-ses==0.4.1
django-threaded-multihost==1.4-1
html5lib==0.90
Jinja2==2.6
oauth2==1.5.211
pystache==0.3.1
python-openid==2.2.5
South==0.7.5
Unidecode==0.04.9
dogstatsd-python==0.2.1