Commit 021bece8 by Will Daly

Merge pull request #2239 from edx/will/ora-ai-and-peer-tests

ORA AI and Peer acceptance tests
parents ce2c067d b73c6f63
@@ -8,8 +8,6 @@ from terrain.stubs.youtube import StubYouTubeService
 from terrain.stubs.xqueue import StubXQueueService
 
-USAGE = "USAGE: python -m fakes.start SERVICE_NAME PORT_NUM"
-
 SERVICES = {
     "youtube": {"port": settings.YOUTUBE_PORT, "class": StubYouTubeService},
     "xqueue": {"port": settings.XQUEUE_PORT, "class": StubXQueueService},
...
@@ -23,14 +23,13 @@ class StubHttpRequestHandler(BaseHTTPRequestHandler, object):
         """
         Redirect messages to keep the test console clean.
         """
-        msg = "{0} - - [{1}] {2}\n".format(
-            self.client_address[0],
-            self.log_date_time_string(),
-            format_str % args
-        )
-        LOGGER.debug(msg)
+        LOGGER.debug(self._format_msg(format_str, *args))
+
+    def log_error(self, format_str, *args):
+        """
+        Helper to log a server error.
+        """
+        LOGGER.error(self._format_msg(format_str, *args))
 
     @lazy
     def request_content(self):
@@ -76,22 +75,39 @@ class StubHttpRequestHandler(BaseHTTPRequestHandler, object):
     def do_PUT(self):
         """
         Allow callers to configure the stub server using the /set_config URL.
+        The request should have POST data, such that:
+        Each POST parameter is the configuration key.
+        Each POST value is a JSON-encoded string value for the configuration.
         """
         if self.path == "/set_config" or self.path == "/set_config/":
-            for key, value in self.post_dict.iteritems():
-                self.log_message("Set config '{0}' to '{1}'".format(key, value))
+            if len(self.post_dict) > 0:
+                for key, value in self.post_dict.iteritems():
+
+                    # Decode the params as UTF-8
+                    try:
+                        key = unicode(key, 'utf-8')
+                        value = unicode(value, 'utf-8')
+                    except UnicodeDecodeError:
+                        self.log_message("Could not decode request params as UTF-8")
+
+                    self.log_message(u"Set config '{0}' to '{1}'".format(key, value))
 
                     try:
                         value = json.loads(value)
                     except ValueError:
                         self.log_message(u"Could not parse JSON: {0}".format(value))
                         self.send_response(400)
                     else:
-                        self.server.set_config(unicode(key, 'utf-8'), value)
+                        self.server.config[key] = value
                         self.send_response(200)
 
+            # No parameters sent to configure, so return success by default
+            else:
+                self.send_response(200)
+
         else:
             self.send_response(404)
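For illustration, a test process can drive this endpoint over HTTP. This is a minimal sketch assuming a stub service is already listening on localhost port 8040 (the port the bok-choy Rakefile assigns to the XQueue stub); it mirrors what the stub's unit tests do with requests.put:

    import json
    import requests

    # Each configuration value must be JSON-encoded; the stub decodes it
    # and stores it in the server's config dict.
    response = requests.put(
        "http://127.0.0.1:8040/set_config",
        data={"response_delay": json.dumps(0)}
    )
    assert response.status_code == 200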
@@ -119,6 +135,18 @@ class StubHttpRequestHandler(BaseHTTPRequestHandler, object):
         if content is not None:
             self.wfile.write(content)
 
+    def _format_msg(self, format_str, *args):
+        """
+        Format message for logging.
+        `format_str` is a string with old-style Python format escaping;
+        `args` is an array of values to fill into the string.
+        """
+        return u"{0} - - [{1}] {2}\n".format(
+            self.client_address[0],
+            self.log_date_time_string(),
+            format_str % args
+        )
 
 class StubHttpService(HTTPServer, object):
     """
@@ -138,7 +166,7 @@ class StubHttpService(HTTPServer, object):
         HTTPServer.__init__(self, address, self.HANDLER_CLASS)
 
         # Create a dict to store configuration values set by the client
-        self._config = dict()
+        self.config = dict()
 
         # Start the server in a separate thread
         server_thread = threading.Thread(target=self.serve_forever)
@@ -165,17 +193,3 @@ class StubHttpService(HTTPServer, object):
         """
         _, port = self.server_address
         return port
-
-    def config(self, key, default=None):
-        """
-        Return the configuration value for `key`. If this
-        value has not been set, return `default` instead.
-        """
-        return self._config.get(key, default)
-
-    def set_config(self, key, value):
-        """
-        Set the configuration `value` for `key`.
-        """
-        self._config[key] = value
"""
Command-line utility to start a stub service.
"""
import sys
import time
import logging
from .xqueue import StubXQueueService
from .youtube import StubYouTubeService
USAGE = "USAGE: python -m stubs.start SERVICE_NAME PORT_NUM"
SERVICES = {
'xqueue': StubXQueueService,
'youtube': StubYouTubeService
}
# Log to stdout, including debug messages
logging.basicConfig(level=logging.DEBUG, format="%(levelname)s %(message)s")
def get_args():
"""
Parse arguments, returning tuple of `(service_name, port_num)`.
Exits with a message if arguments are invalid.
"""
if len(sys.argv) < 3:
print USAGE
sys.exit(1)
service_name = sys.argv[1]
port_num = sys.argv[2]
if service_name not in SERVICES:
print "Unrecognized service '{0}'. Valid choices are: {1}".format(
service_name, ", ".join(SERVICES.keys()))
sys.exit(1)
try:
port_num = int(port_num)
if port_num < 0:
raise ValueError
except ValueError:
print "Port '{0}' must be a positive integer".format(port_num)
sys.exit(1)
return service_name, port_num
def main():
"""
Start a server; shut down on keyboard interrupt signal.
"""
service_name, port_num = get_args()
print "Starting stub service '{0}' on port {1}...".format(service_name, port_num)
server = SERVICES[service_name](port_num=port_num)
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
print "Stopping stub service..."
finally:
server.shutdown()
if __name__ == "__main__":
main()
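For reference, a stub can also be launched by hand the same way the Rakefile changes below do it (the directory and port come from those changes):

    cd common/djangoapps/terrain
    python -m stubs.start xqueue 8040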
@@ -13,6 +13,7 @@ class StubHttpServiceTest(unittest.TestCase):
     def setUp(self):
         self.server = StubHttpService()
         self.addCleanup(self.server.shutdown)
+        self.url = "http://127.0.0.1:{0}/set_config".format(self.server.port)
 
     def test_configure(self):
         """
@@ -21,33 +22,38 @@ class StubHttpServiceTest(unittest.TestCase):
         """
         params = {
             'test_str': 'This is only a test',
+            'test_empty': '',
             'test_int': 12345,
             'test_float': 123.45,
+            'test_dict': { 'test_key': 'test_val' },
+            'test_empty_dict': {},
             'test_unicode': u'\u2603 the snowman',
-            'test_dict': { 'test_key': 'test_val' }
+            'test_none': None,
+            'test_boolean': False
         }
 
         for key, val in params.iteritems():
-            post_params = {key: json.dumps(val)}
-            response = requests.put(
-                "http://127.0.0.1:{0}/set_config".format(self.server.port),
-                data=post_params
-            )
+
+            # JSON-encode each parameter
+            post_params = {key: json.dumps(val)}
+            response = requests.put(self.url, data=post_params)
             self.assertEqual(response.status_code, 200)
 
         # Check that the expected values were set in the configuration
         for key, val in params.iteritems():
-            self.assertEqual(self.server.config(key), val)
-
-    def test_default_config(self):
-        self.assertEqual(self.server.config('not_set', default=42), 42)
+            self.assertEqual(self.server.config.get(key), val)
 
     def test_bad_json(self):
-        response = requests.put(
-            "http://127.0.0.1:{0}/set_config".format(self.server.port),
-            data="{,}"
-        )
+        response = requests.put(self.url, data="{,}")
+        self.assertEqual(response.status_code, 400)
+
+    def test_no_post_data(self):
+        response = requests.put(self.url, data={})
+        self.assertEqual(response.status_code, 200)
+
+    def test_unicode_non_json(self):
+        # Send unicode without json-encoding it
+        response = requests.put(self.url, data={'test_unicode': u'\u2603 the snowman'})
         self.assertEqual(response.status_code, 400)
 
     def test_unknown_path(self):
...
@@ -5,70 +5,172 @@ Unit tests for stub XQueue implementation.
import mock
import unittest
import json
import requests
import time
import copy
from terrain.stubs.xqueue import StubXQueueService, StubXQueueHandler


class StubXQueueServiceTest(unittest.TestCase):

    def setUp(self):
        self.server = StubXQueueService()
        self.url = "http://127.0.0.1:{0}/xqueue/submit".format(self.server.port)
        self.addCleanup(self.server.shutdown)

        # For testing purposes, do not delay the grading response
        self.server.config['response_delay'] = 0

    @mock.patch('terrain.stubs.xqueue.post')
    def test_grade_request(self, post):

        # Post a submission to the stub XQueue
        callback_url = 'http://127.0.0.1:8000/test_callback'
        expected_header = self._post_submission(
            callback_url, 'test_queuekey', 'test_queue',
            json.dumps({
                'student_info': 'test',
                'grader_payload': 'test',
                'student_response': 'test'
            })
        )

        # Check the response we receive
        # (Should be the default grading response)
        expected_body = json.dumps({'correct': True, 'score': 1, 'msg': '<div></div>'})
        self._check_grade_response(post, callback_url, expected_header, expected_body)

    @mock.patch('terrain.stubs.xqueue.post')
    def test_configure_default_response(self, post):

        # Configure the default response for submissions to any queue
        response_content = {'test_response': 'test_content'}
        self.server.config['default'] = response_content

        # Post a submission to the stub XQueue
        callback_url = 'http://127.0.0.1:8000/test_callback'
        expected_header = self._post_submission(
            callback_url, 'test_queuekey', 'test_queue',
            json.dumps({
                'student_info': 'test',
                'grader_payload': 'test',
                'student_response': 'test'
            })
        )

        # Check the response we receive
        # (Should be the default grading response)
        self._check_grade_response(
            post, callback_url, expected_header, json.dumps(response_content)
        )

    @mock.patch('terrain.stubs.xqueue.post')
    def test_configure_specific_response(self, post):

        # Configure the XQueue stub response to any submission to the test queue
        response_content = {'test_response': 'test_content'}
        self.server.config['This is only a test.'] = response_content

        # Post a submission to the XQueue stub
        callback_url = 'http://127.0.0.1:8000/test_callback'
        expected_header = self._post_submission(
            callback_url, 'test_queuekey', 'test_queue',
            json.dumps({'submission': 'This is only a test.'})
        )

        # Check that we receive the response we configured
        self._check_grade_response(
            post, callback_url, expected_header, json.dumps(response_content)
        )

    @mock.patch('terrain.stubs.xqueue.post')
    def test_multiple_response_matches(self, post):

        # Configure the XQueue stub with two responses that
        # match the same submission
        self.server.config['test_1'] = {'response': True}
        self.server.config['test_2'] = {'response': False}

        with mock.patch('terrain.stubs.http.LOGGER') as logger:

            # Post a submission to the XQueue stub
            callback_url = 'http://127.0.0.1:8000/test_callback'
            expected_header = self._post_submission(
                callback_url, 'test_queuekey', 'test_queue',
                json.dumps({'submission': 'test_1 and test_2'})
            )

            # Wait for the delayed grade response
            self._wait_for_mock_called(logger.error, max_time=10)

            # Expect that we do NOT receive a response
            # and that an error message is logged
            self.assertFalse(post.called)
            self.assertTrue(logger.error.called)

    def _post_submission(self, callback_url, lms_key, queue_name, xqueue_body):
        """
        Post a submission to the stub XQueue implementation.

        `callback_url` is the URL at which we expect to receive a grade response
        `lms_key` is the authentication key sent in the header
        `queue_name` is the name of the queue in which to send put the submission
        `xqueue_body` is the content of the submission

        Returns the header (a string) we send with the submission, which can
        be used to validate the response we receive from the stub.
        """

        # Post a submission to the XQueue stub
        grade_request = {
            'xqueue_header': json.dumps({
                'lms_callback_url': callback_url,
                'lms_key': 'test_queuekey',
                'queue_name': 'test_queue'
            }),
            'xqueue_body': xqueue_body
        }

        resp = requests.post(self.url, data=grade_request)

        # Expect that the response is success
        self.assertEqual(resp.status_code, 200)

        # Return back the header, so we can authenticate the response we receive
        return grade_request['xqueue_header']

    def _check_grade_response(self, post_mock, callback_url, expected_header, expected_body):
        """
        Verify that the stub sent a POST request back to us
        with the expected data.

        `post_mock` is our mock for `requests.post`
        `callback_url` is the URL we expect the stub to POST to
        `expected_header` is the header (a string) we expect to receive with the grade.
        `expected_body` is the content (a string) we expect to receive with the grade.

        Raises an `AssertionError` if the check fails.
        """

        # Wait for the server to POST back to the callback URL
        # If it takes too long, continue anyway
        self._wait_for_mock_called(post_mock, max_time=10)

        # Check the response posted back to us
        # This is the default response
        expected_callback_dict = {
            'xqueue_header': expected_header,
            'xqueue_body': expected_body,
        }

        # Check that the POST request was made with the correct params
        post_mock.assert_called_with(callback_url, data=expected_callback_dict)

    def _wait_for_mock_called(self, mock_obj, max_time=10):
        """
        Wait for `mock` (a `Mock` object) to be called.
        If seconds elapsed exceeds `max_time`, continue without error.
        """
        start_time = time.time()
        while time.time() - start_time < max_time:
            if mock_obj.called:
                break
            time.sleep(1)
@@ -12,7 +12,7 @@ class StubYouTubeServiceTest(unittest.TestCase):
     def setUp(self):
         self.server = StubYouTubeService()
         self.url = "http://127.0.0.1:{0}/".format(self.server.port)
-        self.server.set_config('time_to_response', 0.0)
+        self.server.config['time_to_response'] = 0.0
         self.addCleanup(self.server.shutdown)
 
     def test_unused_url(self):
...
""" """
Stub implementation of XQueue for acceptance tests. Stub implementation of XQueue for acceptance tests.
Configuration values:
"default" (dict): Default response to be sent to LMS as a grade for a submission
"<submission>" (dict): Grade response to return for submissions containing the text <submission>
If no grade response is configured, a default response will be returned.
""" """
from .http import StubHttpRequestHandler, StubHttpService from .http import StubHttpRequestHandler, StubHttpService
import json import json
import requests import copy
from requests import post
import threading import threading
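To illustrate the configuration values described in the module docstring, a test can set them directly on a running stub. This is a sketch only; the keys mirror the ones used in the stub's unit tests, and `server` is assumed to be a running StubXQueueService instance:

    # Assumption: `server` is a running StubXQueueService instance
    server.config['response_delay'] = 0

    # Grade response used when no submission-specific pattern matches
    server.config['default'] = {'correct': True, 'score': 1, 'msg': 'Good job!'}

    # Grade response for any submission whose body contains this text
    server.config['This is only a test.'] = {'correct': False, 'score': 0, 'msg': 'Try again.'}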
@@ -34,15 +41,13 @@ class StubXQueueHandler(StubHttpRequestHandler):
         except KeyError:
             # If the message doesn't have a header or body,
-            # then it's malformed.
-            # Respond with failure
+            # then it's malformed.  Respond with failure
             error_msg = "XQueue received invalid grade request"
             self._send_immediate_response(False, message=error_msg)
 
         except ValueError:
             # If we could not decode the body or header,
             # respond with failure
             error_msg = "XQueue could not decode grade request"
             self._send_immediate_response(False, message=error_msg)
@@ -56,19 +61,18 @@ class StubXQueueHandler(StubHttpRequestHandler):
             # Otherwise, the problem will not realize it's
             # queued and it will keep waiting for a response indefinitely
             delayed_grade_func = lambda: self._send_grade_response(
-                callback_url, xqueue_header
+                callback_url, xqueue_header, self.post_dict['xqueue_body']
             )
 
             threading.Timer(
-                self.server.config('response_delay', default=self.DEFAULT_RESPONSE_DELAY),
+                self.server.config.get('response_delay', self.DEFAULT_RESPONSE_DELAY),
                 delayed_grade_func
             ).start()
 
         # If we get a request that's not to the grading submission
         # URL, return an error
         else:
-            error_message = "Invalid request URL"
-            self._send_immediate_response(False, message=error_message)
+            self._send_immediate_response(False, message="Invalid request URL")
 
     def _send_immediate_response(self, success, message=""):
         """
@@ -90,13 +94,49 @@ class StubXQueueHandler(StubHttpRequestHandler):
         else:
             self.send_response(500)
 
-    def _send_grade_response(self, postback_url, xqueue_header):
+    def _send_grade_response(self, postback_url, xqueue_header, xqueue_body_json):
         """
         POST the grade response back to the client
-        using the response provided by the server configuration
+        using the response provided by the server configuration.
+
+        Uses the server configuration to determine what response to send:
+        1) Specific response for submissions containing matching text in `xqueue_body`
+        2) Default submission configured by client
+        3) Default submission
+
+        `postback_url` is the URL the client told us to post back to
+        `xqueue_header` (dict) is the full header the client sent us, which we will send back
+        to the client so it can authenticate us.
+        `xqueue_body_json` (json-encoded string) is the body of the submission the client sent us.
         """
-        # Get the grade response from the server configuration
-        grade_response = self.server.config('grade_response', default=self.DEFAULT_GRADE_RESPONSE)
+        # First check if we have a configured response that matches the submission body
+        grade_response = None
+
+        # This matches the pattern against the JSON-encoded xqueue_body
+        # This is very simplistic, but sufficient to associate a student response
+        # with a grading response.
+        # There is a danger here that a submission will match multiple response patterns.
+        # Rather than fail silently (which could cause unpredictable behavior in tests)
+        # we abort and log a debugging message.
+        for pattern, response in self.server.config.iteritems():
+
+            if pattern in xqueue_body_json:
+
+                if grade_response is None:
+                    grade_response = response
+
+                # Multiple matches, so abort and log an error
+                else:
+                    self.log_error(
+                        "Multiple response patterns matched '{0}'".format(xqueue_body_json),
+                    )
+                    return
+
+        # Fall back to the default grade response configured for this queue,
+        # then to the default response.
+        if grade_response is None:
+            grade_response = self.server.config.get(
+                'default', copy.deepcopy(self.DEFAULT_GRADE_RESPONSE)
+            )
 
         # Wrap the message in <div> tags to ensure that it is valid XML
         if isinstance(grade_response, dict) and 'msg' in grade_response:
@@ -107,8 +147,8 @@ class StubXQueueHandler(StubHttpRequestHandler):
             'xqueue_body': json.dumps(grade_response)
         }
 
-        requests.post(postback_url, data=data)
-        self.log_message("XQueue: sent grading response {0}".format(data))
+        post(postback_url, data=data)
+        self.log_message("XQueue: sent grading response {0} to {1}".format(data, postback_url))
 
     def _is_grade_request(self):
         return 'xqueue/submit' in self.path
...
@@ -72,7 +72,7 @@ class StubYouTubeHandler(StubHttpRequestHandler):
         Requires sending back callback id.
         """
         # Delay the response to simulate network latency
-        time.sleep(self.server.config('time_to_response', self.DEFAULT_DELAY_SEC))
+        time.sleep(self.server.config.get('time_to_response', self.DEFAULT_DELAY_SEC))
 
         # Construct the response content
         callback = self.get_params['callback'][0]
...
@@ -224,15 +224,12 @@ class OpenResponsePage(PageObject):
         if assessment_type == 'self':
             return EmptyPromise(lambda: self.has_rubric, "Rubric has appeared")
 
-        elif assessment_type == 'ai':
+        elif assessment_type == 'ai' or assessment_type == "peer":
             return EmptyPromise(
                 lambda: self.grader_status != 'Unanswered',
                 "Problem status is no longer 'unanswered'"
             )
 
-        elif assessment_type == 'peer':
-            return EmptyPromise(lambda: False, "Peer assessment not yet implemented")
-
         else:
             self.warning("Unrecognized assessment type '{0}'".format(assessment_type))
             return EmptyPromise(lambda: True, "Unrecognized assessment type")
@@ -27,7 +27,7 @@ class ProgressPage(PageObject):
         for the section.
 
         Example:
-            section_scores('Week 1', 'Lesson 1', 2) --> [(2, 4), (0, 1)]
+            scores('Week 1', 'Lesson 1') --> [(2, 4), (0, 1)]
 
         Returns `None` if no such chapter and section can be found.
         """
...
@@ -2,3 +2,6 @@ import os
 
 # Get the URL of the instance under test
 STUDIO_BASE_URL = os.environ.get('studio_url', 'http://localhost:8031')
+
+# Get the URL of the XQueue stub used in the test
+XQUEUE_STUB_URL = os.environ.get('xqueue_url', 'http://localhost:8040')
"""
Fixture to configure XQueue response.
"""
import requests
import json
from bok_choy.web_app_fixture import WebAppFixture, WebAppFixtureError
from . import XQUEUE_STUB_URL
class XQueueResponseFixture(WebAppFixture):
"""
Configure the XQueue stub's response to submissions.
"""
def __init__(self, queue_name, response_dict):
"""
Configure XQueue stub to POST `response_dict` (a dictionary)
back to the LMS when it receives a submission to a queue
named `queue_name`.
Remember that there is one XQueue stub shared by all the tests;
if possible, you should have tests use unique queue names
to avoid conflict between tests running in parallel.
"""
self._queue_name = queue_name
self._response_dict = response_dict
def install(self):
"""
Configure the stub via HTTP.
"""
url = XQUEUE_STUB_URL + "/set_config"
# Configure the stub to respond to submissions to our queue
payload = {self._queue_name: json.dumps(self._response_dict)}
response = requests.put(url, data=payload)
if not response.ok:
raise WebFixtureError(
"Could not configure XQueue stub for queue '{1}'. Status code: {2}".format(
self._queue_name, self._response_dict))
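For illustration, the ORA tests below return this fixture from their `fixtures` property so it is installed before the test runs; a minimal sketch (the submission text and grade dict here are examples only):

    # Sketch: respond to any submission containing this text
    grade = {'correct': True, 'score': 1, 'msg': 'Well done.'}
    XQueueResponseFixture('Test submission 1234', grade).install()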
<combinedopenended max_score="1" accept_file_upload="False" markdown="null" max_attempts="10000" skip_spelling_checks="False" version="1">
<rubric>
<rubric>
<category>
<description>Writing Applications</description>
<option> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option>
<option> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option>
</category>
<category>
<description> Language Conventions </description>
<option> The essay demonstrates a reasonable command of proper spelling and grammar. </option>
<option> The essay demonstrates superior command of proper spelling and grammar.</option>
</category>
</rubric>
</rubric>
<prompt>
<h4>Censorship in the Libraries</h4>
<p>"All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us." --Katherine Paterson, Author</p>
<p>Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.</p>
</prompt>
<task>
<openended>
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "peer_grading.conf", "problem_id" : "700x/Demo"}</grader_payload>
</openendedparam>
</openended>
</task>
</combinedopenended>
<rubric><category><description>Writing Applications</description><score>0</score><option points='0'> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option><option points='1'> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option></category><category><description> Language Conventions </description><score>1</score><option points='0'> The essay demonstrates a reasonable command of proper spelling and grammar. </option><option points='1'> The essay demonstrates superior command of proper spelling and grammar.</option></category></rubric>
""" """
Test helper functions. Test helper functions and base classes.
""" """
from path import path from path import path
from bok_choy.web_app_test import WebAppTest
def load_data_str(rel_path): def load_data_str(rel_path):
...@@ -12,3 +13,32 @@ def load_data_str(rel_path): ...@@ -12,3 +13,32 @@ def load_data_str(rel_path):
full_path = path(__file__).abspath().dirname() / "data" / rel_path #pylint: disable=E1120 full_path = path(__file__).abspath().dirname() / "data" / rel_path #pylint: disable=E1120
with open(full_path) as data_file: with open(full_path) as data_file:
return data_file.read() return data_file.read()
class UniqueCourseTest(WebAppTest):
"""
Test that provides a unique course ID.
"""
COURSE_ID_SEPARATOR = "/"
def __init__(self, *args, **kwargs):
"""
Create a unique course ID.
"""
self.course_info = {
'org': 'test_org',
'number': self.unique_id,
'run': 'test_run',
'display_name': 'Test Course' + self.unique_id
}
super(UniqueCourseTest, self).__init__(*args, **kwargs)
@property
def course_id(self):
return self.COURSE_ID_SEPARATOR.join([
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
])
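As an illustration, a bok-choy test can subclass this helper and use the generated ID when visiting pages; a sketch along the lines of the ORA and Studio tests below (the class and test names are hypothetical):

    class MyCourseTest(UniqueCourseTest):

        def test_visit_course_info(self):
            # course_id joins org/number/run using COURSE_ID_SEPARATOR
            self.ui.visit('lms.course_info', course_id=self.course_id)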
@@ -2,79 +2,167 @@
Tests for ORA (Open Response Assessment) through the LMS UI.
"""
import json

from bok_choy.promise import fulfill, Promise

from ..edxapp_pages.studio.auto_auth import AutoAuthPage
from ..edxapp_pages.lms.course_info import CourseInfoPage
from ..edxapp_pages.lms.tab_nav import TabNavPage
from ..edxapp_pages.lms.course_nav import CourseNavPage
from ..edxapp_pages.lms.open_response import OpenResponsePage
from ..edxapp_pages.lms.progress import ProgressPage
from ..fixtures.course import XBlockFixtureDesc, CourseFixture
from ..fixtures.xqueue import XQueueResponseFixture
from .helpers import load_data_str, UniqueCourseTest


class OpenResponseTest(UniqueCourseTest):
    """
    Tests that interact with ORA (Open Response Assessment) through the LMS UI.
    This base class sets up a course with open response problems and defines
    some helper functions used in the ORA tests.
    """

    page_object_classes = [
        AutoAuthPage, CourseInfoPage, TabNavPage,
        CourseNavPage, OpenResponsePage, ProgressPage
    ]

    # Grade response (dict) to return from the XQueue stub
    # in response to our unique submission text.
    XQUEUE_GRADE_RESPONSE = None

    def setUp(self):
        """
        Always start in the subsection with open response problems.
        """
        # Create a unique submission
        self.submission = "Test submission " + self.unique_id

        # Ensure fixtures are installed
        super(OpenResponseTest, self).setUp()

        # Log in and navigate to the essay problems
        self.ui.visit('studio.auto_auth', course_id=self.course_id)
        self.ui.visit('lms.course_info', course_id=self.course_id)
        self.ui['lms.tab_nav'].go_to_tab('Courseware')

    @property
    def fixtures(self):
        """
        Create a test course with open response problems.
        Configure the XQueue stub to respond to submissions to the open-ended queue.
        """
        # Configure the test course
        course_fix = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )

        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('combinedopenended', 'Self-Assessed',
                        data=load_data_str('ora_self_problem.xml'), metadata={'graded': True}),
                    XBlockFixtureDesc('combinedopenended', 'AI-Assessed',
                        data=load_data_str('ora_ai_problem.xml'), metadata={'graded': True}),
                    XBlockFixtureDesc('combinedopenended', 'Peer-Assessed',
                        data=load_data_str('ora_peer_problem.xml'), metadata={'graded': True}),
                )
            )
        )

        # Configure the XQueue stub's response for the text we will submit
        if self.XQUEUE_GRADE_RESPONSE is not None:
            xqueue_fix = XQueueResponseFixture(self.submission, self.XQUEUE_GRADE_RESPONSE)
            return [course_fix, xqueue_fix]
        else:
            return [course_fix]

    def submit_essay(self, expected_assessment_type, expected_prompt):
        """
        Submit an essay and verify that the problem uses
        the `expected_assessment_type` ("self", "ai", or "peer") and
        shows the `expected_prompt` (a string).
        """

        # Check the assessment type and prompt
        self.assertEqual(self.ui['lms.open_response'].assessment_type, expected_assessment_type)
        self.assertIn(expected_prompt, self.ui['lms.open_response'].prompt)

        # Enter a submission, which will trigger a pre-defined response from the XQueue stub.
        self.ui['lms.open_response'].set_response(self.submission)

        # Save the response and expect some UI feedback
        self.ui['lms.open_response'].save_response()
        self.assertEqual(
            self.ui['lms.open_response'].alert_message,
            "Answer saved, but not yet submitted."
        )

        # Submit the response
        self.ui['lms.open_response'].submit_response()

    def get_asynch_feedback(self, assessment_type):
        """
        Wait for and retrieve asynchronous feedback
        (e.g. from AI, instructor, or peer grading)
        `assessment_type` is either "ai" or "peer".
        """
        feedback_promise = Promise(
            self._check_feedback_func(assessment_type),
            'Got feedback for {0} problem'.format(assessment_type)
        )
        return fulfill(feedback_promise)

    def _check_feedback_func(self, assessment_type):
        """
        Navigate away from, then return to, the peer problem to
        receive updated feedback.

        The returned function will return a tuple `(is_success, rubric_feedback)`,
        `is_success` is True iff we have received feedback for the problem;
        `rubric_feedback` is a list of "correct" or "incorrect" strings.
        """
        if assessment_type == 'ai':
            section_name = 'AI-Assessed'
        elif assessment_type == 'peer':
            section_name = 'Peer-Assessed'
        else:
            raise ValueError('Assessment type not recognized. Must be either "ai" or "peer"')

        def _inner_check():
            self.ui['lms.course_nav'].go_to_sequential('Self-Assessed')
            self.ui['lms.course_nav'].go_to_sequential(section_name)

            feedback = self.ui['lms.open_response'].rubric_feedback

            # Successful if `feedback` is a non-empty list
            return (bool(feedback), feedback)

        return _inner_check


class SelfAssessmentTest(OpenResponseTest):
    """
    Test ORA self-assessment.
    """

    def test_self_assessment(self):
        """
        Given I am viewing a self-assessment problem
        When I submit an essay and complete a self-assessment rubric
        Then I see a scored rubric
        And I see my score in the progress page.
        """
        # Navigate to the self-assessment problem and submit an essay
        self.ui['lms.course_nav'].go_to_sequential('Self-Assessed')
        self.submit_essay('self', 'Censorship in the Libraries')

        # Check the rubric categories
        self.assertEqual(
@@ -91,14 +179,42 @@ class OpenResponseTest(WebAppTest):
            ['incorrect', 'correct']
        )

        # Verify the progress page
        self.ui.visit('lms.progress', course_id=self.course_id)
        scores = self.ui['lms.progress'].scores('Test Section', 'Test Subsection')

        # The first score is self-assessment, which we've answered, so it's 1/2
        # The other scores are AI- and peer-assessment, which we haven't answered so those are 0/2
        self.assertEqual(scores, [(1, 2), (0, 2), (0, 2)])


class AIAssessmentTest(OpenResponseTest):
    """
    Test ORA AI-assessment.
    """

    XQUEUE_GRADE_RESPONSE = {
        'score': 1,
        'feedback': {"spelling": "Ok.", "grammar": "Ok.", "markup_text": "NA"},
        'grader_type': 'BC',
        'success': True,
        'grader_id': 1,
        'submission_id': 1,
        'rubric_scores_complete': True,
        'rubric_xml': load_data_str('ora_rubric.xml')
    }

    def test_ai_assessment(self):
        """
        Given I am viewing an AI-assessment problem that has a trained ML model
        When I submit an essay and wait for a response
        Then I see a scored rubric
        And I see my score in the progress page.
        """
        # Navigate to the AI-assessment problem and submit an essay
        self.ui['lms.course_nav'].go_to_sequential('AI-Assessed')
        self.submit_essay('ai', 'Censorship in the Libraries')

        # Expect UI feedback that the response was submitted
        self.assertEqual(
@@ -106,27 +222,85 @@ class OpenResponseTest(WebAppTest):
            "Your response has been submitted. Please check back later for your grade."
        )

        # Refresh the page to get the updated feedback
        # then verify that we get the feedback sent by our stub XQueue implementation
        self.assertEqual(self.get_asynch_feedback('ai'), ['incorrect', 'correct'])

        # Verify the progress page
        self.ui.visit('lms.progress', course_id=self.course_id)
        scores = self.ui['lms.progress'].scores('Test Section', 'Test Subsection')

        # First score is the self-assessment score, which we haven't answered, so it's 0/2
        # Second score is the AI-assessment score, which we have answered, so it's 1/2
        # Third score is peer-assessment, which we haven't answered, so it's 0/2
        self.assertEqual(scores, [(0, 2), (1, 2), (0, 2)])


class InstructorAssessmentTest(AIAssessmentTest):
    """
    Test an AI-assessment that has been graded by an instructor.
    This runs the exact same test as the AI-assessment test, except
    that the feedback comes from an instructor instead of the machine grader.
    From the student's perspective, it should look the same.
    """

    XQUEUE_GRADE_RESPONSE = {
        'score': 1,
        'feedback': {"feedback": "Good job!"},
        'grader_type': 'IN',
        'success': True,
        'grader_id': 1,
        'submission_id': 1,
        'rubric_scores_complete': True,
        'rubric_xml': load_data_str('ora_rubric.xml')
    }


class PeerFeedbackTest(OpenResponseTest):
    """
    Test ORA peer-assessment. Note that this tests only *receiving* feedback,
    not *giving* feedback -- those tests are located in another module.
    """

    # Unlike other assessment types, peer assessment has multiple scores
    XQUEUE_GRADE_RESPONSE = {
        'score': [2, 2, 2],
        'feedback': [json.dumps({"feedback": ""})] * 3,
        'grader_type': 'PE',
        'success': True,
        'grader_id': [1, 2, 3],
        'submission_id': 1,
        'rubric_scores_complete': [True, True, True],
        'rubric_xml': [load_data_str('ora_rubric.xml')] * 3
    }

    def test_peer_assessment(self):
        """
        Given I have submitted an essay for peer-assessment
        And enough other students have scored my essay
        Then I can view the scores and written feedback
        And I see my score in the progress page.
        """
        # Navigate to the peer-assessment problem and submit an essay
        self.ui['lms.course_nav'].go_to_sequential('Peer-Assessed')
        self.submit_essay('peer', 'Censorship in the Libraries')

        # Expect UI feedback that the response was submitted
        self.assertEqual(
            self.ui['lms.open_response'].grader_status,
            "Your response has been submitted. Please check back later for your grade."
        )

        # Refresh the page to get feedback from the stub XQueue grader.
        # We receive feedback from all three peers, each of which
        # provide 2 scores (one for each rubric item)
        self.assertEqual(self.get_asynch_feedback('peer'), ['incorrect', 'correct'] * 3)

        # Verify the progress page
        self.ui.visit('lms.progress', course_id=self.course_id)
        scores = self.ui['lms.progress'].scores('Test Section', 'Test Subsection')

        # First score is the self-assessment score, which we haven't answered, so it's 0/2
        # Second score is the AI-assessment score, which we haven't answered, so it's 0/2
        # Third score is peer-assessment, which we have answered, so it's 2/2
        self.assertEqual(scores, [(0, 2), (0, 2), (2, 2)])
@@ -22,6 +22,8 @@ from ..edxapp_pages.studio.signup import SignupPage
 from ..edxapp_pages.studio.textbooks import TextbooksPage
 from ..fixtures.course import CourseFixture
 
+from .helpers import UniqueCourseTest
+
 
 class LoggedOutTest(WebAppTest):
     """
@@ -59,26 +61,13 @@ class LoggedInPagesTest(WebAppTest):
         self.ui.visit('studio.dashboard')
 
 
-class CoursePagesTest(WebAppTest):
+class CoursePagesTest(UniqueCourseTest):
     """
     Tests that verify the pages in Studio that you can get to when logged
     in and have a course.
     """
 
-    def setUp(self):
-        """
-        Create a unique identifier for the course used in this test.
-        """
-        # Define a unique course identifier
-        self.course_info = {
-            'org': 'test_org',
-            'number': '101',
-            'run': 'test_' + self.unique_id,
-            'display_name': 'Test Course ' + self.unique_id
-        }
-
-        # Ensure that the superclass sets up
-        super(CoursePagesTest, self).setUp()
+    COURSE_ID_SEPARATOR = "."
 
     @property
     def page_object_classes(self):
@@ -90,14 +79,13 @@ class CoursePagesTest(WebAppTest):
 
     @property
     def fixtures(self):
-        super_fixtures = super(CoursePagesTest, self).fixtures
         course_fix = CourseFixture(
             self.course_info['org'],
             self.course_info['number'],
             self.course_info['run'],
             self.course_info['display_name']
         )
-        return set(super_fixtures + [course_fix])
+        return [course_fix]
 
     def test_page_existence(self):
         """
@@ -113,6 +101,6 @@ class CoursePagesTest(WebAppTest):
         # Log in
         self.ui.visit('studio.auto_auth', staff=True)
 
-        course_id = '{org}.{number}.{run}'.format(**self.course_info)
+        # Verify that each page is available
         for page in pages:
-            self.ui.visit('studio.{0}'.format(page), course_id=course_id)
+            self.ui.visit('studio.{0}'.format(page), course_id=self.course_id)
@@ -42,13 +42,15 @@ def view_problem(step, problem_type):
 def set_external_grader_response(step, correctness):
     assert(correctness in ['correct', 'incorrect'])
 
-    response_dict = {'correct': True if correctness == 'correct' else False,
-                     'score': 1 if correctness == 'correct' else 0,
-                     'msg': 'Your problem was graded %s' % correctness}
+    response_dict = {
+        'correct': True if correctness == 'correct' else False,
+        'score': 1 if correctness == 'correct' else 0,
+        'msg': 'Your problem was graded {0}'.format(correctness)
+    }
 
     # Set the fake xqueue server to always respond
     # correct/incorrect when asked to grade a problem
-    world.xqueue.set_config('grade_response', response_dict)
+    world.xqueue.config['default'] = response_dict
 
 
 @step(u'I answer a "([^"]*)" problem "([^"]*)ly"')
...
@@ -107,7 +107,7 @@ def add_video_to_course(course, player_mode, display_name='Video'):
 
 @step('youtube server is up and response time is (.*) seconds$')
 def set_youtube_response_timeout(_step, time):
-    world.youtube.set_config('time_to_response', float(time))
+    world.youtube.config['time_to_response'] = float(time)
 
 
 @step('when I view the video it has rendered in (.*) mode$')
...
@@ -107,7 +107,7 @@
         "password": "password",
         "username": "lms"
     },
-    "url": "http://localhost:18040"
+    "url": "** OVERRIDDEN **"
 },
 "ZENDESK_API_KEY": "",
 "ZENDESK_USER": ""
...
@@ -38,6 +38,9 @@ MONGO_MODULESTORE['OPTIONS']['fs_root'] = (TEST_ROOT / "data").abspath()
 XML_MODULESTORE = MODULESTORE['default']['OPTIONS']['stores']['xml']
 XML_MODULESTORE['OPTIONS']['data_dir'] = (TEST_ROOT / "data").abspath()
 
+# Configure the LMS to use our stub XQueue implementation
+XQUEUE_INTERFACE['url'] = 'http://localhost:8040'
+
 # Enable django-pipeline and staticfiles
 STATIC_ROOT = (TEST_ROOT / "staticfiles").abspath()
 PIPELINE = True
...
@@ -29,22 +29,40 @@ BOK_CHOY_SERVERS = {
     :cms => { :port => 8031, :log => File.join(BOK_CHOY_LOG_DIR, "bok_choy_studio.log") }
 }
 
+BOK_CHOY_STUBS = {
+    :xqueue => { :port => 8040, :log => File.join(BOK_CHOY_LOG_DIR, "bok_choy_xqueue.log") }
+}
+
+# For the time being, stubs are used by both the bok-choy and lettuce acceptance tests
+# For this reason, the stubs package is currently located in the Django app called "terrain"
+# where other lettuce configuration is stored.
+BOK_CHOY_STUB_DIR = File.join(REPO_ROOT, "common", "djangoapps", "terrain")
+
 BOK_CHOY_CACHE = Dalli::Client.new('localhost:11211')
 
-# Start the server we will run tests on
+# Start the servers we will run tests on
 def start_servers()
     BOK_CHOY_SERVERS.each do | service, info |
         address = "0.0.0.0:#{info[:port]}"
         cmd = "coverage run --rcfile=#{BOK_CHOY_COVERAGE_RC} -m manage #{service} --settings bok_choy runserver #{address} --traceback --noreload"
         singleton_process(cmd, logfile=info[:log])
     end
+
+    BOK_CHOY_STUBS.each do | service, info |
+        Dir.chdir(BOK_CHOY_STUB_DIR) do
+            singleton_process(
+                "python -m stubs.start #{service} #{info[:port]}",
+                logfile=info[:log]
+            )
+        end
+    end
 end
 
 # Wait until we get a successful response from the servers or time out
 def wait_for_test_servers()
-    BOK_CHOY_SERVERS.each do | service, info |
+    BOK_CHOY_SERVERS.merge(BOK_CHOY_STUBS).each do | service, info |
         ready = wait_for_server("0.0.0.0", info[:port])
         if not ready
             fail("Could not contact #{service} test server")
...