Commit 0a9b4ada by Will Daly

Added stub XQueue server to bok_choy test suite.

Added feedback check for AI-assessment test
Added peer assessment feedback test
Added tests for peer grading module
Added stub peer grading module service
Updated Studio/LMS page objects and tests to use bok-choy v0.1.0
Added LMS bok-choy tests from e2e repo
Added bok-choy tests for the peer grading module, including calibration.
parent 60f73af5
<rubric>
    <category>
        <description>Writing Applications</description>
        <score>0</score>
        <option points='0'>The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option>
        <option points='1'>The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option>
    </category>
    <category>
        <description>Language Conventions</description>
        <score>1</score>
        <option points='0'>The essay demonstrates a reasonable command of proper spelling and grammar.</option>
        <option points='1'>The essay demonstrates superior command of proper spelling and grammar.</option>
    </category>
</rubric>
<rubric>
    <category>
        <description>Writing Applications</description>
        <option points='0'>The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option>
        <option points='1'>The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option>
    </category>
    <category>
        <description>Language Conventions</description>
        <option points='0'>The essay demonstrates a reasonable command of proper spelling and grammar.</option>
        <option points='1'>The essay demonstrates superior command of proper spelling and grammar.</option>
    </category>
</rubric>
@@ -6,12 +6,60 @@ from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import urlparse
import threading
import json
from functools import wraps

from lazy import lazy
from logging import getLogger

LOGGER = getLogger(__name__)

def require_params(method, *required_keys):
    """
    Decorator to ensure that the method has all the required parameters.

    Example:

        @require_params('GET', 'id', 'state')
        def handle_request(self):
            # ....

    would send a 400 response if no GET parameters were specified
    for 'id' or 'state' (or if those parameters had empty values).

    The wrapped function should be a method of a `StubHttpRequestHandler`
    subclass.

    Currently, "GET" and "POST" are the only supported methods.
    """
    def decorator(func):

        @wraps(func)
        def wrapper(self, *args, **kwargs):

            # Read either GET querystring params or POST dict params
            if method == "GET":
                params = self.get_params
            elif method == "POST":
                params = self.post_dict
            else:
                raise ValueError("Unsupported method '{method}'".format(method=method))

            # Check for required values
            missing = [
                key for key in required_keys
                if params.get(key) is None
            ]

            if missing:
                msg = "Missing required key(s) {keys}".format(keys=",".join(missing))
                self.send_response(400, content=msg, headers={'Content-type': 'text/plain'})

            # If nothing is missing, execute the function as usual
            else:
                return func(self, *args, **kwargs)

        return wrapper
    return decorator


class StubHttpRequestHandler(BaseHTTPRequestHandler, object):
    """
    Handler for the stub HTTP service.
@@ -70,7 +118,26 @@ class StubHttpRequestHandler(BaseHTTPRequestHandler, object):
    @lazy
    def get_params(self):
        """
        Return the GET parameters (querystring in the URL).
        """
        query = urlparse.urlparse(self.path).query

        # By default, `parse_qs` returns a list of values for each param
        # For convenience, we replace lists of 1 element with just the element
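        # e.g. a request to "/endpoint?id=1&tag=a&tag=b" (illustrative URL) yields
        # {'id': '1', 'tag': ['a', 'b']}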
        return {
            k: v[0] if len(v) == 1 else v
            for k, v in urlparse.parse_qs(query).items()
        }

    @lazy
    def path_only(self):
        """
        Return the URL path without GET parameters.
        Removes the trailing slash if there is one.
        """
        path = urlparse.urlparse(self.path).path

        if path.endswith('/'):
            return path[:-1]
        else:
            return path

    def do_PUT(self):
        """
...
@@ -6,13 +6,15 @@ import time
import logging

from .xqueue import StubXQueueService
from .youtube import StubYouTubeService
from .ora import StubOraService

USAGE = "USAGE: python -m stubs.start SERVICE_NAME PORT_NUM [CONFIG_KEY=CONFIG_VAL, ...]"
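# For example (mirroring the bok-choy rakefile configuration later in this commit):
#   python -m stubs.start xqueue 8040 register_submission_url=http://0.0.0.0:8041/test/register_submission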

SERVICES = {
    'xqueue': StubXQueueService,
    'youtube': StubYouTubeService,
    'ora': StubOraService
}

# Log to stdout, including debug messages
@@ -21,7 +23,7 @@ logging.basicConfig(level=logging.DEBUG, format="%(levelname)s %(message)s")
def get_args():
    """
    Parse arguments, returning tuple of `(service_name, port_num, config_dict)`.
    Exits with a message if arguments are invalid.
    """
    if len(sys.argv) < 3:
@@ -30,6 +32,7 @@ def get_args():
    service_name = sys.argv[1]
    port_num = sys.argv[2]
    config_dict = _parse_config_args(sys.argv[3:])

    if service_name not in SERVICES:
        print "Unrecognized service '{0}'. Valid choices are: {1}".format(
@@ -45,17 +48,40 @@ def get_args():
        print "Port '{0}' must be a positive integer".format(port_num)
        sys.exit(1)

    return service_name, port_num, config_dict

def _parse_config_args(args):
    """
    Parse stub configuration arguments, which are strings of the form "KEY=VAL".
    `args` is a list of arguments from the command line.
    Any argument that does not match the "KEY=VAL" format will be logged and skipped.

    Returns a dictionary with the configuration keys and values.
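
    Example (illustrative values):
        _parse_config_args(['delay=5', 'register_submission_url=http://0.0.0.0:8041/test'])
        # => {'delay': '5', 'register_submission_url': 'http://0.0.0.0:8041/test'}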
"""
config_dict = dict()
for config_str in args:
try:
components = config_str.split('=')
if len(components) >= 2:
config_dict[components[0]] = "=".join(components[1:])
except:
print "Warning: could not interpret config value '{0}'".format(config_str)
pass
return config_dict

def main():
    """
    Start a server; shut down on keyboard interrupt signal.
    """
    service_name, port_num, config_dict = get_args()
    print "Starting stub service '{0}' on port {1}...".format(service_name, port_num)

    server = SERVICES[service_name](port_num=port_num)
    server.config.update(config_dict)

    try:
        while True:
...
@@ -5,7 +5,7 @@ Unit tests for stub HTTP server base class.
import unittest
import requests
import json

from terrain.stubs.http import StubHttpService, StubHttpRequestHandler, require_params


class StubHttpServiceTest(unittest.TestCase):
@@ -62,3 +62,56 @@ class StubHttpServiceTest(unittest.TestCase):
            data="{}"
        )
        self.assertEqual(response.status_code, 404)


class RequireRequestHandler(StubHttpRequestHandler):

    @require_params('GET', 'test_param')
    def do_GET(self):
        self.send_response(200)

    @require_params('POST', 'test_param')
    def do_POST(self):
        self.send_response(200)


class RequireHttpService(StubHttpService):
    HANDLER_CLASS = RequireRequestHandler


class RequireParamTest(unittest.TestCase):
    """
    Test the decorator for requiring parameters.
    """

    def setUp(self):
        self.server = RequireHttpService()
        self.addCleanup(self.server.shutdown)
        self.url = "http://127.0.0.1:{port}".format(port=self.server.port)

    def test_require_get_param(self):

        # Expect success when we provide the required param
        response = requests.get(self.url, params={"test_param": 2})
        self.assertEqual(response.status_code, 200)

        # Expect failure when we do not provide the param
        response = requests.get(self.url)
        self.assertEqual(response.status_code, 400)

        # Expect failure when we provide an empty param
        response = requests.get(self.url + "?test_param=")
        self.assertEqual(response.status_code, 400)

    def test_require_post_param(self):

        # Expect success when we provide the required param
        response = requests.post(self.url, data={"test_param": 2})
        self.assertEqual(response.status_code, 200)

        # Expect failure when we do not provide the param
        response = requests.post(self.url)
        self.assertEqual(response.status_code, 400)

        # Expect failure when we provide an empty param
        response = requests.post(self.url, data={"test_param": None})
        self.assertEqual(response.status_code, 400)
@@ -8,7 +8,7 @@ import json
import requests
import time
import copy

from ..xqueue import StubXQueueService, StubXQueueHandler


class StubXQueueServiceTest(unittest.TestCase):
@@ -95,7 +95,7 @@ class StubXQueueServiceTest(unittest.TestCase):

        # Post a submission to the XQueue stub
        callback_url = 'http://127.0.0.1:8000/test_callback'
        self._post_submission(
            callback_url, 'test_queuekey', 'test_queue',
            json.dumps({'submission': 'test_1 and test_2'})
        )
@@ -108,6 +108,23 @@ class StubXQueueServiceTest(unittest.TestCase):
        self.assertFalse(post.called)
        self.assertTrue(logger.error.called)

    @mock.patch('terrain.stubs.xqueue.post')
    def test_register_submission_url(self, post):

        # Configure the XQueue stub to notify another service
        # when it receives a submission.
        register_url = 'http://127.0.0.1:8000/register_submission'
        self.server.config['register_submission_url'] = register_url

        callback_url = 'http://127.0.0.1:8000/test_callback'
        submission = json.dumps({'grader_payload': 'test payload'})
        self._post_submission(
            callback_url, 'test_queuekey', 'test_queue', submission
        )

        # Check that a notification was sent
        post.assert_any_call(register_url, data={'grader_payload': u'test payload'})

    def _post_submission(self, callback_url, lms_key, queue_name, xqueue_body):
        """
        Post a submission to the stub XQueue implementation.
...
@@ -4,7 +4,7 @@ Unit test for stub YouTube implementation.
import unittest
import requests

from ..youtube import StubYouTubeService


class StubYouTubeServiceTest(unittest.TestCase):
...
@@ -4,11 +4,12 @@ Stub implementation of XQueue for acceptance tests.

Configuration values:
    "default" (dict): Default response to be sent to LMS as a grade for a submission
    "<submission>" (dict): Grade response to return for submissions containing the text <submission>
    "register_submission_url" (str): URL to send grader payloads when we receive a submission

If no grade response is configured, a default response will be returned.
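
Example configuration (illustrative values):

    {
        "test_1": {"correct": True, "score": 1, "msg": "<p>Good!</p>"},
        "register_submission_url": "http://0.0.0.0:8041/test/register_submission"
    }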
""" """
from .http import StubHttpRequestHandler, StubHttpService from .http import StubHttpRequestHandler, StubHttpService, require_params
import json import json
import copy import copy
from requests import post from requests import post
@@ -23,6 +24,7 @@ class StubXQueueHandler(StubHttpRequestHandler):

    DEFAULT_RESPONSE_DELAY = 2
    DEFAULT_GRADE_RESPONSE = {'correct': True, 'score': 1, 'msg': ''}

    @require_params('POST', 'xqueue_body', 'xqueue_header')
    def do_POST(self):
        """
        Handle a POST request from the client
@@ -35,6 +37,10 @@ class StubXQueueHandler(StubHttpRequestHandler):

        # Respond only to grading requests
        if self._is_grade_request():

            # If configured, send the grader payload to other services.
            self._register_submission(self.post_dict['xqueue_body'])

            try:
                xqueue_header = json.loads(self.post_dict['xqueue_header'])
                callback_url = xqueue_header['lms_callback_url']
@@ -118,7 +124,7 @@ class StubXQueueHandler(StubHttpRequestHandler):

        # There is a danger here that a submission will match multiple response patterns.
        # Rather than fail silently (which could cause unpredictable behavior in tests)
        # we abort and log a debugging message.
        for pattern, response in self.server.queue_responses:
            if pattern in xqueue_body_json:
                if grade_response is None:
@@ -150,7 +156,44 @@ class StubXQueueHandler(StubHttpRequestHandler):
        post(postback_url, data=data)
        self.log_message("XQueue: sent grading response {0} to {1}".format(data, postback_url))

    def _register_submission(self, xqueue_body_json):
        """
        If configured, send the submission's grader payload to another service.
        """
        url = self.server.config.get('register_submission_url')

        # If not configured, we do not need to send anything
        if url is not None:

            try:
                xqueue_body = json.loads(xqueue_body_json)
            except ValueError:
                self.log_error(
                    "Could not decode XQueue body as JSON: '{0}'".format(xqueue_body_json))
            else:

                # Retrieve the grader payload, which should be a JSON-encoded dict.
                # We pass the payload directly to the service we are notifying, without
                # inspecting the contents.
                grader_payload = xqueue_body.get('grader_payload')

                if grader_payload is not None:
                    response = post(url, data={'grader_payload': grader_payload})

                    if not response.ok:
                        self.log_error(
                            "Could not register submission at URL '{0}'. Status was {1}".format(
                                url, response.status_code))

                else:
                    self.log_message(
                        "XQueue body is missing 'grader_payload' key: '{0}'".format(xqueue_body)
                    )

    def _is_grade_request(self):
        """
        Return a boolean indicating whether the requested URL indicates a submission.
        """
        return 'xqueue/submit' in self.path

@@ -160,3 +203,19 @@ class StubXQueueService(StubHttpService):
    """
    HANDLER_CLASS = StubXQueueHandler
    NON_QUEUE_CONFIG_KEYS = ['default', 'register_submission_url']

    @property
    def queue_responses(self):
        """
        Return a list of (pattern, response) tuples, where `pattern` is a pattern
        to match in the XQueue body, and `response` is a dictionary to return
        as the response from the grader.

        Every configuration key is treated as a pattern,
        except for 'default' and 'register_submission_url', which have special meaning.
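
        Example (illustrative): given the config
            {'register_submission_url': '...', 'test_1': {'score': 1}}
        this returns [('test_1', {'score': 1})].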
"""
return {
key:val for key, val in self.config.iteritems()
if key not in self.NON_QUEUE_CONFIG_KEYS
}.items()
@@ -75,7 +75,7 @@ class StubYouTubeHandler(StubHttpRequestHandler):

        time.sleep(self.server.config.get('time_to_response', self.DEFAULT_DELAY_SEC))

        # Construct the response content
        callback = self.get_params['callback']
        data = OrderedDict({
            'data': OrderedDict({
                'id': youtube_id,
...
@@ -5,3 +5,6 @@ STUDIO_BASE_URL = os.environ.get('studio_url', 'http://localhost:8031')

# Get the URL of the XQueue stub used in the test
XQUEUE_STUB_URL = os.environ.get('xqueue_url', 'http://localhost:8040')

# Get the URL of the ORA stub used in the test
ORA_STUB_URL = os.environ.get('ora_url', 'http://localhost:8041')
@@ -29,7 +29,7 @@ class StudioApiFixture(object):

        Log in as a staff user, then return a `requests` `session` object for the logged in user.
        Raises a `StudioApiLoginError` if the login fails.
        """
        # Use auto-auth to retrieve the session for a logged in user
        session = requests.Session()
        response = session.get(STUDIO_BASE_URL + "/auto_auth?staff=true")
...
@@ -7,22 +7,29 @@ import json

from . import XQUEUE_STUB_URL


class XQueueResponseFixtureError(Exception):
    """
    Error occurred while configuring the stub XQueue.
    """
    pass


class XQueueResponseFixture(object):
    """
    Configure the XQueue stub's response to submissions.
    """

    def __init__(self, pattern, response_dict):
        """
        Configure the XQueue stub to POST `response_dict` (a dictionary)
        back to the LMS when it receives a submission that contains the string
        `pattern`.

        Remember that there is one XQueue stub shared by all the tests;
        if possible, you should have tests use unique patterns
        to avoid conflict between tests running in parallel.
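
        Example (illustrative values):
            XQueueResponseFixture(
                'test_queue', {'correct': True, 'score': 1, 'msg': ''}
            ).install()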
""" """
self._queue_name = queue_name self._pattern = pattern
self._response_dict = response_dict self._response_dict = response_dict

    def install(self):
@@ -32,10 +39,10 @@ class XQueueResponseFixture(object):
        url = XQUEUE_STUB_URL + "/set_config"

        # Configure the stub to respond to submissions matching our pattern
        payload = {self._pattern: json.dumps(self._response_dict)}
        response = requests.put(url, data=payload)

        if not response.ok:
            raise XQueueResponseFixtureError(
                "Could not configure XQueue stub for pattern '{0}'. Status code: {1}".format(
                    self._pattern, response.status_code))
@@ -3,7 +3,8 @@ Open-ended response in the courseware.
"""
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise, fulfill_after, fulfill
from .rubric import RubricPage


class OpenResponsePage(PageObject):
@@ -57,58 +58,14 @@ class OpenResponsePage(PageObject):
        return prompts[0]

    @property
    def rubric(self):
        """
        Return a `RubricPage` for a self-assessment problem.
        If no rubric is available, raises a `BrokenPromise` exception.
        """
        rubric = RubricPage(self.browser)
        rubric.wait_for_page()
        return rubric

    @property
    def written_feedback(self):
@@ -175,68 +132,26 @@ class OpenResponsePage(PageObject):
        """
        Submit a response for grading.
        """
        with self.handle_alert():
            self.css_click('input.submit-button')

        # Ensure that the submission completes
        self._wait_for_submitted(self.assessment_type)

    def _wait_for_submitted(self, assessment_type):
        """
        Wait for the submission to complete.
        `assessment_type` is either 'self', 'ai', or 'peer'
        """
        if assessment_type == 'self':
            RubricPage(self.browser).wait_for_page()

        elif assessment_type == 'ai' or assessment_type == "peer":
            fulfill(EmptyPromise(
                lambda: self.grader_status != 'Unanswered',
                "Problem status is no longer 'unanswered'"
            ))

        else:
            self.warning("Unrecognized assessment type '{0}'".format(assessment_type))
            fulfill(EmptyPromise(lambda: True, "Unrecognized assessment type"))
"""
Page that allows the student to grade calibration essays
(requirement for being allowed to grade peers).
"""
from bok_choy.page_object import PageObject
from .rubric import RubricPage
class PeerCalibratePage(PageObject):
"""
Grade calibration essays.
"""
url = None
def is_browser_on_page(self):
return (
self.is_css_present('div.peer-grading-tools') or
self.is_css_present('div.calibration-panel.current-state')
)
def continue_to_grading(self):
"""
Continue to peer grading after completing calibration.
"""
self.css_click('input.calibration-feedback-button')
@property
def rubric(self):
"""
Return a `RubricPage` for the calibration essay.
If no rubric is available, raises a `BrokenPromise` exception.
"""
rubric = RubricPage(self.browser)
rubric.wait_for_page()
return rubric
@property
def message(self):
"""
Return a message shown to the user, or None if no message is available.
"""
messages = self.css_text('div.peer-grading-tools > div.message-container > p')
if len(messages) < 1:
return None
else:
return messages[0]
"""
Confirmation screen for peer calibration and grading.
"""
from bok_choy.page_object import PageObject
class PeerConfirmPage(PageObject):
"""
Confirmation for peer calibration and grading.
"""
url = None
def is_browser_on_page(self):
return self.is_css_present('section.calibration-interstitial-page')
def start(self, is_calibrating=False):
"""
Continue to the next section after the confirmation page.
If `is_calibrating` is false, try to continue to peer grading.
Otherwise, try to continue to calibration grading.
"""
self.css_click(
'input.calibration-interstitial-page-button'
if is_calibrating else 'input.interstitial-page-button'
)
"""
Students grade peer submissions.
"""
from bok_choy.page_object import PageObject
from .rubric import RubricPage
class PeerGradePage(PageObject):
"""
Students grade peer submissions.
"""
url = None
def is_browser_on_page(self):
return (
self.is_css_present('div.peer-grading-tools') or
self.is_css_present('div.grading-panel.current-state')
)
@property
def problem_list(self):
"""
Return the list of available problems to peer grade.
"""
return self.css_text('a.problem-button')
def select_problem(self, problem_name):
"""
Choose the problem with `problem_name` to start grading or calibrating.
"""
index = self.problem_list.index(problem_name) + 1
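        # CSS :nth-of-type is 1-indexed, hence the +1 offset from the list position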
        self.css_click('a.problem-button:nth-of-type({})'.format(index))

    @property
    def rubric(self):
        """
        Return a `RubricPage` to allow students to grade their peers.
        If no rubric is available, raises a `BrokenPromise` exception.
        """
        rubric = RubricPage(self.browser)
        rubric.wait_for_page()
        return rubric
"""
Rubric for open-ended response problems, including calibration and peer-grading.
"""
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise, fulfill_after, fulfill_before
class ScoreMismatchError(Exception):
"""
The provided scores do not match the rubric on the page.
"""
pass
class RubricPage(PageObject):
"""
Rubric for open-ended response problems, including calibration and peer-grading.
"""
url = None
def is_browser_on_page(self):
"""
Return a boolean indicating whether the rubric is available.
"""
return self.is_css_present('div.rubric')
@property
def categories(self):
"""
Return a list of categories available in the essay rubric.
Example:
["Writing Applications", "Language Conventions"]
The rubric is not always visible; if it's not available,
this will return an empty list.
"""
return self.css_text('span.rubric-category')

    def set_scores(self, scores):
        """
        Set the rubric scores. `scores` is a list of integers
        indicating the number of points in each category.

        For example, `scores` might be [0, 2, 1] if the student scored
        0 points in the first category, 2 points in the second category,
        and 1 point in the third category.

        If the number of scores does not match the number of categories,
        a `ScoreMismatchError` is raised.
        """
        # Fail fast if we have the wrong number of scores
        num_categories = len(self.categories)
        if len(scores) != num_categories:
            raise ScoreMismatchError(
                "Received {0} scores but there are {1} rubric categories".format(
                    len(scores), num_categories))

        # Set the score for each category
        for score_index in range(len(scores)):

            # Check that we have enough radio buttons
            category_css = "div.rubric>ul.rubric-list:nth-of-type({0})".format(score_index + 1)
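            # e.g. for score_index == 1 this selects
            # "div.rubric>ul.rubric-list:nth-of-type(2)" (illustrative)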
            num_options = self.css_count(category_css + ' input.score-selection')
            if scores[score_index] >= num_options:
                raise ScoreMismatchError(
                    "Tried to select score {0} but there are only {1} options".format(
                        scores[score_index], num_options))

            # Check the radio button at the correct index
            else:
                input_css = (
                    category_css +
                    ">li.rubric-list-item:nth-of-type({0}) input.score-selection".format(scores[score_index] + 1)
                )
                self.css_check(input_css)

    @property
    def feedback(self):
        """
        Return a list of correct/incorrect feedback for each rubric category (e.g. from self-assessment).

        Example: ['correct', 'incorrect']

        If no feedback is available, returns an empty list.
        If feedback could not be interpreted (unexpected CSS class),
        the list will contain a `None` item.
        """
        # Get the green checkmark / red x labels
        # We need to filter out the similar-looking CSS classes
        # for the rubric items that are NOT marked correct/incorrect
        feedback_css = 'div.rubric-label>label'
        labels = [
            el_class for el_class in
            self.css_map(feedback_css, lambda el: el['class'])
            if el_class != 'rubric-elements-info'
        ]

        def map_feedback(css_class):
            """
            Map CSS classes on the labels to correct/incorrect
            """
            if 'choicegroup_incorrect' in css_class:
                return 'incorrect'
            elif 'choicegroup_correct' in css_class:
                return 'correct'
            else:
                return None

        return map(map_feedback, labels)

    def submit(self):
        """
        Submit the rubric.
        """
        # Wait for the button to become enabled
        button_css = 'input.submit-button'
        button_enabled = EmptyPromise(
            lambda: all(self.css_map(button_css, lambda el: not el['disabled'])),
            "Submit button enabled"
        )

        # Submit the assessment
        with fulfill_before(button_enabled):
            self.css_click(button_css)
@@ -9,6 +9,9 @@ from ..pages.lms.course_info import CourseInfoPage
from ..pages.lms.tab_nav import TabNavPage
from ..pages.lms.course_nav import CourseNavPage
from ..pages.lms.open_response import OpenResponsePage
from ..pages.lms.peer_grade import PeerGradePage
from ..pages.lms.peer_calibrate import PeerCalibratePage
from ..pages.lms.peer_confirm import PeerConfirmPage
from ..pages.lms.progress import ProgressPage

from ..fixtures.course import XBlockFixtureDesc, CourseFixture
from ..fixtures.xqueue import XQueueResponseFixture
@@ -40,6 +43,9 @@ class OpenResponseTest(UniqueCourseTest):

        self.tab_nav = TabNavPage(self.browser)
        self.course_nav = CourseNavPage(self.browser)
        self.open_response = OpenResponsePage(self.browser)
        self.peer_grade = PeerGradePage(self.browser)
        self.peer_calibrate = PeerCalibratePage(self.browser)
        self.peer_confirm = PeerConfirmPage(self.browser)
        self.progress_page = ProgressPage(self.browser, self.course_id)

        # Configure the test course
@@ -48,6 +54,13 @@ class OpenResponseTest(UniqueCourseTest):
            self.course_info['run'], self.course_info['display_name']
        )

        # Create a unique name for the peer-assessed problem. This will show up
        # in the list of peer problems, which is shared among tests running
        # in parallel; it needs to be unique so we can find it.
        # It's also important that the problem has "Peer" in the name; otherwise,
        # the ORA stub will ignore it.
        self.peer_problem_name = "Peer-Assessed {}".format(self.unique_id[0:6])

        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
@@ -58,9 +71,10 @@ class OpenResponseTest(UniqueCourseTest):
                    XBlockFixtureDesc('combinedopenended', 'AI-Assessed',
                        data=load_data_str('ora_ai_problem.xml'), metadata={'graded': True}),

                    XBlockFixtureDesc('combinedopenended', self.peer_problem_name,
                        data=load_data_str('ora_peer_problem.xml'), metadata={'graded': True}),

                    # This is the interface a student can use to grade his/her peers
                    XBlockFixtureDesc('peergrading', 'Peer Module'),

        ))).install()
@@ -128,14 +142,14 @@ class OpenResponseTest(UniqueCourseTest):

        if assessment_type == 'ai':
            section_name = 'AI-Assessed'
        elif assessment_type == 'peer':
            section_name = self.peer_problem_name
        else:
            raise ValueError('Assessment type not recognized. Must be either "ai" or "peer"')

        def _inner_check():
            self.course_nav.go_to_sequential('Self-Assessed')
            self.course_nav.go_to_sequential(section_name)

            feedback = self.open_response.rubric.feedback

            # Successful if `feedback` is a non-empty list
            return (bool(feedback), feedback)
@@ -155,22 +169,17 @@ class SelfAssessmentTest(OpenResponseTest):

        Then I see a scored rubric
        And I see my score in the progress page.
        """
        # Navigate to the self-assessment problem and submit an essay
        self.course_nav.go_to_sequential('Self-Assessed')
        self.submit_essay('self', 'Censorship in the Libraries')

        # Fill in the rubric and expect that we get feedback
        rubric = self.open_response.rubric
        self.assertEqual(rubric.categories, ["Writing Applications", "Language Conventions"])
        rubric.set_scores([0, 1])
        rubric.submit()
        self.assertEqual(rubric.feedback, ['incorrect', 'correct'])

        # Verify the progress page
        self.progress_page.visit()
@@ -223,10 +232,10 @@ class AIAssessmentTest(OpenResponseTest):
        self.assertEqual(scores, [(0, 2), (1, 2), (0, 2)])


class InstructorAssessmentTest(OpenResponseTest):
    """
    Test an AI-assessment that has been graded by an instructor.
    This runs the same test as the AI-assessment test, except
    that the feedback comes from an instructor instead of the machine grader.
    From the student's perspective, it should look the same.
    """
@@ -242,11 +251,36 @@ class InstructorAssessmentTest(OpenResponseTest):
            'rubric_xml': load_data_str('ora_rubric.xml')
        }

    def test_instructor_assessment(self):
        """
        Given an instructor has graded my submission
        When I view my submission
        Then I see a scored rubric
        And my progress page shows the problem score.
        """
        # Navigate to the AI-assessment problem and submit an essay.
        # We have configured the stub to simulate that this essay will be staff-graded.
        self.course_nav.go_to_sequential('AI-Assessed')
        self.submit_essay('ai', 'Censorship in the Libraries')

        # Refresh the page to get the updated feedback,
        # then verify that we get the feedback sent by our stub XQueue implementation
        self.assertEqual(self.get_asynch_feedback('ai'), ['incorrect', 'correct'])

        # Verify the progress page
        self.progress_page.visit()
        scores = self.progress_page.scores('Test Section', 'Test Subsection')

        # First score is the self-assessment score, which we haven't answered, so it's 0/2
        # Second score is the AI-assessment score, which we have answered, so it's 1/2
        # Third score is peer-assessment, which we haven't answered, so it's 0/2
        self.assertEqual(scores, [(0, 2), (1, 2), (0, 2)])


class PeerAssessmentTest(OpenResponseTest):
    """
    Test ORA peer-assessment, including calibration and giving/receiving scores.
    """

    # Unlike other assessment types, peer assessment has multiple scores
@@ -261,20 +295,58 @@ class PeerAssessmentTest(OpenResponseTest):
        'rubric_xml': [load_data_str('ora_rubric.xml')] * 3
    }

    def test_peer_calibrate_and_grade(self):
        """
        Given I am viewing a peer-assessment problem
        And the instructor has submitted enough example essays
        When I submit acceptable scores for enough calibration essays
        Then I am able to peer-grade other students' essays.

        Given I have submitted an essay for peer-assessment
        And I have peer-graded enough students' essays
        And enough other students have scored my essay
        Then I can view the scores and written feedback
        And I see my score in the progress page.
        """
        # Initially, the student should NOT be able to grade peers,
        # because he/she hasn't submitted any essays.
        self.course_nav.go_to_sequential('Peer Module')
        self.assertIn("You currently do not have any peer grading to do", self.peer_calibrate.message)

        # Submit an essay
        self.course_nav.go_to_sequential(self.peer_problem_name)
        self.submit_essay('peer', 'Censorship in the Libraries')

        # Need to reload the page to update the peer grading module
        self.course_info_page.visit()
        self.tab_nav.go_to_tab('Courseware')
        self.course_nav.go_to_section('Test Section', 'Test Subsection')

        # Select the problem to calibrate
        self.course_nav.go_to_sequential('Peer Module')
        self.assertIn(self.peer_problem_name, self.peer_grade.problem_list)
        self.peer_grade.select_problem(self.peer_problem_name)

        # Calibrate
        self.peer_confirm.start(is_calibrating=True)
        rubric = self.peer_calibrate.rubric
        self.assertEqual(rubric.categories, ["Writing Applications", "Language Conventions"])
        rubric.set_scores([0, 1])
        rubric.submit()
        self.peer_calibrate.continue_to_grading()

        # Grade a peer
        self.peer_confirm.start()
        rubric = self.peer_grade.rubric
        self.assertEqual(rubric.categories, ["Writing Applications", "Language Conventions"])
        rubric.set_scores([0, 1])
        rubric.submit()

        # Expect to receive essay feedback:
        # we receive feedback from all three peers, each of which
        # provides 2 scores (one for each rubric item).
        # Written feedback is a dummy value sent by the XQueue stub.
        self.course_nav.go_to_sequential(self.peer_problem_name)
        self.assertEqual(self.get_asynch_feedback('peer'), ['incorrect', 'correct'] * 3)

        # Verify the progress page
...
@@ -33,7 +33,14 @@ BOK_CHOY_STUBS = {

  :xqueue => {
    :port => 8040,
    :log => File.join(BOK_CHOY_LOG_DIR, "bok_choy_xqueue.log"),
    :config => 'register_submission_url=http://0.0.0.0:8041/test/register_submission'
  },

  :ora => {
    :port => 8041,
    :log => File.join(BOK_CHOY_LOG_DIR, "bok_choy_ora.log"),
    :config => ''
  }
}
@@ -56,14 +63,14 @@ def start_servers()

  BOK_CHOY_STUBS.each do | service, info |
    Dir.chdir(BOK_CHOY_STUB_DIR) do
      singleton_process(
        "python -m stubs.start #{service} #{info[:port]} #{info[:config]}",
        logfile=info[:log]
      )
    end
  end
end

# Wait until we get a successful response from the servers or time out
def wait_for_test_servers()
  BOK_CHOY_SERVERS.merge(BOK_CHOY_STUBS).each do | service, info |
...