Commit 4c2b7695 by Will Daly

Merge pull request #2431 from edx/will/ora-acceptance-tests-rebase

Peer-grading acceptance tests
parents ea6543cc 0a9b4ada
<rubric><category><description>Writing Applications</description><score>0</score><option points='0'> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option><option points='1'> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option></category><category><description> Language Conventions </description><score>1</score><option points='0'> The essay demonstrates a reasonable command of proper spelling and grammar. </option><option points='1'> The essay demonstrates superior command of proper spelling and grammar.</option></category></rubric>
<rubric><category><description>Writing Applications</description><option points='0'> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option><option points='1'> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option></category><category><description> Language Conventions </description><option points='0'> The essay demonstrates a reasonable command of proper spelling and grammar. </option><option points='1'> The essay demonstrates superior command of proper spelling and grammar.</option></category></rubric>
...@@ -6,12 +6,60 @@ from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import urlparse
import threading
import json
from functools import wraps
from lazy import lazy
from logging import getLogger
LOGGER = getLogger(__name__)
def require_params(method, *required_keys):
"""
Decorator to ensure that the method has all the required parameters.
Example:
@require_params('GET', 'id', 'state')
def handle_request(self):
# ....
would send a 400 response if no GET parameters were specified
for 'id' or 'state' (or if those parameters had empty values).
The wrapped function should be a method of a `StubHttpRequestHandler`
subclass.
Currently, "GET" and "POST" are the only supported methods.
"""
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
# Read either GET querystring params or POST dict params
if method == "GET":
params = self.get_params
elif method == "POST":
params = self.post_dict
else:
raise ValueError("Unsupported method '{method}'".format(method=method))
# Check for required values
missing = []
for key in required_keys:
if params.get(key) is None:
missing.append(key)
if len(missing) > 0:
msg = "Missing required key(s) {keys}".format(keys=",".join(missing))
self.send_response(400, content=msg, headers={'Content-type': 'text/plain'})
# If nothing is missing, execute the function as usual
else:
return func(self, *args, **kwargs)
return wrapper
return decorator
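For example, a handler method wrapped with this decorator might look like the following sketch (the handler name is illustrative; the real usages are in the ORA stub and the unit tests added in this commit):

class ExampleHandler(StubHttpRequestHandler):

    @require_params('POST', 'student_id')
    def do_POST(self):
        # If 'student_id' is missing or empty, the decorator has already
        # sent the 400 response and this body never runs.
        self.send_response(200)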
class StubHttpRequestHandler(BaseHTTPRequestHandler, object):
"""
Handler for the stub HTTP service.
...@@ -70,7 +118,26 @@ class StubHttpRequestHandler(BaseHTTPRequestHandler, object):
"""
Return the GET parameters (querystring in the URL).
"""
query = urlparse.urlparse(self.path).query
# By default, `parse_qs` returns a list of values for each param
# For convenience, we replace lists of 1 element with just the element
return {
k:v[0] if len(v) == 1 else v
for k,v in urlparse.parse_qs(query).items()
}
@lazy
def path_only(self):
"""
Return the URL path without GET parameters.
Removes the trailing slash if there is one.
"""
path = urlparse.urlparse(self.path).path
if path.endswith('/'):
return path[:-1]
else:
return path
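A worked example of the two properties above, using a URL the ORA stub later in this commit actually serves:

# For a request to "/peer_grading/is_student_calibrated/?student_id=1234&problem_id=5678":
#   self.path_only  == "/peer_grading/is_student_calibrated"        (trailing slash removed)
#   self.get_params == {'student_id': '1234', 'problem_id': '5678'} (single values unwrapped)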
def do_PUT(self):
"""
...
"""
Stub implementation of ORA service.
This is an extremely simple version of the service, with most
business logic removed. In particular, the stub:
1) Provides an infinite number of peer and calibration essays,
with dummy data.
2) Simulates a set number of pending submissions for each student;
grades submitted by one student are not used for any other student.
3) Ignores the scores/feedback students submit.
4) Ignores problem location: an essay graded for *any* problem is graded
for *every* problem.
Basically, the stub tracks only the *number* of peer/calibration essays
submitted by each student.
"""
import json
import pkg_resources
from .http import StubHttpRequestHandler, StubHttpService, require_params
class StudentState(object):
"""
Store state about the student that the stub
ORA implementation needs to keep track of.
"""
INITIAL_ESSAYS_AVAILABLE = 3
NUM_ESSAYS_REQUIRED = 1
NUM_CALIBRATION_REQUIRED = 1
def __init__(self):
self.num_graded = 0
self.num_calibrated = 0
def grade_peer_essay(self):
self.num_graded += 1
def grade_calibration_essay(self):
self.num_calibrated += 1
@property
def num_pending(self):
return max(self.INITIAL_ESSAYS_AVAILABLE - self.num_graded, 0)
@property
def num_required(self):
return max(self.NUM_ESSAYS_REQUIRED - self.num_graded, 0)
@property
def is_calibrated(self):
return self.num_calibrated >= self.NUM_CALIBRATION_REQUIRED
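To make the counting concrete, here is a small sketch of how these properties evolve (illustration only, not part of the stub):

state = StudentState()
assert state.num_pending == 3       # INITIAL_ESSAYS_AVAILABLE
assert state.num_required == 1      # NUM_ESSAYS_REQUIRED
assert not state.is_calibrated

state.grade_calibration_essay()
assert state.is_calibrated          # NUM_CALIBRATION_REQUIRED essays graded

state.grade_peer_essay()
assert state.num_pending == 2 and state.num_required == 0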
class StubOraHandler(StubHttpRequestHandler):
"""
Handler for ORA requests.
"""
GET_URL_HANDLERS = {
'/peer_grading/get_next_submission': '_get_next_submission',
'/peer_grading/is_student_calibrated': '_is_student_calibrated',
'/peer_grading/show_calibration_essay': '_show_calibration_essay',
'/peer_grading/get_notifications': '_get_notifications',
'/peer_grading/get_data_for_location': '_get_data_for_location',
'/peer_grading/get_problem_list': '_get_problem_list',
}
POST_URL_HANDLERS = {
'/peer_grading/save_grade': '_save_grade',
'/peer_grading/save_calibration_essay': '_save_calibration_essay',
# Test-specific, used by the XQueue stub to register a new submission,
# which we use to discover valid problem locations in the LMS
'/test/register_submission': '_register_submission'
}
def do_GET(self):
"""
Handle GET methods to the ORA API stub.
"""
self._send_handler_response('GET')
def do_POST(self):
"""
Handle POST methods to the ORA API stub.
"""
self._send_handler_response('POST')
def _send_handler_response(self, method):
"""
Delegate response to handler methods.
If no handler defined, send a 404 response.
"""
# Choose the list of handlers based on the HTTP method
if method == 'GET':
handler_list = self.GET_URL_HANDLERS
elif method == 'POST':
handler_list = self.POST_URL_HANDLERS
else:
self.log_error('Unrecognized method "{method}"'.format(method=method))
return
# Check the path (without querystring params) against our list of handlers
handler_name = handler_list.get(self.path_only)
if handler_name is not None:
handler = getattr(self, handler_name, None)
else:
handler = None
# Delegate to the handler to send a response
if handler is not None:
handler()
# If we don't have a handler for this URL and/or HTTP method,
# respond with a 404. This is the same behavior as the ORA API.
else:
self.send_response(404)
@require_params('GET', 'student_id', 'problem_id')
def _is_student_calibrated(self):
"""
Query whether the student has completed enough calibration
essays to begin peer grading.
Method: GET
Params:
- student_id
- problem_id
Result (JSON):
- success (bool)
- total_calibrated_on_so_far (int)
- calibrated (bool)
"""
student = self._student('GET')
if student is None:
self._error_response()
else:
self._success_response({
'total_calibrated_on_so_far': student.num_calibrated,
'calibrated': student.is_calibrated
})
@require_params('GET', 'student_id', 'problem_id')
def _show_calibration_essay(self):
"""
Retrieve a calibration essay for the student to grade.
Method: GET
Params:
- student_id
- problem_id
Result (JSON):
- success (bool)
- submission_id (str)
- submission_key (str)
- student_response (str)
- prompt (str)
- rubric (str)
- max_score (int)
"""
self._success_response({
'submission_id': self.server.DUMMY_DATA['submission_id'],
'submission_key': self.server.DUMMY_DATA['submission_key'],
'student_response': self.server.DUMMY_DATA['student_response'],
'prompt': self.server.DUMMY_DATA['prompt'],
'rubric': self.server.DUMMY_DATA['rubric'],
'max_score': self.server.DUMMY_DATA['max_score']
})
@require_params('GET', 'student_id', 'course_id')
def _get_notifications(self):
"""
Query counts of submitted, required, graded, and available peer essays
for a particular student.
Method: GET
Params:
- student_id
- course_id
Result (JSON):
- success (bool)
- student_sub_count (int)
- count_required (int)
- count_graded (int)
- count_available (int)
"""
student = self._student('GET')
if student is None:
self._error_response()
else:
self._success_response({
'student_sub_count': self.server.DUMMY_DATA['student_sub_count'],
'count_required': student.num_required,
'count_graded': student.num_graded,
'count_available': student.num_pending
})
@require_params('GET', 'student_id', 'location')
def _get_data_for_location(self):
"""
Query counts of submitted, required, graded, and available peer essays
for a problem location.
This will send an error response if the problem has not
been registered at the given `location`. This allows us
to ignore problems that are self- or ai-graded.
Method: GET
Params:
- student_id
- location
Result (JSON):
- success (bool)
- student_sub_count (int)
- count_required (int)
- count_graded (int)
- count_available (int)
"""
student = self._student('GET')
location = self.get_params.get('location')
# Do not return data if we're missing the student param
# or the problem has not yet been registered.
if student is None or location not in self.server.problems:
self._error_response()
else:
self._success_response({
'student_sub_count': self.server.DUMMY_DATA['student_sub_count'],
'count_required': student.num_required,
'count_graded': student.num_graded,
'count_available': student.num_pending
})
@require_params('GET', 'grader_id', 'location')
def _get_next_submission(self):
"""
Retrieve the next submission for the student to peer-grade.
Method: GET
Params:
- grader_id
- location
Result (JSON):
- success (bool)
- submission_id (str)
- submission_key (str)
- student_response (str)
- prompt (str, HTML)
- rubric (str, XML)
- max_score (int)
"""
self._success_response({
'submission_id': self.server.DUMMY_DATA['submission_id'],
'submission_key': self.server.DUMMY_DATA['submission_key'],
'student_response': self.server.DUMMY_DATA['student_response'],
'prompt': self.server.DUMMY_DATA['prompt'],
'rubric': self.server.DUMMY_DATA['rubric'],
'max_score': self.server.DUMMY_DATA['max_score']
})
@require_params('GET', 'course_id')
def _get_problem_list(self):
"""
Retrieve the list of problems available for peer grading.
Method: GET
Params:
- course_id
Result (JSON):
- success (bool)
- problem_list (list)
where `problem_list` is a list of dictionaries with keys:
- location (str)
- problem_name (str)
- num_graded (int)
- num_pending (int)
- num_required (int)
"""
self._success_response({'problem_list': self.server.problem_list})
@require_params('POST', 'grader_id', 'location', 'submission_id', 'score', 'feedback', 'submission_key')
def _save_grade(self):
"""
Save a score and feedback for an essay the student has graded.
Method: POST
Params:
- grader_id
- location
- submission_id
- score
- feedback
- submission_key
Result (JSON):
- success (bool)
"""
student = self._student('POST', key='grader_id')
if student is None:
self._error_response()
else:
# Update the number of essays the student has graded
student.grade_peer_essay()
return self._success_response({})
@require_params('POST', 'student_id', 'location', 'calibration_essay_id', 'score', 'feedback', 'submission_key')
def _save_calibration_essay(self):
"""
Save a score and feedback for a calibration essay the student has graded.
Returns the scores/feedback that the instructor gave for the essay.
Method: POST
Params:
- student_id
- location
- calibration_essay_id
- score
- feedback
- submission_key
Result (JSON):
- success (bool)
- message (str)
- actual_score (int)
- actual_rubric (str, XML)
- actual_feedback (str)
"""
student = self._student('POST')
if student is None:
self._error_response()
else:
# Increment the student calibration count
student.grade_calibration_essay()
self._success_response({
'message': self.server.DUMMY_DATA['message'],
'actual_score': self.server.DUMMY_DATA['actual_score'],
'actual_rubric': self.server.DUMMY_DATA['actual_rubric'],
'actual_feedback': self.server.DUMMY_DATA['actual_feedback']
})
@require_params('POST', 'grader_payload')
def _register_submission(self):
"""
Test-specific method to register a new submission.
This is used by `get_problem_list` to return valid locations in the LMS courseware.
In tests, this end-point gets called by the XQueue stub when it receives new submissions,
much like ORA discovers locations when students submit peer-graded problems to the XQueue.
Since the LMS sends *all* open-ended problems to the XQueue (including self- and ai-graded),
we have to ignore everything except peer-graded problems. We do so by looking
for the text 'peer' in the problem's name. This is a little bit of a hack,
but it makes the implementation much simpler.
Method: POST
Params:
- grader_payload (JSON dict)
Result: Empty
The only keys we use in `grader_payload` are 'location' and 'problem_id'.
"""
# Since this is a required param, we know it is in the post dict
try:
payload = json.loads(self.post_dict['grader_payload'])
except ValueError:
self.log_message(
"Could not decode grader payload as JSON: '{0}'".format(
self.post_dict['grader_payload']))
self.send_response(400)
else:
location = payload.get('location')
name = payload.get('problem_id')
if location is not None and name is not None:
if "peer" in name.lower():
self.server.register_problem(location, name)
self.send_response(200)
else:
self.log_message(
"Problem '{0}' does not have 'peer' in its name. Ignoring...".format(name)
)
self.send_response(200)
else:
self.log_message(
"Grader payload should contain 'location' and 'problem_id' keys: {0}".format(payload)
)
self.send_response(400)
def _student(self, method, key='student_id'):
"""
Return the `StudentState` instance for the student ID given
in the request parameters.
`method` is the HTTP request method (either "GET" or "POST")
and `key` is the parameter key.
"""
if method == 'GET':
student_id = self.get_params.get(key)
elif method == 'POST':
student_id = self.post_dict.get(key)
else:
self.log_error("Unrecognized method '{method}'".format(method=method))
return None
if student_id is None:
self.log_error("Could not get student ID from parameters")
return None
return self.server.student_state(student_id)
def _success_response(self, response_dict):
"""
Send a success response.
`response_dict` is a Python dictionary to JSON-encode.
"""
response_dict['success'] = True
response_dict['version'] = 1
self.send_response(
200, content=json.dumps(response_dict),
headers={'Content-type': 'application/json'}
)
def _error_response(self):
"""
Send an error response.
"""
response_dict = {'success': False, 'version': 1}
self.send_response(
400, content=json.dumps(response_dict),
headers={'Content-type': 'application/json'}
)
class StubOraService(StubHttpService):
"""
Stub ORA service.
"""
HANDLER_CLASS = StubOraHandler
DUMMY_DATA = {
'submission_id': 1,
'submission_key': 'test key',
'student_response': 'Test response',
'prompt': 'Test prompt',
'rubric': pkg_resources.resource_string(__name__, "data/ora_rubric.xml"),
'max_score': 2,
'message': 'Successfully saved calibration record.',
'actual_score': 2,
'actual_rubric': pkg_resources.resource_string(__name__, "data/ora_graded_rubric.xml"),
'actual_feedback': 'Great job!',
'student_sub_count': 1,
'problem_name': 'test problem',
'problem_list_num_graded': 1,
'problem_list_num_pending': 1,
'problem_list_num_required': 0,
}
def __init__(self, *args, **kwargs):
"""
Initialize student submission state.
"""
super(StubOraService, self).__init__(*args, **kwargs)
# Create a dict to map student IDs to their state
self._students = dict()
# By default, no problems are available for peer grading
# You can add to this list using the '/test/register_submission' HTTP end-point
# This is a dict mapping problem locations to problem names
self.problems = dict()
def student_state(self, student_id):
"""
Return the `StudentState` instance for the student
with ID `student_id`. The student state can be modified by the caller.
"""
# Create the student state if it does not already exist
if student_id not in self._students:
student = StudentState()
self._students[student_id] = student
# Retrieve the student state
return self._students[student_id]
@property
def problem_list(self):
"""
Return a list of problems available for peer grading.
"""
return [{
'location': location, 'problem_name': name,
'num_graded': self.DUMMY_DATA['problem_list_num_graded'],
'num_pending': self.DUMMY_DATA['problem_list_num_pending'],
'num_required': self.DUMMY_DATA['problem_list_num_required']
} for location, name in self.problems.items()
]
def register_problem(self, location, name):
"""
Register a new problem with `location` and `name` for peer grading.
"""
self.problems[location] = name
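A usage sketch for the test-only registration end-point defined above; this mirrors what the XQueue stub does when `register_submission_url` is configured (the location, problem name, and port are made-up values):

import json
import requests

payload = json.dumps({'location': 'i4x://test/course/problem', 'problem_id': 'Peer Graded Essay'})
requests.post(
    'http://127.0.0.1:8041/test/register_submission',
    data={'grader_payload': payload}
)
# Because the problem name contains 'peer', it will now show up in
# get_problem_list, and get_data_for_location will return data for it.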
...@@ -6,13 +6,15 @@ import time
import logging
from .xqueue import StubXQueueService
from .youtube import StubYouTubeService
from .ora import StubOraService
USAGE = "USAGE: python -m stubs.start SERVICE_NAME PORT_NUM [CONFIG_KEY=CONFIG_VAL, ...]"
SERVICES = {
'xqueue': StubXQueueService,
'youtube': StubYouTubeService,
'ora': StubOraService
}
# Log to stdout, including debug messages
...@@ -21,7 +23,7 @@ logging.basicConfig(level=logging.DEBUG, format="%(levelname)s %(message)s")
def get_args():
"""
Parse arguments, returning tuple of `(service_name, port_num, config_dict)`.
Exits with a message if arguments are invalid.
"""
if len(sys.argv) < 3:
...@@ -30,6 +32,7 @@ def get_args():
service_name = sys.argv[1]
port_num = sys.argv[2]
config_dict = _parse_config_args(sys.argv[3:])
if service_name not in SERVICES:
print "Unrecognized service '{0}'. Valid choices are: {1}".format(
...@@ -45,17 +48,40 @@ def get_args():
print "Port '{0}' must be a positive integer".format(port_num)
sys.exit(1)
return service_name, port_num, config_dict
def _parse_config_args(args):
"""
Parse stub configuration arguments, which are strings of the form "KEY=VAL".
`args` is a list of arguments from the command line.
Any argument that does not match the "KEY=VAL" format will be logged and skipped.
Returns a dictionary with the configuration keys and values.
"""
config_dict = dict()
for config_str in args:
components = config_str.split('=')
if len(components) >= 2:
config_dict[components[0]] = "=".join(components[1:])
else:
print "Warning: could not interpret config value '{0}'".format(config_str)
return config_dict
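For example, the runner invocation below (a sketch using the `register_submission_url` key the XQueue stub understands) produces a one-entry config dict; everything after the first '=' is re-joined, so values may themselves contain '=':

#   python -m stubs.start xqueue 8040 register_submission_url=http://127.0.0.1:8041/test/register_submission
# yields:
#   config_dict == {'register_submission_url': 'http://127.0.0.1:8041/test/register_submission'}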
def main():
"""
Start a server; shut down on keyboard interrupt signal.
"""
service_name, port_num, config_dict = get_args()
print "Starting stub service '{0}' on port {1}...".format(service_name, port_num)
server = SERVICES[service_name](port_num=port_num)
server.config.update(config_dict)
try:
while True:
...
...@@ -5,7 +5,7 @@ Unit tests for stub HTTP server base class.
import unittest
import requests
import json
from terrain.stubs.http import StubHttpService, StubHttpRequestHandler, require_params
class StubHttpServiceTest(unittest.TestCase):
...@@ -62,3 +62,56 @@ class StubHttpServiceTest(unittest.TestCase):
data="{}"
)
self.assertEqual(response.status_code, 404)
class RequireRequestHandler(StubHttpRequestHandler):
@require_params('GET', 'test_param')
def do_GET(self):
self.send_response(200)
@require_params('POST', 'test_param')
def do_POST(self):
self.send_response(200)
class RequireHttpService(StubHttpService):
HANDLER_CLASS = RequireRequestHandler
class RequireParamTest(unittest.TestCase):
"""
Test the decorator for requiring parameters.
"""
def setUp(self):
self.server = RequireHttpService()
self.addCleanup(self.server.shutdown)
self.url = "http://127.0.0.1:{port}".format(port=self.server.port)
def test_require_get_param(self):
# Expect success when we provide the required param
response = requests.get(self.url, params={"test_param": 2})
self.assertEqual(response.status_code, 200)
# Expect failure when we do not provide the param
response = requests.get(self.url)
self.assertEqual(response.status_code, 400)
# Expect failure when we provide an empty param
response = requests.get(self.url + "?test_param=")
self.assertEqual(response.status_code, 400)
def test_require_post_param(self):
# Expect success when we provide the required param
response = requests.post(self.url, data={"test_param": 2})
self.assertEqual(response.status_code, 200)
# Expect failure when we do not provide the param
response = requests.post(self.url)
self.assertEqual(response.status_code, 400)
# Expect failure when we provide an empty param
response = requests.post(self.url, data={"test_param": None})
self.assertEqual(response.status_code, 400)
"""
Unit tests for stub ORA implementation.
"""
import unittest
import requests
import json
from ..ora import StubOraService, StubOraHandler, StudentState
class StubOraServiceTest(unittest.TestCase):
def setUp(self):
"""
Start the stub server.
"""
self.server = StubOraService()
self.addCleanup(self.server.shutdown)
def test_calibration(self):
# Ensure that we use the same student ID throughout
student_id = '1234'
# Initially, student should not be calibrated
response = requests.get(
self._peer_url('is_student_calibrated'),
params={'student_id': student_id, 'problem_id': '5678'}
)
self._assert_response(response, {
'version': 1, 'success': True,
'total_calibrated_on_so_far': 0,
'calibrated': False
})
# Retrieve a calibration essay
response = requests.get(
self._peer_url('show_calibration_essay'),
params={'student_id': student_id, 'problem_id': '5678'}
)
self._assert_response(response, {
'version': 1, 'success': True,
'submission_id': self.server.DUMMY_DATA['submission_id'],
'submission_key': self.server.DUMMY_DATA['submission_key'],
'student_response': self.server.DUMMY_DATA['student_response'],
'prompt': self.server.DUMMY_DATA['prompt'],
'rubric': self.server.DUMMY_DATA['rubric'],
'max_score': self.server.DUMMY_DATA['max_score']
})
# Grade the calibration essay
response = requests.post(
self._peer_url('save_calibration_essay'),
data={
'student_id': student_id,
'location': 'test location',
'calibration_essay_id': 1,
'score': 2,
'submission_key': 'key',
'feedback': 'Good job!'
}
)
self._assert_response(response, {
'version': 1, 'success': True,
'message': self.server.DUMMY_DATA['message'],
'actual_score': self.server.DUMMY_DATA['actual_score'],
'actual_rubric': self.server.DUMMY_DATA['actual_rubric'],
'actual_feedback': self.server.DUMMY_DATA['actual_feedback']
})
# Now the student should be calibrated
response = requests.get(
self._peer_url('is_student_calibrated'),
params={'student_id': student_id, 'problem_id': '5678'}
)
self._assert_response(response, {
'version': 1, 'success': True,
'total_calibrated_on_so_far': 1,
'calibrated': True
})
# But a student with a different ID should NOT be calibrated.
response = requests.get(
self._peer_url('is_student_calibrated'),
params={'student_id': 'another', 'problem_id': '5678'}
)
self._assert_response(response, {
'version': 1, 'success': True,
'total_calibrated_on_so_far': 0,
'calibrated': False
})
def test_grade_peers(self):
# Ensure a consistent student ID
student_id = '1234'
# Check initial number of submissions
# Should be none graded and 1 required
self._assert_num_graded(student_id, None, 0, 1)
# Register a problem that DOES have "peer" in the name
self._register_problem('test_location', 'Peer Assessed Problem')
# Retrieve the next submission
response = requests.get(
self._peer_url('get_next_submission'),
params={'grader_id': student_id, 'location': 'test_location'}
)
self._assert_response(response, {
'version': 1, 'success': True,
'submission_id': self.server.DUMMY_DATA['submission_id'],
'submission_key': self.server.DUMMY_DATA['submission_key'],
'student_response': self.server.DUMMY_DATA['student_response'],
'prompt': self.server.DUMMY_DATA['prompt'],
'rubric': self.server.DUMMY_DATA['rubric'],
'max_score': self.server.DUMMY_DATA['max_score']
})
# Grade the submission
response = requests.post(
self._peer_url('save_grade'),
data={
'location': 'test_location',
'grader_id': student_id,
'submission_id': 1,
'score': 2,
'feedback': 'Good job!',
'submission_key': 'key'
}
)
self._assert_response(response, {'version': 1, 'success': True})
# Check the updated number of submissions
# Should be one graded and none required
self._assert_num_graded(student_id, 'test_location', 1, 0)
# Grade the next submission
response = requests.post(
self._peer_url('save_grade'),
data={
'location': 'test_location',
'grader_id': student_id,
'submission_id': 1,
'score': 2,
'feedback': 'Good job!',
'submission_key': 'key'
}
)
self._assert_response(response, {'version': 1, 'success': True})
# Check final number of submissions
# Should be two graded and none required
self._assert_num_graded(student_id, 'test_location', 2, 0)
def test_problem_list(self):
self._register_problem('test_location', 'Peer Grading Problem')
# The problem list returns dummy counts which are not updated
# The location we use is ignored by the LMS, and we ignore it in the stub,
# so we use a dummy value there too.
response = requests.get(
self._peer_url('get_problem_list'),
params={'course_id': 'test course'}
)
self._assert_response(response, {
'version': 1, 'success': True,
'problem_list': [{
'location': 'test_location',
'problem_name': 'Peer Grading Problem',
'num_graded': self.server.DUMMY_DATA['problem_list_num_graded'],
'num_pending': self.server.DUMMY_DATA['problem_list_num_pending'],
'num_required': self.server.DUMMY_DATA['problem_list_num_required']
}]
})
def test_ignore_non_peer_problem(self):
# Register a problem that does NOT have "peer" in the name
self._register_problem('test_location', 'Self Assessed Problem')
# Expect that the problem list is empty
response = requests.get(
self._peer_url('get_problem_list'),
params={'course_id': 'test course'}
)
self._assert_response(response,
{'version': 1, 'success': True, 'problem_list': []}
)
# Expect that no data is available for the problem location
response = requests.get(
self._peer_url('get_data_for_location'),
params={'location': 'test_location', 'student_id': 'test'}
)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json(), {'version': 1, 'success': False})
def test_empty_problem_list(self):
# Without configuring any problem location, should return an empty list
response = requests.get(
self._peer_url('get_problem_list'),
params={'course_id': 'test course'}
)
self._assert_response(response, {'version': 1, 'success': True, 'problem_list': []})
def _peer_url(self, path):
"""
Construct a URL to the stub ORA peer-grading service.
"""
return "http://127.0.0.1:{port}/peer_grading/{path}/".format(
port=self.server.port, path=path
)
def _register_problem(self, location, name):
"""
Configure the stub to use a particular problem location
The actual implementation discovers problem locations by submission
to the XQueue; we do something similar by having the XQueue stub
register submitted locations with the ORA stub.
"""
grader_payload = json.dumps({'location': location, 'problem_id': name})
url = "http://127.0.0.1:{port}/test/register_submission".format(port=self.server.port)
response = requests.post(url, data={'grader_payload': grader_payload})
self.assertTrue(response.ok)
def _assert_response(self, response, expected_json):
"""
Assert that the `response` was successful and contained
`expected_json` (dict) as its content.
"""
self.assertTrue(response.ok)
self.assertEqual(response.json(), expected_json)
def _assert_num_graded(self, student_id, location, num_graded, num_required):
"""
ORA provides two distinct ways to get the submitted/graded counts.
Here we check both of them to ensure that the number that we've graded
is consistently `num_graded`.
"""
# Unlike the actual ORA service,
# we keep track of counts on a per-student basis.
# This means that every user starts with N essays to grade,
# and as they grade essays, that number decreases.
# We do NOT simulate students adding more essays to the queue,
# and essays that the current student submits are NOT graded
# by other students.
num_pending = StudentState.INITIAL_ESSAYS_AVAILABLE - num_graded
# Notifications
response = requests.get(
self._peer_url('get_notifications'),
params={'student_id': student_id, 'course_id': 'test course'}
)
self._assert_response(response, {
'version': 1, 'success': True,
'count_required': num_required,
'student_sub_count': self.server.DUMMY_DATA['student_sub_count'],
'count_graded': num_graded,
'count_available': num_pending
})
# Location data
if location is not None:
response = requests.get(
self._peer_url('get_data_for_location'),
params={'location': location, 'student_id': student_id}
)
self._assert_response(response, {
'version': 1, 'success': True,
'count_required': num_required,
'student_sub_count': self.server.DUMMY_DATA['student_sub_count'],
'count_graded': num_graded,
'count_available': num_pending
})
...@@ -8,7 +8,7 @@ import json
import requests
import time
import copy
from ..xqueue import StubXQueueService, StubXQueueHandler
class StubXQueueServiceTest(unittest.TestCase):
...@@ -95,7 +95,7 @@ class StubXQueueServiceTest(unittest.TestCase):
# Post a submission to the XQueue stub
callback_url = 'http://127.0.0.1:8000/test_callback'
self._post_submission(
callback_url, 'test_queuekey', 'test_queue',
json.dumps({'submission': 'test_1 and test_2'})
)
...@@ -108,6 +108,23 @@ class StubXQueueServiceTest(unittest.TestCase):
self.assertFalse(post.called)
self.assertTrue(logger.error.called)
@mock.patch('terrain.stubs.xqueue.post')
def test_register_submission_url(self, post):
# Configure the XQueue stub to notify another service
# when it receives a submission.
register_url = 'http://127.0.0.1:8000/register_submission'
self.server.config['register_submission_url'] = register_url
callback_url = 'http://127.0.0.1:8000/test_callback'
submission = json.dumps({'grader_payload': 'test payload'})
self._post_submission(
callback_url, 'test_queuekey', 'test_queue', submission
)
# Check that a notification was sent
post.assert_any_call(register_url, data={'grader_payload': u'test payload'})
def _post_submission(self, callback_url, lms_key, queue_name, xqueue_body):
"""
Post a submission to the stub XQueue implementation.
...
...@@ -4,7 +4,7 @@ Unit test for stub YouTube implementation.
import unittest
import requests
from ..youtube import StubYouTubeService
class StubYouTubeServiceTest(unittest.TestCase):
...
...@@ -4,11 +4,12 @@ Stub implementation of XQueue for acceptance tests.
Configuration values:
"default" (dict): Default response to be sent to LMS as a grade for a submission
"<submission>" (dict): Grade response to return for submissions containing the text <submission>
"register_submission_url" (str): URL to send grader payloads when we receive a submission
If no grade response is configured, a default response will be returned.
"""
from .http import StubHttpRequestHandler, StubHttpService, require_params
import json
import copy
from requests import post
...@@ -23,6 +24,7 @@ class StubXQueueHandler(StubHttpRequestHandler):
DEFAULT_RESPONSE_DELAY = 2
DEFAULT_GRADE_RESPONSE = {'correct': True, 'score': 1, 'msg': ''}
@require_params('POST', 'xqueue_body', 'xqueue_header')
def do_POST(self):
"""
Handle a POST request from the client
...@@ -35,6 +37,10 @@ class StubXQueueHandler(StubHttpRequestHandler):
# Respond only to grading requests
if self._is_grade_request():
# If configured, send the grader payload to other services.
self._register_submission(self.post_dict['xqueue_body'])
try:
xqueue_header = json.loads(self.post_dict['xqueue_header'])
callback_url = xqueue_header['lms_callback_url']
...@@ -118,7 +124,7 @@ class StubXQueueHandler(StubHttpRequestHandler):
# There is a danger here that a submission will match multiple response patterns.
# Rather than fail silently (which could cause unpredictable behavior in tests)
# we abort and log a debugging message.
for pattern, response in self.server.queue_responses:
if pattern in xqueue_body_json:
if grade_response is None:
...@@ -150,7 +156,44 @@ class StubXQueueHandler(StubHttpRequestHandler):
post(postback_url, data=data)
self.log_message("XQueue: sent grading response {0} to {1}".format(data, postback_url))
def _register_submission(self, xqueue_body_json):
"""
If configured, send the submission's grader payload to another service.
"""
url = self.server.config.get('register_submission_url')
# If not configured, we do not need to send anything
if url is not None:
try:
xqueue_body = json.loads(xqueue_body_json)
except ValueError:
self.log_error(
"Could not decode XQueue body as JSON: '{0}'".format(xqueue_body_json))
else:
# Retrieve the grader payload, which should be a JSON-encoded dict.
# We pass the payload directly to the service we are notifying, without
# inspecting the contents.
grader_payload = xqueue_body.get('grader_payload')
if grader_payload is not None:
response = post(url, data={'grader_payload': grader_payload})
if not response.ok:
self.log_error(
"Could register submission at URL '{0}'. Status was {1}".format(
url, response.status_code))
else:
self.log_message(
"XQueue body is missing 'grader_payload' key: '{0}'".format(xqueue_body)
)
def _is_grade_request(self):
"""
Return a boolean indicating whether the requested URL indicates a submission.
"""
return 'xqueue/submit' in self.path
...@@ -160,3 +203,19 @@ class StubXQueueService(StubHttpService):
"""
HANDLER_CLASS = StubXQueueHandler
NON_QUEUE_CONFIG_KEYS = ['default', 'register_submission_url']
@property
def queue_responses(self):
"""
Returns a list of (pattern, response) tuples, where `pattern` is a pattern
to match in the XQueue body, and `response` is a dictionary to return
as the response from the grader.
Every other configuration key is treated as a submission pattern;
'default' and 'register_submission_url' have special meaning and are excluded.
"""
return {
key:val for key, val in self.config.iteritems()
if key not in self.NON_QUEUE_CONFIG_KEYS
}.items()
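For instance (a sketch of the filtering above; the response values are whatever was installed through the set_config end-point, shown here in the dict form the module docstring describes):

# With self.config == {
#     'default': {'correct': True, 'score': 1, 'msg': ''},
#     'register_submission_url': 'http://127.0.0.1:8041/test/register_submission',
#     'test_1': {'correct': False, 'score': 0, 'msg': ''},
# }
# queue_responses contains only the ('test_1', ...) pair; the two
# NON_QUEUE_CONFIG_KEYS entries are filtered out.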
...@@ -75,7 +75,7 @@ class StubYouTubeHandler(StubHttpRequestHandler):
time.sleep(self.server.config.get('time_to_response', self.DEFAULT_DELAY_SEC))
# Construct the response content
callback = self.get_params['callback']
data = OrderedDict({
'data': OrderedDict({
'id': youtube_id,
...
...@@ -5,3 +5,6 @@ STUDIO_BASE_URL = os.environ.get('studio_url', 'http://localhost:8031')
# Get the URL of the XQueue stub used in the test
XQUEUE_STUB_URL = os.environ.get('xqueue_url', 'http://localhost:8040')
# Get the URL of the Ora stub used in the test
ORA_STUB_URL = os.environ.get('ora_url', 'http://localhost:8041')
...@@ -29,7 +29,7 @@ class StudioApiFixture(object):
Log in as a staff user, then return a `requests` `session` object for the logged in user.
Raises a `StudioApiLoginError` if the login fails.
"""
# Use auto-auth to retrieve the session for a logged in user
session = requests.Session()
response = session.get(STUDIO_BASE_URL + "/auto_auth?staff=true")
...
...@@ -7,22 +7,29 @@ import json
from . import XQUEUE_STUB_URL
class XQueueResponseFixtureError(Exception):
"""
Error occurred while configuring the stub XQueue.
"""
pass
class XQueueResponseFixture(object):
"""
Configure the XQueue stub's response to submissions.
"""
def __init__(self, pattern, response_dict):
"""
Configure XQueue stub to POST `response_dict` (a dictionary)
back to the LMS when it receives a submission that contains the string
`pattern`.
Remember that there is one XQueue stub shared by all the tests;
if possible, you should have tests use unique queue names
to avoid conflict between tests running in parallel.
"""
self._pattern = pattern
self._response_dict = response_dict
def install(self):
...@@ -32,10 +39,10 @@ class XQueueResponseFixture(object):
url = XQUEUE_STUB_URL + "/set_config"
# Configure the stub to respond to submissions to our queue
payload = {self._pattern: json.dumps(self._response_dict)}
response = requests.put(url, data=payload)
if not response.ok:
raise XQueueResponseFixtureError(
"Could not configure XQueue stub for pattern '{0}'. Status code: {1}".format(
self._pattern, response.status_code))
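Typical use from a test might look like this sketch (the pattern and response values are illustrative; the response dict follows the stub XQueue grade-response format):

from ..fixtures.xqueue import XQueueResponseFixture

XQueueResponseFixture(
    'Censorship in the Libraries',
    {'correct': True, 'score': 2, 'msg': '<p>Feedback</p>'}
).install()
# Any submission whose body contains that text now receives this grade response.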
...@@ -3,7 +3,8 @@ Open-ended response in the courseware.
"""
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise, fulfill_after, fulfill
from .rubric import RubricPage
class OpenResponsePage(PageObject):
...@@ -57,58 +58,14 @@ class OpenResponsePage(PageObject):
return prompts[0]
@property
def rubric(self):
"""
Return a `RubricPage` for a self-assessment problem.
If no rubric is available, raises a `BrokenPromise` exception.
"""
rubric = RubricPage(self.browser)
rubric.wait_for_page()
return rubric
@property
def written_feedback(self):
...@@ -175,68 +132,26 @@ class OpenResponsePage(PageObject):
"""
Submit a response for grading.
"""
with self.handle_alert():
self.css_click('input.submit-button')
# Ensure that the submission completes
self._wait_for_submitted(self.assessment_type)
def _wait_for_submitted(self, assessment_type):
"""
Wait for the submission to complete.
`assessment_type` is either 'self', 'ai', or 'peer'
"""
if assessment_type == 'self':
RubricPage(self.browser).wait_for_page()
elif assessment_type == 'ai' or assessment_type == "peer":
fulfill(EmptyPromise(
lambda: self.grader_status != 'Unanswered',
"Problem status is no longer 'unanswered'"
))
else:
self.warning("Unrecognized assessment type '{0}'".format(assessment_type))
fulfill(EmptyPromise(lambda: True, "Unrecognized assessment type"))
"""
Page that allows the student to grade calibration essays
(requirement for being allowed to grade peers).
"""
from bok_choy.page_object import PageObject
from .rubric import RubricPage
class PeerCalibratePage(PageObject):
"""
Grade calibration essays.
"""
url = None
def is_browser_on_page(self):
return (
self.is_css_present('div.peer-grading-tools') or
self.is_css_present('div.calibration-panel.current-state')
)
def continue_to_grading(self):
"""
Continue to peer grading after completing calibration.
"""
self.css_click('input.calibration-feedback-button')
@property
def rubric(self):
"""
Return a `RubricPage` for the calibration essay.
If no rubric is available, raises a `BrokenPromise` exception.
"""
rubric = RubricPage(self.browser)
rubric.wait_for_page()
return rubric
@property
def message(self):
"""
Return a message shown to the user, or None if no message is available.
"""
messages = self.css_text('div.peer-grading-tools > div.message-container > p')
if len(messages) < 1:
return None
else:
return messages[0]
"""
Confirmation screen for peer calibration and grading.
"""
from bok_choy.page_object import PageObject
class PeerConfirmPage(PageObject):
"""
Confirmation for peer calibration and grading.
"""
url = None
def is_browser_on_page(self):
return self.is_css_present('section.calibration-interstitial-page')
def start(self, is_calibrating=False):
"""
Continue to the next section after the confirmation page.
If `is_calibrating` is false, try to continue to peer grading.
Otherwise, try to continue to calibration grading.
"""
self.css_click(
'input.calibration-interstitial-page-button'
if is_calibrating else 'input.interstitial-page-button'
)
"""
Students grade peer submissions.
"""
from bok_choy.page_object import PageObject
from .rubric import RubricPage
class PeerGradePage(PageObject):
"""
Students grade peer submissions.
"""
url = None
def is_browser_on_page(self):
return (
self.is_css_present('div.peer-grading-tools') or
self.is_css_present('div.grading-panel.current-state')
)
@property
def problem_list(self):
"""
Return the list of available problems to peer grade.
"""
return self.css_text('a.problem-button')
def select_problem(self, problem_name):
"""
Choose the problem with `problem_name` to start grading or calibrating.
"""
index = self.problem_list.index(problem_name) + 1
self.css_click('a.problem-button:nth-of-type({})'.format(index))
@property
def rubric(self):
"""
Return a `RubricPage` to allow students to grade their peers.
If no rubric is available, raises a `BrokenPromise` exception.
"""
rubric = RubricPage(self.browser)
rubric.wait_for_page()
return rubric
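A rough sketch of how a test might drive these page objects together; only the class and method names come from this commit, and the ordering of the calibration and grading steps is an assumption:

peer_grade = PeerGradePage(browser)
peer_confirm = PeerConfirmPage(browser)
peer_calibrate = PeerCalibratePage(browser)

peer_grade.select_problem('Peer-Assessed Example')   # hypothetical problem name
peer_confirm.start(is_calibrating=True)              # continue past the interstitial page
peer_calibrate.rubric.set_scores([0, 1])             # grade a calibration essay
peer_calibrate.rubric.submit()
peer_calibrate.continue_to_grading()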
"""
Rubric for open-ended response problems, including calibration and peer-grading.
"""
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise, fulfill_after, fulfill_before
class ScoreMismatchError(Exception):
"""
The provided scores do not match the rubric on the page.
"""
pass
class RubricPage(PageObject):
"""
Rubric for open-ended response problems, including calibration and peer-grading.
"""
url = None
def is_browser_on_page(self):
"""
Return a boolean indicating whether the rubric is available.
"""
return self.is_css_present('div.rubric')
@property
def categories(self):
"""
Return a list of categories available in the essay rubric.
Example:
["Writing Applications", "Language Conventions"]
The rubric is not always visible; if it's not available,
this will return an empty list.
"""
return self.css_text('span.rubric-category')
def set_scores(self, scores):
"""
Set the rubric scores. `scores` is a list of integers
indicating the number of points in each category.
For example, `scores` might be [0, 2, 1] if the student scored
0 points in the first category, 2 points in the second category,
and 1 point in the third category.
If the number of scores does not match the number of categories,
a `ScoreMismatchError` is raised.
"""
# Raise an error if we have the wrong number of scores
num_categories = len(self.categories)
if len(scores) != num_categories:
raise ScoreMismatchError(
"Received {0} scores but there are {1} rubric categories".format(
len(scores), num_categories))
# Set the score for each category
for score_index in range(len(scores)):
# Check that we have enough radio buttons
category_css = "div.rubric>ul.rubric-list:nth-of-type({0})".format(score_index + 1)
num_options = self.css_count(category_css + ' input.score-selection')
if scores[score_index] > num_options:
raise ScoreMismatchError(
"Tried to select score {0} but there are only {1} options".format(
scores[score_index], num_options))
# Check the radio button at the correct index
else:
input_css = (
category_css +
">li.rubric-list-item:nth-of-type({0}) input.score-selection".format(scores[score_index] + 1)
)
self.css_check(input_css)
@property
def feedback(self):
"""
Return a list of correct/incorrect feedback for each rubric category (e.g. from self-assessment).
Example: ['correct', 'incorrect']
If no feedback is available, returns an empty list.
If feedback could not be interpreted (unexpected CSS class),
the list will contain a `None` item.
"""
# Get the green checkmark / red x labels
# We need to filter out the similar-looking CSS classes
# for the rubric items that are NOT marked correct/incorrect
feedback_css = 'div.rubric-label>label'
labels = [
el_class for el_class in
self.css_map(feedback_css, lambda el: el['class'])
if el_class != 'rubric-elements-info'
]
def map_feedback(css_class):
"""
Map CSS classes on the labels to correct/incorrect
"""
if 'choicegroup_incorrect' in css_class:
return 'incorrect'
elif 'choicegroup_correct' in css_class:
return 'correct'
else:
return None
return map(map_feedback, labels)
def submit(self):
"""
Submit the rubric.
"""
# Wait for the button to become enabled
button_css = 'input.submit-button'
button_enabled = EmptyPromise(
lambda: all(self.css_map(button_css, lambda el: not el['disabled'])),
"Submit button enabled"
)
# Submit the assessment
with fulfill_before(button_enabled):
self.css_click(button_css)
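Taken together, a test can drive the rubric like this (this mirrors the self-assessment acceptance test changed further down in this commit):

rubric = RubricPage(browser)
rubric.wait_for_page()
rubric.categories           # e.g. ["Writing Applications", "Language Conventions"]
rubric.set_scores([0, 1])   # 0 points in the first category, 1 in the second
rubric.submit()
rubric.feedback             # e.g. ['incorrect', 'correct']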
...@@ -9,6 +9,9 @@ from ..pages.lms.course_info import CourseInfoPage
from ..pages.lms.tab_nav import TabNavPage
from ..pages.lms.course_nav import CourseNavPage
from ..pages.lms.open_response import OpenResponsePage
from ..pages.lms.peer_grade import PeerGradePage
from ..pages.lms.peer_calibrate import PeerCalibratePage
from ..pages.lms.peer_confirm import PeerConfirmPage
from ..pages.lms.progress import ProgressPage
from ..fixtures.course import XBlockFixtureDesc, CourseFixture
from ..fixtures.xqueue import XQueueResponseFixture
...@@ -40,6 +43,9 @@ class OpenResponseTest(UniqueCourseTest):
self.tab_nav = TabNavPage(self.browser)
self.course_nav = CourseNavPage(self.browser)
self.open_response = OpenResponsePage(self.browser)
self.peer_grade = PeerGradePage(self.browser)
self.peer_calibrate = PeerCalibratePage(self.browser)
self.peer_confirm = PeerConfirmPage(self.browser)
self.progress_page = ProgressPage(self.browser, self.course_id)
# Configure the test course
...@@ -48,6 +54,13 @@ class OpenResponseTest(UniqueCourseTest):
self.course_info['run'], self.course_info['display_name']
)
# Create a unique name for the peer assessed problem. This will show up
# in the list of peer problems, which is shared among tests running
# in parallel; it needs to be unique so we can find it.
# It's also important that the problem has "Peer" in the name; otherwise,
# the ORA stub will ignore it.
self.peer_problem_name = "Peer-Assessed {}".format(self.unique_id[0:6])
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
@@ -58,9 +71,10 @@ class OpenResponseTest(UniqueCourseTest):
XBlockFixtureDesc('combinedopenended', 'AI-Assessed',
data=load_data_str('ora_ai_problem.xml'), metadata={'graded': True}),
XBlockFixtureDesc('combinedopenended', self.peer_problem_name,
data=load_data_str('ora_peer_problem.xml'), metadata={'graded': True}),
# This is the interface a student can use to grade his/her peers
XBlockFixtureDesc('peergrading', 'Peer Module'),
))).install()
@@ -128,14 +142,14 @@ class OpenResponseTest(UniqueCourseTest):
if assessment_type == 'ai':
section_name = 'AI-Assessed'
elif assessment_type == 'peer':
section_name = self.peer_problem_name
else:
raise ValueError('Assessment type not recognized. Must be either "ai" or "peer"')
def _inner_check():
self.course_nav.go_to_sequential('Self-Assessed')
self.course_nav.go_to_sequential(section_name)
feedback = self.open_response.rubric.feedback
# Successful if `feedback` is a non-empty list
return (bool(feedback), feedback)
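# For reference, a minimal sketch of how `_inner_check` might be polled until
# feedback arrives, assuming bok_choy's `Promise`/`fulfill` helpers (consistent
# with the `EmptyPromise`/`fulfill_before` usage above); the description string
# is illustrative only:
#
#   return fulfill(Promise(
#       _inner_check,
#       "Feedback available for the {} problem".format(assessment_type),
#   ))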
@@ -155,22 +169,17 @@ class SelfAssessmentTest(OpenResponseTest):
Then I see a scored rubric
And I see my score in the progress page.
"""
# Navigate to the self-assessment problem and submit an essay
self.course_nav.go_to_sequential('Self-Assessed')
self.submit_essay('self', 'Censorship in the Libraries')
# Fill in the rubric and expect that we get feedback
rubric = self.open_response.rubric
self.assertEqual(rubric.categories, ["Writing Applications", "Language Conventions"])
rubric.set_scores([0, 1])
rubric.submit()
self.assertEqual(rubric.feedback, ['incorrect', 'correct'])
# Verify the progress page
self.progress_page.visit()
@@ -223,10 +232,10 @@ class AIAssessmentTest(OpenResponseTest):
self.assertEqual(scores, [(0, 2), (1, 2), (0, 2)])
class InstructorAssessmentTest(OpenResponseTest):
"""
Test an AI-assessment that has been graded by an instructor.
This runs the same test as the AI-assessment test, except
that the feedback comes from an instructor instead of the machine grader.
From the student's perspective, it should look the same.
"""
@@ -242,11 +251,36 @@ class InstructorAssessmentTest(AIAssessmentTest):
'rubric_xml': load_data_str('ora_rubric.xml')
}
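# For context, a rough sketch of how a canned grade response like the one
# above might be registered with the XQueue stub via the XQueueResponseFixture
# imported at the top of the test module. The pattern, constructor signature,
# and call site here are assumptions, not the actual fixture API:
#
#   XQueueResponseFixture(<submission pattern>, <canned response dict>).install()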
def test_instructor_assessment(self):
"""
Given an instructor has graded my submission
When I view my submission
Then I see a scored rubric
And my progress page shows the problem score.
"""
# Navigate to the AI-assessment problem and submit an essay
# We have configured the stub to simulate that this essay will be staff-graded
self.course_nav.go_to_sequential('AI-Assessed')
self.submit_essay('ai', 'Censorship in the Libraries')
# Refresh the page to get the updated feedback
# then verify that we get the feedback sent by our stub XQueue implementation
self.assertEqual(self.get_asynch_feedback('ai'), ['incorrect', 'correct'])
# Verify the progress page
self.progress_page.visit()
scores = self.progress_page.scores('Test Section', 'Test Subsection')
# First score is the self-assessment score, which we haven't answered, so it's 0/2
# Second score is the AI-assessment score, which we have answered, so it's 1/2
# Third score is peer-assessment, which we haven't answered, so it's 0/2
self.assertEqual(scores, [(0, 2), (1, 2), (0, 2)])
class PeerAssessmentTest(OpenResponseTest):
""" """
Test ORA peer-assessment. Note that this tests only *receiving* feedback, Test ORA peer-assessment, including calibration and giving/receiving scores.
not *giving* feedback -- those tests are located in another module.
""" """
# Unlike other assessment types, peer assessment has multiple scores # Unlike other assessment types, peer assessment has multiple scores
...@@ -261,20 +295,58 @@ class PeerFeedbackTest(OpenResponseTest): ...@@ -261,20 +295,58 @@ class PeerFeedbackTest(OpenResponseTest):
'rubric_xml': [load_data_str('ora_rubric.xml')] * 3 'rubric_xml': [load_data_str('ora_rubric.xml')] * 3
} }
def test_peer_assessment(self): def test_peer_calibrate_and_grade(self):
""" """
Given I am viewing a peer-assessment problem
And the instructor has submitted enough example essays
When I submit acceptable scores for enough calibration essays
Then I am able to peer-grade other students' essays.
Given I have submitted an essay for peer-assessment
And I have peer-graded enough students' essays
And enough other students have scored my essay
Then I can view the scores and written feedback
And I see my score in the progress page.
"""
# Initially, the student should NOT be able to grade peers,
# because he/she hasn't submitted any essays.
self.course_nav.go_to_sequential('Peer Module')
self.assertIn("You currently do not have any peer grading to do", self.peer_calibrate.message)
# Submit an essay
self.course_nav.go_to_sequential(self.peer_problem_name)
self.submit_essay('peer', 'Censorship in the Libraries')
# Need to reload the page to update the peer grading module
self.course_info_page.visit()
self.tab_nav.go_to_tab('Courseware')
self.course_nav.go_to_section('Test Section', 'Test Subsection')
# Select the problem to calibrate
self.course_nav.go_to_sequential('Peer Module')
self.assertIn(self.peer_problem_name, self.peer_grade.problem_list)
self.peer_grade.select_problem(self.peer_problem_name)
# Calibrate
self.peer_confirm.start(is_calibrating=True)
rubric = self.peer_calibrate.rubric
self.assertEqual(rubric.categories, ["Writing Applications", "Language Conventions"])
rubric.set_scores([0, 1])
rubric.submit()
self.peer_calibrate.continue_to_grading()
# Grade a peer
self.peer_confirm.start()
rubric = self.peer_grade.rubric
self.assertEqual(rubric.categories, ["Writing Applications", "Language Conventions"])
rubric.set_scores([0, 1])
rubric.submit()
# Expect to receive essay feedback
# We receive feedback from all three peers, each of which
# provides 2 scores (one for each rubric item)
# Written feedback is a dummy value sent by the XQueue stub.
self.course_nav.go_to_sequential(self.peer_problem_name)
self.assertEqual(self.get_asynch_feedback('peer'), ['incorrect', 'correct'] * 3)
# Verify the progress page
...
@@ -33,7 +33,14 @@ BOK_CHOY_STUBS = {
:xqueue => {
:port => 8040,
:log => File.join(BOK_CHOY_LOG_DIR, "bok_choy_xqueue.log"),
:config => 'register_submission_url=http://0.0.0.0:8041/test/register_submission'
},
:ora => {
:port => 8041,
:log => File.join(BOK_CHOY_LOG_DIR, "bok_choy_ora.log"),
:config => ''
}
}
@@ -56,14 +63,14 @@ def start_servers()
BOK_CHOY_STUBS.each do | service, info |
Dir.chdir(BOK_CHOY_STUB_DIR) do
singleton_process(
"python -m stubs.start #{service} #{info[:port]} #{info[:config]}",
logfile=info[:log]
)
end
end
end
# Wait until we get a successful response from the servers or time out
def wait_for_test_servers()
BOK_CHOY_SERVERS.merge(BOK_CHOY_STUBS).each do | service, info |
...
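For illustration, a minimal sketch (in Python, not part of this change) of how a stub start script could consume the key=value config strings passed on the command line above; the module layout and names here are assumptions:

import sys

def parse_config_args(args):
    """Parse trailing 'key=value' arguments into a config dict."""
    config = {}
    for arg in args:
        key, sep, value = arg.partition('=')
        if sep and key:
            config[key] = value
    return config

if __name__ == '__main__':
    # e.g. python -m stubs.start xqueue 8040 register_submission_url=http://0.0.0.0:8041/test/register_submission
    service_name = sys.argv[1]
    port = int(sys.argv[2])
    config = parse_config_args(sys.argv[3:])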