Commit 20ccf593 by Will Daly

Implemented CodeResponse lettuce tests

parent 2087b815
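The diff below implements the XQueue round trip that the new lettuce tests exercise. As a quick orientation, here is a sketch of the three messages involved, using only values that appear in the tests added by this commit:

    import json

    # Submission as the LMS sends it (both fields are JSON-encoded strings)
    submission = {
        'xqueue_header': json.dumps({'lms_callback_url': 'http://127.0.0.1:8000/test_callback',
                                     'lms_key': 'test_queuekey',
                                     'queue_name': 'test_queue'}),
        'xqueue_body': json.dumps({'student_info': 'test',
                                   'grader_payload': 'test',
                                   'student_response': 'test'}),
    }

    # Immediate acknowledgement from the queue (return_code 0 = success, 1 = error)
    ack = {'return_code': 0, 'content': ''}

    # Delayed POST back to lms_callback_url with the configured grade
    callback = {
        'xqueue_header': submission['xqueue_header'],
        'xqueue_body': json.dumps({'correct': True, 'score': 1, 'msg': '<div></div>'}),
    }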
@@ -2,7 +2,10 @@ from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import json
import urllib
import urlparse
import time
from logging import getLogger
logger = getLogger(__name__)
class MockXQueueRequestHandler(BaseHTTPRequestHandler):
'''
@@ -16,11 +19,10 @@ class MockXQueueRequestHandler(BaseHTTPRequestHandler):
def do_POST(self):
'''
-        Handle a POST request from the client, interpreted
-        as either a login request or a submission for grading request.
+        Handle a POST request from the client
        Sends back an immediate success/failure response.
-        If grading is required, it then POSTS back to the client
+        It then POSTS back to the client
        with grading results, as configured in MockXQueueServer.
'''
self._send_head()
@@ -28,21 +30,60 @@ class MockXQueueRequestHandler(BaseHTTPRequestHandler):
# Retrieve the POST data
post_dict = self._post_dict()
-        # Send a response indicating success/failure
-        success = self._send_immediate_response(post_dict)
+        # Log the request
+        logger.debug("XQueue received POST request %s to path %s" %
+                     (str(post_dict), self.path))
+        # Respond only to grading requests
+        if self._is_grade_request():
+            try:
+                xqueue_header = json.loads(post_dict['xqueue_header'])
+                xqueue_body = json.loads(post_dict['xqueue_body'])
+                callback_url = xqueue_header['lms_callback_url']
+            except KeyError:
+                # If the message doesn't have a header or body,
+                # then it's malformed.
+                # Respond with failure
+                error_msg = "XQueue received invalid grade request"
+                self._send_immediate_response(False, message=error_msg)
+            except ValueError:
+                # If we could not decode the body or header,
+                # respond with failure
+                error_msg = "XQueue could not decode grade request"
+                self._send_immediate_response(False, message=error_msg)
+            else:
+                # The grade request is formed correctly,
+                # so send an immediate response of success
+                self._send_immediate_response(True)
+
+                # Wait a bit before POSTing back to the callback url with the
+                # grade result configured by the server.
+                # Otherwise, the problem will not realize it's
+                # queued and it will keep waiting for a response
+                # indefinitely
+                delayed_grade_func = lambda: self._send_grade_response(callback_url,
+                                                                       xqueue_header)
+                timer = threading.Timer(2, delayed_grade_func)
+                timer.start()
+
+        # If we get a request that's not to the grading submission
+        # URL, return an error
+        else:
+            error_message = "Invalid request URL"
+            self._send_immediate_response(False, message=error_message)
-
-        # If the client submitted a valid submission request,
-        # we need to post back to the callback url
-        # with the grading result
-        if success and self._is_grade_request():
-            self._send_grade_response(post_dict['lms_callback_url'],
-                                      post_dict['lms_key'])
def _send_head(self):
'''
Send the response code and MIME headers
'''
-        if self._is_login_request() or self._is_grade_request():
+        if self._is_grade_request():
self.send_response(200)
else:
self.send_response(500)
@@ -78,46 +119,33 @@ class MockXQueueRequestHandler(BaseHTTPRequestHandler):
return post_dict
-    def _send_immediate_response(self, post_dict):
+    def _send_immediate_response(self, success, message=""):
'''
-        Check the post_dict for the appropriate fields
-        for this request (login or grade submission)
-        If it finds them, inform the client of success.
-        Otherwise, inform the client of failure
+        Send an immediate success/failure message
+        back to the client
'''
-        # Allow any user to log in, as long as the POST
-        # dict has a username and password
-        if self._is_login_request():
-            success = 'username' in post_dict and 'password' in post_dict
-        elif self._is_grade_request():
-            success = ('lms_callback_url' in post_dict and
-                       'lms_key' in post_dict and
-                       'queue_name' in post_dict)
-        else:
-            success = False
# Send the response indicating success/failure
response_str = json.dumps({'return_code': 0 if success else 1,
-                                   'content': '' if success else 'Error'})
-        self.wfile.write(response_str)
-        return success
+                                   'content': message})
+
+        self.wfile.write(response_str)
+
+        # Log the response
+        logger.debug("XQueue: sent response %s" % response_str)
-    def _send_grade_response(self, postback_url, queuekey):
+    def _send_grade_response(self, postback_url, xqueue_header):
'''
POST the grade response back to the client
using the response provided by the server configuration
'''
-        response_dict = {'queuekey': queuekey,
-                         'xqueue_body': self.server.grade_response}
+        response_dict = {'xqueue_header': json.dumps(xqueue_header),
+                         'xqueue_body': json.dumps(self.server.grade_response())}
+        # Log the response
+        logger.debug("XQueue: sent grading response %s" % str(response_dict))
+
        MockXQueueRequestHandler.post_to_url(postback_url, response_dict)

-    def _is_login_request(self):
-        return 'xqueue/login' in self.path
def _is_grade_request(self):
return 'xqueue/submit' in self.path
@@ -138,7 +166,8 @@ class MockXQueueServer(HTTPServer):
to POST requests to localhost.
'''
-    def __init__(self, port_num, grade_response_dict):
+    def __init__(self, port_num,
+                 grade_response_dict={'correct': True, 'score': 1, 'msg': ''}):
'''
Initialize the mock XQueue server instance.
@@ -148,18 +177,36 @@ class MockXQueueServer(HTTPServer):
and sent in response to XQueue grading requests.
'''
-        self.grade_response = grade_response_dict
+        self.set_grade_response(grade_response_dict)
handler = MockXQueueRequestHandler
address = ('', port_num)
HTTPServer.__init__(self, address, handler)
+    def shutdown(self):
+        '''
+        Stop the server and free up the port
+        '''
+        # First call superclass shutdown()
+        HTTPServer.shutdown(self)
+        # We also need to manually close the socket
+        self.socket.close()
+
-    @property
    def grade_response(self):
        return self._grade_response

-    @grade_response.setter
-    def grade_response(self, grade_response_dict):
+    def set_grade_response(self, grade_response_dict):
# Check that the grade response has the right keys
assert('correct' in grade_response_dict and
'score' in grade_response_dict and
'msg' in grade_response_dict)
# Wrap the message in <div> tags to ensure that it is valid XML
grade_response_dict['msg'] = "<div>%s</div>" % grade_response_dict['msg']
# Save the response dictionary
self._grade_response = grade_response_dict
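A short usage sketch of the two accessors above (the port and message values are illustrative): set_grade_response validates the keys and wraps msg in <div> tags before storing it, and grade_response() returns the stored dict.

    server = MockXQueueServer(8027)
    server.set_grade_response({'correct': False, 'score': 0, 'msg': 'Try again'})

    # The message is now wrapped for XML validity
    assert server.grade_response()['msg'] == '<div>Try again</div>'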
@@ -190,16 +237,6 @@ class MockXQueueServerTest(unittest.TestCase):
# Stop the server, freeing up the port
self.server.shutdown()
-        self.server.socket.close()
-
-    def test_login_request(self):
-        # Send a login request
-        login_request = {'username': 'Test', 'password': 'Test'}
-        response_handle = urllib.urlopen(self.server_url + '/xqueue/login',
-                                         urllib.urlencode(login_request))
-        response_dict = json.loads(response_handle.read())
-        self.assertEqual(response_dict['return_code'], 0)
def test_grade_request(self):
@@ -209,19 +246,33 @@ class MockXQueueServerTest(unittest.TestCase):
# Send a grade request
callback_url = 'http://127.0.0.1:8000/test_callback'
-        grade_request = {'lms_callback_url': callback_url,
-                         'lms_key': 'test_queuekey',
-                         'queue_name': 'test_queue'}
+        grade_header = json.dumps({'lms_callback_url': callback_url,
+                                   'lms_key': 'test_queuekey',
+                                   'queue_name': 'test_queue'})
+
+        grade_body = json.dumps({'student_info': 'test',
+                                 'grader_payload': 'test',
+                                 'student_response': 'test'})
+
+        grade_request = {'xqueue_header': grade_header,
+                         'xqueue_body': grade_body}
response_handle = urllib.urlopen(self.server_url + '/xqueue/submit',
urllib.urlencode(grade_request))
response_dict = json.loads(response_handle.read())
# Expect that the response is success
self.assertEqual(response_dict['return_code'], 0)
# Wait a bit before checking that the server posted back
time.sleep(3)
# Expect that the server tries to post back the grading info
-        expected_callback_dict = {'queuekey': 'test_queuekey',
-                                  'xqueue_body': {'correct': True,
-                                                  'score': 1, 'msg': ''}}
+        xqueue_body = json.dumps({'correct': True, 'score': 1,
+                                  'msg': '<div></div>'})
+        expected_callback_dict = {'xqueue_header': grade_header,
+                                  'xqueue_body': xqueue_body}
MockXQueueRequestHandler.post_to_url.assert_called_with(callback_url,
expected_callback_dict)
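For the assert_called_with check above to work, post_to_url must have been replaced with a mock during setUp; that code is outside the hunks shown here, so the following is only a sketch of the assumed patching:

    from mock import Mock

    # Assumed setUp step (not shown in this diff): stub out the postback
    MockXQueueRequestHandler.post_to_url = Mock()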
@@ -10,6 +10,8 @@ import time
import re
import os.path
from .xqueue_setup import *
from logging import getLogger
logger = getLogger(__name__)
......
from mock_xqueue_server import MockXQueueServer
from lettuce import before, after, world
from django.conf import settings
import threading
@before.all
def setup_mock_xqueue_server():
# Retrieve the local port from settings
server_port = settings.XQUEUE_PORT
# Create the mock server instance
server = MockXQueueServer(server_port)
# Start the server running in a separate daemon thread
# Because the thread is a daemon, it will terminate
# when the main thread terminates.
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
# Store the server instance in lettuce's world
# so that other steps can access it
# (and we can shut it down later)
world.xqueue_server = server
@after.all
def teardown_mock_xqueue_server(total):
# Stop the xqueue server and free up the port
world.xqueue_server.shutdown()
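With the server stored in world, later steps can reconfigure the canned grade per scenario, as the step definitions added further down in this commit do:

    # e.g. inside a step definition
    world.xqueue_server.set_grade_response(
        {'correct': False, 'score': 0, 'msg': 'Try again'})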
-Feature: Answer choice problems
+Feature: Answer problems
As a student in an edX course
In order to test my understanding of the material
-    I want to answer choice based problems
+    I want to answer problems
Scenario: I can answer a problem correctly
-        Given I am viewing a "<ProblemType>" problem
+        Given External graders respond "correct"
+        And I am viewing a "<ProblemType>" problem
When I answer a "<ProblemType>" problem "correctly"
Then My "<ProblemType>" answer is marked "correct"
@@ -17,9 +18,11 @@ Feature: Answer choice problems
| numerical |
| formula |
| script |
| code |
Scenario: I can answer a problem incorrectly
-        Given I am viewing a "<ProblemType>" problem
+        Given External graders respond "incorrect"
+        And I am viewing a "<ProblemType>" problem
When I answer a "<ProblemType>" problem "incorrectly"
Then My "<ProblemType>" answer is marked "incorrect"
@@ -32,6 +35,7 @@ Feature: Answer choice problems
| numerical |
| formula |
| script |
| code |
Scenario: I can submit a blank answer
Given I am viewing a "<ProblemType>" problem
......
Feature: Answer problems
As a student in an edX course
In order to test my understanding of the material
I want to answer problems
Scenario: I can answer a problem correctly
Given I am viewing a "<ProblemType>" problem
When I answer a "<ProblemType>" problem "correctly"
Then My "<ProblemType>" answer is marked "correct"
Examples:
| ProblemType |
| drop down |
| multiple choice |
| checkbox |
| string |
| numerical |
| formula |
| script |
Scenario: I can answer a problem incorrectly
Given I am viewing a "<ProblemType>" problem
When I answer a "<ProblemType>" problem "incorrectly"
Then My "<ProblemType>" answer is marked "incorrect"
Examples:
| ProblemType |
| drop down |
| multiple choice |
| checkbox |
| string |
| numerical |
| formula |
| script |
Scenario: I can submit a blank answer
Given I am viewing a "<ProblemType>" problem
When I check a problem
Then My "<ProblemType>" answer is marked "incorrect"
Examples:
| ProblemType |
| drop down |
| multiple choice |
| checkbox |
| string |
| numerical |
| formula |
| script |
Scenario: I can reset a problem
Given I am viewing a "<ProblemType>" problem
And I answer a "<ProblemType>" problem "<Correctness>ly"
When I reset the problem
Then My "<ProblemType>" answer is marked "unanswered"
Examples:
| ProblemType | Correctness |
| drop down | correct |
| drop down | incorrect |
| multiple choice | correct |
| multiple choice | incorrect |
| checkbox | correct |
| checkbox | incorrect |
| string | correct |
| string | incorrect |
| numerical | correct |
| numerical | incorrect |
| formula | correct |
| formula | incorrect |
| script | correct |
| script | incorrect |
Scenario: I can answer a code-based problem
Given I am viewing a "code" problem
And External graders respond "<Correctness>" with message "Test Message"
        When I answer a "code" problem "<Correctness>ly"
Then My "code" answer is marked "<Correctness>"
And I should see "Test Message" somewhere in the page
Examples:
| Correctness |
| correct |
| incorrect |
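The scenario above uses a step flavor, External graders respond "..." with message "...", that has no definition in the diffs shown here. A hypothetical sketch of such a step, modeled on the set_external_grader_response step below (the function name is illustrative):

    from lettuce import world, step

    @step(u'External graders respond "([^"]*)" with message "([^"]*)"')
    def external_graders_respond_with_message(step, correctness, message):
        # Hypothetical step (not part of this diff): configure the mock
        # grader with an explicit message instead of the default one
        world.xqueue_server.set_grade_response(
            {'correct': correctness == 'correct',
             'score': 1 if correctness == 'correct' else 0,
             'msg': message})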
from lettuce import world, step
from lettuce.django import django_url
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import WebDriverException
import random
import textwrap
import time
from common import i_am_registered_for_the_course, TEST_SECTION_NAME, section_location
from terrain.factories import ItemFactory
from capa.tests.response_xml_factory import OptionResponseXMLFactory, \
ChoiceResponseXMLFactory, MultipleChoiceResponseXMLFactory, \
StringResponseXMLFactory, NumericalResponseXMLFactory, \
-    FormulaResponseXMLFactory, CustomResponseXMLFactory
+    FormulaResponseXMLFactory, CustomResponseXMLFactory, \
+    CodeResponseXMLFactory
# Factories from capa.tests.response_xml_factory that we will use
# to generate the problem XML, with the keyword args used to configure
@@ -77,7 +80,13 @@ PROBLEM_FACTORY_DICT = {
a1=0
a2=0
                return (a1+a2)==int(expect)
-        """)}},
+        """) }},
'code': {
'factory': CodeResponseXMLFactory(),
'kwargs': {
'question_text': 'Submit code to an external grader',
'initial_display': 'print "Hello world!"',
'grader_payload': '{"grader": "ps1/Spring2013/test_grader.py"}', }},
}
@@ -115,6 +124,18 @@ def view_problem(step, problem_type):
world.browser.visit(url)
@step(u'External graders respond "([^"]*)"')
def set_external_grader_response(step, correctness):
assert(correctness in ['correct', 'incorrect'])
response_dict = {'correct': True if correctness == 'correct' else False,
'score': 1 if correctness == 'correct' else 0,
'msg': 'Your problem was graded %s' % correctness}
# Set the fake xqueue server to always respond
# correct/incorrect when asked to grade a problem
world.xqueue_server.set_grade_response(response_dict)
@step(u'I answer a "([^"]*)" problem "([^"]*)ly"')
def answer_problem(step, problem_type, correctness):
@@ -169,12 +190,31 @@ def answer_problem(step, problem_type, correctness):
inputfield('script', input_num=1).fill(str(first_addend))
inputfield('script', input_num=2).fill(str(second_addend))
elif problem_type == 'code':
# The fake xqueue server is configured to respond
# correct / incorrect no matter what we submit.
        # Furthermore, since the inline code response uses
        # JavaScript to make the code display nicely, it's difficult
        # to programmatically input text
        # (there's no <textarea> we can just fill text into).
# For this reason, we submit the initial code in the response
# (configured in the problem XML above)
pass
# Submit the problem
check_problem(step)
@step(u'I check a problem')
def check_problem(step):
try:
world.browser.find_by_css("input.check").click()
except WebDriverException:
        # Occasionally, MathJax or other JavaScript can cover up
# the 'Check' input temporarily.
# If this happens, wait a second, then try again
time.sleep(1)
world.browser.find_by_css("input.check").click()
@@ -207,7 +247,8 @@ def assert_answer_mark(step, problem_type, correctness):
'string': ['div.correct'],
'numerical': ['div.correct'],
'formula': ['div.correct'],
-                         'script': ['div.correct'], }
+                         'script': ['div.correct'],
+                         'code': ['span.correct'], }
incorrect_selectors = {'drop down': ['span.incorrect'],
'multiple choice': ['label.choicegroup_incorrect',
@@ -216,7 +257,8 @@ def assert_answer_mark(step, problem_type, correctness):
'string': ['div.incorrect'],
'numerical': ['div.incorrect'],
'formula': ['div.incorrect'],
-                           'script': ['div.incorrect']}
+                           'script': ['div.incorrect'],
+                           'code': ['span.incorrect'], }
assert(correctness in ['correct', 'incorrect', 'unanswered'])
assert(problem_type in correct_selectors and problem_type in incorrect_selectors)
......
@@ -40,6 +40,18 @@ DATABASES = {
}
}
# Set up XQueue information so that the lms will send
# requests to a mock XQueue server running locally
XQUEUE_PORT = 8027
XQUEUE_INTERFACE = {
"url": "http://127.0.0.1:%d" % XQUEUE_PORT,
"django_auth": {
"username": "lms",
"password": "***REMOVED***"
},
"basic_auth": ('anant', 'agarwal'),
}
# Do not display the YouTube videos in the browser while running the
# acceptance tests. This makes them faster and more reliable
MITX_FEATURES['STUB_VIDEO_FOR_TESTING'] = True
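The mock server binds XQUEUE_PORT while the LMS posts to XQUEUE_INTERFACE['url'], so the two settings above must stay in sync; a hypothetical sanity check (not part of this commit):

    from django.conf import settings

    # Hypothetical check: the mock server's port must match the URL the LMS uses
    assert settings.XQUEUE_INTERFACE['url'] == "http://127.0.0.1:%d" % settings.XQUEUE_PORT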
......