Commit 20ccf593 by Will Daly

Implemented CodeResponse lettuce tests

parent 2087b815
@@ -10,6 +10,8 @@ import time
import re
import os.path
from .xqueue_setup import *
from logging import getLogger
logger = getLogger(__name__)
......
from mock_xqueue_server import MockXQueueServer
from lettuce import before, after, world
from django.conf import settings
import threading
@before.all
def setup_mock_xqueue_server():
# Retrieve the local port from settings
server_port = settings.XQUEUE_PORT
# Create the mock server instance
server = MockXQueueServer(server_port)
# Start the server running in a separate daemon thread
# Because the thread is a daemon, it will terminate
# when the main thread terminates.
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
# Store the server instance in lettuce's world
# so that other steps can access it
# (and we can shut it down later)
world.xqueue_server = server
@after.all
def teardown_mock_xqueue_server(total):
# Stop the xqueue server and free up the port
world.xqueue_server.shutdown()
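The MockXQueueServer used above is imported from mock_xqueue_server, which is not shown in this commit view. A minimal sketch of what such a server could look like, assuming a BaseHTTPServer-based implementation (everything beyond the MockXQueueServer name and the set_grade_response method used later is illustrative):
import json
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
class MockXQueueRequestHandler(BaseHTTPRequestHandler):
    # Accept any POST and reply roughly as xqueue would:
    # a JSON envelope wrapping the canned grade.
    def do_POST(self):
        reply = {'return_code': 0,
                 'content': json.dumps(self.server.grade_response)}
        self.send_response(200)
        self.send_header('Content-Type', 'application/json')
        self.end_headers()
        self.wfile.write(json.dumps(reply))
class MockXQueueServer(HTTPServer):
    # serve_forever() and shutdown() are inherited from SocketServer,
    # which is what the before.all / after.all hooks above rely on.
    def __init__(self, port):
        HTTPServer.__init__(self, ('127.0.0.1', port),
                            MockXQueueRequestHandler)
        self.grade_response = {'correct': True, 'score': 1, 'msg': ''}
    def set_grade_response(self, response_dict):
        # Canned grade returned for every subsequent submission
        self.grade_response = response_dict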
-Feature: Answer choice problems
+Feature: Answer problems
As a student in an edX course
In order to test my understanding of the material
-I want to answer choice based problems
+I want to answer problems
Scenario: I can answer a problem correctly
-Given I am viewing a "<ProblemType>" problem
+Given External graders respond "correct"
+And I am viewing a "<ProblemType>" problem
When I answer a "<ProblemType>" problem "correctly"
Then My "<ProblemType>" answer is marked "correct"
@@ -17,9 +18,11 @@ Feature: Answer choice problems
| numerical |
| formula |
| script |
+| code |
Scenario: I can answer a problem incorrectly
-Given I am viewing a "<ProblemType>" problem
+Given External graders respond "incorrect"
+And I am viewing a "<ProblemType>" problem
When I answer a "<ProblemType>" problem "incorrectly"
Then My "<ProblemType>" answer is marked "incorrect"
@@ -32,6 +35,7 @@ Feature: Answer choice problems
| numerical |
| formula |
| script |
+| code |
Scenario: I can submit a blank answer
Given I am viewing a "<ProblemType>" problem
......
Feature: Answer problems
As a student in an edX course
In order to test my understanding of the material
I want to answer problems
Scenario: I can answer a problem correctly
Given I am viewing a "<ProblemType>" problem
When I answer a "<ProblemType>" problem "correctly"
Then My "<ProblemType>" answer is marked "correct"
Examples:
| ProblemType |
| drop down |
| multiple choice |
| checkbox |
| string |
| numerical |
| formula |
| script |
Scenario: I can answer a problem incorrectly
Given I am viewing a "<ProblemType>" problem
When I answer a "<ProblemType>" problem "incorrectly"
Then My "<ProblemType>" answer is marked "incorrect"
Examples:
| ProblemType |
| drop down |
| multiple choice |
| checkbox |
| string |
| numerical |
| formula |
| script |
Scenario: I can submit a blank answer
Given I am viewing a "<ProblemType>" problem
When I check a problem
Then My "<ProblemType>" answer is marked "incorrect"
Examples:
| ProblemType |
| drop down |
| multiple choice |
| checkbox |
| string |
| numerical |
| formula |
| script |
Scenario: I can reset a problem
Given I am viewing a "<ProblemType>" problem
And I answer a "<ProblemType>" problem "<Correctness>ly"
When I reset the problem
Then My "<ProblemType>" answer is marked "unanswered"
Examples:
| ProblemType | Correctness |
| drop down | correct |
| drop down | incorrect |
| multiple choice | correct |
| multiple choice | incorrect |
| checkbox | correct |
| checkbox | incorrect |
| string | correct |
| string | incorrect |
| numerical | correct |
| numerical | incorrect |
| formula | correct |
| formula | incorrect |
| script | correct |
| script | incorrect |
Scenario: I can answer a code-based problem
Given I am viewing a "code" problem
And External graders respond "<Correctness>" with message "Test Message"
When I answer a "code" problem "<Correctness>"
Then My "code" answer is marked "<Correctness>"
And I should see "Test Message" somewhere in the page
Examples:
| Correctness |
| correct |
| incorrect |
from lettuce import world, step
from lettuce.django import django_url
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import WebDriverException
import random
import textwrap
import time
from common import i_am_registered_for_the_course, TEST_SECTION_NAME, section_location
from terrain.factories import ItemFactory
from capa.tests.response_xml_factory import OptionResponseXMLFactory, \
ChoiceResponseXMLFactory, MultipleChoiceResponseXMLFactory, \
StringResponseXMLFactory, NumericalResponseXMLFactory, \
-FormulaResponseXMLFactory, CustomResponseXMLFactory
+FormulaResponseXMLFactory, CustomResponseXMLFactory, \
+CodeResponseXMLFactory
# Factories from capa.tests.response_xml_factory that we will use
# to generate the problem XML, with the keyword args used to configure
@@ -77,7 +80,13 @@ PROBLEM_FACTORY_DICT = {
a1=0
a2=0
return (a1+a2)==int(expect)
""")}},
""") }},
'code': {
'factory': CodeResponseXMLFactory(),
'kwargs': {
'question_text': 'Submit code to an external grader',
'initial_display': 'print "Hello world!"',
'grader_payload': '{"grader": "ps1/Spring2013/test_grader.py"}', }},
}
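Each entry above pairs a response XML factory with the kwargs used to render its problem XML. The view_problem step that consumes this dict is elided from the hunk; it presumably does something like the following (a sketch, assuming the build_xml interface of capa.tests.response_xml_factory):
# Sketch: render the XML for the new 'code' problem type
factory_info = PROBLEM_FACTORY_DICT['code']
problem_xml = factory_info['factory'].build_xml(**factory_info['kwargs'])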
@@ -115,6 +124,18 @@ def view_problem(step, problem_type):
world.browser.visit(url)
@step(u'External graders respond "([^"]*)"')
def set_external_grader_response(step, correctness):
assert(correctness in ['correct', 'incorrect'])
response_dict = {'correct': True if correctness == 'correct' else False,
'score': 1 if correctness == 'correct' else 0,
'msg': 'Your problem was graded %s' % correctness}
# Set the fake xqueue server to always respond
# correct/incorrect when asked to grade a problem
world.xqueue_server.set_grade_response(response_dict)
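The code-based scenario in the feature file also uses an "External graders respond ... with message ..." step, which is elided from this hunk. A sketch of what it would look like, assuming the same set_grade_response API:
@step(u'External graders respond "([^"]*)" with message "([^"]*)"')
def set_external_grader_response_with_message(step, correctness, message):
    # Sketch: mirrors the step above, but with an explicit message
    assert(correctness in ['correct', 'incorrect'])
    world.xqueue_server.set_grade_response(
        {'correct': correctness == 'correct',
         'score': 1 if correctness == 'correct' else 0,
         'msg': message})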
@step(u'I answer a "([^"]*)" problem "([^"]*)ly"')
def answer_problem(step, problem_type, correctness):
@@ -169,12 +190,31 @@ def answer_problem(step, problem_type, correctness):
inputfield('script', input_num=1).fill(str(first_addend))
inputfield('script', input_num=2).fill(str(second_addend))
elif problem_type == 'code':
# The fake xqueue server is configured to respond
# correct / incorrect no matter what we submit.
# Furthermore, since the inline code response uses
# JavaScript to make the code display nicely, it's difficult
# to programmatically input text
# (there's no <textarea> we can simply fill with text).
# For this reason, we submit the initial code in the response
# (configured in the problem XML above).
# (A JavaScript-based alternative is sketched after this function.)
pass
# Submit the problem
check_problem(step)
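As the comment notes, the code editor exposes no plain <textarea> to fill. A hypothetical alternative, assuming the editor is CodeMirror-based (not part of this commit), would inject text through the editor's JavaScript API:
# Hypothetical: set the editor contents via CodeMirror's setValue()
world.browser.execute_script(
    'document.querySelector(".CodeMirror").CodeMirror'
    '.setValue("print \\"Hello world!\\"");')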
@step(u'I check a problem')
def check_problem(step):
try:
world.browser.find_by_css("input.check").click()
except WebDriverException:
# Occasionally, MathJax or other JavaScript can cover up
# the 'Check' input temporarily.
# If this happens, wait a second, then try again
time.sleep(1)
world.browser.find_by_css("input.check").click()
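The one-shot retry above could be generalized. A hypothetical helper (not part of this commit) along the same lines:
def click_with_retry(css_selector, attempts=3, delay=1):
    # Retry clicks that fail while MathJax or other JavaScript
    # briefly covers the target element
    for attempt in range(attempts):
        try:
            world.browser.find_by_css(css_selector).click()
            return
        except WebDriverException:
            if attempt == attempts - 1:
                raise
            time.sleep(delay)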
@@ -207,7 +247,8 @@ def assert_answer_mark(step, problem_type, correctness):
'string': ['div.correct'],
'numerical': ['div.correct'],
'formula': ['div.correct'],
-'script': ['div.correct'], }
+'script': ['div.correct'],
+'code': ['span.correct'], }
incorrect_selectors = {'drop down': ['span.incorrect'],
'multiple choice': ['label.choicegroup_incorrect',
@@ -216,7 +257,8 @@ def assert_answer_mark(step, problem_type, correctness):
'string': ['div.incorrect'],
'numerical': ['div.incorrect'],
'formula': ['div.incorrect'],
-'script': ['div.incorrect']}
+'script': ['div.incorrect'],
+'code': ['span.incorrect'], }
assert(correctness in ['correct', 'incorrect', 'unanswered'])
assert(problem_type in correct_selectors and problem_type in incorrect_selectors)
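The assertion body that consumes these selector dicts is elided from the hunk; for the correct / incorrect cases it presumably looks something like this (a sketch; the wait time and the splinter call are assumptions):
selectors = (correct_selectors if correctness == 'correct'
             else incorrect_selectors)[problem_type]
for sel in selectors:
    assert world.browser.is_element_present_by_css(sel, wait_time=4)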
......
@@ -40,6 +40,18 @@ DATABASES = {
}
}
# Set up XQueue information so that the LMS will send
# requests to a mock XQueue server running locally
XQUEUE_PORT = 8027
XQUEUE_INTERFACE = {
"url": "http://127.0.0.1:%d" % XQUEUE_PORT,
"django_auth": {
"username": "lms",
"password": "***REMOVED***"
},
"basic_auth": ('anant', 'agarwal'),
}
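Since XQUEUE_INTERFACE points at 127.0.0.1:8027, requests from the LMS's xqueue client will reach the mock server started in the lettuce hooks. A quick manual smoke check might look like this (a sketch; the URL path and the use of the requests library are assumptions, as the mock accepts any POST):
import requests
resp = requests.post('http://127.0.0.1:8027/xqueue/submit/')
print resp.status_code  # the mock should answer 200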
# Do not display the YouTube videos in the browser while running the
# acceptance tests. This makes them faster and more reliable.
MITX_FEATURES['STUB_VIDEO_FOR_TESTING'] = True
......