Commit 8b07411a by Will Daly

Merge pull request #513 from edx/will/student-training-test

More selenium
parents b8be7f5f 97eb3771
@@ -58,6 +58,7 @@ coverage
ora2db
storage/*
openassessment/xblock/static/js/fixtures/*.html
test/selenium/screenshots/*
# logging
logs/*.log*
@@ -3,7 +3,6 @@
ddt==0.8.0
django-nose==1.2
bok_choy==0.3.1
mock==1.0.1
moto==0.2.22
nose==1.3.0
......
@@ -15,7 +15,7 @@ To use the tests:
.. code:: bash
cd edx-ora2
make install-test
pip install -r requirements/test-acceptance.txt
2. Run the tests
@@ -24,4 +24,4 @@ To use the tests:
cd edx-ora2/test/selenium
export BASE_URL=https://{USER}:{PASSWORD}@example.com
python tests.py
\ No newline at end of file
python tests.py
@@ -9,6 +9,11 @@ BASE_URL = os.environ.get('BASE_URL')
assert BASE_URL is not None, 'No base URL specified - please set the `BASE_URL` environment variable'
class PageConfigurationError(Exception):
""" A page object was not configured correctly. """
pass
class OpenAssessmentPage(PageObject):
"""
Base class for ORA page objects.
@@ -82,24 +87,49 @@ class SubmissionPage(OpenAssessmentPage):
return self.q(css=".step--response.is--complete").is_present()
class SelfAssessmentPage(OpenAssessmentPage):
class AssessmentPage(OpenAssessmentPage):
"""
Page object representing the "self assessment" step in an ORA problem.
Page object representing an "assessment" step in an ORA problem.
"""
ASSESSMENT_TYPES = ['self-assessment', 'peer-assessment', 'student-training']
def __init__(self, assessment_type, *args):
"""
Configure which assessment type this page object represents.
Args:
assessment_type: One of the valid assessment types.
*args: Passed to the base class.
"""
super(AssessmentPage, self).__init__(*args)
if assessment_type not in self.ASSESSMENT_TYPES:
msg = "Invalid assessment type; must choose one: {choices}".format(
choices=", ".join(self.ASSESSMENT_TYPES)
)
raise PageConfigurationError(msg)
self._assessment_type = assessment_type
def is_browser_on_page(self):
return self.q(css="#openassessment__self-assessment").is_present()
css_id = "#openassessment__{assessment_type}".format(
assessment_type=self._assessment_type
)
return self.q(css=css_id).is_present()
def assess(self, options_selected):
"""
Create a self-assessment.
Create an assessment.
Args:
options_selected (list of int): list of the indices (starting from 0)
of each option to select in the rubric.
Returns:
AssessmentPage
Example usage:
>>> self_page.assess([0, 2, 1])
>>> page.assess([0, 2, 1])
"""
for criterion_num, option_num in enumerate(options_selected):
@@ -109,7 +139,7 @@ class SelfAssessmentPage(OpenAssessmentPage):
)
self.q(css=sel).first.click()
self.submit()
EmptyPromise(lambda: self.has_submitted, 'Self assessment is complete').fulfill()
return self
@property
def response_text(self):
@@ -119,17 +149,97 @@ class SelfAssessmentPage(OpenAssessmentPage):
Returns:
unicode
"""
return u" ".join(self.q(css=".self-assessment__display__response>p").text)
css_sel = ".{assessment_type}__display__response>p".format(
assessment_type=self._assessment_type
)
return u" ".join(self.q(css=css_sel).text)
def wait_for_complete(self):
"""
Wait until the assessment step is marked as complete.
Raises:
BrokenPromise
Returns:
AssessmentPage
"""
EmptyPromise(lambda: self.is_complete, 'Assessment is complete').fulfill()
return self
def wait_for_response(self):
"""
Wait for response text to be available.
Raises:
BrokenPromise
Returns:
AssessmentPage
"""
EmptyPromise(
lambda: len(self.response_text) > 0,
"Has response text."
).fulfill()
return self
def wait_for_num_completed(self, num_completed):
"""
Wait for at least a certain number of assessments
to be completed.
Can only be used with peer-assessment and student-training.
Args:
num_completed (int): The number of assessments we expect
to be completed.
Raises:
PageConfigurationError
BrokenPromise
Returns:
AssessmentPage
"""
EmptyPromise(
lambda: self.num_completed >= num_completed,
"Completed at least one assessment."
).fulfill()
return self
@property
def has_submitted(self):
def is_complete(self):
"""
Check whether the assessment was submitted successfully.
Returns:
bool
"""
return self.q(css=".step--self-assessment.is--complete").is_present()
css_sel = ".step--{assessment_type}.is--complete".format(
assessment_type=self._assessment_type
)
return self.q(css=css_sel).is_present()
@property
def num_completed(self):
"""
Retrieve the number of completed assessments.
Can only be used for peer-assessment and student-training.
Returns:
int, or None if no completed count is displayed
Raises:
PageConfigurationError
"""
if self._assessment_type not in ['peer-assessment', 'student-training']:
msg = "Only peer assessment and student training steps can retrieve the number completed"
raise PageConfigurationError(msg)
candidates = [int(x) for x in self.q(css=".step__status__value--completed").text]
return candidates[0] if len(candidates) > 0 else None
class GradePage(OpenAssessmentPage):
......
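The new AssessmentPage replaces the self-assessment-specific page object and is parameterized by step type. A minimal usage sketch, not part of this commit, assuming `browser` and `problem_location` are supplied by the bok-choy test harness and `complete_self_assessment` is a hypothetical helper:

from pages import AssessmentPage, PageConfigurationError

def complete_self_assessment(browser, problem_location, options_selected):
    """Drive the self-assessment step and return whether it completed."""
    page = AssessmentPage('self-assessment', browser, problem_location)
    # Each wait_for_* method returns the page object, so calls can be chained.
    page.wait_for_page().wait_for_response()
    page.assess(options_selected).wait_for_complete()
    return page.is_complete

# Passing an unknown step name raises PageConfigurationError, e.g.:
# AssessmentPage('instructor-assessment', browser, problem_location)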
"""
UI-level acceptance tests for OpenAssessment.
"""
import os
import unittest
import time
from functools import wraps
from bok_choy.web_app_test import WebAppTest
from bok_choy.promise import BrokenPromise
from auto_auth import AutoAuthPage
from pages import (
SubmissionPage, SelfAssessmentPage, GradePage
SubmissionPage, AssessmentPage, GradePage
)
def retry(tries=4, delay=3, backoff=2):
"""
Retry decorator with exponential backoff.
Kwargs:
tries (int): Maximum number of times to execute the function.
delay (int): Starting delay between retries.
backoff (int): Multiplier applied to the delay.
"""
def _decorator(func):
@wraps(func)
def _inner(*args, **kwargs):
_delay = delay
for attempt_num in range(tries):
try:
return func(*args, **kwargs)
except (BrokenPromise, AssertionError) as ex:
if attempt_num >= (tries - 1):
raise ex
else:
print "Test failed with {err}, retrying in {sec} seconds...".format(err=ex, sec=_delay)
time.sleep(_delay)
_delay *= backoff
return _inner
return _decorator
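# Illustrative usage sketch for the decorator above (not part of this commit).
# With the defaults (tries=4, delay=3, backoff=2) a persistently failing call
# sleeps 3, 6, and 12 seconds between attempts before the final error is
# re-raised. Only BrokenPromise and AssertionError trigger a retry; any other
# exception propagates immediately. `flaky_check` is a hypothetical example:
#
#     @retry(tries=3, delay=1, backoff=2)
#     def flaky_check(page):
#         assert page.is_browser_on_page(), "Page has not loaded yet"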
class OpenAssessmentTest(WebAppTest):
"""
UI-level acceptance tests for Open Assessment.
@@ -17,44 +49,134 @@ class OpenAssessmentTest(WebAppTest):
PROBLEM_LOCATIONS = {
'self_only': u'courses/ora2/1/1/courseware/a4dfec19cf9b4a6fb5b18be6ccd9cecc/338a4affb58a45459629e0566291381e/',
'peer_only': u'courses/ora2/1/1/courseware/a4dfec19cf9b4a6fb5b18be6ccd9cecc/417e47b2663a4f79b62dba20b21628c8/',
'student_training': u'courses/ora2/1/1/courseware/676026889c884ac1827688750871c825/5663e9b038434636977a4226d668fe02/',
}
SUBMISSION = u"This is a test submission."
OPTIONS_SELECTED = [1, 2]
EXPECTED_SCORE = 6
def setUp(self):
def setUp(self, problem_type):
"""
Create an account registered for the test course and log in.
Configure page objects to test Open Assessment.
Args:
problem_type (str): The type of problem being tested,
used to choose which part of the course to load.
"""
super(OpenAssessmentTest, self).setUp()
AutoAuthPage(self.browser, course_id=self.TEST_COURSE_ID).visit()
problem_loc = self.PROBLEM_LOCATIONS[problem_type]
self.auto_auth_page = AutoAuthPage(self.browser, course_id=self.TEST_COURSE_ID)
self.submission_page = SubmissionPage(self.browser, problem_loc)
self.self_asmnt_page = AssessmentPage('self-assessment', self.browser, problem_loc)
self.peer_asmnt_page = AssessmentPage('peer-assessment', self.browser, problem_loc)
self.student_training_page = AssessmentPage('student-training', self.browser, problem_loc)
self.grade_page = GradePage(self.browser, problem_loc)
class SelfAssessmentTest(OpenAssessmentTest):
"""
Test the self-assessment flow.
"""
def setUp(self):
super(SelfAssessmentTest, self).setUp('self_only')
@retry()
def test_self_assessment(self):
"""
Test the self-only flow.
"""
submission_page = SubmissionPage(
self.browser,
self.PROBLEM_LOCATIONS['self_only']
).visit()
submission_page.submit_response(self.SUBMISSION)
self.assertTrue(submission_page.has_submitted)
self_assessment_page = SelfAssessmentPage(
self.browser,
self.PROBLEM_LOCATIONS['self_only']
).wait_for_page()
self.assertIn(self.SUBMISSION, self_assessment_page.response_text)
self_assessment_page.assess(self.OPTIONS_SELECTED)
self.assertTrue(self_assessment_page.has_submitted)
grade_page = GradePage(
self.browser,
self.PROBLEM_LOCATIONS['self_only']
).wait_for_page()
self.assertEqual(grade_page.score, self.EXPECTED_SCORE)
# Submit a response
self.auto_auth_page.visit()
self.submission_page.visit().submit_response(self.SUBMISSION)
self.assertTrue(self.submission_page.has_submitted)
# Submit a self-assessment
self.self_asmnt_page.wait_for_page().wait_for_response()
self.assertIn(self.SUBMISSION, self.self_asmnt_page.response_text)
self.self_asmnt_page.assess(self.OPTIONS_SELECTED).wait_for_complete()
self.assertTrue(self.self_asmnt_page.is_complete)
# Verify the grade
self.assertEqual(self.grade_page.wait_for_page().score, self.EXPECTED_SCORE)
class PeerAssessmentTest(OpenAssessmentTest):
"""
Test the peer-assessment flow.
It's difficult to guarantee that a student will both give and
receive enough assessments to be assigned a grade, so we stop
once we've given one peer assessment.
"""
def setUp(self):
super(PeerAssessmentTest, self).setUp('peer_only')
@retry()
def test_peer_assessment(self):
# Create a submission for the first student, so there's
# at least one submission to assess.
self.auto_auth_page.visit()
self.submission_page.visit().submit_response(self.SUBMISSION)
# Create a submission for the second student
self.auto_auth_page.visit()
self.submission_page.visit().submit_response(self.SUBMISSION)
# Assess the submission (there should be at least one available)
self.peer_asmnt_page.wait_for_page().wait_for_response().assess(self.OPTIONS_SELECTED)
# Check that the status indicates we've assessed one submission
try:
self.peer_asmnt_page.wait_for_num_completed(1)
except BrokenPromise:
self.fail("Did not complete at least one peer assessment.")
class StudentTrainingTest(OpenAssessmentTest):
"""
Test student training (the "learning to assess" step).
"""
# Select options that are correct so we can complete the flow.
STUDENT_TRAINING_OPTIONS = [
[1, 2],
[0, 2]
]
def setUp(self):
super(StudentTrainingTest, self).setUp('student_training')
@retry()
def test_student_training(self):
# Create a submission so we can get to student training
self.auto_auth_page.visit()
self.submission_page.visit().submit_response(self.SUBMISSION)
# Complete two training examples, satisfying the requirements
for example_num, options_selected in enumerate(self.STUDENT_TRAINING_OPTIONS):
try:
self.student_training_page.wait_for_num_completed(example_num)
except BrokenPromise:
msg = "Did not complete at least {num} student training example(s).".format(num=example_num)
self.fail(msg)
self.student_training_page.wait_for_page().wait_for_response().assess(options_selected)
# Check that we've completed student training
try:
self.student_training_page.wait_for_complete()
except BrokenPromise:
self.fail("Student training was not marked complete.")
if __name__ == "__main__":
# Configure the screenshot directory
if 'SCREENSHOT_DIR' not in os.environ:
tests_dir = os.path.dirname(__file__)
os.environ['SCREENSHOT_DIR'] = os.path.join(tests_dir, 'screenshots')
unittest.main()
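Because the suite is built on standard unittest test cases, a single flow can also be run programmatically. A minimal sketch, assuming BASE_URL is already exported; note that the SCREENSHOT_DIR setup above only runs under the __main__ block, so set that variable yourself if you want screenshots:

import unittest
from tests import SelfAssessmentTest

# Run only the self-assessment flow; equivalent to `python tests.py SelfAssessmentTest`.
suite = unittest.TestLoader().loadTestsFromTestCase(SelfAssessmentTest)
unittest.TextTestRunner(verbosity=2).run(suite)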