Commit ae8b0fd0 by Will Daly

Remove unused submission ID from perf tests

Update perf test to load steps in parallel.
Update perf test to hit endpoints whether or not they're ready.
Generalize problem fixtures; select rubric options at random.
Add continued assessment to the peer step.
Add example-based grading to the perf test.
Update test course and README.
parent b4a0f027
@@ -11,7 +11,7 @@ Performance Tests
 2. Import ``course.tar.gz`` into Studio:
    * Course ID: 1
-   * Course Org: tim
+   * Course Org: ora2
    * Course Run: 1
 3. Enable ``auto_auth`` in the LMS feature flags:
@@ -24,13 +24,15 @@ Performance Tests
         }
     }
-4. **Optional**: Increase open file limit:
+4. Log in as a staff user and schedule a training task in the Course Staff Debug panel of the example-based assessment problem.
+5. **Optional**: Increase the open file limit:
    .. code:: bash
        ulimit -n 2048
-5. Start the Locust server, and point it at the test server. **NOTE**: You *must* include the trailing slash in the host URL.
+6. Start the Locust server, and point it at the test server. **NOTE**: You *must* include the trailing slash in the host URL.
    .. code:: bash
@@ -45,4 +47,4 @@ If your server has basic auth enabled, provide credentials with environment vars
     cd performance
     BASIC_AUTH_USER=foo BASIC_AUTH_PASSWORD=bar locust --host=http://example.com/
-6. Visit the `Locust web UI <http://localhost:8089>`_ to start the test.
+7. Visit the `Locust web UI <http://localhost:8089>`_ to start the test.
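Note on step 3: the feature-flag JSON is cut off in the hunk above (only the closing braces are shown). As a minimal sketch — assuming the standard edX LMS settings layout, where ``auto_auth`` is controlled by the ``AUTOMATIC_AUTH_FOR_TESTING`` flag — the equivalent Django settings change would look like this:

.. code:: python

    # Sketch only: enable the auto_auth endpoint for load testing.
    # The flag name follows the usual edX convention; verify it against your release.
    FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] = True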
@@ -5,45 +5,74 @@ Performance tests for the OpenAssessment XBlock.
 import os
 import json
 import random
-from lxml import etree
+from collections import namedtuple
+
+import gevent
 import loremipsum
 from locust import HttpLocust, TaskSet, task
 class OpenAssessmentPage(object):
     """
     Encapsulate interactions with the OpenAssessment XBlock's pages.
     """

     # These assume that the course fixture has been installed
-    COURSE_ID = "tim/1/1"
-    BASE_URL = "courses/tim/1/1/courseware/efa85eb090164a208d772a344df7181d/69f15a02c5af4e95b9c5525771b8f4ee/"
-    BASE_HANDLER_URL = "courses/tim/1/1/xblock/i4x:;_;_tim;_1;_openassessment;_0e2bbf6cc89e45d98b028fa4e2d46314/handler/"
-    OPTIONS_SELECTED = {
-        "Ideas": "Good",
-        "Content": "Excellent",
+    ProblemFixture = namedtuple('ProblemFixture', [
+        'course_id', 'base_url', 'base_handler_url',
+        'rubric_options', 'render_step_handlers'
+    ])
+
+    PROBLEMS = {
+        'peer_then_self': ProblemFixture(
+            course_id="ora2/1/1",
+            base_url="courses/ora2/1/1/courseware/efa85eb090164a208d772a344df7181d/69f15a02c5af4e95b9c5525771b8f4ee/",
+            base_handler_url="courses/ora2/1/1/xblock/i4x:;_;_ora2;_1;_openassessment;_0e2bbf6cc89e45d98b028fa4e2d46314/handler/",
+            rubric_options={
+                'Ideas': ['Poor', 'Fair', 'Good'],
+                'Content': ['Poor', 'Fair', 'Good', 'Excellent']
+            },
+            render_step_handlers=[
+                'render_submission', 'render_peer_assessment',
+                'render_self_assessment', 'render_grade',
+            ]
+        ),
+        'example_based': ProblemFixture(
+            course_id="ora2/1/1",
+            base_url="courses/ora2/1/1/courseware/efa85eb090164a208d772a344df7181d/fb039ef8a34641509190918ada79122a/",
+            base_handler_url="courses/ora2/1/1/xblock/i4x:;_;_ora2;_1;_openassessment;_8df3fa4de26747e0ad99b4157e45f5e5/handler/",
+            rubric_options={
+                'Ideas': ['Bad', 'Good'],
+                'Content': ['Bad', 'Good']
+            },
+            render_step_handlers=['render_submission', 'render_grade']
+        )
     }
-    def __init__(self, client):
+    def __init__(self, client, problem_name):
         """
         Initialize the page to use specified HTTP client.

         Args:
             client (HttpSession): The HTTP client to use.
+            problem_name (unicode): Name of the problem (one of the keys in `OpenAssessmentPage.PROBLEMS`)
         """
         self.client = client
+        self.problem_fixture = self.PROBLEMS[problem_name]
+        self.logged_in = False

         # Configure basic auth
         if 'BASIC_AUTH_USER' in os.environ and 'BASIC_AUTH_PASSWORD' in os.environ:
             self.client.auth = (os.environ['BASIC_AUTH_USER'], os.environ['BASIC_AUTH_PASSWORD'])

-        self.step_resp_dict = dict()
-
     def log_in(self):
         """
         Log in as a unique user with access to the XBlock(s) under test.
         """
-        self.client.get("auto_auth", params={'course_id': self.COURSE_ID}, verify=False)
+        resp = self.client.get("auto_auth", params={'course_id': self.problem_fixture.course_id}, verify=False)
+        self.logged_in = (resp.status_code == 200)
         return self
     def load_steps(self):
@@ -51,74 +80,47 @@ class OpenAssessmentPage(object):
         Load all steps in the OpenAssessment flow.
         """
         # Load the container page
-        self.client.get(self.BASE_URL, verify=False)
-
-        # Load each of the steps
-        step_dict = {
-            'submission': 'render_submission',
-            'peer': 'render_peer_assessment',
-            'self': 'render_self_assessment',
-            'grade': 'render_grade',
-        }
-
-        self.step_resp_dict = {
-            name: self.client.get(self.handler_url(handler), verify=False)
-            for name, handler in step_dict.iteritems()
-        }
+        self.client.get(self.problem_fixture.base_url, verify=False)
+
+        # Load each of the steps in parallel
+        get_unverified = lambda url: self.client.get(url, verify=False)
+        gevent.joinall([
+            gevent.spawn(get_unverified, url) for url in [
+                self.handler_url(handler)
+                for handler in self.problem_fixture.render_step_handlers
+            ]
+        ], timeout=0.5)

         return self
-    def can_submit_response(self):
-        """
-        Check whether we're allowed to submit a response.
-        Should be called after steps have been loaded.
-
-        Returns:
-            bool
-        """
-        resp = self.step_resp_dict.get('submission')
-        return resp is not None and resp.content is not None and 'id="submission__answer__value"' in resp.content.lower()
-
-    def can_peer_assess(self):
-        """
-        Check whether we're allowed to assess a peer.
-        Should be called after steps have been loaded.
-
-        Returns:
-            bool
-        """
-        resp = self.step_resp_dict.get('peer')
-        return resp is not None and resp.content is not None and 'class="assessment__fields"' in resp.content.lower()
-
-    def can_self_assess(self):
-        """
-        Check whether we're allowed to self-assess.
-        Should be called after steps have been loaded.
-
-        Returns:
-            bool
-        """
-        resp = self.step_resp_dict.get('self')
-        return resp is not None and resp.content is not None and 'class="assessment__fields"' in resp.content.lower()
     def submit_response(self):
         """
         Submit a response.
         """
         payload = json.dumps({
-            'submission': loremipsum.get_paragraphs(random.randint(1, 10)),
+            'submission': u' '.join(loremipsum.get_paragraphs(random.randint(1, 10))),
         })
         self.client.post(self.handler_url('submit'), data=payload, headers=self._post_headers, verify=False)
-    def peer_assess(self):
+    def peer_assess(self, continue_grading=False):
         """
         Assess a peer.
+
+        Kwargs:
+            continue_grading (bool): If true, simulate "continued grading",
+                in which a student asks to assess peers in addition to the required number.
         """
-        payload = json.dumps({
-            'submission_uuid': self._submission_uuid('peer'),
-            'options_selected': self.OPTIONS_SELECTED,
-            'feedback': loremipsum.get_paragraphs(random.randint(1, 3)),
-        })
+        params = {
+            'options_selected': self._select_random_options(),
+            'overall_feedback': loremipsum.get_paragraphs(random.randint(1, 3)),
+            'criterion_feedback': {}
+        }
+
+        if continue_grading:
+            params['continue_grading'] = True
+
+        payload = json.dumps(params)
         self.client.post(self.handler_url('peer_assess'), data=payload, headers=self._post_headers, verify=False)
     def self_assess(self):
@@ -126,8 +128,7 @@ class OpenAssessmentPage(object):
         Complete a self-assessment.
         """
         payload = json.dumps({
-            'submission_uuid': self._submission_uuid('self'),
-            'options_selected': self.OPTIONS_SELECTED,
+            'options_selected': self._select_random_options()
         })
         self.client.post(self.handler_url('self_assess'), data=payload, headers=self._post_headers, verify=False)
@@ -141,30 +142,16 @@ class OpenAssessmentPage(object):
         Returns:
             str
         """
-        return "{base}{handler}".format(base=self.BASE_HANDLER_URL, handler=handler_name)
+        return "{base}{handler}".format(base=self.problem_fixture.base_handler_url, handler=handler_name)
-    def _submission_uuid(self, step):
+    def _select_random_options(self):
         """
-        Retrieve the submission UUID from the DOM.
-
-        Args:
-            step (str): Either "peer" or "self"
-
-        Returns:
-            str or None
-        """
-        resp = self.step_resp_dict.get(step)
-        if resp is None:
-            return None
-
-        # There might be a faster way to do this
-        root = etree.fromstring(resp.content)
-        xpath_sel = "span[@id=\"{step}_submission_uuid\"]".format(step=step)
-        submission_id_el = root.find(xpath_sel)
-
-        if submission_id_el is not None:
-            return submission_id_el.text.strip()
-        else:
-            return None
+        Select random options for each criterion in the rubric.
+        """
+        return {
+            criterion: random.choice(options)
+            for criterion, options in self.problem_fixture.rubric_options.iteritems()
+        }
     @property
     def _post_headers(self):
@@ -178,40 +165,70 @@ class OpenAssessmentPage(object):
         }
 class OpenAssessmentTasks(TaskSet):
     """
     Virtual user interactions with the OpenAssessment XBlock.
     """

-    def __init__(self, *args, **kwargs):
+    def __init__(self, *args, **kwargs):  # pylint: disable=W0613
         """
         Initialize the task set.
         """
         super(OpenAssessmentTasks, self).__init__(*args, **kwargs)
-        self.page = OpenAssessmentPage(self.client)
+        self.page = None
-    def on_start(self):
+    @task
+    def peer_and_self(self):
         """
-        Log in as a unique user.
+        Test the peer-->self workflow.
         """
-        self.page.log_in()
+        if self.page is None:
+            self.page = OpenAssessmentPage(self.client, 'peer_then_self')  # pylint: disable=E1101
+            self.page.log_in()
+
+        if not self.page.logged_in:
+            self.page.log_in()
+        else:
+            self._submit_response()
+
+            # Randomly peer/self assess or log in as a new user.
+            # This should be sufficient to get students through
+            # the entire flow (satisfying the requirements for peer assessment).
+            action = random.randint(0, 100)
+            if action <= 80:
+                continue_grading = random.randint(0, 10) < 4
+                self.page.peer_assess(continue_grading=continue_grading)
+                self.page.self_assess()
+            else:
+                self.page.log_in()
     @task
-    def workflow(self):
+    def example_based(self):
         """
-        Submit a response, if we're allowed to.
+        Test example-based assessment only.
         """
-        self.page.load_steps()
-        if self.page.can_submit_response():
-            self.page.submit_response()
-
-        if self.page.can_peer_assess():
-            self.page.peer_assess()
-
-        if self.page.can_self_assess():
-            self.page.self_assess()
+        if self.page is None:
+            self.page = OpenAssessmentPage(self.client, 'example_based')  # pylint: disable=E1101
+            self.page.log_in()
+
+        if not self.page.logged_in:
+            self.page.log_in()
+        else:
+            self._submit_response()
+            if random.randint(0, 100) < 50:
+                self.page.log_in()
+    def _submit_response(self):
+        """
+        Simulate the user loading the page, submitting a response,
+        then reloading the steps (usually triggered by AJAX).
+
+        If the user has already submitted, the handler will return
+        an error message in the JSON, but the HTTP status will still be 200.
+        """
+        self.page.load_steps()
+        self.page.submit_response()
+        self.page.load_steps()
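(Aside, not part of this commit: because the handler reports "already submitted" in the JSON body while still returning HTTP 200, such requests never register as Locust failures. A sketch of how that could be surfaced using Locust's ``catch_response`` support, assuming the handler returns a JSON object with a ``success`` field:)

.. code:: python

    # Hypothetical variant of submit_response(): mark JSON-level errors
    # as Locust failures even though the HTTP status is 200.
    with self.client.post(self.handler_url('submit'), data=payload,
                          headers=self._post_headers, verify=False,
                          catch_response=True) as response:
        if json.loads(response.content).get('success'):
            response.success()
        else:
            response.failure("Handler reported an error in the JSON body")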
 class OpenAssessmentLocust(HttpLocust):
...
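(The diff is truncated here. For orientation, a locustfile of this era wires the task set into the load runner roughly as follows — a sketch with illustrative wait times, not the repository's actual values:)

.. code:: python

    class OpenAssessmentLocust(HttpLocust):
        """Hypothetical wiring; only task_set is required by Locust."""
        task_set = OpenAssessmentTasks
        min_wait = 3000   # ms between tasks (illustrative value)
        max_wait = 5000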