Commit d68d8238 by Eric Fischer

Refactor/cleanup performance tests

Prior code was very outdated and not at all useful.

With these changes, I was able to get a "load test" running against
my local devstack setup, with 20 concurrent users.
parent bd5bbe38
locustio==0.7.3
loremipsum==1.0.5 loremipsum==1.0.5
pyzmq==14.0.1 pyzmq==14.0.1
...@@ -8,11 +8,13 @@ Performance Tests ...@@ -8,11 +8,13 @@ Performance Tests
cd ora2 cd ora2
pip install -r requirements/perf.txt pip install -r requirements/perf.txt
2. Import ``course.tar.gz`` into Studio: 2. Import ``edx-ora2/scripts/data/course.tar.gz`` into Studio:
* Course ID: 1 Note that this is the same course that gets installed for acceptance testing
* Course Org: ora2
* Course Run: 1 * Course Id: ORA203
* Course Org: edx
* Course Run: course
3. Enable ``auto_auth`` in the LMS feature flags: 3. Enable ``auto_auth`` in the LMS feature flags:
...@@ -24,15 +26,13 @@ Performance Tests ...@@ -24,15 +26,13 @@ Performance Tests
} }
} }
4. Log in as a staff user and schedule a training task in the Course Staff Debug of the example based assessment problem. 4. **Optional**: Increase open file limit:
5. **Optional**: Increase open file limit:
.. code:: bash .. code:: bash
ulimit -n 2048 ulimit -n 2048
6. Start the Locust server, and point it at the test server. **NOTE**: You *must* include the trailing slash in the host URL. 5. Start the Locust server, and point it at the test server. **NOTE**: You *must* include the trailing slash in the host URL.
.. code:: bash .. code:: bash
......
""" """
Performance tests for the OpenAssessment XBlock. Performance test utility for the OpenAssessment XBlock.
""" """
import os import os
...@@ -8,8 +8,6 @@ import random ...@@ -8,8 +8,6 @@ import random
from collections import namedtuple from collections import namedtuple
import gevent import gevent
import loremipsum import loremipsum
from locust import HttpLocust, TaskSet, task
class OpenAssessmentPage(object): class OpenAssessmentPage(object):
...@@ -25,9 +23,9 @@ class OpenAssessmentPage(object): ...@@ -25,9 +23,9 @@ class OpenAssessmentPage(object):
PROBLEMS = { PROBLEMS = {
'peer_then_self': ProblemFixture( 'peer_then_self': ProblemFixture(
course_id="ora2/1/1", course_id="course-v1:edx+ORA203+course",
base_url= "courses/ora2/1/1/courseware/efa85eb090164a208d772a344df7181d/69f15a02c5af4e95b9c5525771b8f4ee/", base_url="courses/course-v1:edx+ORA203+course/xblock/block-v1:edx+ORA203+course+type@openassessment+block@47dc34e528f441f493db14a2cbdfa8b9/",
base_handler_url="courses/ora2/1/1/xblock/i4x:;_;_ora2;_1;_openassessment;_0e2bbf6cc89e45d98b028fa4e2d46314/handler/", base_handler_url="courses/course-v1:edx+ORA203+course/xblock/block-v1:edx+ORA203+course+type@openassessment+block@47dc34e528f441f493db14a2cbdfa8b9/handler/",
rubric_options={ rubric_options={
'Ideas': ['Poor', 'Fair', 'Good'], 'Ideas': ['Poor', 'Fair', 'Good'],
'Content': ['Poor', 'Fair', 'Good', 'Excellent'] 'Content': ['Poor', 'Fair', 'Good', 'Excellent']
...@@ -36,17 +34,6 @@ class OpenAssessmentPage(object): ...@@ -36,17 +34,6 @@ class OpenAssessmentPage(object):
'render_submission', 'render_peer_assessment', 'render_submission', 'render_peer_assessment',
'render_self_assessment', 'render_grade', 'render_self_assessment', 'render_grade',
] ]
),
'example_based': ProblemFixture(
course_id="ora2/1/1",
base_url="courses/ora2/1/1/courseware/efa85eb090164a208d772a344df7181d/fb039ef8a34641509190918ada79122a/",
base_handler_url="courses/ora2/1/1/xblock/i4x:;_;_ora2;_1;_openassessment;_8df3fa4de26747e0ad99b4157e45f5e5/handler/",
rubric_options={
'Ideas': ['Bad', 'Good'],
'Content': ['Bad', 'Good']
},
render_step_handlers=['render_submission', 'render_grade']
) )
} }
...@@ -105,7 +92,7 @@ class OpenAssessmentPage(object): ...@@ -105,7 +92,7 @@ class OpenAssessmentPage(object):
Submit a response. Submit a response.
""" """
payload = json.dumps({ payload = json.dumps({
'submission': u' '.join(loremipsum.get_paragraphs(random.randint(1, 10))), 'submission': [u' '.join(loremipsum.get_sentence())],
}) })
self.client.post(self.handler_url('submit'), data=payload, headers=self._post_headers, verify=False) self.client.post(self.handler_url('submit'), data=payload, headers=self._post_headers, verify=False)
...@@ -171,78 +158,3 @@ class OpenAssessmentPage(object): ...@@ -171,78 +158,3 @@ class OpenAssessmentPage(object):
'X-CSRFToken': self.client.cookies.get('csrftoken', ''), 'X-CSRFToken': self.client.cookies.get('csrftoken', ''),
'Referer': self.hostname 'Referer': self.hostname
} }
class OpenAssessmentTasks(TaskSet):
    """
    Simulated learner traffic against the OpenAssessment XBlock.

    Each virtual user lazily binds an ``OpenAssessmentPage`` to the Locust
    HTTP client, then repeatedly submits responses and assesses peers,
    occasionally re-authenticating as a brand-new auto-auth user.
    """

    def __init__(self, *args, **kwargs):  # pylint: disable=W0613
        """
        Initialize the task set.
        """
        super(OpenAssessmentTasks, self).__init__(*args, **kwargs)
        self.hostname = self.locust.host
        self.page = None

    @task
    def peer_and_self(self):
        """
        Test the peer-->self workflow.
        """
        self._init_page('peer_then_self')
        if not self.page.logged_in:
            self.page.log_in()
            return
        self._submit_response()
        # Randomly peer/self assess or log in as a new user.
        # This should be sufficient to get students through
        # the entire flow (satisfying the requirements for peer assessment).
        if random.randint(0, 100) <= 80:
            keep_grading = random.randint(0, 10) < 4
            self.page.peer_assess(continue_grading=keep_grading)
            self.page.self_assess()
        else:
            self.page.log_in()

    @task
    def example_based(self):
        """
        Test example-based assessment only.
        """
        self._init_page('example_based')
        if not self.page.logged_in:
            self.page.log_in()
            return
        self._submit_response()
        # Half of the time, rotate to a fresh user.
        if random.randint(0, 100) < 50:
            self.page.log_in()

    def _init_page(self, problem_name):
        """
        Create the page object for ``problem_name`` and log in,
        but only on the first call.
        """
        if self.page is None:
            self.page = OpenAssessmentPage(self.hostname, self.client, problem_name)  # pylint: disable=E1101
            self.page.log_in()

    def _submit_response(self):
        """
        Simulate the user loading the page, submitting a response,
        then reloading the steps (usually triggered by AJAX).
        If the user has already submitted, the handler will return
        an error message in the JSON, but the HTTP status will still be 200.
        """
        self.page.load_steps()
        self.page.submit_response()
        self.page.load_steps()
class OpenAssessmentLocust(HttpLocust):
    """
    Performance test definition for the OpenAssessment XBlock.
    """
    # Each simulated user runs the peer/self and example-based tasks.
    task_set = OpenAssessmentTasks
    # Wait 10-15 seconds (Locust wait times are in milliseconds) between tasks,
    # approximating real user think time.
    min_wait = 10000
    max_wait = 15000
import sys
import os
# The shared OpenAssessmentPage helpers live in base.py one directory up;
# locust executes this file as a script, so put that directory on sys.path.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from base import *
from locust import HttpLocust, TaskSet, task
class MakeManySubmissionsTasks(TaskSet):
    """
    Seed the submissions table with data.

    Useful for measuring migration performance, which needs a
    realistically-sized submissions table to be meaningful.
    """

    def __init__(self, *args, **kwargs):  # pylint: disable=W0613
        """
        Initialize the task set.
        """
        super(MakeManySubmissionsTasks, self).__init__(*args, **kwargs)
        self.hostname = self.locust.host
        self.page = None

    @task
    def generate_data(self):
        """
        Puts some submissions in the database
        """
        # A fresh page per task means a fresh auto-auth user per submission.
        page = OpenAssessmentPage(self.hostname, self.client, 'peer_then_self')  # pylint: disable=E1101
        self.page = page
        page.log_in()
        # Submit something
        page.submit_response()
class MakeSubDataLocust(HttpLocust):
    """
    Performance test definition
    """
    # Hammer the submit endpoint with near-zero think time (milliseconds)
    # so the table fills quickly.
    task_set = MakeManySubmissionsTasks
    min_wait = 0
    max_wait = 100
# ``import ../base.py`` is not valid Python syntax. Mirror the other
# locustfile: add the parent directory to sys.path, then pull in the shared
# OpenAssessmentPage helpers from base.py.
import os
import sys

sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from base import *  # pylint: disable=W0401
from locust import HttpLocust, TaskSet, task
class PeerSelfAndExampleBasedTasks(TaskSet):
    """
    Virtual user interactions with the OpenAssessment XBlock.

    Drives both the peer-->self workflow and the example-based
    (AI-graded) workflow against a running LMS.
    """

    def __init__(self, *args, **kwargs):  # pylint: disable=W0613
        """
        Initialize the task set.
        """
        super(PeerSelfAndExampleBasedTasks, self).__init__(*args, **kwargs)
        self.hostname = self.locust.host
        self.page = None

    @task
    def peer_and_self(self):
        """
        Test the peer-->self workflow.
        """
        if self.page is None:
            self.page = OpenAssessmentPage(self.hostname, self.client, 'peer_then_self')  # pylint: disable=E1101
            self.page.log_in()
        if not self.page.logged_in:
            self.page.log_in()
            return
        self._submit_response()
        # Randomly peer/self assess or log in as a new user.
        # This should be sufficient to get students through
        # the entire flow (satisfying the requirements for peer assessment).
        roll = random.randint(0, 100)
        if roll > 80:
            self.page.log_in()
            return
        self.page.peer_assess(continue_grading=random.randint(0, 10) < 4)
        self.page.self_assess()

    @task
    def example_based(self):
        """
        Test example-based assessment only.
        """
        if self.page is None:
            self.page = OpenAssessmentPage(self.hostname, self.client, 'example_based')  # pylint: disable=E1101
            self.page.log_in()
        if not self.page.logged_in:
            self.page.log_in()
            return
        self._submit_response()
        # Half of the time, rotate to a new auto-auth user.
        if random.randint(0, 100) < 50:
            self.page.log_in()

    def _submit_response(self):
        """
        Simulate the user loading the page, submitting a response,
        then reloading the steps (usually triggered by AJAX).
        If the user has already submitted, the handler will return
        an error message in the JSON, but the HTTP status will still be 200.
        """
        self.page.load_steps()
        self.page.submit_response()
        self.page.load_steps()
class OpenAssessmentLocust(HttpLocust):
    """
    Performance test definition for the OpenAssessment XBlock.
    """
    # Each simulated user runs the combined peer/self + example-based tasks.
    task_set = PeerSelfAndExampleBasedTasks
    # Wait 10-15 seconds between tasks (Locust wait times are milliseconds),
    # approximating real user think time.
    min_wait = 10000
    max_wait = 15000
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment