Commit 80389526 by muzaffaryousaf

Adding a new command for performance testing.

Updating the create_oa_submissions command.

TNL-900
parent e470233d
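For reference, the updated create_oa_submissions command now takes a fourth positional argument, PERCENTAGE. A minimal invocation sketch using Django's call_command, assuming placeholder course and item identifiers (not values from this commit):

    from django.core.management import call_command

    # Equivalent to: ./manage.py create_oa_submissions <COURSE_ID> <ITEM_ID> 100 50
    # The course and item identifiers below are hypothetical placeholders.
    call_command('create_oa_submissions', 'edX/Demo/2014', 'peer-assessment-block', '100', '50')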
......@@ -11,6 +11,7 @@ from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api
STEPS = ['peer', 'self']
SELF_ASSESSMENT_REQUIRED = False  # Set this to True to also create a self-assessment for each submission.
class Command(BaseCommand):
......@@ -24,7 +25,7 @@ class Command(BaseCommand):
"""
help = 'Create dummy submissions and assessments'
args = '<COURSE_ID> <ITEM_ID> <NUM_SUBMISSIONS>'
args = '<COURSE_ID> <ITEM_ID> <NUM_SUBMISSIONS> <PERCENTAGE>'
# Number of peer assessments to create per submission
NUM_PEER_ASSESSMENTS = 3
......@@ -45,9 +46,10 @@ class Command(BaseCommand):
course_id (unicode): The ID of the course to create submissions for.
item_id (unicode): The ID of the item in the course to create submissions for.
num_submissions (int): Number of submissions to create.
percentage (int or float): Percentage of submissions for which peer assessments should be created.
"""
if len(args) < 4:
raise CommandError('Usage: create_oa_submissions <COURSE_ID> <ITEM_ID> <NUM_SUBMISSIONS>')
raise CommandError('Usage: create_oa_submissions <COURSE_ID> <ITEM_ID> <NUM_SUBMISSIONS> <PERCENTAGE>')
course_id = unicode(args[0])
item_id = unicode(args[1])
......@@ -57,10 +59,18 @@ class Command(BaseCommand):
except ValueError:
raise CommandError('Number of submissions must be an integer')
try:
percentage = float(args[3])
assessments_to_create = (percentage / 100) * num_submissions
except ValueError:
raise CommandError('Percentage of submissions to assess must be an integer or float')
print u"Creating {num} submissions for {item} in {course}".format(
num=num_submissions, item=item_id, course=course_id
)
assessments_created = 0
for sub_num in range(num_submissions):
print "Creating submission {num}".format(num=sub_num)
......@@ -80,7 +90,7 @@ class Command(BaseCommand):
# Create peer assessments
for num in range(self.NUM_PEER_ASSESSMENTS):
print "-- Creating peer-assessment {num}".format(num=num)
print "-- Creating peer-workflow {num}".format(num=num)
scorer_id = 'test_{num}'.format(num=num)
......@@ -93,22 +103,26 @@ class Command(BaseCommand):
# Note that we are NOT using the priority queue here, since we know
# exactly which submission we want to score.
peer_api.create_peer_workflow_item(scorer_submission_uuid, submission_uuid)
# Create the peer assessment
peer_api.create_assessment(
scorer_submission_uuid,
scorer_id,
options_selected, {}, " ".join(loremipsum.get_paragraphs(2)),
rubric,
self.NUM_PEER_ASSESSMENTS
if assessments_created < assessments_to_create:
print "-- Creating peer-assessment {num}".format(num=num)
# Create the peer assessment
peer_api.create_assessment(
scorer_submission_uuid,
scorer_id,
options_selected, {}, " ".join(loremipsum.get_paragraphs(2)),
rubric,
self.NUM_PEER_ASSESSMENTS
)
assessments_created += 1
if SELF_ASSESSMENT_REQUIRED:
# Create a self-assessment
print "-- Creating self assessment"
self_api.create_assessment(
submission_uuid, student_item['student_id'],
options_selected, {}, " ".join(loremipsum.get_paragraphs(2)), rubric
)
# Create a self-assessment
print "-- Creating self assessment"
self_api.create_assessment(
submission_uuid, student_item['student_id'],
options_selected, {}, " ".join(loremipsum.get_paragraphs(2)), rubric
)
print "%s assessments being completed for %s submissions" % (assessments_created, num_submissions)
@property
def student_items(self):
......
"""
Measures the time taken by the
find_active_assessments,
get_submission_for_review, and
get_submission_for_over_grading
methods for a particular set of peer workflows.
"""
import random
import datetime
from django.core.management.base import BaseCommand
from openassessment.assessment.models import PeerWorkflow
class Command(BaseCommand):
"""
Note the time taken by queries.
"""
help = 'Test the performance of ' \
'find_active_assessments, ' \
'get_submission_for_review, and ' \
'get_submission_for_over_grading'
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
def handle(self, *args, **options):
"""
Execute the command.
Args:
None
"""
# Sample up to 100 peer workflows at random; this assumes ids run roughly 1..count,
# so duplicates (and missing ids) mean fewer than 100 rows may come back.
peer_workflow_count = PeerWorkflow.objects.filter(submission_uuid__isnull=False).count()
peer_workflow_ids = [random.randint(1, peer_workflow_count) for num in range(100)]
peer_workflows = list(PeerWorkflow.objects.filter(id__in=peer_workflow_ids))
#### find_active_assessments ####
pw_dt_before = datetime.datetime.now()
for peer_workflow in peer_workflows:
peer_workflow.find_active_assessments()
pw_dt_after = datetime.datetime.now()
time_taken = pw_dt_after - pw_dt_before
print "Time taken by (find_active_assessments) method Is: %s " % time_taken
#### get_submission_for_review ####
pw_dt_before = datetime.datetime.now()
for peer_workflow in peer_workflows:
peer_workflow.get_submission_for_review(2)
pw_dt_after = datetime.datetime.now()
time_taken = pw_dt_after - pw_dt_before
print "Time taken by (get_submission_for_review) method Is: %s " % time_taken
#### get_submission_for_over_grading ####
pw_dt_before = datetime.datetime.now()
for peer_workflow in peer_workflows:
peer_workflow.get_submission_for_over_grading()
pw_dt_after = datetime.datetime.now()
time_taken = pw_dt_after - pw_dt_before
print "Time taken by (get_submission_for_over_grading) method Is: %s " % time_taken