edx / edx-ora2 · Commits

Commit 80389526, authored Dec 19, 2014 by muzaffaryousaf
Adding the new command for performance testing.
Updating the create oa submissions command. TNL-900
parent e470233d
Showing 2 changed files, with 105 additions and 18 deletions (+105 -18):

openassessment/management/commands/create_oa_submissions.py (+32 -18)
openassessment/management/commands/performance_test_for_submissions.py (+73 -0)
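Both changed files are Django management commands. For context, a minimal invocation sketch, assuming a configured edx-ora2 Django environment; the course and item IDs below are hypothetical placeholders, not values from this commit:

    from django.core.management import call_command

    # Create 10 dummy submissions for a (hypothetical) course/item,
    # generating peer assessments for 50% of them.
    call_command('create_oa_submissions', 'edX/Demo/2014', 'item_1', '10', '50')

    # Time find_active_assessments, get_submission_for_review and
    # get_submission_for_over_grading over a random sample of peer workflows.
    call_command('performance_test_for_submissions')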
openassessment/management/commands/create_oa_submissions.py
@@ -11,6 +11,7 @@ from openassessment.assessment.api import peer as peer_api
 from openassessment.assessment.api import self as self_api

 STEPS = ['peer', 'self']
+SELF_ASSESSMENT_REQUIRED = False  # if you want to make self assessments then make this True.


 class Command(BaseCommand):
@@ -24,7 +25,7 @@ class Command(BaseCommand):
     """
     help = 'Create dummy submissions and assessments'
-    args = '<COURSE_ID> <ITEM_ID> <NUM_SUBMISSIONS>'
+    args = '<COURSE_ID> <ITEM_ID> <NUM_SUBMISSIONS> <PERCENTAGE>'

     # Number of peer assessments to create per submission
     NUM_PEER_ASSESSMENTS = 3
@@ -45,9 +46,10 @@ class Command(BaseCommand):
             course_id (unicode): The ID of the course to create submissions for.
             item_id (unicode): The ID of the item in the course to create submissions for.
             num_submissions (int): Number of submissions to create.
+            percentage (int or float): Percentage for assessments to be made against submissions.
         """
         if len(args) < 3:
-            raise CommandError('Usage: create_oa_submissions <COURSE_ID> <ITEM_ID> <NUM_SUBMISSIONS>')
+            raise CommandError('Usage: create_oa_submissions <COURSE_ID> <ITEM_ID> <NUM_SUBMISSIONS> <PERCENTAGE>')

         course_id = unicode(args[0])
         item_id = unicode(args[1])
@@ -57,10 +59,18 @@ class Command(BaseCommand):
         except ValueError:
             raise CommandError('Number of submissions must be an integer')

+        try:
+            percentage = float(args[3])
+            assessments_to_create = (percentage / 100) * num_submissions
+        except ValueError:
+            raise CommandError('Percentage for completed submissions must be an integer or float')
+
         print u"Creating {num} submissions for {item} in {course}".format(
             num=num_submissions, item=item_id, course=course_id
         )
+        assessments_created = 0

         for sub_num in range(num_submissions):
             print "Creating submission {num}".format(num=sub_num)
@@ -80,7 +90,7 @@ class Command(BaseCommand):
             # Create peer assessments
             for num in range(self.NUM_PEER_ASSESSMENTS):
-                print "-- Creating peer-assessment {num}".format(num=num)
+                print "-- Creating peer-workflow {num}".format(num=num)

                 scorer_id = 'test_{num}'.format(num=num)
@@ -93,22 +103,26 @@ class Command(BaseCommand):
                 # Note that we are NOT using the priority queue here, since we know
                 # exactly which submission we want to score.
                 peer_api.create_peer_workflow_item(scorer_submission_uuid, submission_uuid)

-                # Create the peer assessment
-                peer_api.create_assessment(
-                    scorer_submission_uuid,
-                    scorer_id,
-                    options_selected, {},
-                    " ".join(loremipsum.get_paragraphs(2)),
-                    rubric,
-                    self.NUM_PEER_ASSESSMENTS
-                )
+                if assessments_created < assessments_to_create:
+                    print "-- Creating peer-assessment {num}".format(num=num)
+                    # Create the peer assessment
+                    peer_api.create_assessment(
+                        scorer_submission_uuid,
+                        scorer_id,
+                        options_selected, {},
+                        " ".join(loremipsum.get_paragraphs(2)),
+                        rubric,
+                        self.NUM_PEER_ASSESSMENTS
+                    )
+                    assessments_created += 1

-            # Create a self-assessment
-            print "-- Creating self assessment"
-            self_api.create_assessment(
-                submission_uuid, student_item['student_id'],
-                options_selected, {},
-                " ".join(loremipsum.get_paragraphs(2)),
-                rubric
-            )
+            if SELF_ASSESSMENT_REQUIRED:
+                # Create a self-assessment
+                print "-- Creating self assessment"
+                self_api.create_assessment(
+                    submission_uuid, student_item['student_id'],
+                    options_selected, {},
+                    " ".join(loremipsum.get_paragraphs(2)),
+                    rubric
+                )
+
+        print "%s assessments being completed for %s submissions" % (assessments_created, num_submissions)

     @property
     def student_items(self):
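To make the new percentage gating concrete, a standalone arithmetic sketch (values are hypothetical, constants inlined from the command): with 10 submissions at 50%, assessments_to_create is 5.0, so peer-assessment creation stops once 5 assessments exist in total, not per submission:

    NUM_PEER_ASSESSMENTS = 3
    num_submissions = 10
    percentage = 50.0

    assessments_to_create = (percentage / 100) * num_submissions  # 5.0

    assessments_created = 0
    for sub_num in range(num_submissions):
        for num in range(NUM_PEER_ASSESSMENTS):
            if assessments_created < assessments_to_create:
                assessments_created += 1

    print "%s assessments being completed for %s submissions" % (assessments_created, num_submissions)
    # prints: 5 assessments being completed for 10 submissions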
openassessment/management/commands/performance_test_for_submissions.py (new file, 0 → 100644)
"""
Gives the time taken by
find_active_assessments
get_submission_for_review
get_submission_for_over_grading
methods for particular set of workflows.
"""
import
random
import
datetime
from
django.core.management.base
import
BaseCommand
from
openassessment.assessment.models
import
PeerWorkflow
class
Command
(
BaseCommand
):
"""
Note the time taken by queries.
"""
help
=
'Test the performance for '
\
'find_active_assessments'
\
'get_submission_for_review &'
\
' get_submission_for_over_grading'
def
__init__
(
self
,
*
args
,
**
kwargs
):
super
(
Command
,
self
)
.
__init__
(
*
args
,
**
kwargs
)
def
handle
(
self
,
*
args
,
**
options
):
"""
Execute the command.
Args:
None
"""
peer_workflow_count
=
PeerWorkflow
.
objects
.
filter
(
submission_uuid__isnull
=
False
)
.
count
()
peer_workflow_ids
=
[
random
.
randint
(
1
,
peer_workflow_count
)
for
num
in
range
(
100
)]
peer_workflows
=
list
(
PeerWorkflow
.
objects
.
filter
(
id__in
=
peer_workflow_ids
))
pw_dt_before
=
datetime
.
datetime
.
now
()
for
peer_workflow
in
peer_workflows
:
peer_workflow
.
find_active_assessments
()
pw_dt_after
=
datetime
.
datetime
.
now
()
time_taken
=
pw_dt_after
-
pw_dt_before
print
"Time taken by (find_active_assessments) method Is:
%
s "
%
time_taken
#### get_submission_for_review ####
pw_dt_before
=
datetime
.
datetime
.
now
()
for
peer_workflow
in
peer_workflows
:
peer_workflow
.
get_submission_for_review
(
2
)
pw_dt_after
=
datetime
.
datetime
.
now
()
time_taken
=
pw_dt_after
-
pw_dt_before
print
"Time taken by (get_submission_for_review) method Is:
%
s "
%
time_taken
#### get_submission_for_over_grading ####
pw_dt_before
=
datetime
.
datetime
.
now
()
for
peer_workflow
in
peer_workflows
:
peer_workflow
.
get_submission_for_over_grading
()
pw_dt_after
=
datetime
.
datetime
.
now
()
time_taken
=
pw_dt_after
-
pw_dt_before
print
"Time taken by (get_submission_for_over_grading) method Is:
%
s "
%
time_taken
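The three timing blocks above repeat one measure-and-print pattern; as a design note, a hedged sketch (not part of this commit) of a helper that would factor out that repetition:

    import datetime

    def time_calls(label, workflows, call):
        """Run `call(workflow)` for every workflow and print the elapsed time."""
        before = datetime.datetime.now()
        for workflow in workflows:
            call(workflow)
        print "Time taken by (%s) method is: %s" % (label, datetime.datetime.now() - before)

    # Example with plain objects, no database required:
    time_calls('noop', range(100), lambda _: None)

    # Hypothetical use against the workflows queried above:
    # time_calls('find_active_assessments', peer_workflows,
    #            lambda pw: pw.find_active_assessments())
    # time_calls('get_submission_for_review', peer_workflows,
    #            lambda pw: pw.get_submission_for_review(2))
    # time_calls('get_submission_for_over_grading', peer_workflows,
    #            lambda pw: pw.get_submission_for_over_grading())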