edx / edx-ora2 / Commits

Commit ccace947, authored Mar 17, 2014 by Will Daly

Add management command for populating the database with dummy records.

parent 8bfd03c5
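The command plugs into Django's standard management framework once the app is installed; a minimal invocation sketch (the course and item IDs below are placeholders, not values from this commit):

# Hypothetical usage sketch -- 'demo_course' and 'demo_item' are placeholders.
# Shell equivalent: python manage.py create_oa_submissions demo_course demo_item 10
from django.core.management import call_command

call_command('create_oa_submissions', 'demo_course', 'demo_item', '10')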
Showing 8 changed files with 242 additions and 1 deletion (+242 / -1):

    apps/openassessment/assessment/peer_api.py                            +25   -0
    apps/openassessment/management/__init__.py                            +0    -0
    apps/openassessment/management/commands/__init__.py                   +0    -0
    apps/openassessment/management/commands/create_oa_submissions.py      +168  -0
    apps/openassessment/management/tests/__init__.py                      +0    -0
    apps/openassessment/management/tests/test_create_oa_submissions.py    +48   -0
    requirements/base.txt                                                  +1    -0
    requirements/perf.txt                                                  +0    -1
apps/openassessment/assessment/peer_api.py (view file @ ccace947)

...
@@ -16,6 +16,7 @@ from pytz import UTC
from openassessment.assessment.models import Assessment, InvalidOptionSelection, PeerWorkflow, PeerWorkflowItem, AssessmentFeedback
from openassessment.assessment.serializers import (
    AssessmentSerializer, rubric_from_dict, get_assessment_review, AssessmentFeedbackSerializer
)
from submissions.api import get_submission_and_student
from submissions.models import Submission, StudentItem
from submissions.serializers import SubmissionSerializer, StudentItemSerializer
...
@@ -516,6 +517,30 @@ def create_peer_workflow(submission_uuid):
        raise PeerAssessmentInternalError(error_message)


def create_peer_workflow_item(scorer_id, submission_uuid):
    """
    Begin peer-assessing a particular submission.

    Note that this does NOT pick the submission from the prioritized list
    of available submissions.  Mainly useful for testing.

    Args:
        scorer_id (str): The ID of the scoring student.
        submission_uuid (str): The unique identifier of the submission being scored

    Returns:
        None

    Raises:
        PeerAssessmentWorkflowError: Could not find the workflow for the student.
        PeerAssessmentInternalError: Could not create the peer workflow item.

    """
    submission = get_submission_and_student(submission_uuid)
    student_item_dict = copy.copy(submission['student_item'])
    student_item_dict['student_id'] = scorer_id
    workflow = _get_latest_workflow(student_item_dict)
    _create_peer_workflow_item(workflow, submission_uuid)


def _get_latest_workflow(student_item_dict):
    """Given a student item, return the current workflow for this student.
...
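As a minimal usage sketch of the new helper (all identifiers and text below are placeholders; the setup mirrors the management command added in this commit, since the scorer needs a submission and workflow of their own before create_peer_workflow_item can find them):

# Hypothetical sketch: point a specific scorer at a specific submission,
# bypassing the prioritized queue of available submissions.
from submissions import api as sub_api
from openassessment.workflow import api as workflow_api
from openassessment.assessment import peer_api

author_item = {'student_id': 'author', 'course_id': 'demo_course',
               'item_id': 'demo_item', 'item_type': 'openassessment'}
scorer_item = dict(author_item, student_id='scorer')

# Both students submit and get workflows (mirroring the management command's setup).
author_sub = sub_api.create_submission(author_item, 'Essay under review.')
workflow_api.create_workflow(author_sub['uuid'])
scorer_sub = sub_api.create_submission(scorer_item, 'The scorer also submits first.')
workflow_api.create_workflow(scorer_sub['uuid'])

# The scorer is assigned the author's submission directly.
peer_api.create_peer_workflow_item('scorer', author_sub['uuid'])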
apps/openassessment/management/__init__.py (new file, empty)

apps/openassessment/management/commands/__init__.py (new file, empty)
apps/openassessment/management/commands/create_oa_submissions.py (new file @ ccace947)

"""
Create dummy submissions and assessments for testing.
"""
from uuid import uuid4
import copy

from django.core.management.base import BaseCommand, CommandError
import loremipsum

from submissions import api as sub_api
from openassessment.workflow import api as workflow_api
from openassessment.assessment import peer_api, self_api


class Command(BaseCommand):
    """
    Create dummy submissions and assessments for testing.

    This will generate fake (lorem ipsum) data for:
        * Submission response text
        * Assessment rubric definition
        * Assessment rubric scores
        * Assessment feedback
    """

    help = 'Create dummy submissions and assessments'
    args = '<COURSE_ID> <ITEM_ID> <NUM_SUBMISSIONS>'

    # Number of peer assessments to create per submission
    NUM_PEER_ASSESSMENTS = 3

    # Number of criteria / options in each rubric
    NUM_CRITERIA = 5
    NUM_OPTIONS = 5

    def __init__(self, *args, **kwargs):
        super(Command, self).__init__(*args, **kwargs)
        self._student_items = list()

    def handle(self, *args, **options):
        """
        Execute the command.

        Args:
            course_id (unicode): The ID of the course to create submissions for.
            item_id (unicode): The ID of the item in the course to create submissions for.
            num_submissions (int): Number of submissions to create.
        """
        if len(args) < 3:
            raise CommandError('Usage: create_oa_submissions <COURSE_ID> <ITEM_ID> <NUM_SUBMISSIONS>')

        course_id = unicode(args[0])
        item_id = unicode(args[1])

        try:
            num_submissions = int(args[2])
        except ValueError:
            raise CommandError('Number of submissions must be an integer')

        print u"Creating {num} submissions for {item} in {course}".format(
            num=num_submissions, item=item_id, course=course_id
        )

        for sub_num in range(num_submissions):

            print "Creating submission {num}".format(num=sub_num)

            # Create a dummy submission
            student_item = {
                'student_id': uuid4().hex[0:10],
                'course_id': course_id,
                'item_id': item_id,
                'item_type': 'openassessment'
            }
            submission_uuid = self._create_dummy_submission(student_item)
            self._student_items.append(student_item)

            # Create a dummy rubric
            rubric, options_selected = self._dummy_rubric()

            # Create peer assessments
            for num in range(self.NUM_PEER_ASSESSMENTS):

                print "-- Creating peer-assessment {num}".format(num=num)

                scorer_id = 'test_{num}'.format(num=num)

                # The scorer needs to make a submission before assessing
                scorer_student_item = copy.copy(student_item)
                scorer_student_item['student_id'] = scorer_id
                self._create_dummy_submission(scorer_student_item)

                # Retrieve the submission we want to score
                # Note that we are NOT using the priority queue here, since we know
                # exactly which submission we want to score.
                peer_api.create_peer_workflow_item(scorer_id, submission_uuid)

                # Create the peer assessment
                assessment = {
                    'options_selected': options_selected,
                    'feedback': loremipsum.get_paragraphs(2)
                }
                peer_api.create_assessment(submission_uuid, scorer_id, assessment, rubric)

            # Create a self-assessment
            print "-- Creating self assessment"
            self_api.create_assessment(
                submission_uuid, student_item['student_id'], options_selected, rubric
            )

    @property
    def student_items(self):
        """
        Return the list of student items created by the command.
        This is used for testing the command.

        Returns:
            list of serialized StudentItem models
        """
        return self._student_items

    def _create_dummy_submission(self, student_item):
        """
        Create a dummy submission for a student.

        Args:
            student_item (dict): Serialized StudentItem model.

        Returns:
            str: submission UUID
        """
        submission = sub_api.create_submission(student_item, loremipsum.get_paragraphs(5))
        workflow_api.create_workflow(submission['uuid'])
        workflow_api.update_from_assessments(
            submission['uuid'], {'peer': {'must_grade': 1, 'must_be_graded_by': 1}}
        )
        return submission['uuid']

    def _dummy_rubric(self):
        """
        Randomly generate a rubric and select options from it.

        Returns:
            rubric (dict)
            options_selected (dict)
        """
        rubric = {'criteria': list()}
        options_selected = dict()
        words = loremipsum.Generator().words

        for criteria_num in range(self.NUM_CRITERIA):
            criterion = {
                'name': words[criteria_num],
                'prompt': loremipsum.get_sentences(1),
                'order_num': criteria_num,
                'options': list()
            }

            for option_num in range(self.NUM_OPTIONS):
                criterion['options'].append({
                    'order_num': option_num,
                    'points': option_num,
                    'name': words[option_num],
                    'explanation': loremipsum.get_sentences(1)
                })

            rubric['criteria'].append(criterion)
            options_selected[criterion['name']] = criterion['options'][0]['name']

        return rubric, options_selected
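For reference, the data _dummy_rubric() hands back has roughly the following shape (the lorem ipsum words are illustrative placeholders; only one criterion and one option are spelled out):

# Illustrative shape only -- names come from loremipsum's word list at runtime.
rubric = {
    'criteria': [
        {
            'name': 'lorem',
            'prompt': ['Lorem ipsum dolor sit amet.'],   # get_sentences(1) returns a list
            'order_num': 0,
            'options': [
                {'order_num': 0, 'points': 0, 'name': 'lorem',
                 'explanation': ['Consectetur adipiscing elit.']},
                # ... NUM_OPTIONS entries per criterion
            ],
        },
        # ... NUM_CRITERIA entries in total
    ],
}

# The first (zero-point) option is selected for every criterion.
options_selected = {'lorem': 'lorem'}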
apps/openassessment/management/tests/__init__.py (new file, empty)

apps/openassessment/management/tests/test_create_oa_submissions.py (new file @ ccace947)

"""
Tests for the management command that creates dummy submissions.
"""
from submissions import api as sub_api
from openassessment.assessment import peer_api, self_api
from openassessment.management.commands import create_oa_submissions
from django.test import TestCase


class CreateSubmissionsTest(TestCase):

    def test_create_submissions(self):
        # Create some submissions
        cmd = create_oa_submissions.Command()
        cmd.handle("test_course", "test_item", "5")

        self.assertEqual(len(cmd.student_items), 5)
        for student_item in cmd.student_items:

            # Check that the student item was created for the right course / item
            self.assertEqual(student_item['course_id'], 'test_course')
            self.assertEqual(student_item['item_id'], 'test_item')

            # Check that a submission was created
            submissions = sub_api.get_submissions(student_item)
            self.assertEqual(len(submissions), 1)
            self.assertGreater(len(submissions[0]['answer']), 0)

            # Check that peer and self assessments were created
            assessments = peer_api.get_assessments(submissions[0]['uuid'])

            # Verify that the assessments exist and have content
            # TODO: currently peer_api.get_assessments() returns both peer- and self-assessments
            # When this call gets split, we'll need to update the test
            self.assertEqual(len(assessments), cmd.NUM_PEER_ASSESSMENTS + 1)

            for assessment in assessments:
                self.assertGreater(assessment['points_possible'], 0)

            # Check that a self-assessment was created
            submission, assessment = self_api.get_submission_and_assessment(submissions[0]['uuid'])

            # Verify that the assessment exists and has content
            self.assertIsNot(submission, None)
            self.assertIsNot(assessment, None)
            self.assertGreater(assessment['points_possible'], 0)
requirements/base.txt (view file @ ccace947)

...
@@ -8,6 +8,7 @@ django==1.4.8
 django-extensions==1.2.5
 django-model-utils==1.4.0
 djangorestframework==2.3.5
+loremipsum==1.0.2
 python-dateutil==2.1
 pytz==2012h
 South==0.7.6
requirements/perf.txt (view file @ ccace947)

 -r base.txt
 locustio==0.7.0
-loremipsum==1.0.2
 pyzmq==14.0.1
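loremipsum moves out of the performance-test requirements and into the base requirements because the new management command imports it at module level. A short sketch of the calls the command relies on (descriptions reflect how the command uses them, not a full tour of the library):

import loremipsum

# Submission text and assessment feedback: lists of lorem ipsum paragraphs.
paragraphs = loremipsum.get_paragraphs(2)

# Rubric prompts and option explanations: a list containing a single sentence.
sentence = loremipsum.get_sentences(1)

# Criterion and option names are drawn from the generator's word list.
words = loremipsum.Generator().words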