edx / edx-ora2 · Commits

Commit e0f5a8aa
Authored Mar 11, 2014 by Will Daly
Add management command for creating test submissions and assessments
Parent: 5e67d1dc
Showing 10 changed files with 297 additions and 11 deletions:
apps/openassessment/assessment/peer_api.py                          +26   -0
apps/openassessment/assessment/test/test_peer.py                     +1   -1
apps/openassessment/management/__init__.py                           +0   -0
apps/openassessment/management/commands/__init__.py                  +0   -0
apps/openassessment/management/commands/create_oa_submission.py    +172   -0
apps/openassessment/management/tests/__init__.py                     +0   -0
apps/openassessment/management/tests/test_create_oa_submission.py   +84   -0
apps/openassessment/xblock/openassessmentblock.py                   +12   -4
apps/openassessment/xblock/peer_assessment_mixin.py                  +1   -6
requirements/base.txt                                                +1   -0
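A quick way to exercise the new command is through Django's call_command, mirroring the tests added in this commit. A minimal sketch, assuming a configured Django environment for this project; the user/course/item IDs are placeholders:

from django.core.management import call_command

# Create a dummy submission for the given user/course/item, plus two peer
# assessments and a self-assessment (options defined by the command below).
call_command(
    'create_oa_submission', 'test_user', 'test_course', 'test_problem',
    num_peer_assessments=2,
    has_self_assessment=True,
)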
apps/openassessment/assessment/peer_api.py (view file @ e0f5a8aa)
@@ -16,6 +16,7 @@ from pytz import UTC

 from openassessment.assessment.models import Assessment, InvalidOptionSelection, PeerWorkflow, PeerWorkflowItem
 from openassessment.assessment.serializers import (
     AssessmentSerializer, rubric_from_dict, get_assessment_review
 )
+from submissions.api import get_submission_and_student
 from submissions.models import Submission, StudentItem
 from submissions.serializers import SubmissionSerializer, StudentItemSerializer

@@ -117,6 +118,7 @@ def create_assessment(
             is required to create an assessment on a submission.
         assessment_dict (dict): All related information for the assessment. An
             assessment contains points_earned, points_possible, and feedback.
+        rubric_dict (dict): A serialized Rubric model.

     Kwargs:
         scored_at (datetime): Optional argument to override the time in which

@@ -482,6 +484,30 @@ def create_peer_workflow(submission_uuid):
         raise PeerAssessmentInternalError(error_message)


+def create_peer_workflow_item(scorer_id, submission_uuid):
+    """
+    Begin peer-assessing a particular submission.
+    Note that this does NOT pick the submission from the prioritized list of available submissions.
+    Mainly useful for testing.
+
+    Args:
+        scorer_id (str): The ID of the scoring student.
+        submission_uuid (str): The unique identifier of the submission being scored
+
+    Returns:
+        None
+
+    Raises:
+        PeerAssessmentWorkflowError: Could not find the workflow for the student.
+        PeerAssessmentInternalError: Could not create the peer workflow item.
+    """
+    submission = get_submission_and_student(submission_uuid)
+    student_item_dict = copy.copy(submission['student_item'])
+    student_item_dict['student_id'] = scorer_id
+    workflow = _get_latest_workflow(student_item_dict)
+    _create_peer_workflow_item(workflow, submission_uuid)


 def _get_latest_workflow(student_item_dict):
     """Given a student item, return the current workflow for this student.
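The new create_peer_workflow_item API deliberately bypasses the prioritized queue of available submissions, so a test can score a known submission. A minimal sketch of the intended flow, assuming a configured Django test environment; the IDs are placeholders, and the setup mirrors the management command added in this commit:

import copy

from submissions import api as sub_api
from openassessment.workflow import api as workflow_api
from openassessment.assessment import peer_api

student_item = {
    'student_id': 'student', 'course_id': 'demo_course',
    'item_id': 'demo_item', 'item_type': 'openassessment',
}

# The student under test submits, and a workflow is created for the submission.
submission = sub_api.create_submission(student_item, "An answer to assess")
workflow_api.create_workflow(submission['uuid'])

# The scorer must also have a submission before assessing.
scorer_item = copy.copy(student_item)
scorer_item['student_id'] = 'scorer'
scorer_sub = sub_api.create_submission(scorer_item, "The scorer's answer")
workflow_api.create_workflow(scorer_sub['uuid'])

# Target the known submission directly, bypassing the prioritized queue.
peer_api.create_peer_workflow_item('scorer', submission['uuid'])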
apps/openassessment/assessment/test/test_peer.py (view file @ e0f5a8aa)
@@ -254,7 +254,7 @@ class TestPeerApi(TestCase):
                 "must_be_graded_by": REQUIRED_GRADED_BY,
             }
         }

         # 1) Angel Submits
         angel_sub, angel = self._create_student_and_submission("Angel", "Angel's answer")
apps/openassessment/management/__init__.py (new empty file, 0 → 100644, view file @ e0f5a8aa)
apps/openassessment/management/commands/__init__.py (new empty file, 0 → 100644, view file @ e0f5a8aa)
apps/openassessment/management/commands/create_oa_submission.py (new file, 0 → 100644, view file @ e0f5a8aa)
"""
Create dummy submissions and assessments for testing.
"""
import
copy
from
optparse
import
make_option
from
django.core.management.base
import
BaseCommand
,
CommandError
import
loremipsum
from
submissions
import
api
as
sub_api
from
openassessment.workflow
import
api
as
workflow_api
from
openassessment.assessment
import
peer_api
,
self_api
class
Command
(
BaseCommand
):
"""
Create dummy submissions and assessments for testing.
This will generate fake (lorem ipsum) data for:
* Submission response text
* Assessment rubric definition
* Assessment rubric scores
* Assessment feedback
"""
help
=
'Create dummy submissions and assessments'
args
=
'<USER_ID> <COURSE_ID> <ITEM_ID>'
option_list
=
BaseCommand
.
option_list
+
(
make_option
(
'-p'
,
'--peer-assessments'
,
dest
=
'num_peer_assessments'
,
action
=
'store'
,
default
=
0
,
type
=
int
,
help
=
'Number of peer assessments to create for the submission'
),
make_option
(
'-s'
,
'--self-assessment'
,
dest
=
'has_self_assessment'
,
action
=
'store_true'
,
default
=
False
,
help
=
'If true, create a self-assessment for the submission'
),
)
REQUIRED_NUM_ARGS
=
3
DUMMY_RUBRIC
=
{
'criteria'
:
[
{
'name'
:
"Ideas"
,
'prompt'
:
"Determine if there is a unifying theme or main idea."
,
'order_num'
:
0
,
'options'
:
[
{
'order_num'
:
0
,
'points'
:
0
,
'name'
:
'Poor'
,
'explanation'
:
"""Difficult for the reader to discern the main idea.
Too brief or too repetitive to establish or maintain a focus."""
},
{
'order_num'
:
1
,
'points'
:
3
,
'name'
:
'Fair'
,
'explanation'
:
"""Presents a unifying theme or main idea, but may
include minor tangents. Stays somewhat focused on topic and
task."""
},
{
'order_num'
:
2
,
'points'
:
5
,
'name'
:
'Good'
,
'explanation'
:
"""Presents a unifying theme or main idea without going
off on tangents. Stays completely focused on topic and task."""
},
],
},
{
'name'
:
"Content"
,
'prompt'
:
"Assess the content of the submission"
,
'order_num'
:
1
,
'options'
:
[
{
'order_num'
:
0
,
'points'
:
0
,
'name'
:
'Poor'
,
'explanation'
:
"""Includes little information with few or no details or
unrelated details. Unsuccessful in attempts to explore any
facets of the topic."""
},
{
'order_num'
:
1
,
'points'
:
1
,
'name'
:
'Fair'
,
'explanation'
:
"""Includes little information and few or no details.
Explores only one or two facets of the topic."""
},
{
'order_num'
:
2
,
'points'
:
3
,
'name'
:
'Good'
,
'explanation'
:
"""Includes sufficient information and supporting
details. (Details may not be fully developed; ideas may be
listed.) Explores some facets of the topic."""
},
{
'order_num'
:
3
,
'points'
:
3
,
'name'
:
'Excellent'
,
'explanation'
:
"""Includes in-depth information and exceptional
supporting details that are fully developed. Explores all
facets of the topic."""
},
],
},
]
}
def
handle
(
self
,
*
args
,
**
options
):
"""
Execute the command.
Args:
user_id (str): Unique ID of the user creating the submission.
course_id (str): Unique ID of the course in which to create the submission.
item_id (str): Unique ID of the item in the course for which to create the submission.
Kwargs:
num_peer_assessments (int): Number of peer assessments to create for the submission.
has_self_assessment (bool): If true, create a self-assessment for the submission.
"""
# Verify that we have the correct number of positional args
if
len
(
args
)
<
self
.
REQUIRED_NUM_ARGS
:
raise
CommandError
(
'Usage: create_oa_submission <USER_ID> <COURSE_ID> <ITEM_ID>'
)
# Create the submission
student_item
=
{
'student_id'
:
args
[
0
],
'course_id'
:
args
[
1
],
'item_id'
:
args
[
2
],
'item_type'
:
'openassessment'
}
submission_uuid
=
self
.
_create_dummy_submission
(
student_item
)
# Create peer assessments
for
num
in
range
(
options
[
'num_peer_assessments'
]):
scorer_id
=
'test_{num}'
.
format
(
num
=
num
)
# The scorer needs to make a submission before assessing
scorer_student_item
=
copy
.
copy
(
student_item
)
scorer_student_item
[
'student_id'
]
=
scorer_id
self
.
_create_dummy_submission
(
scorer_student_item
)
# Retrieve the submission we want to score
# Note that we are NOT using the priority queue here, since we know
# exactly which submission we want to score.
peer_api
.
create_peer_workflow_item
(
scorer_id
,
submission_uuid
)
# Create the peer assessment
assessment
=
{
'options_selected'
:
{
'Ideas'
:
'Poor'
,
'Content'
:
'Good'
},
'feedback'
:
loremipsum
.
get_paragraphs
(
2
)
}
peer_api
.
create_assessment
(
submission_uuid
,
scorer_id
,
assessment
,
self
.
DUMMY_RUBRIC
)
# Create self-assessment
if
options
[
'has_self_assessment'
]:
self_api
.
create_assessment
(
submission_uuid
,
student_item
[
'student_id'
],
{
'Ideas'
:
'Good'
,
'Content'
:
'Excellent'
},
self
.
DUMMY_RUBRIC
)
def
_create_dummy_submission
(
self
,
student_item
):
"""
Create a dummy submission for a student.
Args:
student_item (dict): Serialized StudentItem model.
Returns:
str: submission UUID
"""
submission
=
sub_api
.
create_submission
(
student_item
,
loremipsum
.
get_paragraphs
(
5
))
workflow_api
.
create_workflow
(
submission
[
'uuid'
])
workflow_api
.
update_from_assessments
(
submission
[
'uuid'
],
{
'peer'
:
{
'must_grade'
:
1
,
'must_be_graded_by'
:
1
}}
)
return
submission
[
'uuid'
]
\ No newline at end of file
apps/openassessment/management/tests/__init__.py (new empty file, 0 → 100644, view file @ e0f5a8aa)
apps/openassessment/management/tests/test_create_oa_submission.py (new file, 0 → 100644, view file @ e0f5a8aa)
"""
Tests for the management command that creates dummy submissions.
"""
import
copy
from
submissions
import
api
as
sub_api
from
openassessment.assessment
import
peer_api
,
self_api
from
django.core.management
import
call_command
from
django.test
import
TestCase
class
CreateScenarioTest
(
TestCase
):
def
test_create_submission
(
self
):
call_command
(
'create_oa_submission'
,
'test_user'
,
'test_course'
,
'test_problem'
)
submissions
=
sub_api
.
get_submissions
({
'student_id'
:
'test_user'
,
'course_id'
:
'test_course'
,
'item_id'
:
'test_problem'
,
'item_type'
:
'openassessment'
,
})
self
.
assertEqual
(
len
(
submissions
),
1
)
self
.
assertGreater
(
len
(
submissions
[
0
][
'answer'
]),
0
)
def
test_create_peer_assessments
(
self
):
# Create a submission with peer assessments
call_command
(
'create_oa_submission'
,
'test_user'
,
'test_course'
,
'test_problem'
,
num_peer_assessments
=
2
)
# Retrieve the submission
submissions
=
sub_api
.
get_submissions
({
'student_id'
:
'test_user'
,
'course_id'
:
'test_course'
,
'item_id'
:
'test_problem'
,
'item_type'
:
'openassessment'
,
},
limit
=
1
)
self
.
assertEqual
(
len
(
submissions
),
1
)
# Retrieve the peer assessments
assessments
=
peer_api
.
get_assessments
(
submissions
[
0
][
'uuid'
])
# Verify that the assessments exist and have content
self
.
assertEqual
(
len
(
assessments
),
2
)
for
assessment
in
assessments
:
self
.
assertGreater
(
assessment
[
'points_possible'
],
0
)
self
.
assertGreater
(
len
(
assessment
[
'feedback'
]),
0
)
def
test_create_self_assessment
(
self
):
# Create a submission with a self-assessment
call_command
(
'create_oa_submission'
,
'test_user'
,
'test_course'
,
'test_problem'
,
has_self_assessment
=
True
)
# Retrieve the self assessment
submission
,
assessment
=
self_api
.
get_submission_and_assessment
({
'student_id'
:
'test_user'
,
'course_id'
:
'test_course'
,
'item_id'
:
'test_problem'
,
'item_type'
:
'openassessment'
,
})
# Verify that the assessment exists and has content
self
.
assertIsNot
(
submission
,
None
)
self
.
assertIsNot
(
assessment
,
None
)
self
.
assertGreater
(
assessment
[
'points_possible'
],
0
)
def
test_missing_args
(
self
):
full_args
=
[
'test_user'
,
'test_course'
,
'test_problem'
]
# Delete each required arg and verify that the command fails
for
index
in
range
(
len
(
full_args
)):
args
=
copy
.
copy
(
full_args
)
del
args
[
index
]
with
self
.
assertRaises
(
SystemExit
)
as
ex
:
call_command
(
'create_oa_submission'
,
*
args
)
self
.
assertEqual
(
ex
.
exception
.
code
,
1
)
\ No newline at end of file
apps/openassessment/xblock/openassessmentblock.py (view file @ e0f5a8aa)
@@ -71,7 +71,7 @@ DEFAULT_RUBRIC_CRITERIA = [
     {
         'name': "Content",
         'prompt': "Assess the content of the submission",
-        'order_num': 0,
+        'order_num': 1,
         'options': [
             {
                 'order_num': 0, 'points': 0, 'name': 'Poor',

@@ -80,18 +80,18 @@ DEFAULT_RUBRIC_CRITERIA = [
                 facets of the topic."""
             },
             {
-                'order_num': 0, 'points': 1, 'name': 'Fair',
+                'order_num': 1, 'points': 1, 'name': 'Fair',
                 'explanation': """Includes little information and few or no details.
                 Explores only one or two facets of the topic."""
             },
             {
-                'order_num': 0, 'points': 3, 'name': 'Good',
+                'order_num': 2, 'points': 3, 'name': 'Good',
                 'explanation': """Includes sufficient information and supporting
                 details. (Details may not be fully developed; ideas may be
                 listed.) Explores some facets of the topic."""
             },
             {
-                'order_num': 0, 'points': 3, 'name': 'Excellent',
+                'order_num': 3, 'points': 3, 'name': 'Excellent',
                 'explanation': """Includes in-depth information and exceptional
                 supporting details that are fully developed. Explores all
                 facets of the topic."""

@@ -467,3 +467,10 @@ class OpenAssessmentBlock(
             }
         }
         return workflow_api.update_from_assessments(submission_uuid, requirements)

+    def get_assessment_module(self, mixin_name):
+        """Get a configured assessment module by name.
+        """
+        for assessment in self.rubric_assessments:
+            if assessment["name"] == mixin_name:
+                return assessment
\ No newline at end of file
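With get_assessment_module moved onto the block itself (it is removed from PeerAssessmentMixin below), any assessment mixin can look up its configuration by the "name" field of an entry in rubric_assessments. A hypothetical lookup; the module name "peer-assessment" and the "must_grade" key are assumptions for illustration, not confirmed by this diff:

# Inside an OpenAssessmentBlock method:
peer_config = self.get_assessment_module("peer-assessment")  # assumed module name
if peer_config is not None:
    must_grade = peer_config["must_grade"]  # hypothetical config key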
apps/openassessment/xblock/peer_assessment_mixin.py (view file @ e0f5a8aa)
@@ -150,9 +150,4 @@ class PeerAssessmentMixin(object):
             logger.exception(err)
         return peer_submission

-    def get_assessment_module(self, mixin_name):
-        """Get a configured assessment module by name.
-        """
-        for assessment in self.rubric_assessments:
-            if assessment["name"] == mixin_name:
-                return assessment
requirements/base.txt (view file @ e0f5a8aa)
@@ -8,6 +8,7 @@ django==1.4.8
 django-extensions==1.2.5
 django-model-utils==1.4.0
 djangorestframework==2.3.5
+loremipsum==1.0.2
 Mako==0.9.1
 python-dateutil==2.1
 pytz==2012h