edx / edx-ora2 · Commit 269a465a
Authored Mar 12, 2014 by Will Daly

Merge pull request #115 from edx/will/revert-management-cmd

Revert management cmd

Parents: 30b77ac9, 2a21df49

Showing 8 changed files with 1 addition and 288 deletions:
  apps/openassessment/assessment/peer_api.py                          +0   -26
  apps/openassessment/assessment/test/test_peer.py                    +1    -1
  apps/openassessment/management/__init__.py                          +0    -0
  apps/openassessment/management/commands/__init__.py                 +0    -0
  apps/openassessment/management/commands/create_oa_submission.py     +0  -172
  apps/openassessment/management/tests/__init__.py                    +0    -0
  apps/openassessment/management/tests/test_create_oa_submission.py   +0   -88
  requirements/base.txt                                               +0    -1
apps/openassessment/assessment/peer_api.py

```diff
@@ -16,7 +16,6 @@ from pytz import UTC
 from openassessment.assessment.models import Assessment, InvalidOptionSelection, PeerWorkflow, PeerWorkflowItem
 from openassessment.assessment.serializers import (
     AssessmentSerializer, rubric_from_dict, get_assessment_review
 )
-from submissions.api import get_submission_and_student
 from submissions.models import Submission, StudentItem
 from submissions.serializers import SubmissionSerializer, StudentItemSerializer
@@ -118,7 +117,6 @@ def create_assessment(
             is required to create an assessment on a submission.
         assessment_dict (dict): All related information for the assessment. An
             assessment contains points_earned, points_possible, and feedback.
-        rubric_dict (dict): A serialized Rubric model.

     Kwargs:
         scored_at (datetime): Optional argument to override the time in which
@@ -484,30 +482,6 @@ def create_peer_workflow(submission_uuid):
         raise PeerAssessmentInternalError(error_message)
 
-
-def create_peer_workflow_item(scorer_id, submission_uuid):
-    """
-    Begin peer-assessing a particular submission.
-
-    Note that this does NOT pick the submission from the prioritized list of available submissions.
-    Mainly useful for testing.
-
-    Args:
-        scorer_id (str): The ID of the scoring student.
-        submission_uuid (str): The unique identifier of the submission being scored
-
-    Returns:
-        None
-
-    Raises:
-        PeerAssessmentWorkflowError: Could not find the workflow for the student.
-        PeerAssessmentInternalError: Could not create the peer workflow item.
-    """
-    submission = get_submission_and_student(submission_uuid)
-    student_item_dict = copy.copy(submission['student_item'])
-    student_item_dict['student_id'] = scorer_id
-    workflow = _get_latest_workflow(student_item_dict)
-    _create_peer_workflow_item(workflow, submission_uuid)
-
-
 def _get_latest_workflow(student_item_dict):
     """Given a student item, return the current workflow for this student.
```
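For context on what this hunk deletes: `create_peer_workflow_item` bypassed the prioritized queue so a known scorer could be attached to a known submission. A minimal sketch of a call site, assuming the submissions API that appears elsewhere in this diff (the IDs are placeholders, not values from this commit; the deleted management command below does essentially this):

```python
from submissions import api as sub_api
from openassessment.assessment import peer_api

# Hypothetical student_item dict (a serialized StudentItem).
student_item = {
    'student_id': 'author',
    'course_id': 'demo_course',
    'item_id': 'demo_item',
    'item_type': 'openassessment',
}

# Create a submission, then pin scorer "test_0" directly to it
# instead of pulling a submission from the prioritized list.
submission = sub_api.create_submission(student_item, "An answer.")
peer_api.create_peer_workflow_item('test_0', submission['uuid'])
```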
apps/openassessment/assessment/test/test_peer.py

```diff
@@ -254,7 +254,7 @@ class TestPeerApi(TestCase):
                 "must_be_graded_by": REQUIRED_GRADED_BY,
             }
         }

         # 1) Angel Submits
         angel_sub, angel = self._create_student_and_submission("Angel", "Angel's answer")
```
apps/openassessment/management/__init__.py
deleted 100644 → 0

apps/openassessment/management/commands/__init__.py
deleted 100644 → 0

apps/openassessment/management/commands/create_oa_submission.py
deleted 100644 → 0 (full contents below)
"""
Create dummy submissions and assessments for testing.
"""
import
copy
from
optparse
import
make_option
from
django.core.management.base
import
BaseCommand
,
CommandError
import
loremipsum
from
submissions
import
api
as
sub_api
from
openassessment.workflow
import
api
as
workflow_api
from
openassessment.assessment
import
peer_api
,
self_api
class
Command
(
BaseCommand
):
"""
Create dummy submissions and assessments for testing.
This will generate fake (lorem ipsum) data for:
* Submission response text
* Assessment rubric definition
* Assessment rubric scores
* Assessment feedback
"""
help
=
'Create dummy submissions and assessments'
args
=
'<USER_ID> <COURSE_ID> <ITEM_ID>'
option_list
=
BaseCommand
.
option_list
+
(
make_option
(
'-p'
,
'--peer-assessments'
,
dest
=
'num_peer_assessments'
,
action
=
'store'
,
default
=
0
,
type
=
int
,
help
=
'Number of peer assessments to create for the submission'
),
make_option
(
'-s'
,
'--self-assessment'
,
dest
=
'has_self_assessment'
,
action
=
'store_true'
,
default
=
False
,
help
=
'If true, create a self-assessment for the submission'
),
)
REQUIRED_NUM_ARGS
=
3
DUMMY_RUBRIC
=
{
'criteria'
:
[
{
'name'
:
"Ideas"
,
'prompt'
:
"Determine if there is a unifying theme or main idea."
,
'order_num'
:
0
,
'options'
:
[
{
'order_num'
:
0
,
'points'
:
0
,
'name'
:
'Poor'
,
'explanation'
:
"""Difficult for the reader to discern the main idea.
Too brief or too repetitive to establish or maintain a focus."""
},
{
'order_num'
:
1
,
'points'
:
3
,
'name'
:
'Fair'
,
'explanation'
:
"""Presents a unifying theme or main idea, but may
include minor tangents. Stays somewhat focused on topic and
task."""
},
{
'order_num'
:
2
,
'points'
:
5
,
'name'
:
'Good'
,
'explanation'
:
"""Presents a unifying theme or main idea without going
off on tangents. Stays completely focused on topic and task."""
},
],
},
{
'name'
:
"Content"
,
'prompt'
:
"Assess the content of the submission"
,
'order_num'
:
1
,
'options'
:
[
{
'order_num'
:
0
,
'points'
:
0
,
'name'
:
'Poor'
,
'explanation'
:
"""Includes little information with few or no details or
unrelated details. Unsuccessful in attempts to explore any
facets of the topic."""
},
{
'order_num'
:
1
,
'points'
:
1
,
'name'
:
'Fair'
,
'explanation'
:
"""Includes little information and few or no details.
Explores only one or two facets of the topic."""
},
{
'order_num'
:
2
,
'points'
:
3
,
'name'
:
'Good'
,
'explanation'
:
"""Includes sufficient information and supporting
details. (Details may not be fully developed; ideas may be
listed.) Explores some facets of the topic."""
},
{
'order_num'
:
3
,
'points'
:
3
,
'name'
:
'Excellent'
,
'explanation'
:
"""Includes in-depth information and exceptional
supporting details that are fully developed. Explores all
facets of the topic."""
},
],
},
]
}
def
handle
(
self
,
*
args
,
**
options
):
"""
Execute the command.
Args:
user_id (str): Unique ID of the user creating the submission.
course_id (str): Unique ID of the course in which to create the submission.
item_id (str): Unique ID of the item in the course for which to create the submission.
Kwargs:
num_peer_assessments (int): Number of peer assessments to create for the submission.
has_self_assessment (bool): If true, create a self-assessment for the submission.
"""
# Verify that we have the correct number of positional args
if
len
(
args
)
<
self
.
REQUIRED_NUM_ARGS
:
raise
CommandError
(
'Usage: create_oa_submission <USER_ID> <COURSE_ID> <ITEM_ID>'
)
# Create the submission
student_item
=
{
'student_id'
:
args
[
0
],
'course_id'
:
args
[
1
],
'item_id'
:
args
[
2
],
'item_type'
:
'openassessment'
}
submission_uuid
=
self
.
_create_dummy_submission
(
student_item
)
# Create peer assessments
for
num
in
range
(
options
[
'num_peer_assessments'
]):
scorer_id
=
'test_{num}'
.
format
(
num
=
num
)
# The scorer needs to make a submission before assessing
scorer_student_item
=
copy
.
copy
(
student_item
)
scorer_student_item
[
'student_id'
]
=
scorer_id
self
.
_create_dummy_submission
(
scorer_student_item
)
# Retrieve the submission we want to score
# Note that we are NOT using the priority queue here, since we know
# exactly which submission we want to score.
peer_api
.
create_peer_workflow_item
(
scorer_id
,
submission_uuid
)
# Create the peer assessment
assessment
=
{
'options_selected'
:
{
'Ideas'
:
'Poor'
,
'Content'
:
'Good'
},
'feedback'
:
loremipsum
.
get_paragraphs
(
2
)
}
peer_api
.
create_assessment
(
submission_uuid
,
scorer_id
,
assessment
,
self
.
DUMMY_RUBRIC
)
# Create self-assessment
if
options
[
'has_self_assessment'
]:
self_api
.
create_assessment
(
submission_uuid
,
student_item
[
'student_id'
],
{
'Ideas'
:
'Good'
,
'Content'
:
'Excellent'
},
self
.
DUMMY_RUBRIC
)
def
_create_dummy_submission
(
self
,
student_item
):
"""
Create a dummy submission for a student.
Args:
student_item (dict): Serialized StudentItem model.
Returns:
str: submission UUID
"""
submission
=
sub_api
.
create_submission
(
student_item
,
loremipsum
.
get_paragraphs
(
5
))
workflow_api
.
create_workflow
(
submission
[
'uuid'
])
workflow_api
.
update_from_assessments
(
submission
[
'uuid'
],
{
'peer'
:
{
'must_grade'
:
1
,
'must_be_graded_by'
:
1
}}
)
return
submission
[
'uuid'
]
\ No newline at end of file
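As a usage sketch of the command this commit deletes: the invocation below mirrors the test suite that follows; the user, course, and item IDs are placeholders, and only the command name, positional args, and option names come from the file above.

```python
# Sketch: drive the (now-deleted) management command programmatically,
# the same way its tests do. From a shell it would be roughly:
#   python manage.py create_oa_submission <USER_ID> <COURSE_ID> <ITEM_ID> -p 2 -s
from django.core.management import call_command

call_command(
    'create_oa_submission',
    'test_user', 'test_course', 'test_problem',  # USER_ID, COURSE_ID, ITEM_ID
    num_peer_assessments=2,     # -p / --peer-assessments
    has_self_assessment=True,   # -s / --self-assessment
)
```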
apps/openassessment/management/tests/__init__.py
deleted 100644 → 0

apps/openassessment/management/tests/test_create_oa_submission.py
deleted 100644 → 0 (full contents below)
"""
Tests for the management command that creates dummy submissions.
"""
import
copy
from
submissions
import
api
as
sub_api
from
openassessment.assessment
import
peer_api
,
self_api
from
django.core.management
import
call_command
from
django.test
import
TestCase
class
CreateScenarioTest
(
TestCase
):
def
test_create_submission
(
self
):
call_command
(
'create_oa_submission'
,
'test_user'
,
'test_course'
,
'test_problem'
)
submissions
=
sub_api
.
get_submissions
({
'student_id'
:
'test_user'
,
'course_id'
:
'test_course'
,
'item_id'
:
'test_problem'
,
'item_type'
:
'openassessment'
,
})
self
.
assertEqual
(
len
(
submissions
),
1
)
self
.
assertGreater
(
len
(
submissions
[
0
][
'answer'
]),
0
)
def
test_create_peer_assessments
(
self
):
# Create a submission with peer assessments
call_command
(
'create_oa_submission'
,
'test_user'
,
'test_course'
,
'test_problem'
,
num_peer_assessments
=
2
)
# Retrieve the submission
submissions
=
sub_api
.
get_submissions
({
'student_id'
:
'test_user'
,
'course_id'
:
'test_course'
,
'item_id'
:
'test_problem'
,
'item_type'
:
'openassessment'
,
},
limit
=
1
)
self
.
assertEqual
(
len
(
submissions
),
1
)
# Retrieve the peer assessments
assessments
=
peer_api
.
get_assessments
(
submissions
[
0
][
'uuid'
])
# Verify that the assessments exist and have content
self
.
assertEqual
(
len
(
assessments
),
2
)
for
assessment
in
assessments
:
self
.
assertGreater
(
assessment
[
'points_possible'
],
0
)
self
.
assertGreater
(
len
(
assessment
[
'feedback'
]),
0
)
def
test_create_self_assessment
(
self
):
# Create a submission with a self-assessment
call_command
(
'create_oa_submission'
,
'test_user'
,
'test_course'
,
'test_problem'
,
has_self_assessment
=
True
)
# Retrieve the submission
submissions
=
sub_api
.
get_submissions
({
'student_id'
:
'test_user'
,
'course_id'
:
'test_course'
,
'item_id'
:
'test_problem'
,
'item_type'
:
'openassessment'
,
},
limit
=
1
)
self
.
assertEqual
(
len
(
submissions
),
1
)
# Retrieve the self assessment
submission
,
assessment
=
self_api
.
get_submission_and_assessment
(
submissions
[
0
][
'uuid'
])
# Verify that the assessment exists and has content
self
.
assertIsNot
(
submission
,
None
)
self
.
assertIsNot
(
assessment
,
None
)
self
.
assertGreater
(
assessment
[
'points_possible'
],
0
)
def
test_missing_args
(
self
):
full_args
=
[
'test_user'
,
'test_course'
,
'test_problem'
]
# Delete each required arg and verify that the command fails
for
index
in
range
(
len
(
full_args
)):
args
=
copy
.
copy
(
full_args
)
del
args
[
index
]
with
self
.
assertRaises
(
SystemExit
)
as
ex
:
call_command
(
'create_oa_submission'
,
*
args
)
self
.
assertEqual
(
ex
.
exception
.
code
,
1
)
\ No newline at end of file
requirements/base.txt

```diff
@@ -8,7 +8,6 @@ django==1.4.8
 django-extensions==1.2.5
 django-model-utils==1.4.0
 djangorestframework==2.3.5
-loremipsum==1.0.2
 Mako==0.9.1
 python-dateutil==2.1
 pytz==2012h
```
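For reference, the dropped `loremipsum` dependency supplied the placeholder-text generator the deleted command relied on. A minimal sketch of the call it used (the exact return shape is an assumption; the command's own tests only check that the stored text is non-empty):

```python
import loremipsum

# get_paragraphs(n) produces n paragraphs of lorem-ipsum text; the
# deleted command stored the result directly as submission answers
# (get_paragraphs(5)) and assessment feedback (get_paragraphs(2)).
paragraphs = loremipsum.get_paragraphs(2)
print(len(paragraphs))  # expected: 2
```

With the command and its tests gone, nothing else in the repo imported `loremipsum`, so the pin could be removed.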