edx / edx-ora2

Commit 58346996, authored May 29, 2014 by Will Daly

Merge pull request #374 from edx/will/simulate-grading-error-cmd

Will/simulate grading error cmd

Parents: 1bef091d 9347baf3
Showing 4 changed files, with 164 additions and 3 deletions:

  apps/openassessment/management/commands/simulate_ai_grading_error.py     +121  -0
  apps/openassessment/management/commands/upload_oa_data.py                  +1   -1
  apps/openassessment/management/tests/test_simulate_ai_grading_error.py    +40   -0
  apps/openassessment/management/tests/test_upload_oa_data.py                +2   -2
apps/openassessment/management/commands/simulate_ai_grading_error.py (new file, 0 → 100644)

# -*- coding: utf-8 -*-
"""
Simulate failure of the worker AI grading tasks.

When the workers fail to successfully complete AI grading,
the AI grading workflow in the database will never be marked complete.
To simulate the error condition, therefore, we create incomplete
AI grading workflows without scheduling a grading task.

To recover, a staff member can reschedule incomplete grading tasks.
"""
from django.core.management.base import BaseCommand, CommandError
from submissions import api as sub_api
from openassessment.assessment.models import AIGradingWorkflow, AIClassifierSet
from openassessment.assessment.serializers import rubric_from_dict


class Command(BaseCommand):
    """
    Create submissions and incomplete AI grading workflows.
    """

    help = (
        u"Simulate failure of the worker AI grading tasks "
        u"by creating incomplete AI grading workflows in the database."
    )

    args = '<COURSE_ID> <PROBLEM_ID> <NUM_SUBMISSIONS>'

    RUBRIC_OPTIONS = [
        {
            "order_num": 0,
            "name": u"poor",
            "explanation": u"Poor job!",
            "points": 0,
        },
        {
            "order_num": 1,
            "name": u"good",
            "explanation": u"Good job!",
            "points": 1,
        }
    ]

    RUBRIC = {
        'prompt': u"Test prompt",
        'criteria': [
            {
                "order_num": 0,
                "name": u"vocabulary",
                "prompt": u"Vocabulary",
                "options": RUBRIC_OPTIONS
            },
            {
                "order_num": 1,
                "name": u"grammar",
                "prompt": u"Grammar",
                "options": RUBRIC_OPTIONS
            }
        ]
    }

    # Since we're not actually running an AI scoring algorithm,
    # we can use dummy data for the classifier, as long as it's
    # JSON-serializable.
    CLASSIFIERS = {
        u'vocabulary': {},
        u'grammar': {}
    }

    ALGORITHM_ID = u'fake'
    STUDENT_ID = u'test_student'
    ANSWER = {'answer': 'test answer'}

    def handle(self, *args, **options):
        """
        Execute the command.

        Args:
            course_id (unicode): The ID of the course to create submissions/workflows in.
            item_id (unicode): The ID of the problem in the course.
            num_submissions (int): The number of submissions/workflows to create.

        Raises:
            CommandError
        """
        if len(args) < 3:
            raise CommandError(u"Usage: simulate_ai_grading_error {}".format(self.args))

        # Parse arguments
        course_id = args[0].decode('utf-8')
        item_id = args[1].decode('utf-8')
        num_submissions = int(args[2])

        # Create the rubric model
        rubric = rubric_from_dict(self.RUBRIC)

        # Create the classifier set
        classifier_set = AIClassifierSet.create_classifier_set(
            self.CLASSIFIERS, rubric, self.ALGORITHM_ID
        )

        # Create submissions and grading workflows
        for num in range(num_submissions):
            student_item = {
                'course_id': course_id,
                'item_id': item_id,
                'item_type': 'openassessment',
                'student_id': "{base}_{num}".format(base=self.STUDENT_ID, num=num)
            }
            submission = sub_api.create_submission(student_item, self.ANSWER)
            workflow = AIGradingWorkflow.start_workflow(
                submission['uuid'], self.RUBRIC, self.ALGORITHM_ID
            )
            workflow.classifier_set = classifier_set
            workflow.save()
            print u"{num}: Created incomplete grading workflow with UUID {uuid}".format(
                num=num,
                uuid=workflow.uuid
            )
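For reference, the command is invoked like any other Django management command, by module name with the three positional arguments declared in args above; the course and item IDs here are placeholders, not values taken from this commit:

    ./manage.py simulate_ai_grading_error "test/course/id" "test_item_id" 10

Each pass through the loop creates one submission and one AIGradingWorkflow that has a classifier set assigned but no completed_at timestamp, which is the state a worker would leave behind if it died mid-grading.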
apps/openassessment/management/commands/upload_oa_data.py

...
@@ -52,7 +52,7 @@ class Command(BaseCommand):
         Execute the command.

         Args:
-            course_id (unicode): The ID of the course to create submissions for.
+            course_id (unicode): The ID of the course to use.
             s3_bucket_name (unicode): The name of the S3 bucket to upload to.

         Raises:
...
apps/openassessment/management/tests/test_simulate_ai_grading_error.py (new file, 0 → 100644)

# -*- coding: utf-8 -*-
"""
Tests for the simulate AI grading error management command.
"""
from openassessment.test_utils import CacheResetTest
from openassessment.management.commands import simulate_ai_grading_error
from openassessment.assessment.models import AIGradingWorkflow


class SimulateAIGradingErrorTest(CacheResetTest):
    """
    Tests for the simulate AI grading error management command.
    """

    COURSE_ID = u"TɘꙅT ↄoUᴙꙅɘ"
    ITEM_ID = u"𝖙𝖊𝖘𝖙 𝖎𝖙𝖊𝖒"
    NUM_SUBMISSIONS = 20

    def test_simulate_ai_grading_error(self):
        # Run the command
        cmd = simulate_ai_grading_error.Command()
        cmd.handle(
            self.COURSE_ID.encode('utf-8'),
            self.ITEM_ID.encode('utf-8'),
            self.NUM_SUBMISSIONS
        )

        # Check that the correct number of incomplete workflows
        # were created.  These workflows should still have
        # a classifier set, though, because otherwise they
        # wouldn't have been scheduled for grading
        # (that is, the submissions were made before classifier
        # training completed).
        num_errors = AIGradingWorkflow.objects.filter(
            classifier_set__isnull=False,
            completed_at__isnull=True
        ).count()
        self.assertEqual(self.NUM_SUBMISSIONS, num_errors)
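As a side note (not part of the commit), the same query the test uses doubles as a quick way to find the simulated failures from a Django shell, for example before exercising the staff recovery path mentioned in the command's docstring; the filter values below come straight from the test above:

    from openassessment.assessment.models import AIGradingWorkflow

    # Workflows that were scheduled (they have a classifier set) but never
    # finished grading -- the error condition the command simulates.
    stuck = AIGradingWorkflow.objects.filter(
        classifier_set__isnull=False,
        completed_at__isnull=True,
    )
    print stuck.count()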
apps/openassessment/management/tests/test_upload_oa_data.py

...
@@ -4,15 +4,15 @@ Tests for management command that uploads submission/assessment data.
 """
 from StringIO import StringIO
 import tarfile
-from django.test import TestCase
 import boto
 import moto
+from openassessment.test_utils import CacheResetTest
 from openassessment.management.commands import upload_oa_data
 from openassessment.workflow import api as workflow_api
 from submissions import api as sub_api


-class UploadDataTest(TestCase):
+class UploadDataTest(CacheResetTest):
     """
     Test the upload management command.  Archiving and upload are in-scope,
     but the contents of the generated CSV files are tested elsewhere.
...
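The boto and moto imports visible in the diff context suggest the upload tests stub out S3 in-process rather than touching AWS. A minimal sketch of that standard moto pattern, with a hypothetical bucket name and key (the actual test bodies are collapsed out of this diff):

    import boto
    import moto

    @moto.mock_s3
    def upload_round_trip():
        # moto intercepts boto's S3 calls, so nothing leaves the process.
        conn = boto.connect_s3()
        bucket = conn.create_bucket('hypothetical-bucket')
        key = bucket.new_key('oa_data.tar.gz')
        key.set_contents_from_string('fake archive bytes')
        assert bucket.get_key('oa_data.tar.gz') is not None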