edx / edx-ora2 / Commits / be92b732

Commit be92b732, authored Jan 27, 2015 by Usman Khalid

AI grader concatenates parts of multiple submissions.

TNL-708

parent 9a892d0e
Showing 6 changed files with 64 additions and 26 deletions:

    openassessment/assessment/models/ai.py                             +30  -11
    openassessment/assessment/test/constants.py                         +1   -1
    openassessment/assessment/test/test_ai_models.py                   +17   -1
    openassessment/management/commands/simulate_ai_grading_error.py     +1   -1
    openassessment/workflow/test/test_api.py                           +14  -11
    openassessment/xblock/test/test_grade.py                            +1   -1
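For orientation before the diff: the submission "answer" shapes this commit reconciles, as described by the comments added in ai.py below, look roughly like the following (illustrative Python literals, not taken verbatim from the repository):

    # Original single-prompt submission format:
    single_prompt_submission = {'answer': {'text': 'The text.'}}

    # Multi-prompt format; the AI grading workflow now joins the part texts with newlines:
    multi_prompt_submission = {
        'answer': {
            'parts': [
                {'text': 'The text part 1.'},
                {'text': 'The text part 2.'},
            ]
        }
    }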
openassessment/assessment/models/ai.py

@@ -40,6 +40,35 @@ CLASSIFIERS_CACHE_IN_FILE = getattr(
 )
+def essay_text_from_submission(submission):
+    """
+    Retrieve the submission text.
+
+    Submissions are arbitrary JSON-blobs, which *should*
+    contain a single key, "answer", containing the essay
+    submission text.
+
+    If not, though, assume we've been given the essay text
+    directly (convenient for testing).
+    """
+    if isinstance(submission, dict):
+        if 'answer' in submission:
+            # Format used for answer in examples.
+            if isinstance(submission['answer'], unicode):
+                return submission['answer']
+            # Initially there was one prompt and the submission had the structure
+            # {'answer': {'text': 'The text.'}}
+            elif 'text' in submission['answer']:
+                essay_text = submission['answer']['text']
+            # When multiple prompts were introduced, the structure of the submission became:
+            # {'answer': {'parts': [{'text': 'The text part 1.'}, {'text': 'The text part 2.'}]}}
+            # We concatenate these parts and let the AI grader evaluate the combined text.
+            else:
+                essay_text = u'\n'.join([part['text'] for part in submission['answer']['parts']])
+    else:
+        essay_text = unicode(submission)
+    return essay_text
+
+
 class IncompleteClassifierSet(Exception):
     """
     The classifier set is missing a classifier for a criterion in the rubric.

@@ -792,20 +821,10 @@ class AIGradingWorkflow(AIWorkflow):
         from openassessment.assessment.serializers import rubric_from_dict
         rubric = rubric_from_dict(rubric_dict)

-        # Retrieve the submission text
-        # Submissions are arbitrary JSON-blobs, which *should*
-        # contain a single key, "answer", containing the essay
-        # submission text. If not, though, assume we've been
-        # given the essay text directly (convenient for testing).
-        if isinstance(submission, dict):
-            essay_text = submission.get('answer')
-        else:
-            essay_text = unicode(submission)
-
         # Create the workflow
         workflow = cls.objects.create(
             submission_uuid=submission_uuid,
-            essay_text=essay_text,
+            essay_text=essay_text_from_submission(submission),
             algorithm_id=algorithm_id,
             student_id=submission['student_item']['student_id'],
             item_id=submission['student_item']['item_id'],
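A minimal usage sketch of the new helper (assuming Python 2, since the code relies on unicode); the expected values mirror the data-driven test added in test_ai_models.py below:

    from openassessment.assessment.models import essay_text_from_submission

    # Plain text, as used in training examples, passes through unchanged.
    assert essay_text_from_submission(u'Answer') == u'Answer'

    # Single-prompt submission: the 'text' value is returned.
    assert essay_text_from_submission({'answer': {'text': u'Answer'}}) == u'Answer'

    # Multi-prompt submission: the part texts are joined with newlines.
    assert essay_text_from_submission(
        {'answer': {'parts': [{'text': u'Answer 1'}, {'text': u'Answer 2'}]}}
    ) == u'Answer 1\nAnswer 2'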
openassessment/assessment/test/constants.py

@@ -10,7 +10,7 @@ STUDENT_ITEM = {
     'item_type': u'openassessment'
 }

-ANSWER = u'ẗëṡẗ äṅṡẅëṛ'
+ANSWER = {'text': u'ẗëṡẗ äṅṡẅëṛ'}

 RUBRIC_OPTIONS = [
     {
openassessment/assessment/test/test_ai_models.py

@@ -3,11 +3,14 @@
 Test AI Django models.
 """
 import copy
+
+import ddt
+from django.test import TestCase
 from django.test.utils import override_settings

 from openassessment.test_utils import CacheResetTest
 from openassessment.assessment.models import (
     AIClassifierSet, AIClassifier, AIGradingWorkflow, AI_CLASSIFIER_STORAGE,
-    CLASSIFIERS_CACHE_IN_MEM
+    CLASSIFIERS_CACHE_IN_MEM, essay_text_from_submission
 )
 from openassessment.assessment.serializers import rubric_from_dict
 from .constants import RUBRIC

@@ -21,6 +24,19 @@ COURSE_ID = u"†3߆ çøU®ß3"
 ITEM_ID = u"fake_item_id"


+@ddt.ddt
+class DataConversionTest(TestCase):
+
+    @ddt.data(
+        (u'Answer', u'Answer'),
+        ({'answer': {'text': u'Answer'}}, u'Answer'),
+        ({'answer': {'parts': [{'text': u'Answer 1'}, {'text': u'Answer 2'}]}}, u'Answer 1\nAnswer 2')
+    )
+    @ddt.unpack
+    def test_essay_text_from_submission(self, input, output):
+        self.assertEqual(essay_text_from_submission(input), output)
+
+
 class AIClassifierTest(CacheResetTest):
     """
     Tests for the AIClassifier model.
openassessment/management/commands/simulate_ai_grading_error.py

@@ -91,7 +91,7 @@ class Command(BaseCommand):
     }
     STUDENT_ID = u'test_student'
-    ANSWER = {'answer': 'test answer'}
+    ANSWER = {"text": 'test answer'}

     def handle(self, *args, **options):
         """
openassessment/workflow/test/test_api.py

@@ -32,6 +32,9 @@ RUBRIC_DICT = {
     ]
 }

+ANSWER_1 = {"text": "Shoot Hot Rod"}
+ANSWER_2 = {"text": "Ultra Magnus fumble"}
+
 ALGORITHM_ID = "Ease"

 ON_INIT_PARAMS = {
@@ -64,7 +67,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
         first_step = data["steps"][0] if data["steps"] else "peer"
         if "ai" in data["steps"]:
             first_step = data["steps"][1] if len(data["steps"]) > 1 else "waiting"
-        submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
         workflow = workflow_api.create_workflow(submission["uuid"], data["steps"], ON_INIT_PARAMS)

         workflow_keys = set(workflow.keys())
@@ -147,7 +150,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
         self.assertEquals("waiting", workflow['status'])

     def test_update_peer_workflow(self):
-        submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
         workflow = workflow_api.create_workflow(submission["uuid"], ["training", "peer"], ON_INIT_PARAMS)
         StudentTrainingWorkflow.create_workflow(submission_uuid=submission["uuid"])
         requirements = {
@@ -200,7 +203,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     @patch.object(ai_api, 'assessment_is_finished')
     @patch.object(ai_api, 'get_score')
     def test_ai_score_set(self, mock_score, mock_is_finished):
-        submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_2)
         mock_is_finished.return_value = True
         score = {"points_earned": 7, "points_possible": 10}
         mock_score.return_value = score
@@ -213,7 +216,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     @ddt.unpack
     @raises(workflow_api.AssessmentWorkflowInternalError)
     def test_create_ai_workflow_no_rubric(self, rubric, algorithm_id):
-        submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
         on_init_params = {
             'ai': {
                 'rubric': rubric,
@@ -226,7 +229,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     @raises(workflow_api.AssessmentWorkflowInternalError)
     def test_ai_on_init_failures(self, mock_on_init):
         mock_on_init.side_effect = AIError("Kaboom!")
-        submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_2)
         workflow_api.create_workflow(submission["uuid"], ["ai"], ON_INIT_PARAMS)

     @patch.object(Submission.objects, 'get')
@@ -241,14 +244,14 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     @raises(workflow_api.AssessmentWorkflowInternalError)
     def test_unexpected_workflow_errors_wrapped(self, data, mock_create):
         mock_create.side_effect = DatabaseError("Kaboom!")
-        submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_2)
         workflow_api.create_workflow(submission["uuid"], data["steps"], ON_INIT_PARAMS)

     @patch.object(PeerWorkflow.objects, 'get_or_create')
     @raises(workflow_api.AssessmentWorkflowInternalError)
     def test_unexpected_peer_workflow_errors_wrapped(self, mock_create):
         mock_create.side_effect = DatabaseError("Kaboom!")
-        submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_2)
         workflow_api.create_workflow(submission["uuid"], ["peer", "self"], ON_INIT_PARAMS)

     @patch.object(AssessmentWorkflow.objects, 'get')
@@ -256,7 +259,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     @raises(workflow_api.AssessmentWorkflowInternalError)
     def test_unexpected_exception_wrapped(self, data, mock_create):
         mock_create.side_effect = Exception("Kaboom!")
-        submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_2)
         workflow_api.update_from_assessments(submission["uuid"], data["steps"])

     @ddt.file_data('data/assessments.json')
@@ -363,7 +366,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     def test_cancel_the_assessment_workflow(self):
         # Create the submission and assessment workflow.
-        submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
         workflow = workflow_api.create_workflow(submission["uuid"], ["peer"])
         requirements = {
@@ -403,7 +406,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     def test_cancel_the_assessment_workflow_does_not_exist(self):
         # Create the submission and assessment workflow.
-        submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
         workflow = workflow_api.create_workflow(submission["uuid"], ["peer"])
         requirements = {
@@ -432,7 +435,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     def test_get_the_cancelled_workflow(self):
         # Create the submission and assessment workflow.
-        submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
         workflow = workflow_api.create_workflow(submission["uuid"], ["peer"])
         requirements = {
openassessment/xblock/test/test_grade.py

@@ -150,7 +150,7 @@ class TestGrade(XBlockHandlerTestCase):
         resp = self.request(xblock, 'render_grade', json.dumps(dict()))

         # Verify that feedback from each scorer appears in the view
         self.assertNotIn(u'єאςєɭɭєภՇ', resp.decode('utf-8'))
-        self.assertIn(u'Good', resp.decode('utf-8'))
+        self.assertIn(u'Poor', resp.decode('utf-8'))

         # Verify that the submission and peer steps show that we're graded
         # This isn't strictly speaking part of the grade step rendering,