edx / edx-ora2 · Commits · 5ab1cb6e

Commit 5ab1cb6e authored May 30, 2014 by Stephen Sanchez

Fix the grade mixin to display AI grades

Parent: 1cbe38d0

Showing 6 changed files with 236 additions and 6 deletions (+236 −6)
apps/openassessment/assessment/api/ai.py                            +29 −0
apps/openassessment/assessment/test/test_ai.py                      +19 −0
apps/openassessment/xblock/grade_mixin.py                           +14 −6
apps/openassessment/xblock/test/data/grade_scenario_ai_only.xml     +66 −0
apps/openassessment/xblock/test/data/grade_scenario_self_only.xml   +45 −0
apps/openassessment/xblock/test/test_grade.py                       +63 −0
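Before the per-file diffs, a minimal self-contained sketch of the behavior this commit adds to GradeMixin: per-criterion scores are taken from the peer step when present, otherwise from the self step, otherwise from the new example-based (AI) step. The function name and the api parameters below are hypothetical stand-ins used only for illustration; the actual logic is in apps/openassessment/xblock/grade_mixin.py further down.

    # Hypothetical sketch of the fallback order introduced by this commit.
    # peer_api / self_api / ai_api stand in for the openassessment assessment APIs.
    def pick_scores_by_criteria(assessment_steps, submission_uuid, peer_api, self_api, ai_api):
        """Return a dict of criterion name -> score from the highest-priority step."""
        if "peer-assessment" in assessment_steps:
            return peer_api.get_assessment_median_scores(submission_uuid)
        elif "self-assessment" in assessment_steps:
            return self_api.get_assessment_scores_by_criteria(submission_uuid)
        elif "example-based-assessment" in assessment_steps:
            return ai_api.get_assessment_scores_by_criteria(submission_uuid)
        return None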
apps/openassessment/assessment/api/ai.py

...
@@ -205,6 +205,35 @@ def get_latest_assessment(submission_uuid):
    return None


def get_assessment_scores_by_criteria(submission_uuid):
    """Get the score for each rubric criterion

    Args:
        submission_uuid (str): The submission uuid is used to get the
            assessment used to score this submission.

    Returns:
        (dict): A dictionary of rubric criterion names, with a score of
            the example based assessments.

    Raises:
        AIGradingInternalError: If any error occurs while retrieving
            information from the scores, an error is raised.

    """
    try:
        assessments = list(
            Assessment.objects.filter(
                score_type=AI_ASSESSMENT_TYPE, submission_uuid=submission_uuid
            ).order_by('-scored_at')[:1]
        )
        scores = Assessment.scores_by_criterion(assessments)
        return Assessment.get_median_score_dict(scores)
    except DatabaseError:
        error_message = u"Error getting example-based assessment scores for {}".format(submission_uuid)
        logger.exception(error_message)
        raise AIGradingInternalError(error_message)


def train_classifiers(rubric_dict, examples, course_id, item_id, algorithm_id):
    """
    Schedule a task to train classifiers.
...
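A hedged usage sketch of the new API: given the uuid of a submission that has already been scored by the example-based workflow, get_assessment_scores_by_criteria returns a dict keyed by criterion name, per the docstring above. The submission_uuid value and the example dict contents are assumptions for illustration; the exception type is the one the tests below expect (ai_api.AIGradingInternalError).

    from openassessment.assessment.api import ai as ai_api

    # Assumed placeholder: a submission already graded by the example-based (AI) workflow.
    submission_uuid = "00000000-0000-0000-0000-000000000000"

    try:
        scores = ai_api.get_assessment_scores_by_criteria(submission_uuid)
        # Expected shape (example values only): {u'Concise': 2, u'Form': 3}
    except ai_api.AIGradingInternalError:
        # Raised when the query for the latest AI assessment fails.
        scores = {}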
apps/openassessment/assessment/test/test_ai.py

...
@@ -4,6 +4,7 @@ Tests for AI assessment.
"""
import copy
import mock
from nose.tools import raises
from django.db import DatabaseError
from django.test.utils import override_settings

from openassessment.test_utils import CacheResetTest
...
@@ -201,6 +202,24 @@ class AIGradingTest(CacheResetTest):
        self.assertEquals(score["points_possible"], 4)
        self.assertEquals(score["points_earned"], 3)

    @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
    def test_get_assessment_scores_by_criteria(self):
        ai_api.submit(self.submission_uuid, RUBRIC, ALGORITHM_ID)

        # Verify that we got the scores we provided to the stub AI algorithm
        assessment = ai_api.get_latest_assessment(self.submission_uuid)
        assessment_score_dict = ai_api.get_assessment_scores_by_criteria(self.submission_uuid)
        for part in assessment['parts']:
            criterion_name = part['option']['criterion']['name']
            expected_score = self.CLASSIFIER_SCORE_OVERRIDES[criterion_name]['score_override']
            self.assertEqual(assessment_score_dict[criterion_name], expected_score)

    @raises(ai_api.AIGradingInternalError)
    @mock.patch.object(Assessment.objects, 'filter')
    @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
    def test_error_getting_assessment_scores(self, mock_filter):
        mock_filter.side_effect = DatabaseError("Oh no!")
        ai_api.get_assessment_scores_by_criteria(self.submission_uuid)

    @mock.patch('openassessment.assessment.api.ai.grading_tasks.grade_essay')
    @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
...
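For context, the new test above reads back the per-criterion scores it fed to the stub AI algorithm via CLASSIFIER_SCORE_OVERRIDES. That fixture is defined elsewhere in test_ai.py and is not shown in this diff; the literal below is only an assumed illustration of its shape, inferred from the lookups in the test.

    # Assumed shape only (criterion names invented for illustration):
    # criterion name -> the score the stub AI algorithm should return for it.
    CLASSIFIER_SCORE_OVERRIDES = {
        u"criterion_one": {'score_override': 1},
        u"criterion_two": {'score_override': 2},
    }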
apps/openassessment/xblock/grade_mixin.py

...
@@ -9,6 +9,7 @@ from xblock.core import XBlock
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api
from openassessment.assessment.api import ai as ai_api
from openassessment.assessment.errors import SelfAssessmentError, PeerAssessmentError

from submissions import api as sub_api
...
@@ -91,19 +92,22 @@ class GradeMixin(object):
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        example_based_assessment = None
        self_assessment = None
        feedback = None
        peer_assessments = []
        has_submitted_feedback = False

        if "peer-assessment" in assessment_steps:
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = peer_api.get_assessments(submission_uuid)
            has_submitted_feedback = feedback is not None
        else:
            feedback = None
            peer_assessments = []
            has_submitted_feedback = False

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)
        else:
            self_assessment = None

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(submission_uuid)

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)
...
@@ -120,6 +124,7 @@ class GradeMixin(object):
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': self._rubric_criteria_with_feedback(peer_assessments),
            'has_submitted_feedback': has_submitted_feedback,
        }
...
@@ -128,10 +133,13 @@ class GradeMixin(object):
        # Note that we are updating a *copy* of the rubric criteria stored in
        # the XBlock field
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        median_scores = None
        if "peer-assessment" in assessment_steps:
            median_scores = peer_api.get_assessment_median_scores(submission_uuid)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)
        elif "example-based-assessment" in assessment_steps:
            median_scores = ai_api.get_assessment_scores_by_criteria(submission_uuid)

        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
...
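The loop body that applies these scores to the rubric criteria is elided ("...") in the hunk above. As a hypothetical illustration of the data flow only (the dict keys and values below are assumptions, not the project code), the per-criterion dicts might be merged into the render context along these lines:

    # Assumed example data: criterion name -> median score / maximum possible score.
    median_scores = {u"Concise": 2, u"Form": 3}
    max_scores = {u"Concise": 3, u"Form": 3}
    context = {"rubric_criteria": [{"name": u"Concise"}, {"name": u"Form"}]}

    if median_scores is not None and max_scores is not None:
        for criterion in context["rubric_criteria"]:
            # Hypothetical keys for the grade template; the real names are not shown in this diff.
            criterion["median_score"] = median_scores[criterion["name"]]
            criterion["total_value"] = max_scores[criterion["name"]]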
apps/openassessment/xblock/test/data/grade_scenario_ai_only.xml (new file, mode 0 → 100644)

<openassessment>
    <title>Open Assessment Test</title>
    <prompt>
        Given the state of the world today, what do you think should be done to
        combat poverty? Please answer in a short essay of 200-300 words.
    </prompt>
    <rubric>
        <prompt>Read for conciseness, clarity of thought, and form.</prompt>
        <criterion>
            <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
            <prompt>How concise is it?</prompt>
            <option points="3">
                <name>ﻉซƈﻉɭɭﻉกՇ</name>
                <explanation>Extremely concise</explanation>
            </option>
            <option points="2">
                <name>Ġööḋ</name>
                <explanation>Concise</explanation>
            </option>
            <option points="1">
                <name>ק๏๏г</name>
                <explanation>Wordy</explanation>
            </option>
        </criterion>
        <criterion>
            <name>Form</name>
            <prompt>How well-formed is it?</prompt>
            <option points="3">
                <name>Good</name>
                <explanation>Good</explanation>
            </option>
            <option points="2">
                <name>Fair</name>
                <explanation>Fair</explanation>
            </option>
            <option points="1">
                <name>Poor</name>
                <explanation>Poor</explanation>
            </option>
        </criterion>
    </rubric>
    <assessments>
        <assessment name="example-based-assessment" algorithm_id="fake">
            <example>
                <answer>Example Answer One</answer>
                <select criterion="𝓒𝓸𝓷𝓬𝓲𝓼𝓮" option="Ġööḋ" />
                <select criterion="Form" option="Poor" />
            </example>
            <example>
                <answer>Example Answer Two</answer>
                <select criterion="𝓒𝓸𝓷𝓬𝓲𝓼𝓮" option="ﻉซƈﻉɭɭﻉกՇ" />
                <select criterion="Form" option="Fair" />
            </example>
            <example>
                <answer>Example Answer Three</answer>
                <select criterion="𝓒𝓸𝓷𝓬𝓲𝓼𝓮" option="Ġööḋ" />
                <select criterion="Form" option="Good" />
            </example>
            <example>
                <answer>Example Answer Four</answer>
                <select criterion="𝓒𝓸𝓷𝓬𝓲𝓼𝓮" option="ﻉซƈﻉɭɭﻉกՇ" />
                <select criterion="Form" option="Good" />
            </example>
        </assessment>
    </assessments>
</openassessment>
apps/openassessment/xblock/test/data/grade_scenario_self_only.xml (new file, mode 0 → 100644)

<openassessment>
    <title>Open Assessment Test</title>
    <prompt>
        Given the state of the world today, what do you think should be done to
        combat poverty? Please answer in a short essay of 200-300 words.
    </prompt>
    <rubric>
        <prompt>Read for conciseness, clarity of thought, and form.</prompt>
        <criterion>
            <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
            <prompt>How concise is it?</prompt>
            <option points="3">
                <name>ﻉซƈﻉɭɭﻉกՇ</name>
                <explanation>Extremely concise</explanation>
            </option>
            <option points="2">
                <name>Ġööḋ</name>
                <explanation>Concise</explanation>
            </option>
            <option points="1">
                <name>ק๏๏г</name>
                <explanation>Wordy</explanation>
            </option>
        </criterion>
        <criterion>
            <name>Form</name>
            <prompt>How well-formed is it?</prompt>
            <option points="3">
                <name>Good</name>
                <explanation>Good</explanation>
            </option>
            <option points="2">
                <name>Fair</name>
                <explanation>Fair</explanation>
            </option>
            <option points="1">
                <name>Poor</name>
                <explanation>Poor</explanation>
            </option>
        </criterion>
    </rubric>
    <assessments>
        <assessment name="self-assessment" />
    </assessments>
</openassessment>
apps/openassessment/xblock/test/test_grade.py

...
@@ -85,6 +85,69 @@ class TestGrade(XBlockHandlerTestCase):
        self.assertIn('complete', resp.lower())

    @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
    @scenario('data/grade_scenario_self_only.xml', user_id='Greggs')
    def test_render_grade_self_only(self, xblock):
        rubric = create_rubric_dict(xblock.prompt, xblock.rubric_criteria)
        train_classifiers(rubric, CLASSIFIER_SCORE_OVERRIDES)

        # Submit, assess, and render the grade view
        self._create_submission_and_assessments(
            xblock, self.SUBMISSION, [], [], self.ASSESSMENTS[0],
            waiting_for_peer=True, waiting_for_ai=True
        )
        resp = self.request(xblock, 'render_grade', json.dumps(dict()))

        # Verify that feedback from each scorer appears in the view
        self.assertIn(u'ﻉซƈﻉɭɭﻉกՇ', resp.decode('utf-8'))
        self.assertIn(u'Fair', resp.decode('utf-8'))

        # Verify that the submission and peer steps show that we're graded
        # This isn't strictly speaking part of the grade step rendering,
        # but we've already done all the setup to get to this point in the flow,
        # so we might as well verify it here.
        resp = self.request(xblock, 'render_submission', json.dumps(dict()))
        self.assertIn('response', resp.lower())
        self.assertIn('complete', resp.lower())

        resp = self.request(xblock, 'render_peer_assessment', json.dumps(dict()))
        self.assertNotIn('peer', resp.lower())
        self.assertNotIn('complete', resp.lower())

        resp = self.request(xblock, 'render_self_assessment', json.dumps(dict()))
        self.assertIn('self', resp.lower())
        self.assertIn('complete', resp.lower())

    @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
    @scenario('data/grade_scenario_ai_only.xml', user_id='Greggs')
    def test_render_grade_ai_only(self, xblock):
        rubric = create_rubric_dict(xblock.prompt, xblock.rubric_criteria)
        train_classifiers(rubric, CLASSIFIER_SCORE_OVERRIDES)

        # Submit, assess, and render the grade view
        self._create_submission_and_assessments(
            xblock, self.SUBMISSION, [], [], None, waiting_for_peer=True
        )
        resp = self.request(xblock, 'render_grade', json.dumps(dict()))

        # Verify that feedback from each scorer appears in the view
        self.assertNotIn(u'єאςєɭɭєภՇ', resp.decode('utf-8'))
        self.assertIn(u'Fair', resp.decode('utf-8'))

        # Verify that the submission and peer steps show that we're graded
        # This isn't strictly speaking part of the grade step rendering,
        # but we've already done all the setup to get to this point in the flow,
        # so we might as well verify it here.
        resp = self.request(xblock, 'render_submission', json.dumps(dict()))
        self.assertIn('response', resp.lower())
        self.assertIn('complete', resp.lower())

        resp = self.request(xblock, 'render_peer_assessment', json.dumps(dict()))
        self.assertNotIn('peer', resp.lower())
        self.assertNotIn('complete', resp.lower())

        resp = self.request(xblock, 'render_self_assessment', json.dumps(dict()))
        self.assertNotIn('self', resp.lower())
        self.assertNotIn('complete', resp.lower())

    @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
    @scenario('data/feedback_per_criterion.xml', user_id='Bernard')
    def test_render_grade_feedback_per_criterion(self, xblock):
        # Submit, assess, and render the grade view
...