edx / edx-ora2

Commit 04b682e5, authored Jun 17, 2014 by Will Daly

Merge pull request #440 from edx/will/staff-debug-classifier-info

Classifier info in staff debug

Parents: 3cbc8fd8 9e4d35f9
Showing 9 changed files with 381 additions and 187 deletions.
openassessment/assessment/api/ai.py                                         +45  -4
openassessment/assessment/errors/ai.py                                       +3  -3
openassessment/assessment/models/ai.py                                      +86 -65
openassessment/assessment/test/test_ai.py                                   +66  -3
openassessment/templates/openassessmentblock/staff_debug/staff_debug.html   +32  -0
openassessment/xblock/staff_info_mixin.py                                   +92 -48
openassessment/xblock/static/css/openassessment.css                          +0  -0
openassessment/xblock/static/sass/oa/elements/_staff.scss                    +3  -2
openassessment/xblock/test/test_staff_info.py                               +54 -62
openassessment/assessment/api/ai.py

@@ -4,11 +4,9 @@ Public interface for AI training and grading, used by students/course authors.
 import logging
 
 from django.db import DatabaseError
 from submissions import api as sub_api
-from celery.exceptions import (
-    ChordError, InvalidTaskError, NotConfigured, NotRegistered, QueueNotFound, TaskRevokedError
-)
 from openassessment.assessment.serializers import (
-    deserialize_training_examples, InvalidTrainingExample, InvalidRubric, full_assessment_dict
+    deserialize_training_examples, rubric_from_dict,
+    InvalidTrainingExample, InvalidRubric, full_assessment_dict
 )
 from openassessment.assessment.errors import (
     AITrainingRequestError, AITrainingInternalError, AIGradingRequestError,

@@ -22,6 +20,7 @@ from openassessment.assessment.models import (
 from openassessment.assessment.worker import training as training_tasks
 from openassessment.assessment.worker import grading as grading_tasks
 
 logger = logging.getLogger(__name__)
@@ -347,3 +346,45 @@ def reschedule_unfinished_tasks(course_id=None, item_id=None, task_type=u"grade"
         ).format(cid=course_id, iid=item_id, ex=ex)
         logger.exception(msg)
         raise AIGradingInternalError(ex)
+
+
+def get_classifier_set_info(rubric_dict, algorithm_id, course_id, item_id):
+    """
+    Get information about the classifier available for a particular problem.
+    This is the classifier that would be selected to grade essays for the problem.
+
+    Args:
+        rubric_dict (dict): The serialized rubric model.
+        algorithm_id (unicode): The algorithm to use for classification.
+        course_id (unicode): The course identifier for the current problem.
+        item_id (unicode): The item identifier for the current problem.
+
+    Returns:
+        dict with keys 'created_at', 'algorithm_id', 'course_id', and 'item_id'
+        Note that course ID and item ID might be different than the current problem
+        if a classifier from a different problem with a similar rubric
+        is the best available match.
+
+    """
+    try:
+        rubric = rubric_from_dict(rubric_dict)
+        classifier_set = AIClassifierSet.most_recent_classifier_set(
+            rubric, algorithm_id, course_id, item_id
+        )
+        if classifier_set is not None:
+            return {
+                'created_at': classifier_set.created_at,
+                'algorithm_id': classifier_set.algorithm_id,
+                'course_id': classifier_set.course_id,
+                'item_id': classifier_set.item_id
+            }
+        else:
+            return None
+    except InvalidRubric:
+        msg = u"Could not retrieve classifier set info: the rubric definition was not valid."
+        logger.exception(msg)
+        raise AIGradingRequestError(msg)
+    except DatabaseError as ex:
+        msg = u"An unexpected error occurred while retrieving classifier set info: {ex}".format(ex=ex)
+        logger.exception(msg)
+        raise AIGradingInternalError(msg)
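
For reviewers unfamiliar with the new entry point, a minimal usage sketch (not part of the diff; RUBRIC and ALGORITHM_ID stand in for a serialized rubric dict and an algorithm identifier, as in the tests later in this commit):

# Sketch only: RUBRIC and ALGORITHM_ID are placeholders.
from openassessment.assessment.api import ai as ai_api

info = ai_api.get_classifier_set_info(RUBRIC, ALGORITHM_ID, 'test_course', 'test_item')
if info is None:
    # No classifiers have been trained yet for this rubric/algorithm;
    # the staff debug panel renders a "no classifiers" message instead.
    pass
else:
    created_at = info['created_at']   # datetime when the matching set was trained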
openassessment/assessment/errors/ai.py

@@ -3,8 +3,9 @@ Errors related to AI assessment.
 """
 from celery.exceptions import InvalidTaskError, NotConfigured, NotRegistered, QueueNotFound
+from socket import error as socket_error
 
-ANTICIPATED_CELERY_ERRORS = (InvalidTaskError, NotConfigured, NotRegistered, QueueNotFound)
+ANTICIPATED_CELERY_ERRORS = (InvalidTaskError, NotConfigured, NotRegistered, QueueNotFound, socket_error)
 
 class AIError(Exception):
     """

@@ -52,4 +53,4 @@ class AIReschedulingInternalError(AIError):
     """
     An unexpected error occurred while using the AI assessment API.
     """
-    pass
\ No newline at end of file
+    pass
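
Adding socket_error to ANTICIPATED_CELERY_ERRORS means broker connection failures are now treated like the other expected Celery dispatch errors. A hedged sketch of the catch pattern this tuple serves (the task and dispatch call are hypothetical, for illustration only):

# Sketch only: `start_grading` is a hypothetical Celery task.
from openassessment.assessment.errors.ai import ANTICIPATED_CELERY_ERRORS, AIGradingInternalError

try:
    start_grading.apply_async(args=[workflow_uuid])
except ANTICIPATED_CELERY_ERRORS as ex:
    # socket.error now lands here too, e.g. when the broker is unreachable.
    raise AIGradingInternalError(ex)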
openassessment/assessment/models/ai.py

@@ -4,18 +4,15 @@ Database models for AI assessment.
 from uuid import uuid4
 import json
 import logging
 import itertools
 from django.conf import settings
 from django.core.files.base import ContentFile
 from django.core.cache import cache
 from django.db import models, transaction, DatabaseError
 from django.utils.timezone import now
 from django.core.exceptions import ObjectDoesNotExist
 from django_extensions.db.fields import UUIDField
 from dogapi import dog_stats_api
 from submissions import api as sub_api
 from openassessment.assessment.serializers import rubric_from_dict
 from openassessment.assessment.errors.ai import AIReschedulingInternalError
 from .base import Rubric, Criterion, Assessment, AssessmentPart
 from .training import TrainingExample
@@ -177,6 +174,84 @@ class AIClassifierSet(models.Model):
         return classifier_set
 
+    @classmethod
+    def most_recent_classifier_set(cls, rubric, algorithm_id, course_id, item_id):
+        """
+        Finds the most relevant classifier set based on the following line of succession:
+
+            1 -- Classifier sets with the same COURSE, ITEM, RUBRIC *content* hash, and ALGORITHM
+                 - Newest first.  If none exist...
+            2 -- Classifier sets with the same COURSE, ITEM, and RUBRIC *structure* hash, and ALGORITHM.
+                 - Newest first.  If none exist...
+            3 -- The newest classifier set with the same RUBRIC and ALGORITHM
+                 - Newest first.  If none exist...
+            4 -- Return None
+
+        Case #1 is ideal: we get a classifier set trained for the rubric as currently defined.
+        Case #2 handles when a course author makes a cosmetic change to a rubric after training.
+            We don't want to stop grading students because an author fixed a typo!
+        Case #3 handles problems that are duplicated, such as the default problem prompt.
+            If we've already trained classifiers for the identical rubric somewhere else,
+            then the author can use them to test out the feature immediately.
+        Case #4: Someone will need to schedule training; however, we will still accept
+            student submissions and grade them once training completes.
+
+        Args:
+            rubric (Rubric): The rubric associated with the classifier set.
+            algorithm_id (unicode): The algorithm used to create the classifier set.
+            course_id (unicode): The course identifier for the current problem.
+            item_id (unicode): The item identifier for the current problem.
+
+        Returns:
+            ClassifierSet or None
+
+        Raises:
+            DatabaseError
+
+        """
+        # List of the parameters we will search for, in order of decreasing priority
+        search_parameters = [
+            # Case #1: same course / item / rubric (exact) / algorithm
+            {
+                'rubric__content_hash': rubric.content_hash,
+                'algorithm_id': algorithm_id,
+                'course_id': course_id,
+                'item_id': item_id
+            },
+
+            # Case #2: same course / item / rubric (structure only) / algorithm
+            {
+                'rubric__structure_hash': rubric.structure_hash,  # pylint: disable=E1101
+                'algorithm_id': algorithm_id,
+                'course_id': course_id,
+                'item_id': item_id
+            },
+
+            # Case #3: same rubric (exact) / algorithm
+            {
+                'rubric__content_hash': rubric.content_hash,
+                'algorithm_id': algorithm_id
+            }
+        ]
+
+        # Perform each query, starting with the highest priority
+        for params in search_parameters:
+            # Retrieve the most recent classifier set that matches our query
+            # (rely on implicit ordering in the model definition)
+            classifier_set_candidates = cls.objects.filter(**params)[:1]
+
+            # If we find a classifier set, return the most recent one
+            if len(classifier_set_candidates) > 0:
+                return classifier_set_candidates[0]
+
+        # If we get to this point, no classifiers exist with this rubric and algorithm.
+        return None
+
     @property
     def classifiers_dict(self):
         """
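
The "implicit ordering" comment above assumes AIClassifierSet declares newest-first default ordering in its model Meta, so that `.filter(**params)[:1]` picks the latest match. A sketch of what such a declaration looks like (an assumption for illustration; the actual Meta options live elsewhere in this file and are not part of this diff):

class AIClassifierSet(models.Model):
    # ... fields elided ...

    class Meta:
        app_label = "assessment"
        ordering = ['-created_at', '-id']   # assumed: newest first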
@@ -564,27 +639,7 @@ class AIGradingWorkflow(AIWorkflow):
     def assign_most_recent_classifier_set(self):
         """
-        Finds the most relevant classifier set based on the following line of succession:
-
-            1 -- Classifier sets with the same COURSE, ITEM, RUBRIC *content* hash, and ALGORITHM
-                 - Newest first.  If none exist...
-            2 -- Classifier sets with the same COURSE, ITEM, and RUBRIC *structure* hash, and ALGORITHM.
-                 - Newest first.  If none exist...
-            3 -- The newest classifier set with the same RUBRIC and ALGORITHM
-                 - Newest first.  If none exist...
-            4 -- Do no assignment and return False
-
-        Case #1 is ideal: we get a classifier set trained for the rubric as currently defined.
-        Case #2 handles when a course author makes a cosmetic change to a rubric after training.
-            We don't want to stop grading students because an author fixed a typo!
-        Case #3 handles problems that are duplicated, such as the default problem prompt.
-            If we've already trained classifiers for the identical rubric somewhere else,
-            then the author can use them to test out the feature immediately.
-        Case #4: Someone will need to schedule training; however, we will still accept
-            student submissions and grade them once training completes.
+        Find the most recent classifier set and assign it to this workflow.
 
         Returns:
             (bool) indicates whether or not classifiers were able to be assigned to the AIGradingWorkflow

@@ -592,47 +647,13 @@ class AIGradingWorkflow(AIWorkflow):
         Raises:
             DatabaseError
         """
-        # List of the parameters we will search for, in order of decreasing priority
-        search_parameters = [
-            # Case #1: same course / item / rubric (exact) / algorithm
-            {
-                'rubric__content_hash': self.rubric.content_hash,
-                'algorithm_id': self.algorithm_id,
-                'course_id': self.course_id,
-                'item_id': self.item_id
-            },
-
-            # Case #2: same course / item / rubric (structure only) / algorithm
-            {
-                'rubric__structure_hash': self.rubric.structure_hash,  # pylint: disable=E1101
-                'algorithm_id': self.algorithm_id,
-                'course_id': self.course_id,
-                'item_id': self.item_id
-            },
-
-            # Case #3: same rubric (exact) / algorithm
-            {
-                'rubric__content_hash': self.rubric.content_hash,
-                'algorithm_id': self.algorithm_id
-            }
-        ]
-
-        # Perform each query, starting with the highest priority
-        for params in search_parameters:
-            # Retrieve the most recent classifier set that matches our query
-            # (rely on implicit ordering in the model definition)
-            classifier_set_candidates = AIClassifierSet.objects.filter(**params)[:1]
-
-            # If we find a classifier set,
-            # then associate the most recent classifiers with it and return true
-            if len(classifier_set_candidates) > 0:
-                self.classifier_set = classifier_set_candidates[0]
-                self.save()
-                return True
-
-        # If we get to this point, no classifiers exist with this rubric and algorithm.
-        return False
+        classifier_set = AIClassifierSet.most_recent_classifier_set(
+            self.rubric, self.algorithm_id, self.course_id, self.item_id
+        )
+
+        if classifier_set is not None:
+            self.classifier_set = classifier_set
+            self.save()
+
+        return classifier_set is not None
 
     @classmethod
     @transaction.commit_on_success
openassessment/assessment/test/test_ai.py

@@ -5,17 +5,15 @@ Tests for AI assessment.
 import copy
 import mock
 from nose.tools import raises
-from celery.exceptions import NotConfigured, InvalidTaskError
+from celery.exceptions import NotConfigured
 from django.db import DatabaseError
 from django.test.utils import override_settings
 from openassessment.test_utils import CacheResetTest
 from submissions import api as sub_api
 from openassessment.assessment.api import ai as ai_api
-from openassessment.assessment.models import (
-    AITrainingWorkflow, AIGradingWorkflow, AIClassifierSet, Assessment
-)
+from openassessment.assessment.models import AITrainingWorkflow, AIGradingWorkflow, AIClassifierSet
 from openassessment.assessment.worker.algorithm import AIAlgorithm, AIAlgorithmError
 from openassessment.assessment.serializers import rubric_from_dict
@@ -531,3 +529,68 @@ class AIAutomaticGradingTest(CacheResetTest):
             return False
         except StopIteration:
             return True
+
+
+class AIClassifierInfoTest(CacheResetTest):
+    """
+    Tests for retrieving info about classifier sets.
+    """
+
+    @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
+    def test_no_classifier_set(self):
+        classifier_info = ai_api.get_classifier_set_info(
+            RUBRIC, ALGORITHM_ID, 'test_course', 'test_item'
+        )
+        self.assertIs(classifier_info, None)
+
+    @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
+    def test_classifier_set_info(self):
+        workflow_uuid = ai_api.train_classifiers(
+            RUBRIC, EXAMPLES, 'test_course', 'test_item', ALGORITHM_ID
+        )
+        classifier_info = ai_api.get_classifier_set_info(
+            RUBRIC, ALGORITHM_ID, 'test_course', 'test_item'
+        )
+
+        # Retrieve the classifier set so we can get its actual creation date
+        workflow = AITrainingWorkflow.objects.get(uuid=workflow_uuid)
+        classifier_set = workflow.classifier_set
+        expected_info = {
+            'created_at': classifier_set.created_at,
+            'algorithm_id': ALGORITHM_ID,
+            'course_id': 'test_course',
+            'item_id': 'test_item'
+        }
+        self.assertEqual(classifier_info, expected_info)
+
+    @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
+    def test_multiple_classifier_sets(self):
+        # Train multiple classifiers
+        ai_api.train_classifiers(RUBRIC, EXAMPLES, 'test_course', 'test_item', ALGORITHM_ID)
+        second_uuid = ai_api.train_classifiers(RUBRIC, EXAMPLES, 'test_course', 'test_item', ALGORITHM_ID)
+
+        # Expect that we get the info for the second classifier
+        classifier_info = ai_api.get_classifier_set_info(
+            RUBRIC, ALGORITHM_ID, 'test_course', 'test_item'
+        )
+        workflow = AITrainingWorkflow.objects.get(uuid=second_uuid)
+        classifier_set = workflow.classifier_set
+        self.assertEqual(classifier_info['created_at'], classifier_set.created_at)
+
+    @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
+    @raises(AIGradingInternalError)
+    @mock.patch.object(AIClassifierSet, 'most_recent_classifier_set')
+    def test_database_error(self, mock_call):
+        mock_call.side_effect = DatabaseError('OH NO!')
+        ai_api.get_classifier_set_info(
+            RUBRIC, ALGORITHM_ID, 'test_course', 'test_item'
+        )
+
+    @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
+    @raises(AIGradingRequestError)
+    def test_invalid_rubric_error(self):
+        invalid_rubric = {}
+        ai_api.get_classifier_set_info(
+            invalid_rubric, ALGORITHM_ID, 'test_course', 'test_item'
+        )
openassessment/templates/openassessmentblock/staff_debug/staff_debug.html

@@ -79,6 +79,38 @@
 </div>
 
 {% if display_schedule_training %}
+<div class="staff-info__classifierset ui-staff__content__section">
+    {% if classifierset %}
+    <table class="staff-info__classifierset__table" summary="{% trans "Classifier set" %}">
+        <caption class="title">{% trans "Classifier set" %}</caption>
+        <thead>
+            <th abbr="Field" scope="col">{% trans "Field" %}</th>
+            <th abbr="Value" scope="col">{% trans "Value" %}</th>
+        </thead>
+        <tbody>
+            <tr>
+                <td class="value">{% trans "Created at" %}</td>
+                <td class="value">{{ classifierset.created_at }}</td>
+            </tr>
+            <tr>
+                <td class="value">{% trans "Algorithm ID" %}</td>
+                <td class="value">{{ classifierset.algorithm_id }}</td>
+            </tr>
+            <tr>
+                <td class="value">{% trans "Course ID" %}</td>
+                <td class="value">{{ classifierset.course_id }}</td>
+            </tr>
+            <tr>
+                <td class="value">{% trans "Item ID" %}</td>
+                <td class="value">{{ classifierset.item_id }}</td>
+            </tr>
+        </tbody>
+    </table>
+    {% else %}
+    {% trans "No classifiers are available for this problem" %}
+    {% endif %}
+</div>
 <div class="staff-info__status ui-staff__content__section">
     <a aria-role="button" href="" id="schedule_training" class="action--submit"><span class="copy">{% trans "Schedule Example Based Assessment Training" %}</span></a>
     <div id="schedule_training_message"></div>
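
The template's {% if classifierset %} branch consumes the dict built in staff_info_mixin.py below. For reference, a minimal context the panel expects (values illustrative, not taken from the diff):

# Sketch only: every value below is illustrative.
context = {
    'display_schedule_training': True,
    'classifierset': {
        'created_at': datetime.datetime(2014, 6, 17),   # illustrative timestamp
        'algorithm_id': ALGORITHM_ID,                    # e.g. the stub test algorithm id
        'course_id': 'test_course',
        'item_id': 'test_item',
    },
}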
openassessment/xblock/staff_info_mixin.py

@@ -3,22 +3,78 @@ The Staff Info View mixin renders all the staff-specific information used to
 determine the flow of the problem.
 """
 import copy
+from functools import wraps
 
 from django.utils.translation import ugettext as _
+from django.utils.translation import ugettext_lazy
 from xblock.core import XBlock
-from openassessment.assessment.errors.ai import AIError, AIGradingInternalError, AITrainingInternalError
+from openassessment.assessment.errors.ai import AIError
 from openassessment.xblock.resolve_dates import DISTANT_PAST, DISTANT_FUTURE
-from openassessment.xblock.data_conversion import create_rubric_dict, convert_training_examples_list_to_dict
+from openassessment.xblock.data_conversion import (
+    create_rubric_dict, convert_training_examples_list_to_dict
+)
 from submissions import api as submission_api
 from openassessment.assessment.api import peer as peer_api
 from openassessment.assessment.api import self as self_api
 from openassessment.assessment.api import ai as ai_api
 
+
+def require_global_admin(error_msg):
+    """
+    Method decorator to restrict access to an XBlock handler
+    to only global staff.
+
+    Args:
+        error_msg (unicode): The error message to display to the user
+            if they do not have sufficient permissions.
+
+    Returns:
+        Decorated function
+
+    """
+    def _decorator(func):  # pylint: disable=C0111
+        @wraps(func)
+        def _wrapped(xblock, *args, **kwargs):  # pylint: disable=C0111
+            if not xblock.is_admin or xblock.in_studio_preview:
+                return {'success': False, 'msg': unicode(error_msg)}
+            else:
+                return func(xblock, *args, **kwargs)
+        return _wrapped
+    return _decorator
+
+
+def require_course_staff(error_msg):
+    """
+    Method decorator to restrict access to an XBlock render
+    method to only course staff.
+
+    Args:
+        error_msg (unicode): The error message to display to the user
+            if they do not have sufficient permissions.
+
+    Returns:
+        decorated function
+
+    """
+    def _decorator(func):  # pylint: disable=C0111
+        @wraps(func)
+        def _wrapped(xblock, *args, **kwargs):  # pylint: disable=C0111
+            if not xblock.is_course_staff or xblock.in_studio_preview:
+                return xblock.render_error(unicode(error_msg))
+            else:
+                return func(xblock, *args, **kwargs)
+        return _wrapped
+    return _decorator
+
+
 class StaffInfoMixin(object):
     """
     Display debug information to course and global staff.
     """
 
     @XBlock.handler
-    def render_staff_info(self, data, suffix=''):
+    @require_course_staff(ugettext_lazy(u"You do not have permission to access staff information"))
+    def render_staff_info(self, data, suffix=''):  # pylint: disable=W0613
         """
         Template context dictionary for course staff debug panel.
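
The two decorators factor the permission checks out of every handler below. A sketch of how a new guarded handler would read (the handler name and message are hypothetical):

    @XBlock.json_handler
    @require_global_admin(ugettext_lazy(u"You do not have permission to do this"))
    def my_admin_handler(self, data, suffix=''):
        # Runs only for global admins outside Studio preview; otherwise the
        # decorator returns {'success': False, 'msg': ...} without calling this body.
        return {'success': True}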
@@ -26,12 +82,6 @@ class StaffInfoMixin(object):
             dict: The template context specific to the course staff debug panel.
 
         """
-        # If we're not course staff, or in preview mode, return nothing for the
-        # staff info view.
-        if not self.is_course_staff or self.in_studio_preview:
-            return self.render_error(_(
-                u"You do not have permission to access staff information"
-            ))
         path, context = self.get_staff_path_and_context()
         return self.render_assessment(path, context)

@@ -42,6 +92,13 @@ class StaffInfoMixin(object):
         context = {}
         path = 'openassessmentblock/staff_debug/staff_debug.html'
+        student_item = self.get_student_item_dict()
+
+        # We need to display the new-style locations in the course staff
+        # info, even if we're using old-style locations internally,
+        # so course staff can use the locations to delete student state.
+        context['item_id'] = student_item["item_id"]
 
         # Calculate how many students are in each step of the workflow
         status_counts, num_submissions = self.get_workflow_status_counts()
         context['status_counts'] = status_counts
@@ -49,18 +106,21 @@ class StaffInfoMixin(object):
-        # Show the schedule training button if example based assessment is
-        # configured, and the current user has admin privileges.
-        assessment = self.get_assessment_module('example-based-assessment')
-        context['display_schedule_training'] = self.is_admin and assessment
-
-        # Show the reschedule tasks button if the user is an administrator and
-        # is not in studio preview mode and there exists example based assessment
-        # as part of the problem definition.
-        context['display_reschedule_unfinished_tasks'] = self.is_admin and assessment and not self.in_studio_preview
-
-        # We need to display the new-style locations in the course staff
-        # info, even if we're using old-style locations internally,
-        # so course staff can use the locations to delete student state.
-        context['item_id'] = self.get_student_item_dict()["item_id"]
+        example_based_assessment = self.get_assessment_module('example-based-assessment')
+        display_ai_staff_info = (
+            self.is_admin and
+            bool(example_based_assessment) and
+            not self.in_studio_preview
+        )
+        context['display_schedule_training'] = display_ai_staff_info
+        context['display_reschedule_unfinished_tasks'] = display_ai_staff_info
+        if display_ai_staff_info:
+            context['classifierset'] = ai_api.get_classifier_set_info(
+                create_rubric_dict(self.prompt, self.rubric_criteria),
+                example_based_assessment['algorithm_id'],
+                student_item['course_id'],
+                student_item['item_id']
+            )
 
         # Include release/due dates for each step in the problem
         context['step_dates'] = list()
@@ -82,13 +142,11 @@ class StaffInfoMixin(object):
         return path, context
 
     @XBlock.json_handler
-    def schedule_training(self, data, suffix=''):
-        if not self.is_admin or self.in_studio_preview:
-            return {
-                'success': False,
-                'msg': _(u"You do not have permission to schedule training")
-            }
-
+    @require_global_admin(ugettext_lazy(u"You do not have permission to schedule training"))
+    def schedule_training(self, data, suffix=''):  # pylint: disable=W0613
         """
         Schedule a new training task for example-based grading.
         """
         assessment = self.get_assessment_module('example-based-assessment')
         student_item_dict = self.get_student_item_dict()
@@ -105,12 +163,12 @@ class StaffInfoMixin(object):
             return {
                 'success': True,
                 'workflow_uuid': workflow_uuid,
-                'msg': _(u"Training scheduled with new Workflow UUID: {}".format(workflow_uuid))
+                'msg': _(u"Training scheduled with new Workflow UUID: {uuid}".format(uuid=workflow_uuid))
             }
         except AIError as err:
             return {
                 'success': False,
-                'msg': _(u"An error occurred scheduling classifier training {}".format(err))
+                'msg': _(u"An error occurred scheduling classifier training: {error}".format(error=err))
             }
         else:
@@ -120,7 +178,8 @@ class StaffInfoMixin(object):
         }
 
     @XBlock.handler
-    def render_student_info(self, data, suffix=''):
+    @require_course_staff(ugettext_lazy(u"You do not have permission to access student information."))
+    def render_student_info(self, data, suffix=''):  # pylint: disable=W0613
         """
         Renders all relative information for a specific student's workflow.
@@ -130,14 +189,6 @@ class StaffInfoMixin(object):
         Must be course staff to render this view.
 
         """
-        # If request does not come from course staff, return nothing.
-        # This should not be able to happen unless someone attempts to
-        # explicitly invoke this handler.
-        if not self.is_course_staff or self.in_studio_preview:
-            return self.render_error(_(
-                u"You do not have permission to access student information."
-            ))
-
         path, context = self.get_student_info_path_and_context(data)
         return self.render_assessment(path, context)
@@ -197,7 +248,8 @@ class StaffInfoMixin(object):
         return path, context
 
     @XBlock.json_handler
-    def reschedule_unfinished_tasks(self, data, suffix=''):
+    @require_global_admin(ugettext_lazy(u"You do not have permission to reschedule tasks."))
+    def reschedule_unfinished_tasks(self, data, suffix=''):  # pylint: disable=W0613
         """
         Wrapper which invokes the API call for rescheduling grading tasks.
@@ -215,14 +267,6 @@ class StaffInfoMixin(object):
             'success': (bool) Indicates whether or not the tasks were rescheduled successfully
             'msg': The response to the server (could be error message or success message)
         """
-        # Verifies permissions after the push of the button is made
-        if not self.is_admin or self.in_studio_preview:
-            return {
-                'success': False,
-                'msg': _(u"You do not have permission to reschedule tasks.")
-            }
-
         # Identifies the course and item that will need to be re-run
         student_item_dict = self.get_student_item_dict()
         course_id = student_item_dict.get('course_id')
openassessment/xblock/static/css/openassessment.css

This diff is collapsed.
openassessment/xblock/static/sass/oa/elements/_staff.scss

@@ -84,11 +84,11 @@
 }
 
 // UI - status (table)
-.staff-info__status {
+.staff-info__status, .staff-info__classifierset {
 }
 
-.staff-info__status__table {
+.staff-info__status__table, .staff-info__classifierset__table {
   @extend %copy-3;
   border-radius: ($baseline-v/10);
   color: $copy-staff-color;

@@ -132,4 +132,5 @@
     }
   }
 }
openassessment/xblock/test/test_staff_info.py

 # coding=utf-8
 from collections import namedtuple
 import pytz
 import json
+import datetime
 from mock import Mock, patch
 from django.test.utils import override_settings
 from openassessment.assessment.api import peer as peer_api
 from openassessment.assessment.api import self as self_api
 from openassessment.assessment.api import ai as ai_api
 from openassessment.workflow import api as workflow_api
-from openassessment.assessment.errors.ai import AIError, AIGradingInternalError, AITrainingInternalError
+from openassessment.assessment.errors.ai import AIError, AIGradingInternalError
 from submissions import api as sub_api
 from openassessment.xblock.test.base import scenario, XBlockHandlerTestCase
 
-ALGORITHM_ID = 'fake'
-AI_ALGORITHMS = {
-    'fake': 'openassessment.assessment.worker.algorithm.FakeAIAlgorithm'
-}
+# Test dependency on Stub AI Algorithm configuration
+from openassessment.assessment.test.test_ai import ALGORITHM_ID, AI_ALGORITHMS, train_classifiers
 
 STUDENT_ITEM = dict(
     student_id="Bob",
@@ -32,45 +34,6 @@ ASSESSMENT_DICT = {
     },
 }
 
-EXAMPLE_BASED_ASSESSMENT = {
-    "name": "example-based-assessment",
-    "algorithm_id": "1",
-    "examples": [
-        {
-            "answer": "Foo",
-            "options_selected": [
-                {"criterion": "Ideas", "option": "Fair"},
-                {"criterion": "Content", "option": "Good"}
-            ]
-        },
-        {
-            "answer": "Bar",
-            "options_selected": [
-                {"criterion": "Ideas", "option": "Poor"},
-                {"criterion": "Content", "option": "Good"}
-            ]
-        }
-    ]
-}
+# Rubric-specific classifier score override
+CLASSIFIER_SCORE_OVERRIDES = {
+    u"Ideas": {'score_override': 1},
+    u"Content": {'score_override': 2}
+}
 
 class TestCourseStaff(XBlockHandlerTestCase):
     """
@@ -261,10 +224,10 @@ class TestCourseStaff(XBlockHandlerTestCase):
     @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
     @scenario('data/example_based_assessment.xml', user_id='Bob')
     def test_staff_debug_student_info_full_workflow(self, xblock):
+        # Train classifiers
+        example_based_assessment = xblock.get_assessment_module('example-based-assessment')
+        example_based_assessment['algorithm_id'] = ALGORITHM_ID
+        train_classifiers({'criteria': xblock.rubric_criteria}, CLASSIFIER_SCORE_OVERRIDES)
+
+        # Simulate that we are course staff
+        xblock.xmodule_runtime = self._create_mock_runtime(
+            xblock.scope_ids.usage_id, True, False, "Bob"
+        )
 
         # Commonly chosen options for assessments
         options_selected = {
@@ -272,13 +235,9 @@ class TestCourseStaff(XBlockHandlerTestCase):
             "Content": "Poor",
         }
 
-        # Simulate that we are course staff
-        xblock.xmodule_runtime = self._create_mock_runtime(
-            xblock.scope_ids.usage_id, True, False, "Bob"
-        )
-
         bob_item = STUDENT_ITEM.copy()
         bob_item["item_id"] = xblock.scope_ids.usage_id
 
         # Create a submission for Bob, and corresponding workflow.
         submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
         peer_api.on_start(submission["uuid"])
@@ -318,7 +277,6 @@ class TestCourseStaff(XBlockHandlerTestCase):
     @scenario('data/example_based_assessment.xml', user_id='Bob')
     def test_display_schedule_training(self, xblock):
-        xblock.rubric_assessments.append(EXAMPLE_BASED_ASSESSMENT)
         xblock.xmodule_runtime = self._create_mock_runtime(
             xblock.scope_ids.usage_id, True, True, "Bob"
         )
@@ -329,20 +287,17 @@ class TestCourseStaff(XBlockHandlerTestCase):
     @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
     @scenario('data/example_based_assessment.xml', user_id='Bob')
     def test_schedule_training(self, xblock):
-        xblock.rubric_assessments.append(EXAMPLE_BASED_ASSESSMENT)
+        example_based_assessment = xblock.get_assessment_module('example-based-assessment')
+        example_based_assessment['algorithm_id'] = ALGORITHM_ID
+        train_classifiers({'criteria': xblock.rubric_criteria}, CLASSIFIER_SCORE_OVERRIDES)
         xblock.xmodule_runtime = self._create_mock_runtime(
             xblock.scope_ids.usage_id, True, True, "Bob"
         )
 
         # Schedule training
         response = self.request(xblock, 'schedule_training', json.dumps({}), response_format='json')
         self.assertTrue(response['success'], msg=response.get('msg'))
         self.assertTrue('workflow_uuid' in response)
 
     @scenario('data/example_based_assessment.xml', user_id='Bob')
     def test_not_displaying_schedule_training(self, xblock):
-        xblock.rubric_assessments.append(EXAMPLE_BASED_ASSESSMENT)
         xblock.xmodule_runtime = self._create_mock_runtime(
             xblock.scope_ids.usage_id, True, False, "Bob"
         )
@@ -363,7 +318,6 @@ class TestCourseStaff(XBlockHandlerTestCase):
     @scenario('data/example_based_assessment.xml', user_id='Bob')
     def test_admin_schedule_training_error(self, xblock, mock_api):
         mock_api.side_effect = AIError("Oh no!")
-        xblock.rubric_assessments.append(EXAMPLE_BASED_ASSESSMENT)
         xblock.xmodule_runtime = self._create_mock_runtime(
             xblock.scope_ids.usage_id, True, True, "Bob"
         )
@@ -373,7 +327,6 @@ class TestCourseStaff(XBlockHandlerTestCase):
     @scenario('data/example_based_assessment.xml', user_id='Bob')
     def test_display_reschedule_unfinished_grading_tasks(self, xblock):
-        xblock.rubric_assessments.append(EXAMPLE_BASED_ASSESSMENT)
         xblock.xmodule_runtime = self._create_mock_runtime(
             xblock.scope_ids.usage_id, True, True, "Bob"
         )
@@ -425,6 +378,45 @@ class TestCourseStaff(XBlockHandlerTestCase):
         self.assertFalse(response['success'])
         self.assertTrue('not configured' in response['msg'])
 
+    @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
+    @scenario('data/example_based_assessment.xml', user_id='Bob')
+    def test_classifier_set_info(self, xblock):
+        xblock.xmodule_runtime = self._create_mock_runtime(
+            xblock.scope_ids.usage_id, True, True, "Bob"
+        )
+
+        # Initially, there should be no classifier set info
+        # because we haven't trained any classifiers for this problem
+        __, context = xblock.get_staff_path_and_context()
+        self.assertIn('classifierset', context)
+        self.assertIs(context['classifierset'], None)
+
+        # Schedule a training task, which should create classifiers
+        response = self.request(xblock, 'schedule_training', json.dumps({}), response_format='json')
+        self.assertTrue(response['success'], msg=response.get('msg'))
+
+        # Now classifier info should be available in the context
+        __, context = xblock.get_staff_path_and_context()
+        self.assertIn('classifierset', context)
+        self.assertTrue(isinstance(context['classifierset']['created_at'], datetime.datetime))
+        self.assertEqual(context['classifierset']['algorithm_id'], ALGORITHM_ID)
+        self.assertEqual(context['classifierset']['course_id'], xblock.get_student_item_dict()['course_id'])
+        self.assertEqual(context['classifierset']['item_id'], xblock.get_student_item_dict()['item_id'])
+
+        # Verify that the classifier set appears in the rendered template
+        resp = self.request(xblock, 'render_staff_info', json.dumps({}))
+        self.assertIn("classifier set", resp.decode('utf-8').lower())
+        self.assertIn(ALGORITHM_ID, resp.decode('utf-8'))
+
+    @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
+    @scenario('data/example_based_assessment.xml', user_id='Bob')
+    def test_classifier_set_info_hidden_for_course_staff(self, xblock):
+        xblock.xmodule_runtime = self._create_mock_runtime(
+            xblock.scope_ids.usage_id, True, False, "Bob"
+        )
+        __, context = xblock.get_staff_path_and_context()
+        self.assertNotIn('classifierset', context)
+
     def _create_mock_runtime(self, item_id, is_staff, is_admin, anonymous_user_id):
         mock_runtime = Mock(
             course_id='test_course',