Commit 04b682e5 by Will Daly

Merge pull request #440 from edx/will/staff-debug-classifier-info

Classifier info in staff debug
parents 3cbc8fd8 9e4d35f9
@@ -4,11 +4,9 @@ Public interface for AI training and grading, used by students/course authors.
 import logging
 from django.db import DatabaseError
 from submissions import api as sub_api
-from celery.exceptions import (
-    ChordError, InvalidTaskError, NotConfigured, NotRegistered, QueueNotFound, TaskRevokedError
-)
 from openassessment.assessment.serializers import (
-    deserialize_training_examples, InvalidTrainingExample, InvalidRubric, full_assessment_dict
+    deserialize_training_examples, rubric_from_dict,
+    InvalidTrainingExample, InvalidRubric, full_assessment_dict
 )
 from openassessment.assessment.errors import (
     AITrainingRequestError, AITrainingInternalError, AIGradingRequestError,
@@ -22,6 +20,7 @@ from openassessment.assessment.models import (
 from openassessment.assessment.worker import training as training_tasks
 from openassessment.assessment.worker import grading as grading_tasks

 logger = logging.getLogger(__name__)
@@ -347,3 +346,45 @@ def reschedule_unfinished_tasks(course_id=None, item_id=None, task_type=u"grade"
         ).format(cid=course_id, iid=item_id, ex=ex)
         logger.exception(msg)
         raise AIGradingInternalError(ex)
+
+
+def get_classifier_set_info(rubric_dict, algorithm_id, course_id, item_id):
+    """
+    Get information about the classifier set available for a particular problem.
+    This is the classifier set that would be selected to grade essays for the problem.
+
+    Args:
+        rubric_dict (dict): The serialized rubric model.
+        algorithm_id (unicode): The algorithm to use for classification.
+        course_id (unicode): The course identifier for the current problem.
+        item_id (unicode): The item identifier for the current problem.
+
+    Returns:
+        dict with keys 'created_at', 'algorithm_id', 'course_id', and 'item_id',
+        or None if no classifier set is available.
+        Note that the course ID and item ID may differ from the current problem's
+        if a classifier set from a different problem with a similar rubric
+        is the best available match.
+    """
+    try:
+        rubric = rubric_from_dict(rubric_dict)
+        classifier_set = AIClassifierSet.most_recent_classifier_set(
+            rubric, algorithm_id, course_id, item_id
+        )
+        if classifier_set is not None:
+            return {
+                'created_at': classifier_set.created_at,
+                'algorithm_id': classifier_set.algorithm_id,
+                'course_id': classifier_set.course_id,
+                'item_id': classifier_set.item_id
+            }
+        else:
+            return None
+    except InvalidRubric:
+        msg = u"Could not retrieve classifier set info: the rubric definition was not valid."
+        logger.exception(msg)
+        raise AIGradingRequestError(msg)
+    except DatabaseError as ex:
+        msg = u"An unexpected error occurred while retrieving classifier set info: {ex}".format(ex=ex)
+        logger.exception(msg)
+        raise AIGradingInternalError(msg)
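
For reference, a minimal sketch (not part of this diff) of how a caller might consume the new API function; the `classifier_summary` helper and the `u'ease'` algorithm ID are illustrative assumptions, while the exceptions and return shape match the function above:

```python
from openassessment.assessment.api import ai as ai_api
from openassessment.assessment.errors import (
    AIGradingRequestError, AIGradingInternalError
)

def classifier_summary(rubric_dict, course_id, item_id):
    """Summarize the classifier set that would grade this problem, if any."""
    try:
        info = ai_api.get_classifier_set_info(rubric_dict, u'ease', course_id, item_id)
    except AIGradingRequestError:
        return u"Invalid rubric definition"
    except AIGradingInternalError:
        return u"Could not look up classifier set info"
    if info is None:
        return u"No classifiers trained yet"
    return u"Classifiers trained at {created} by {algo}".format(
        created=info['created_at'], algo=info['algorithm_id']
    )
```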
@@ -3,8 +3,9 @@ Errors related to AI assessment.
 """
 from celery.exceptions import InvalidTaskError, NotConfigured, NotRegistered, QueueNotFound
+from socket import error as socket_error

-ANTICIPATED_CELERY_ERRORS = (InvalidTaskError, NotConfigured, NotRegistered, QueueNotFound)
+ANTICIPATED_CELERY_ERRORS = (InvalidTaskError, NotConfigured, NotRegistered, QueueNotFound, socket_error)

 class AIError(Exception):
     """
@@ -52,4 +53,4 @@ class AIReschedulingInternalError(AIError):
     """
     An unexpected error occurred while using the AI assessment API.
     """
-    pass
\ No newline at end of file
+    pass
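
`ANTICIPATED_CELERY_ERRORS` is a tuple so it can serve as a single `except` clause at task-scheduling call sites; adding `socket_error` means broker connection failures are now treated as anticipated rather than unexpected. A sketch of the intended pattern, assuming a `train_classifiers` Celery task like the one imported in the API module above (the wrapper function itself is hypothetical):

```python
from openassessment.assessment.errors.ai import (
    ANTICIPATED_CELERY_ERRORS, AITrainingRequestError
)
from openassessment.assessment.worker import training as training_tasks

def schedule_training_task(workflow_uuid):
    """Kick off a training task, translating known scheduling failures."""
    try:
        training_tasks.train_classifiers.apply_async(args=[workflow_uuid])
    except ANTICIPATED_CELERY_ERRORS as ex:
        # Misconfiguration and connectivity problems (now including
        # socket.error) are expected failure modes, not programming errors.
        raise AITrainingRequestError(ex)
```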
@@ -4,18 +4,15 @@ Database models for AI assessment.
 from uuid import uuid4
 import json
 import logging
-import itertools
 from django.conf import settings
 from django.core.files.base import ContentFile
 from django.core.cache import cache
 from django.db import models, transaction, DatabaseError
 from django.utils.timezone import now
-from django.core.exceptions import ObjectDoesNotExist
 from django_extensions.db.fields import UUIDField
 from dogapi import dog_stats_api
 from submissions import api as sub_api
 from openassessment.assessment.serializers import rubric_from_dict
-from openassessment.assessment.errors.ai import AIReschedulingInternalError
 from .base import Rubric, Criterion, Assessment, AssessmentPart
 from .training import TrainingExample
...@@ -177,6 +174,84 @@ class AIClassifierSet(models.Model): ...@@ -177,6 +174,84 @@ class AIClassifierSet(models.Model):
return classifier_set return classifier_set
@classmethod
def most_recent_classifier_set(cls, rubric, algorithm_id, course_id, item_id):
"""
Finds the most relevant classifier set based on the following line of succession:
1 -- Classifier sets with the same COURSE, ITEM, RUBRIC *content* hash, and ALGORITHM
- Newest first. If none exist...
2 -- Classifier sets with the same COURSE, ITEM, and RUBRIC *structure* hash, and ALGORITHM.
- Newest first. If none exist...
3 -- The newest classifier set with the same RUBRIC and ALGORITHM
- Newest first. If none exist...
4 -- Do no assignment and return False
Case #1 is ideal: we get a classifier set trained for the rubric as currently defined.
Case #2 handles when a course author makes a cosmetic change to a rubric after training.
We don't want to stop grading students because an author fixed a typo!
Case #3 handles problems that are duplicated, such as the default problem prompt.
If we've already trained classifiers for the identical rubric somewhere else,
then the author can use them to test out the feature immediately.
Case #4: Someone will need to schedule training; however, we will still accept
student submissions and grade them once training completes.
Args:
rubric (Rubric): The rubric associated with the classifier set.
algorithm_id (unicode): The algorithm used to create the classifier set.
course_id (unicode): The course identifier for the current problem.
item_id (unicode): The item identifier for the current problem.
Returns:
ClassifierSet or None
Raises:
DatabaseError
"""
# List of the parameters we will search for, in order of decreasing priority
search_parameters = [
# Case #1: same course / item / rubric (exact) / algorithm
{
'rubric__content_hash': rubric.content_hash,
'algorithm_id': algorithm_id,
'course_id': course_id,
'item_id': item_id
},
# Case #2: same course / item / rubric (structure only) / algorithm
{
'rubric__structure_hash': rubric.structure_hash, # pylint: disable=E1101
'algorithm_id': algorithm_id,
'course_id': course_id,
'item_id': item_id
},
# Case #3: same rubric (exact) / algorithm
{
'rubric__content_hash': rubric.content_hash,
'algorithm_id': algorithm_id
}
]
# Perform each query, starting with the highest priority
for params in search_parameters:
# Retrieve the most recent classifier set that matches our query
# (rely on implicit ordering in the model definition)
classifier_set_candidates = cls.objects.filter(**params)[:1]
# If we find a classifier set,
# then associate the most recent classifiers with it and return true
if len(classifier_set_candidates) > 0:
return classifier_set_candidates[0]
# If we get to this point, no classifiers exist with this rubric and algorithm.
return None
@property @property
def classifiers_dict(self): def classifiers_dict(self):
""" """
@@ -564,27 +639,7 @@ class AIGradingWorkflow(AIWorkflow):

     def assign_most_recent_classifier_set(self):
         """
-        Finds the most relevant classifier set based on the following line of succession:
-
-        1 -- Classifier sets with the same COURSE, ITEM, RUBRIC *content* hash, and ALGORITHM
-             - Newest first. If none exist...
-        2 -- Classifier sets with the same COURSE, ITEM, and RUBRIC *structure* hash, and ALGORITHM.
-             - Newest first. If none exist...
-        3 -- The newest classifier set with the same RUBRIC and ALGORITHM
-             - Newest first. If none exist...
-        4 -- Do no assignment and return False
-
-        Case #1 is ideal: we get a classifier set trained for the rubric as currently defined.
-
-        Case #2 handles when a course author makes a cosmetic change to a rubric after training.
-        We don't want to stop grading students because an author fixed a typo!
-
-        Case #3 handles problems that are duplicated, such as the default problem prompt.
-        If we've already trained classifiers for the identical rubric somewhere else,
-        then the author can use them to test out the feature immediately.
-
-        Case #4: Someone will need to schedule training; however, we will still accept
-        student submissions and grade them once training completes.
+        Find the most recent classifier set and assign it to this workflow.

         Returns:
             (bool) indicates whether or not classifiers were able to be assigned to the AIGradingWorkflow
@@ -592,47 +647,13 @@ class AIGradingWorkflow(AIWorkflow):

         Raises:
             DatabaseError
         """
-        # List of the parameters we will search for, in order of decreasing priority
-        search_parameters = [
-            # Case #1: same course / item / rubric (exact) / algorithm
-            {
-                'rubric__content_hash': self.rubric.content_hash,
-                'algorithm_id': self.algorithm_id,
-                'course_id': self.course_id,
-                'item_id': self.item_id
-            },
-
-            # Case #2: same course / item / rubric (structure only) / algorithm
-            {
-                'rubric__structure_hash': self.rubric.structure_hash,  # pylint: disable=E1101
-                'algorithm_id': self.algorithm_id,
-                'course_id': self.course_id,
-                'item_id': self.item_id
-            },
-
-            # Case #3: same rubric (exact) / algorithm
-            {
-                'rubric__content_hash': self.rubric.content_hash,
-                'algorithm_id': self.algorithm_id
-            }
-        ]
-
-        # Perform each query, starting with the highest priority
-        for params in search_parameters:
-
-            # Retrieve the most recent classifier set that matches our query
-            # (rely on implicit ordering in the model definition)
-            classifier_set_candidates = AIClassifierSet.objects.filter(**params)[:1]
-
-            # If we find a classifier set,
-            # then associate the most recent classifiers with it and return true
-            if len(classifier_set_candidates) > 0:
-                self.classifier_set = classifier_set_candidates[0]
-                self.save()
-                return True
-
-        # If we get to this point, no classifiers exist with this rubric and algorithm.
-        return False
+        classifier_set = AIClassifierSet.most_recent_classifier_set(
+            self.rubric, self.algorithm_id, self.course_id, self.item_id
+        )
+        if classifier_set is not None:
+            self.classifier_set = classifier_set
+            self.save()
+        return classifier_set is not None

     @classmethod
     @transaction.commit_on_success
...
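
A sketch of how the slimmed-down workflow method reads at its call site during grading; fetching the workflow by UUID mirrors the grading API, but the `grade_essay` task name is an assumption:

```python
from openassessment.assessment.models import AIGradingWorkflow
from openassessment.assessment.worker import grading as grading_tasks

def start_grading_if_possible(workflow_uuid):
    """Grade now if classifiers exist; otherwise leave the workflow pending."""
    workflow = AIGradingWorkflow.objects.get(uuid=workflow_uuid)
    if workflow.assign_most_recent_classifier_set():
        # Classifiers are available: schedule the grading task immediately.
        grading_tasks.grade_essay.apply_async(args=[workflow.uuid])
    # If not, the submission is still accepted and will be graded
    # once training completes.
```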
@@ -5,17 +5,15 @@ Tests for AI assessment.
 import copy
 import mock
 from nose.tools import raises
-from celery.exceptions import NotConfigured, InvalidTaskError
+from celery.exceptions import NotConfigured
 from django.db import DatabaseError
 from django.test.utils import override_settings
 from openassessment.test_utils import CacheResetTest
 from submissions import api as sub_api
 from openassessment.assessment.api import ai as ai_api
 from openassessment.assessment.models import (
     AITrainingWorkflow, AIGradingWorkflow, AIClassifierSet, Assessment
 )
 from openassessment.assessment.models import AITrainingWorkflow, AIGradingWorkflow, AIClassifierSet
 from openassessment.assessment.worker.algorithm import AIAlgorithm, AIAlgorithmError
 from openassessment.assessment.serializers import rubric_from_dict
@@ -531,3 +529,68 @@ class AIAutomaticGradingTest(CacheResetTest):
                 return False
             except StopIteration:
                 return True
+
+
+class AIClassifierInfoTest(CacheResetTest):
+    """
+    Tests for retrieving info about classifier sets.
+    """
+
+    @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
+    def test_no_classifier_set(self):
+        classifier_info = ai_api.get_classifier_set_info(
+            RUBRIC, ALGORITHM_ID, 'test_course', 'test_item'
+        )
+        self.assertIs(classifier_info, None)
+
+    @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
+    def test_classifier_set_info(self):
+        workflow_uuid = ai_api.train_classifiers(
+            RUBRIC, EXAMPLES, 'test_course', 'test_item', ALGORITHM_ID
+        )
+        classifier_info = ai_api.get_classifier_set_info(
+            RUBRIC, ALGORITHM_ID, 'test_course', 'test_item'
+        )
+
+        # Retrieve the classifier set so we can get its actual creation date
+        workflow = AITrainingWorkflow.objects.get(uuid=workflow_uuid)
+        classifier_set = workflow.classifier_set
+        expected_info = {
+            'created_at': classifier_set.created_at,
+            'algorithm_id': ALGORITHM_ID,
+            'course_id': 'test_course',
+            'item_id': 'test_item'
+        }
+        self.assertEqual(classifier_info, expected_info)
+
+    @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
+    def test_multiple_classifier_sets(self):
+        # Train multiple classifiers
+        ai_api.train_classifiers(
+            RUBRIC, EXAMPLES, 'test_course', 'test_item', ALGORITHM_ID
+        )
+        second_uuid = ai_api.train_classifiers(
+            RUBRIC, EXAMPLES, 'test_course', 'test_item', ALGORITHM_ID
+        )
+
+        # Expect that we get the info for the second classifier
+        classifier_info = ai_api.get_classifier_set_info(
+            RUBRIC, ALGORITHM_ID, 'test_course', 'test_item'
+        )
+        workflow = AITrainingWorkflow.objects.get(uuid=second_uuid)
+        classifier_set = workflow.classifier_set
+        self.assertEqual(classifier_info['created_at'], classifier_set.created_at)
+
+    @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
+    @raises(AIGradingInternalError)
+    @mock.patch.object(AIClassifierSet, 'most_recent_classifier_set')
+    def test_database_error(self, mock_call):
+        mock_call.side_effect = DatabaseError('OH NO!')
+        ai_api.get_classifier_set_info(
+            RUBRIC, ALGORITHM_ID, 'test_course', 'test_item'
+        )
+
+    @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
+    @raises(AIGradingRequestError)
+    def test_invalid_rubric_error(self):
+        invalid_rubric = {}
+        ai_api.get_classifier_set_info(invalid_rubric, ALGORITHM_ID, 'test_course', 'test_item')
@@ -79,6 +79,38 @@
             </div>

             {% if display_schedule_training %}
+            <div class="staff-info__classifierset ui-staff__content__section">
+                {% if classifierset %}
+                <table class="staff-info__classifierset__table" summary="{% trans "Classifier set" %}">
+                    <caption class="title">{% trans "Classifier set" %}</caption>
+                    <thead>
+                        <tr>
+                            <th abbr="Field" scope="col">{% trans "Field" %}</th>
+                            <th abbr="Value" scope="col">{% trans "Value" %}</th>
+                        </tr>
+                    </thead>
+                    <tbody>
+                        <tr>
+                            <td class="value">{% trans "Created at" %}</td>
+                            <td class="value">{{ classifierset.created_at }}</td>
+                        </tr>
+                        <tr>
+                            <td class="value">{% trans "Algorithm ID" %}</td>
+                            <td class="value">{{ classifierset.algorithm_id }}</td>
+                        </tr>
+                        <tr>
+                            <td class="value">{% trans "Course ID" %}</td>
+                            <td class="value">{{ classifierset.course_id }}</td>
+                        </tr>
+                        <tr>
+                            <td class="value">{% trans "Item ID" %}</td>
+                            <td class="value">{{ classifierset.item_id }}</td>
+                        </tr>
+                    </tbody>
+                </table>
+                {% else %}
+                {% trans "No classifiers are available for this problem" %}
+                {% endif %}
+            </div>
+
             <div class="staff-info__status ui-staff__content__section">
                 <a aria-role="button" href="" id="schedule_training" class="action--submit"><span class="copy">{% trans "Schedule Example Based Assessment Training" %}</span></a>
                 <div id="schedule_training_message"></div>
...
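
For reference when reviewing the template, a sketch of the `classifierset` context variable it renders; the dict shape mirrors `get_classifier_set_info`, and the values here are illustrative only:

```python
import datetime
import pytz

context = {
    'classifierset': {
        'created_at': datetime.datetime(2014, 5, 1, tzinfo=pytz.UTC),  # illustrative
        'algorithm_id': u'ease',                                       # illustrative
        'course_id': u'test_course',                                   # illustrative
        'item_id': u'test_item',                                       # illustrative
    }
}
```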
@@ -3,22 +3,78 @@ The Staff Info View mixin renders all the staff-specific information used to
 determine the flow of the problem.
 """
 import copy
+from functools import wraps

 from django.utils.translation import ugettext as _
+from django.utils.translation import ugettext_lazy
 from xblock.core import XBlock

-from openassessment.assessment.errors.ai import AIError, AIGradingInternalError, AITrainingInternalError
+from openassessment.assessment.errors.ai import AIError
 from openassessment.xblock.resolve_dates import DISTANT_PAST, DISTANT_FUTURE
-from openassessment.xblock.data_conversion import create_rubric_dict, convert_training_examples_list_to_dict
+from openassessment.xblock.data_conversion import (
+    create_rubric_dict, convert_training_examples_list_to_dict
+)
 from submissions import api as submission_api
 from openassessment.assessment.api import peer as peer_api
 from openassessment.assessment.api import self as self_api
 from openassessment.assessment.api import ai as ai_api
+
+
+def require_global_admin(error_msg):
+    """
+    Method decorator to restrict access to an XBlock handler
+    to only global staff.
+
+    Args:
+        error_msg (unicode): The error message to display to the user
+            if they do not have sufficient permissions.
+
+    Returns:
+        Decorated function
+    """
+    def _decorator(func):  # pylint: disable=C0111
+        @wraps(func)
+        def _wrapped(xblock, *args, **kwargs):  # pylint: disable=C0111
+            if not xblock.is_admin or xblock.in_studio_preview:
+                return {'success': False, 'msg': unicode(error_msg)}
+            else:
+                return func(xblock, *args, **kwargs)
+        return _wrapped
+    return _decorator
+
+
+def require_course_staff(error_msg):
+    """
+    Method decorator to restrict access to an XBlock render
+    method to only course staff.
+
+    Args:
+        error_msg (unicode): The error message to display to the user
+            if they do not have sufficient permissions.
+
+    Returns:
+        Decorated function
+    """
+    def _decorator(func):  # pylint: disable=C0111
+        @wraps(func)
+        def _wrapped(xblock, *args, **kwargs):  # pylint: disable=C0111
+            if not xblock.is_course_staff or xblock.in_studio_preview:
+                return xblock.render_error(unicode(error_msg))
+            else:
+                return func(xblock, *args, **kwargs)
+        return _wrapped
+    return _decorator
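
A minimal sketch of how the new decorators compose with XBlock handlers, mirroring the real usages later in this diff; the mixin and handler names here are hypothetical:

```python
class ExampleMixin(object):
    @XBlock.json_handler
    @require_global_admin(ugettext_lazy(u"You do not have permission to do this."))
    def example_handler(self, data, suffix=''):
        # Reached only for global staff outside Studio preview; otherwise the
        # decorator short-circuits with {'success': False, 'msg': ...}.
        return {'success': True, 'msg': u''}
```

Note the ordering: `@XBlock.json_handler` is applied last (outermost), so the permission check runs inside the handler plumbing and its error dict is still serialized to JSON for the client.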
 class StaffInfoMixin(object):
+    """
+    Display debug information to course and global staff.
+    """
+
     @XBlock.handler
-    def render_staff_info(self, data, suffix=''):
+    @require_course_staff(ugettext_lazy(u"You do not have permission to access staff information"))
+    def render_staff_info(self, data, suffix=''):  # pylint: disable=W0613
         """
         Template context dictionary for course staff debug panel.
@@ -26,12 +82,6 @@ class StaffInfoMixin(object):
             dict: The template context specific to the course staff debug panel.
         """
-        # If we're not course staff, or in preview mode, return nothing for the
-        # staff info view.
-        if not self.is_course_staff or self.in_studio_preview:
-            return self.render_error(_(
-                u"You do not have permission to access staff information"
-            ))
         path, context = self.get_staff_path_and_context()
         return self.render_assessment(path, context)
@@ -42,6 +92,13 @@ class StaffInfoMixin(object):
         context = {}
         path = 'openassessmentblock/staff_debug/staff_debug.html'

+        student_item = self.get_student_item_dict()
+
+        # We need to display the new-style locations in the course staff
+        # info, even if we're using old-style locations internally,
+        # so course staff can use the locations to delete student state.
+        context['item_id'] = student_item["item_id"]
+
         # Calculate how many students are in each step of the workflow
         status_counts, num_submissions = self.get_workflow_status_counts()
         context['status_counts'] = status_counts
@@ -49,18 +106,21 @@ class StaffInfoMixin(object):
         # Show the schedule training button if example based assessment is
         # configured, and the current user has admin privileges.
-        assessment = self.get_assessment_module('example-based-assessment')
-        context['display_schedule_training'] = self.is_admin and assessment
-
-        # Show the reschedule tasks button if the user is an administrator and
-        # is not in studio preview mode and there exists example based assessment
-        # as part of the problem definition.
-        context['display_reschedule_unfinished_tasks'] = self.is_admin and assessment and not self.in_studio_preview
-
-        # We need to display the new-style locations in the course staff
-        # info, even if we're using old-style locations internally,
-        # so course staff can use the locations to delete student state.
-        context['item_id'] = self.get_student_item_dict()["item_id"]
+        example_based_assessment = self.get_assessment_module('example-based-assessment')
+        display_ai_staff_info = (
+            self.is_admin and
+            bool(example_based_assessment) and
+            not self.in_studio_preview
+        )
+        context['display_schedule_training'] = display_ai_staff_info
+        context['display_reschedule_unfinished_tasks'] = display_ai_staff_info
+        if display_ai_staff_info:
+            context['classifierset'] = ai_api.get_classifier_set_info(
+                create_rubric_dict(self.prompt, self.rubric_criteria),
+                example_based_assessment['algorithm_id'],
+                student_item['course_id'],
+                student_item['item_id']
+            )

         # Include release/due dates for each step in the problem
         context['step_dates'] = list()
@@ -82,13 +142,11 @@ class StaffInfoMixin(object):
         return path, context

     @XBlock.json_handler
-    def schedule_training(self, data, suffix=''):
-        if not self.is_admin or self.in_studio_preview:
-            return {
-                'success': False,
-                'msg': _(u"You do not have permission to schedule training")
-            }
-
+    @require_global_admin(ugettext_lazy(u"You do not have permission to schedule training"))
+    def schedule_training(self, data, suffix=''):  # pylint: disable=W0613
+        """
+        Schedule a new training task for example-based grading.
+        """
         assessment = self.get_assessment_module('example-based-assessment')
         student_item_dict = self.get_student_item_dict()
@@ -105,12 +163,12 @@ class StaffInfoMixin(object):
             return {
                 'success': True,
                 'workflow_uuid': workflow_uuid,
-                'msg': _(u"Training scheduled with new Workflow UUID: {}".format(workflow_uuid))
+                'msg': _(u"Training scheduled with new Workflow UUID: {uuid}".format(uuid=workflow_uuid))
             }
         except AIError as err:
             return {
                 'success': False,
-                'msg': _(u"An error occurred scheduling classifier training {}".format(err))
+                'msg': _(u"An error occurred scheduling classifier training: {error}".format(error=err))
             }
         else:
@@ -120,7 +178,8 @@ class StaffInfoMixin(object):
             }

     @XBlock.handler
-    def render_student_info(self, data, suffix=''):
+    @require_course_staff(ugettext_lazy(u"You do not have permission to access student information."))
+    def render_student_info(self, data, suffix=''):  # pylint: disable=W0613
         """
         Renders all relevant information for a specific student's workflow.
@@ -130,14 +189,6 @@ class StaffInfoMixin(object):

         Must be course staff to render this view.
         """
-        # If request does not come from course staff, return nothing.
-        # This should not be able to happen unless someone attempts to
-        # explicitly invoke this handler.
-        if not self.is_course_staff or self.in_studio_preview:
-            return self.render_error(_(
-                u"You do not have permission to access student information."
-            ))
         path, context = self.get_student_info_path_and_context(data)
         return self.render_assessment(path, context)
@@ -197,7 +248,8 @@ class StaffInfoMixin(object):
         return path, context

     @XBlock.json_handler
-    def reschedule_unfinished_tasks(self, data, suffix=''):
+    @require_global_admin(ugettext_lazy(u"You do not have permission to reschedule tasks."))
+    def reschedule_unfinished_tasks(self, data, suffix=''):  # pylint: disable=W0613
         """
         Wrapper which invokes the API call for rescheduling grading tasks.
@@ -215,14 +267,6 @@ class StaffInfoMixin(object):
             'success': (bool) Indicates whether or not the tasks were rescheduled successfully
             'msg': The response to the server (could be error message or success message)
         """
-        # Verifies permissions after the push of the button is made
-        if not self.is_admin or self.in_studio_preview:
-            return {
-                'success': False,
-                'msg': _(u"You do not have permission to reschedule tasks.")
-            }
-
         # Identifies the course and item that will need to be re-run
         student_item_dict = self.get_student_item_dict()
         course_id = student_item_dict.get('course_id')
...
@@ -84,11 +84,11 @@
     }

     // UI - status (table)
-    .staff-info__status {
+    .staff-info__status, .staff-info__classifierset {
     }

-    .staff-info__status__table {
+    .staff-info__status__table, .staff-info__classifierset__table {
         @extend %copy-3;
         border-radius: ($baseline-v/10);
         color: $copy-staff-color;
@@ -132,4 +132,5 @@
         }
     }
 }
 # coding=utf-8
 from collections import namedtuple
+import pytz
 import json
+import datetime
 from mock import Mock, patch
 from django.test.utils import override_settings

 from openassessment.assessment.api import peer as peer_api
 from openassessment.assessment.api import self as self_api
 from openassessment.assessment.api import ai as ai_api
 from openassessment.workflow import api as workflow_api
-from openassessment.assessment.errors.ai import AIError, AIGradingInternalError, AITrainingInternalError
+from openassessment.assessment.errors.ai import AIError, AIGradingInternalError
 from submissions import api as sub_api
 from openassessment.xblock.test.base import scenario, XBlockHandlerTestCase

-# Test dependency on Stub AI Algorithm configuration
-from openassessment.assessment.test.test_ai import (
-    ALGORITHM_ID, AI_ALGORITHMS, train_classifiers
-)
+ALGORITHM_ID = 'fake'
+
+AI_ALGORITHMS = {
+    'fake': 'openassessment.assessment.worker.algorithm.FakeAIAlgorithm'
+}

 STUDENT_ITEM = dict(
     student_id="Bob",
@@ -32,45 +34,6 @@ ASSESSMENT_DICT = {
     },
 }
-EXAMPLE_BASED_ASSESSMENT = {
-    "name": "example-based-assessment",
-    "algorithm_id": "1",
-    "examples": [
-        {
-            "answer": "Foo",
-            "options_selected": [
-                {
-                    "criterion": "Ideas",
-                    "option": "Fair"
-                },
-                {
-                    "criterion": "Content",
-                    "option": "Good"
-                }
-            ]
-        },
-        {
-            "answer": "Bar",
-            "options_selected": [
-                {
-                    "criterion": "Ideas",
-                    "option": "Poor"
-                },
-                {
-                    "criterion": "Content",
-                    "option": "Good"
-                }
-            ]
-        }
-    ]
-}
-
-# Rubric-specific classifier score override
-CLASSIFIER_SCORE_OVERRIDES = {
-    u"Ideas": {'score_override': 1},
-    u"Content": {'score_override': 2}
-}

 class TestCourseStaff(XBlockHandlerTestCase):
     """
@@ -261,10 +224,10 @@ class TestCourseStaff(XBlockHandlerTestCase):
     @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
     @scenario('data/example_based_assessment.xml', user_id='Bob')
     def test_staff_debug_student_info_full_workflow(self, xblock):
-        # Train classifiers.
-        example_based_assessment = xblock.get_assessment_module('example-based-assessment')
-        example_based_assessment['algorithm_id'] = ALGORITHM_ID
-        train_classifiers({'criteria': xblock.rubric_criteria}, CLASSIFIER_SCORE_OVERRIDES)
+        # Simulate that we are course staff
+        xblock.xmodule_runtime = self._create_mock_runtime(
+            xblock.scope_ids.usage_id, True, False, "Bob"
+        )

         # Commonly chosen options for assessments
         options_selected = {
@@ -272,13 +235,9 @@ class TestCourseStaff(XBlockHandlerTestCase):
             "Content": "Poor",
         }

-        # Simulate that we are course staff
-        xblock.xmodule_runtime = self._create_mock_runtime(
-            xblock.scope_ids.usage_id, True, False, "Bob"
-        )
-
         bob_item = STUDENT_ITEM.copy()
         bob_item["item_id"] = xblock.scope_ids.usage_id
+
         # Create a submission for Bob, and corresponding workflow.
         submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
         peer_api.on_start(submission["uuid"])
@@ -318,7 +277,6 @@ class TestCourseStaff(XBlockHandlerTestCase):
     @scenario('data/example_based_assessment.xml', user_id='Bob')
     def test_display_schedule_training(self, xblock):
-        xblock.rubric_assessments.append(EXAMPLE_BASED_ASSESSMENT)
         xblock.xmodule_runtime = self._create_mock_runtime(
             xblock.scope_ids.usage_id, True, True, "Bob"
         )
@@ -329,20 +287,17 @@ class TestCourseStaff(XBlockHandlerTestCase):
     @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
     @scenario('data/example_based_assessment.xml', user_id='Bob')
     def test_schedule_training(self, xblock):
-        example_based_assessment = xblock.get_assessment_module('example-based-assessment')
-        example_based_assessment['algorithm_id'] = ALGORITHM_ID
-        train_classifiers({'criteria': xblock.rubric_criteria}, CLASSIFIER_SCORE_OVERRIDES)
-        xblock.rubric_assessments.append(EXAMPLE_BASED_ASSESSMENT)
         xblock.xmodule_runtime = self._create_mock_runtime(
             xblock.scope_ids.usage_id, True, True, "Bob"
         )
+
+        # Schedule training
         response = self.request(xblock, 'schedule_training', json.dumps({}), response_format='json')
         self.assertTrue(response['success'], msg=response.get('msg'))
         self.assertTrue('workflow_uuid' in response)

     @scenario('data/example_based_assessment.xml', user_id='Bob')
     def test_not_displaying_schedule_training(self, xblock):
-        xblock.rubric_assessments.append(EXAMPLE_BASED_ASSESSMENT)
         xblock.xmodule_runtime = self._create_mock_runtime(
             xblock.scope_ids.usage_id, True, False, "Bob"
         )
@@ -363,7 +318,6 @@ class TestCourseStaff(XBlockHandlerTestCase):
     @scenario('data/example_based_assessment.xml', user_id='Bob')
     def test_admin_schedule_training_error(self, xblock, mock_api):
         mock_api.side_effect = AIError("Oh no!")
-        xblock.rubric_assessments.append(EXAMPLE_BASED_ASSESSMENT)
         xblock.xmodule_runtime = self._create_mock_runtime(
             xblock.scope_ids.usage_id, True, True, "Bob"
         )
@@ -373,7 +327,6 @@ class TestCourseStaff(XBlockHandlerTestCase):
     @scenario('data/example_based_assessment.xml', user_id='Bob')
     def test_display_reschedule_unfinished_grading_tasks(self, xblock):
-        xblock.rubric_assessments.append(EXAMPLE_BASED_ASSESSMENT)
         xblock.xmodule_runtime = self._create_mock_runtime(
             xblock.scope_ids.usage_id, True, True, "Bob"
         )
@@ -425,6 +378,45 @@ class TestCourseStaff(XBlockHandlerTestCase):
         self.assertFalse(response['success'])
         self.assertTrue('not configured' in response['msg'])
+
+    @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
+    @scenario('data/example_based_assessment.xml', user_id='Bob')
+    def test_classifier_set_info(self, xblock):
+        xblock.xmodule_runtime = self._create_mock_runtime(
+            xblock.scope_ids.usage_id, True, True, "Bob"
+        )
+
+        # Initially, there should be no classifier set info
+        # because we haven't trained any classifiers for this problem
+        __, context = xblock.get_staff_path_and_context()
+        self.assertIn('classifierset', context)
+        self.assertIs(context['classifierset'], None)
+
+        # Schedule a training task, which should create classifiers
+        response = self.request(xblock, 'schedule_training', json.dumps({}), response_format='json')
+        self.assertTrue(response['success'], msg=response.get('msg'))
+
+        # Now classifier info should be available in the context
+        __, context = xblock.get_staff_path_and_context()
+        self.assertIn('classifierset', context)
+        self.assertTrue(isinstance(context['classifierset']['created_at'], datetime.datetime))
+        self.assertEqual(context['classifierset']['algorithm_id'], ALGORITHM_ID)
+        self.assertEqual(context['classifierset']['course_id'], xblock.get_student_item_dict()['course_id'])
+        self.assertEqual(context['classifierset']['item_id'], xblock.get_student_item_dict()['item_id'])
+
+        # Verify that the classifier set appears in the rendered template
+        resp = self.request(xblock, 'render_staff_info', json.dumps({}))
+        self.assertIn("classifier set", resp.decode('utf-8').lower())
+        self.assertIn(ALGORITHM_ID, resp.decode('utf-8'))
+
+    @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
+    @scenario('data/example_based_assessment.xml', user_id='Bob')
+    def test_classifier_set_info_hidden_for_course_staff(self, xblock):
+        xblock.xmodule_runtime = self._create_mock_runtime(
+            xblock.scope_ids.usage_id, True, False, "Bob"
+        )
+        __, context = xblock.get_staff_path_and_context()
+        self.assertNotIn('classifierset', context)

     def _create_mock_runtime(self, item_id, is_staff, is_admin, anonymous_user_id):
         mock_runtime = Mock(
             course_id='test_course',
...