Commit 9802ff82 by gradyward

Added in regrading functionality

Also added automatic grading after training completes

Many code review edits
parent d9f28950
......@@ -128,9 +128,8 @@ class AIGradingWorkflowAdmin(admin.ModelAdmin):
class AITrainingWorkflowAdmin(admin.ModelAdmin):
list_display = ('uuid',)
# TODO -- update to include student/item/course id
search_fields = ('uuid',)
readonly_fields = ('uuid',)
search_fields = ('uuid', 'course_id', 'item_id',)
readonly_fields = ('uuid', 'course_id', 'item_id',)
class AIClassifierInline(admin.TabularInline):
......
......@@ -5,12 +5,11 @@ import logging
from django.db import DatabaseError
from submissions import api as sub_api
from openassessment.assessment.serializers import (
deserialize_training_examples, InvalidTrainingExample, InvalidRubric,
full_assessment_dict
deserialize_training_examples, InvalidTrainingExample, InvalidRubric, full_assessment_dict
)
from openassessment.assessment.errors import (
AITrainingRequestError, AITrainingInternalError,
AIGradingRequestError, AIGradingInternalError
AIGradingRequestError, AIGradingInternalError, AIError
)
from openassessment.assessment.models import (
Assessment, AITrainingWorkflow, AIGradingWorkflow,
......@@ -206,10 +205,12 @@ def get_latest_assessment(submission_uuid):
return None
def train_classifiers(rubric_dict, examples, algorithm_id):
def train_classifiers(rubric_dict, examples, course_id, item_id, algorithm_id):
"""
Schedule a task to train classifiers.
All training examples must match the rubric!
After training of classifiers completes successfully, all AIGradingWorkflows that are incomplete will be
automatically rescheduled to complete.
Args:
rubric_dict (dict): The rubric used to assess the classifiers.
......@@ -225,6 +226,7 @@ def train_classifiers(rubric_dict, examples, algorithm_id):
Raises:
AITrainingRequestError
AITrainingInternalError
AIGradingInternalError
Example usage:
>>> train_classifiers(rubric, examples, course_id, item_id, 'ease')
......@@ -240,7 +242,7 @@ def train_classifiers(rubric_dict, examples, algorithm_id):
# Create the workflow model
try:
workflow = AITrainingWorkflow.start_workflow(examples, algorithm_id)
workflow = AITrainingWorkflow.start_workflow(examples, course_id, item_id, algorithm_id)
except NoTrainingExamples as ex:
raise AITrainingRequestError(ex)
except:
......@@ -258,31 +260,70 @@ def train_classifiers(rubric_dict, examples, algorithm_id):
u"Scheduled training task for the AI training workflow with UUID {workflow_uuid} "
u"(algorithm ID = {algorithm_id})"
).format(workflow_uuid=workflow.uuid, algorithm_id=algorithm_id))
except:
except (AITrainingInternalError, AITrainingRequestError):
msg = (
u"An unexpected error occurred while scheduling "
u"the task for training workflow with UUID {}"
).format(workflow.uuid)
logger.exception(msg)
raise AITrainingInternalError(msg)
except AIGradingInternalError:
# If we have an error that is coming from the rescheduled grading after successful completion:
msg = (
u"An unexpected error occurred while scheduling incomplete grading workflows after "
u"the training task was completed successfully. The course_id and item_id for the failed "
u"grading workflows are course_id={cid}, item_id={iid}."
).format(cid=course_id, iid=item_id)
logger.exception(msg)
raise AIGradingInternalError(msg)
# Return the workflow UUID
return workflow.uuid
def reschedule_unfinished_tasks(course_id=None, item_id=None, task_type=None):
def reschedule_unfinished_tasks(course_id=None, item_id=None, task_type=u"grade"):
"""
Check for unfinished tasks (both grading and training) and reschedule them.
Optionally restrict by course/item ID and task type.
Optionally restrict by course/item ID and task type. Default use case is to
only reschedule the unfinished grade tasks. Applied use case (with button in
staff mixin) is to call without argument, and to reschedule grades only.
Kwargs:
course_id (unicode): Restrict to unfinished tasks in a particular course.
item_id (unicode): Restrict to unfinished tasks for a particular item in a course.
NOTE: if you specify the item ID, you must also specify the course ID.
task_type (unicode): Either "grade" or "train". Restrict to unfinished tasks of this type.
if task_type is specified as None, both training and grading will be rescheduled, in that order.
Raises:
AIGradingInternalError
AITrainingInternalError
AIError
"""
pass
if course_id is None or item_id is None:
msg = u"Rescheduling tasks was not possible because the course_id / item_id was not assigned."
logger.exception(msg)
raise AIError
# Reschedules all of the training tasks
if task_type == u"train" or task_type is None:
try:
training_tasks.reschedule_training_tasks.apply_async(args=[course_id, item_id])
except Exception as ex:
msg = (
u"Rescheduling training tasks for course {cid} and item {iid} failed with exception: {ex}"
).format(cid=course_id, iid=item_id, ex=ex)
logger.exception(msg)
raise AITrainingInternalError(ex)
# Reschedules all of the grading tasks
if task_type == u"grade" or task_type is None:
try:
grading_tasks.reschedule_grading_tasks.apply_async(args=[course_id, item_id])
except Exception as ex:
msg = (
u"Rescheduling grading tasks for course {cid} and item {iid} failed with exception: {ex}"
).format(cid=course_id, iid=item_id, ex=ex)
logger.exception(msg)
raise AIGradingInternalError(ex)
......@@ -30,6 +30,8 @@ def get_grading_task_params(grading_workflow_uuid):
dict with keys:
* essay_text (unicode): The text of the essay submission.
* classifier_set (dict): Maps criterion names to serialized classifiers.
* course_id (unicode): The course ID that the training task is associated with.
* item_id (unicode): Identifies the item that the AI will be training to grade.
* algorithm_id (unicode): ID of the algorithm used to perform training.
Raises:
......@@ -66,6 +68,8 @@ def get_grading_task_params(grading_workflow_uuid):
return {
'essay_text': workflow.essay_text,
'classifier_set': classifier_set.classifiers_dict,
'course_id': workflow.course_id,
'item_id': workflow.item_id,
'algorithm_id': workflow.algorithm_id,
}
except Exception as ex:
......@@ -142,6 +146,8 @@ def get_training_task_params(training_workflow_uuid):
Returns:
dict with keys:
* training_examples (list of dict): The examples used to train the classifiers.
* course_id (unicode): The course ID that the training task is associated with.
* item_id (unicode): Identifies the item that the AI will be training to grade.
* algorithm_id (unicode): The ID of the algorithm to use for training.
Raises:
......@@ -194,7 +200,9 @@ def get_training_task_params(training_workflow_uuid):
return {
'training_examples': returned_examples,
'algorithm_id': workflow.algorithm_id
'algorithm_id': workflow.algorithm_id,
'course_id': workflow.course_id,
'item_id': workflow.item_id
}
except AITrainingWorkflow.DoesNotExist:
msg = (
......
......@@ -3,19 +3,24 @@ Database models for AI assessment.
"""
from uuid import uuid4
import json
import logging
from django.conf import settings
from django.core.files.base import ContentFile
from django.db import models, transaction
from django.db import models, transaction, DatabaseError
from django.utils.timezone import now
from django.core.exceptions import ObjectDoesNotExist
from django_extensions.db.fields import UUIDField
from submissions import api as sub_api
from openassessment.assessment.serializers import rubric_from_dict
from openassessment.assessment.errors.ai import AIError
from .base import Rubric, Criterion, Assessment, AssessmentPart
from .training import TrainingExample
AI_ASSESSMENT_TYPE = "AI"
logger = logging.getLogger(__name__)
class IncompleteClassifierSet(Exception):
"""
......@@ -251,6 +256,13 @@ class AIWorkflow(models.Model):
# Unique identifier used to track this workflow
uuid = UUIDField(version=1, db_index=True)
# Course Entity and Item Discriminator
# Though these items are duplicated in the database tables for the submissions app,
# and every workflow has a reference to a submission entry, this is okay because
# submissions are immutable.
course_id = models.CharField(max_length=40, db_index=True)
item_id = models.CharField(max_length=128, db_index=True)
# Timestamps
# The task is *scheduled* as soon as a client asks the API to
# train classifiers.
......@@ -296,6 +308,42 @@ class AIWorkflow(models.Model):
self.completed_at = now()
self.save()
@classmethod
def get_incomplete_workflows(cls, course_id, item_id):
    """
    Gets all incomplete grading workflows for a given course and item.

    Args:
        course_id (unicode): Uniquely identifies the course
        item_id (unicode): The discriminator for the item we are looking for

    Yields:
        All incomplete workflows for this item, as a delayed "stream"

    Raises:
        AIError: If a workflow disappears between the initial query and the
            per-UUID fetch, or if a database error occurs. (The underlying
            ``DoesNotExist`` / ``DatabaseError`` is wrapped, never raised
            directly.)
    """
    # Snapshot the UUIDs of all matching incomplete workflows up front, so
    # the generator is not tied to a long-lived open queryset.
    grade_workflow_uuids = [
        wflow['uuid'] for wflow in cls.objects.filter(
            course_id=course_id, item_id=item_id, completed_at__isnull=True
        ).values('uuid')
    ]

    # Yield each workflow lazily, refetching by UUID so consumers always
    # receive a fresh model instance.
    for workflow_uuid in grade_workflow_uuids:
        try:
            grading_workflow = cls.objects.get(uuid=workflow_uuid)
            yield grading_workflow
        except (cls.DoesNotExist, ObjectDoesNotExist) as ex:
            msg = u"No workflow with uuid '{}' could be found within the system.".format(workflow_uuid)
            logger.exception(msg)
            raise AIError(ex)
        except DatabaseError as ex:
            # Report database failures distinctly so they are not
            # misdiagnosed as a missing workflow.
            msg = u"A database error occurred while retrieving the workflow with uuid '{}'.".format(workflow_uuid)
            logger.exception(msg)
            raise AIError(ex)
class AITrainingWorkflow(AIWorkflow):
"""
......@@ -318,12 +366,14 @@ class AITrainingWorkflow(AIWorkflow):
@classmethod
@transaction.commit_on_success
def start_workflow(cls, examples, algorithm_id):
def start_workflow(cls, examples, course_id, item_id, algorithm_id):
"""
Start a workflow to track a training task.
Args:
examples (list of TrainingExample): The training examples used to create the classifiers.
course_id (unicode): The ID for the course that the training workflow is associated with.
item_id (unicode): The ID for the item that the training workflow is training to assess.
algorithm_id (unicode): The ID of the algorithm to use for training.
Returns:
......@@ -336,7 +386,7 @@ class AITrainingWorkflow(AIWorkflow):
if len(examples) == 0:
raise NoTrainingExamples()
workflow = AITrainingWorkflow.objects.create(algorithm_id=algorithm_id)
workflow = AITrainingWorkflow.objects.create(algorithm_id=algorithm_id, item_id=item_id, course_id=course_id)
workflow.training_examples.add(*examples)
workflow.save()
return workflow
......@@ -422,8 +472,6 @@ class AIGradingWorkflow(AIWorkflow):
# associated with one submission, it's safe to duplicate
# this information here from the submissions models.
student_id = models.CharField(max_length=40, db_index=True)
item_id = models.CharField(max_length=128, db_index=True)
course_id = models.CharField(max_length=40, db_index=True)
@classmethod
@transaction.commit_on_success
......
......@@ -3,5 +3,5 @@ Celery looks for tasks in this module,
so import the tasks we want the workers to implement.
"""
# pylint:disable=W0611
from .worker.training import train_classifiers
from .worker.grading import grade_essay
from .worker.training import train_classifiers, reschedule_training_tasks
from .worker.grading import grade_essay, reschedule_grading_tasks
\ No newline at end of file
......@@ -48,12 +48,33 @@ class AIWorkerTrainingTest(CacheResetTest):
Tests for the AI API calls a worker would make when
completing a training task.
"""
COURSE_ID = u"sämplë ċöürsë"
ITEM_ID = u"12231"
ALGORITHM_ID = "test-algorithm"
# Classifier data
# Since this is controlled by the AI algorithm implementation,
# we could put anything here as long as it's JSON-serializable.
CLASSIFIERS = {
u"vøȼȺƀᵾłȺɍɏ": {
'name': u'𝒕𝒆𝒔𝒕 𝒄𝒍𝒂𝒔𝒔𝒊𝒇𝒊𝒆𝒓',
'data': u'Öḧ ḷëẗ ẗḧë ṡüṅ ḅëäẗ ḋöẅṅ üṗöṅ ṁÿ ḟäċë, ṡẗäṛṡ ẗö ḟïḷḷ ṁÿ ḋṛëäṁ"'
},
u"ﻭɼค๓๓คɼ": {
'name': u'𝒕𝒆𝒔𝒕 𝒄𝒍𝒂𝒔𝒔𝒊𝒇𝒊𝒆𝒓',
'data': u"І ам а тѓаvэlэѓ оf ъотЂ тімэ аиↁ ѕрасэ, то ъэ шЂэѓэ І Ђаvэ ъээи"
}
}
def setUp(self):
"""
Create a training workflow in the database.
"""
examples = deserialize_training_examples(EXAMPLES, RUBRIC)
workflow = AITrainingWorkflow.start_workflow(examples, ALGORITHM_ID)
workflow = AITrainingWorkflow.start_workflow(examples, self.COURSE_ID, self.ITEM_ID, self.ALGORITHM_ID)
self.workflow_uuid = workflow.uuid
def test_get_training_task_params(self):
......@@ -204,7 +225,9 @@ class AIWorkerGradingTest(CacheResetTest):
expected_params = {
'essay_text': ANSWER,
'classifier_set': CLASSIFIERS,
'algorithm_id': ALGORITHM_ID
'algorithm_id': ALGORITHM_ID,
'course_id': STUDENT_ITEM.get('course_id'),
'item_id': STUDENT_ITEM.get('item_id')
}
self.assertItemsEqual(params, expected_params)
......
......@@ -90,12 +90,28 @@ class AITrainingTaskTest(CeleryTaskTest):
Tests for the training task executed asynchronously by Celery workers.
"""
COURSE_ID = u"10923"
ITEM_ID = u"12231"
ALGORITHM_ID = u"test-stub"
ERROR_STUB_ALGORITHM_ID = u"error-stub"
UNDEFINED_CLASS_ALGORITHM_ID = u"undefined_class"
UNDEFINED_MODULE_ALGORITHM_ID = u"undefined_module"
AI_ALGORITHMS = {
ALGORITHM_ID: '{module}.StubAIAlgorithm'.format(module=__name__),
ERROR_STUB_ALGORITHM_ID: '{module}.ErrorStubAIAlgorithm'.format(module=__name__),
UNDEFINED_CLASS_ALGORITHM_ID: '{module}.NotDefinedAIAlgorithm'.format(module=__name__),
UNDEFINED_MODULE_ALGORITHM_ID: 'openassessment.not.valid.NotDefinedAIAlgorithm'
}
def setUp(self):
"""
Create a training workflow in the database.
"""
examples = deserialize_training_examples(EXAMPLES, RUBRIC)
workflow = AITrainingWorkflow.start_workflow(examples, ALGORITHM_ID)
workflow = AITrainingWorkflow.start_workflow(examples, self.COURSE_ID, self.ITEM_ID, self.ALGORITHM_ID)
self.workflow_uuid = workflow.uuid
def test_unknown_algorithm(self):
......
......@@ -3,10 +3,12 @@ Asynchronous tasks for grading essays using text classifiers.
"""
from celery import task
from django.db import DatabaseError
from celery.utils.log import get_task_logger
from openassessment.assessment.api import ai_worker as ai_worker_api
from openassessment.assessment.errors import AIError
from openassessment.assessment.errors import AIError, AIGradingInternalError, AIGradingRequestError
from .algorithm import AIAlgorithm, AIAlgorithmError
from openassessment.assessment.models.ai import AIClassifierSet, AIGradingWorkflow
MAX_RETRIES = 2
......@@ -87,3 +89,98 @@ def grade_essay(workflow_uuid):
).format(uuid=workflow_uuid, scores=scores_by_criterion)
logger.exception(msg)
raise grade_essay.retry()
@task(max_retries=MAX_RETRIES)  # pylint: disable=E1102
def reschedule_grading_tasks(course_id, item_id):
    """
    Reschedules all incomplete grading workflows with the specified parameters.

    Every incomplete AIGradingWorkflow for (course_id, item_id) is assigned
    the most recently trained classifier set for its (rubric, algorithm_id)
    pair and then has a grading task scheduled. If one or more workflows
    fail to reschedule, the whole task is retried.

    Args:
        course_id (unicode): The course item that we will be rerunning the rescheduling on.
        item_id (unicode): The item that the rescheduling will be running on

    Raises:
        AIGradingInternalError: If rescheduling failed for one or more
            workflows and the maximum number of celery retries was exceeded.
    """
    # Finds all incomplete grading workflows
    grading_workflows = AIGradingWorkflow.get_incomplete_workflows(course_id, item_id)

    # Notes whether or not one or more operations failed. If they did, the
    # process of rescheduling will be retried.
    failures = 0

    # Maps (rubric, algorithm_id) tuples to completed classifier sets.
    # Memoizes the lookup so repeated queries returning the same value
    # are avoided.
    maintained_classifiers = {}

    # Try to grade all incomplete grading workflows
    for workflow in grading_workflows:
        # We will always go through the process of finding the most recent set of
        # classifiers for an incomplete grading workflow. The rationale for this is
        # that if we are ever rescheduling grading, we likely had classifiers which
        # were not working. This way, we always take the last completed set.

        # Note that this solution will lead to failure if "Train Classifiers" and
        # "Reschedule Grading Tasks" are called in rapid succession. This is part
        # of the reason this button is in the admin view.

        # Tries to find a set of classifiers already memoized for this workflow's
        # (rubric, algorithm_id) description.
        workflow_description = (workflow.rubric, workflow.algorithm_id)
        found_classifiers = maintained_classifiers.get(workflow_description)

        # If no set of classifiers is found, we perform the query to try to find
        # them. We take the most recent and memoize it for future reference.
        if found_classifiers is None:
            try:
                classifier_set_candidates = AIClassifierSet.objects.filter(
                    rubric=workflow.rubric, algorithm_id=workflow.algorithm_id
                ).order_by('-created_at')[:1]
                found_classifiers = classifier_set_candidates[0]
                maintained_classifiers[workflow_description] = found_classifiers
            except IndexError:
                # BUG FIX: logger.log(msg) requires a level as the first
                # argument and raised TypeError; use logger.info instead.
                msg = u"No classifiers yet exist for essay with uuid='{}'".format(workflow.uuid)
                logger.info(msg)
            except DatabaseError:
                msg = (
                    u"A Database error occurred while trying to assign classifiers to an essay with uuid='{id}'"
                ).format(id=workflow.uuid)
                logger.exception(msg)

        if found_classifiers is not None:
            workflow.classifier_set = found_classifiers
            try:
                workflow.save()
                logger.info(
                    (
                        u"Classifiers were successfully assigned to grading workflow with uuid={}"
                    ).format(workflow.uuid)
                )
            except DatabaseError:
                msg = (
                    u"A Database error occurred while trying to save classifiers to an essay with uuid='{id}'"
                ).format(id=workflow.uuid)
                logger.exception(msg)

        # Now we should (unless we had an exception above) have a classifier set.
        # Try to schedule the grading
        try:
            grade_essay.apply_async(args=[workflow.uuid])
            logger.info(
                u"Rescheduling of grading was successful for grading workflow with uuid='{}'".format(workflow.uuid)
            )
        except (AIGradingInternalError, AIGradingRequestError, AIError) as ex:
            msg = (
                u"An error occurred while try to grade essay with uuid='{id}': {ex}"
            ).format(id=workflow.uuid, ex=ex)
            logger.exception(msg)
            failures += 1

    # If one or more of these failed, we want to retry rescheduling. Note that this
    # retry is executed in such a way that if it fails, an AIGradingInternalError
    # will be raised with the number of failures on the last attempt (i.e. the
    # total number of workflows matching these criteria that still have left to
    # be graded).
    if failures > 0:
        try:
            raise AIGradingInternalError(
                u"In an attempt to reschedule grading workflows, there were {} failures.".format(failures)
            )
        except AIGradingInternalError:
            raise reschedule_grading_tasks.retry()
\ No newline at end of file
......@@ -7,6 +7,9 @@ from celery.utils.log import get_task_logger
from openassessment.assessment.api import ai_worker as ai_worker_api
from openassessment.assessment.errors import AIError
from .algorithm import AIAlgorithm, AIAlgorithmError
from .grading import reschedule_grading_tasks
from openassessment.assessment.errors.ai import AIGradingInternalError
from openassessment.assessment.models.ai import AITrainingWorkflow
MAX_RETRIES = 2
......@@ -58,6 +61,8 @@ def train_classifiers(workflow_uuid):
params = ai_worker_api.get_training_task_params(workflow_uuid)
examples = params['training_examples']
algorithm_id = params['algorithm_id']
course_id = params['course_id']
item_id = params['item_id']
except (AIError, KeyError):
msg = (
u"An error occurred while retrieving AI training "
......@@ -121,6 +126,45 @@ def train_classifiers(workflow_uuid):
logger.exception(msg)
raise train_classifiers.retry()
# Upon successful completion of the creation of classifiers, we will try to automatically schedule any
# grading tasks for the same item.
try:
reschedule_grading_tasks.apply_async(args=[course_id, item_id])
except AIGradingInternalError as ex:
msg = (
u"An error occured while trying to regrade all ungraded assignments"
u"after classifiers were trained successfully: {}"
).format(ex)
logger.exception(msg)
# Here we don't retry, because they will already retry once in the grading task.
raise
# BUG FIX: the directive was written "#pylint: disable E=1102", which pylint
# does not recognize, so the warning was never suppressed.
@task(max_retries=MAX_RETRIES)  # pylint: disable=E1102
def reschedule_training_tasks(course_id, item_id):
    """
    Reschedules all incomplete training tasks

    Args:
        course_id (unicode): The course that we are going to search for unfinished training workflows
        item_id (unicode): The specific item within that course that we will reschedule unfinished workflows for

    Raises:
        The celery retry exception, if scheduling any workflow fails and the
        maximum number of retries is exceeded.
    """
    # Run a query to find the incomplete training workflows
    training_workflows = AITrainingWorkflow.get_incomplete_workflows(course_id, item_id)

    # Tries to train every workflow that has not completed.
    for target_workflow in training_workflows:
        try:
            train_classifiers.apply_async(args=[target_workflow.uuid])
            logger.info(
                u"Rescheduling of training was successful for workflow with uuid{}".format(target_workflow.uuid)
            )
        except Exception:
            msg = (
                u"An unexpected error occurred while scheduling the task for training workflow with UUID {}"
            ).format(target_workflow.uuid)
            logger.exception(msg)
            raise reschedule_training_tasks.retry()
def _examples_by_criterion(examples):
"""
......
......@@ -85,6 +85,13 @@
</div>
{% endif %}
{% if display_reschedule_unfinished_tasks %}
<div class="staff-info__status ui-staff__content__section">
<a aria-role="button" href="" id="reschedule_unfinished_tasks" class="action--submit"><span class="copy">{% trans "Reschedule All Unfinished Example Based Assessment Grading Tasks" %}</span></a>
<div id="reschedule_unfinished_tasks_message"></div>
</div>
{% endif %}
<div class="staff-info__student ui-staff__content__section">
<div class="wrapper--input" class="staff-info__student__form">
<form id="openassessment_student_info_form">
......
......@@ -6,7 +6,7 @@ import copy
from django.utils.translation import ugettext as _
from xblock.core import XBlock
from openassessment.assessment.errors.ai import AIError
from openassessment.assessment.errors.ai import AIError, AIGradingInternalError, AITrainingInternalError
from openassessment.xblock.resolve_dates import DISTANT_PAST, DISTANT_FUTURE
from openassessment.xblock.data_conversion import create_rubric_dict, convert_training_examples_list_to_dict
from submissions import api as submission_api
......@@ -52,6 +52,11 @@ class StaffInfoMixin(object):
assessment = self.get_assessment_module('example-based-assessment')
context['display_schedule_training'] = self.is_admin and assessment
# Show the reschedule tasks button if the user is an administrator and
# is not in studio preview mode and there exists example based assessment
# as part of the problem definition.
context['display_reschedule_unfinished_tasks'] = self.is_admin and assessment and not self.in_studio_preview
# We need to display the new-style locations in the course staff
# info, even if we're using old-style locations internally,
# so course staff can use the locations to delete student state.
......@@ -82,12 +87,16 @@ class StaffInfoMixin(object):
}
assessment = self.get_assessment_module('example-based-assessment')
student_item_dict = self.get_student_item_dict()
if assessment:
examples = assessment["examples"]
try:
workflow_uuid = ai_api.train_classifiers(
create_rubric_dict(self.prompt, self.rubric_criteria),
convert_training_examples_list_to_dict(examples),
student_item_dict.get('course_id'),
student_item_dict.get('item_id'),
assessment["algorithm_id"]
)
return {
......@@ -179,3 +188,49 @@ class StaffInfoMixin(object):
path = 'openassessmentblock/staff_debug/student_info.html'
return path, context
@XBlock.json_handler
def reschedule_unfinished_tasks(self, data, suffix=''):
    """
    Wrapper which invokes the API call for rescheduling grading tasks.

    Checks that the requester is an administrator that is not in studio-preview mode,
    and that the api-call returns without error.  If it returns with an error, (any
    exception), the appropriate JSON serializable dictionary with success conditions
    is passed back.

    Args:
        data (not used)
        suffix (not used)

    Return:
        Json serializable dict with the following elements:
            'success': (bool) Indicates whether or not the tasks were rescheduled successfully
            'msg': The response to the server (could be error message or success message)
    """
    # Verifies permissions after the push of the button is made
    if not self.is_admin or self.in_studio_preview:
        return {
            'success': False,
            'msg': _(u"You do not have permission to reschedule tasks.")
        }

    # Identifies the course and item that will need to be re-run
    student_item_dict = self.get_student_item_dict()
    course_id = student_item_dict.get('course_id')
    item_id = student_item_dict.get('item_id')

    try:
        # Note that we only want to reschedule grading tasks, but maintain the
        # potential functionality within the API to also reschedule training tasks.
        ai_api.reschedule_unfinished_tasks(course_id=course_id, item_id=item_id, task_type=u"grade")
        return {
            'success': True,
            'msg': _(u"All AI tasks associated with this item have been rescheduled successfully.")
        }
    except (AIGradingInternalError, AITrainingInternalError, AIError) as ex:
        # BUG FIX: format AFTER translation — formatting inside _() made the
        # msgid vary per error, so it could never match a translation catalog.
        return {
            'success': False,
            'msg': _(u"An error occurred while rescheduling tasks: {}").format(ex)
        }
\ No newline at end of file
......@@ -27,6 +27,15 @@ describe("OpenAssessment.StaffInfoView", function() {
}).promise();
};
this.rescheduleUnfinishedTasks = function() {
var server = this;
return $.Deferred(function(defer) {
defer.resolveWith(server, [server.data]);
}).promise();
};
this.data = {};
};
// Stub base view
......@@ -93,6 +102,7 @@ describe("OpenAssessment.StaffInfoView", function() {
expect(server.scheduleTraining).toHaveBeenCalled();
});
it("Loads staff info if the page contains a course staff section", function() {
// Load the fixture for the container page that DOES include a course staff section
loadFixtures('oa_base_course_staff.html');
......@@ -105,4 +115,45 @@ describe("OpenAssessment.StaffInfoView", function() {
loadFixtures('oa_base.html');
assertStaffInfoAjaxCall(false);
});
it("reschedules training of AI tasks", function() {
server.data = {
"success": true,
"workflow_uuid": "abc123",
"msg": "Great success."
};
var el = $("#openassessment-base").get(0);
var view = new OpenAssessment.StaffInfoView(el, server, baseView);
view.load();
spyOn(server, 'rescheduleUnfinishedTasks').andCallThrough();
// Test the Rescheduling
view.rescheduleUnfinishedTasks();
// Expect that the server was instructed to reschedule unfinished tasks
expect(server.rescheduleUnfinishedTasks).toHaveBeenCalled();
});
// Failure-path counterpart of the success test above; renamed so the two
// specs no longer share an identical description.
it("reports a failure when rescheduling of AI tasks does not succeed", function() {
    server.data = {
        "success": false,
        "workflow_uuid": "abc123",
        "errMsg": "Stupendous Failure."
    };

    var el = $("#openassessment-base").get(0);
    var view = new OpenAssessment.StaffInfoView(el, server, baseView);
    view.load();

    spyOn(server, 'rescheduleUnfinishedTasks').andCallThrough();

    // Test the rescheduling
    view.rescheduleUnfinishedTasks();

    // Expect that the server was instructed to reschedule unfinished tasks
    expect(server.rescheduleUnfinishedTasks).toHaveBeenCalled();
});
});
......@@ -95,6 +95,14 @@ OpenAssessment.StaffInfoView.prototype = {
view.scheduleTraining();
}
);
// Install a click handler for rescheduling unfinished AI tasks for this problem
sel.find('#reschedule_unfinished_tasks').click(
function(eventObject) {
eventObject.preventDefault();
view.rescheduleUnfinishedTasks();
}
);
},
/**
......@@ -111,5 +119,22 @@ OpenAssessment.StaffInfoView.prototype = {
).fail(function(errMsg) {
$('#schedule_training_message', this.element).text(errMsg)
});
},
/**
Begins the process of rescheduling all unfinished grading tasks. This incdludes
checking if the classifiers have been created, and grading any unfinished
student submissions.
**/
rescheduleUnfinishedTasks: function() {
var view = this;
this.server.rescheduleUnfinishedTasks().done(
function(msg) {
$('#reschedule_unfinished_tasks_message', this.element).text(msg)
}
).fail(function(errMsg) {
$('#reschedule_unfinished_tasks_message', this.element).text(errMsg)
});
}
};
......@@ -8,7 +8,7 @@ from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api
from openassessment.assessment.api import ai as ai_api
from openassessment.workflow import api as workflow_api
from openassessment.assessment.errors.ai import AIError
from openassessment.assessment.errors.ai import AIError, AIGradingInternalError, AITrainingInternalError
from submissions import api as sub_api
from openassessment.xblock.test.base import scenario, XBlockHandlerTestCase
# Test dependency on Stub AI Algorithm configuration
......@@ -355,6 +355,51 @@ class TestCourseStaff(XBlockHandlerTestCase):
self.assertFalse(response['success'])
self.assertTrue('error' in response['msg'])
@scenario('data/example_based_assessment.xml', user_id='Bob')
def test_display_reschedule_unfinished_grading_tasks(self, xblock):
xblock.rubric_assessments.append(EXAMPLE_BASED_ASSESSMENT)
xblock.xmodule_runtime = self._create_mock_runtime(
xblock.scope_ids.usage_id, True, True, "Bob"
)
path, context = xblock.get_staff_path_and_context()
self.assertEquals('openassessmentblock/staff_debug/staff_debug.html', path)
self.assertTrue(context['display_reschedule_unfinished_tasks'])
@scenario('data/example_based_assessment.xml', user_id='Bob')
def test_reschedule_unfinished_grading_tasks_no_permissions(self, xblock):
xblock.xmodule_runtime = self._create_mock_runtime(
xblock.scope_ids.usage_id, True, False, "Bob"
)
response = self.request(xblock, 'reschedule_unfinished_tasks', json.dumps({}), response_format='json')
self.assertFalse(response['success'])
self.assertTrue('permission' in response['msg'])
@patch.object(ai_api, "reschedule_unfinished_tasks")
@scenario('data/example_based_assessment.xml', user_id='Bob')
def test_reschedule_unfinished_grading_tasks_success(self, xblock, mock_api):
mock_api.side_effect = Mock()
xblock.xmodule_runtime = self._create_mock_runtime(
xblock.scope_ids.usage_id, True, True, "Bob"
)
response = self.request(xblock, 'reschedule_unfinished_tasks', json.dumps({}), response_format='json')
self.assertTrue(response['success'])
self.assertTrue(u'All' in response['msg'])
mock_api.assert_called_with(
course_id=unicode(STUDENT_ITEM.get('course_id')), item_id=unicode(xblock.scope_ids.usage_id),
task_type=u"grade"
)
@patch.object(ai_api, "reschedule_unfinished_tasks")
@scenario('data/example_based_assessment.xml', user_id='Bob')
def test_reschedule_unfinished_grading_tasks_error(self, xblock, mock_api):
mock_api.side_effect = AIGradingInternalError("Oh Noooo!")
xblock.xmodule_runtime = self._create_mock_runtime(
xblock.scope_ids.usage_id, True, True, "Bob"
)
response = self.request(xblock, 'reschedule_unfinished_tasks', json.dumps({}), response_format='json')
self.assertFalse(response['success'])
self.assertTrue('error' in response['msg'])
@scenario('data/peer_only_scenario.xml', user_id='Bob')
def test_no_example_based_assessment(self, xblock):
xblock.xmodule_runtime = self._create_mock_runtime(
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment