Commit b40204e2 by Will Daly

AI assessment API uses on_init

AI assessment updates workflow when assessment is created
parent 76e9dc56
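This commit replaces the AI-specific `rubric` and `algorithm_id` arguments to `workflow_api.create_workflow` with a generic `on_init_params` dict keyed by assessment step name, and renames `ai_api.submit` to `ai_api.on_init`. A minimal sketch of the new calling convention, assuming `submission_uuid`, `rubric_dict`, and `algorithm_id` are already in scope for the problem:

.. code:: python

    from openassessment.workflow import api as workflow_api

    # Per-step initialization parameters; keys are assessment step names.
    # Only the "ai" step currently consumes any parameters.
    on_init_params = {
        'ai': {
            'rubric': rubric_dict,          # serialized rubric model (assumed in scope)
            'algorithm_id': algorithm_id,   # e.g. "Ease"
        }
    }

    # The workflow API forwards each step's entry to that step's on_init hook.
    workflow_api.create_workflow(submission_uuid, ['ai', 'self'], on_init_params=on_init_params)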
......@@ -75,7 +75,7 @@ To limit Python tests to a particular Django app:
.. code:: bash
./scripts/test-python.sh openassessment.xblock
./scripts/test-python.sh openassessment/xblock
To run just the JavaScript tests:
......
......@@ -81,7 +81,7 @@ def get_score(submission_uuid, requirements):
}
def submit(submission_uuid, rubric, algorithm_id):
def on_init(submission_uuid, rubric=None, algorithm_id=None):
"""
Submit a response for AI assessment.
This will:
......@@ -90,6 +90,8 @@ def submit(submission_uuid, rubric, algorithm_id):
Args:
submission_uuid (str): The UUID of the submission to assess.
Kwargs:
rubric (dict): Serialized rubric model.
algorithm_id (unicode): Use only classifiers trained with the specified algorithm.
......@@ -108,6 +110,12 @@ def submit(submission_uuid, rubric, algorithm_id):
'10df7db776686822e501b05f452dc1e4b9141fe5'
"""
if rubric is None:
raise AIGradingRequestError(u'No rubric provided')
if algorithm_id is None:
raise AIGradingRequestError(u'No algorithm ID provided')
try:
workflow = AIGradingWorkflow.start_workflow(submission_uuid, rubric, algorithm_id)
except (sub_api.SubmissionNotFoundError, sub_api.SubmissionRequestError) as ex:
......
......@@ -135,6 +135,14 @@ def create_assessment(grading_workflow_uuid, criterion_scores):
logger.exception(msg)
raise AIGradingInternalError(msg)
# Fire a signal to update the workflow API
# This will allow students to receive a score if they're
# waiting on an AI assessment.
# The signal receiver is responsible for catching and logging
# all exceptions that may occur when updating the workflow.
from openassessment.assessment.signals import assessment_complete_signal
assessment_complete_signal.send(sender=None, submission_uuid=workflow.submission_uuid)
def get_training_task_params(training_workflow_uuid):
"""
......
......@@ -31,6 +31,10 @@ def submitter_is_finished(submission_uuid, requirements):
"""
Check whether the submitter has made the required number of assessments.
If the requirements dict is None (because we're being updated
asynchronously or when the workflow is first created),
then automatically return False.
Args:
submission_uuid (str): The UUID of the submission being tracked.
requirements (dict): Dictionary with the key "must_grade" indicating
......@@ -40,6 +44,9 @@ def submitter_is_finished(submission_uuid, requirements):
bool
"""
if requirements is None:
return False
try:
workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
if workflow.completed_at is not None:
......@@ -58,6 +65,10 @@ def assessment_is_finished(submission_uuid, requirements):
Check whether the submitter has received enough assessments
to get a score.
If the requirements dict is None (because we're being updated
asynchronously or when the workflow is first created),
then automatically return False.
Args:
submission_uuid (str): The UUID of the submission being tracked.
requirements (dict): Dictionary with the key "must_be_graded_by"
......@@ -68,6 +79,8 @@ def assessment_is_finished(submission_uuid, requirements):
bool
"""
if requirements is None:
return False
return bool(get_score(submission_uuid, requirements))
......@@ -126,6 +139,9 @@ def get_score(submission_uuid, requirements):
dict with keys "points_earned" and "points_possible".
"""
if requirements is None:
return None
# User hasn't completed their own submission yet
if not submitter_is_finished(submission_uuid, requirements):
return None
......
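The peer assessment API changes above establish a None-requirements convention: when the requirements dict is unavailable (asynchronous updates, or right after the workflow is created), the step reports itself as incomplete rather than raising. A rough illustration, not part of the diff, assuming a valid `submission_uuid` is in scope:

.. code:: python

    from openassessment.assessment.api import peer as peer_api

    # With no requirements available, the peer step is simply "not finished yet".
    peer_api.submitter_is_finished(submission_uuid, None)    # returns False
    peer_api.assessment_is_finished(submission_uuid, None)   # returns False
    peer_api.get_score(submission_uuid, None)                # returns None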
......@@ -69,9 +69,7 @@ def get_score(submission_uuid, requirements):
Args:
submission_uuid (str): The unique identifier for the submission
requirements (dict): Any attributes of the assessment module required
to determine if this assessment is complete. There are currently
no requirements for a self-assessment.
requirements (dict): Not used.
Returns:
A dict of points earned and points possible for the given submission.
Returns None if no score can be determined yet.
......
......@@ -41,6 +41,9 @@ def submitter_is_finished(submission_uuid, requirements): # pylint:disable=W06
StudentTrainingRequestError
"""
if requirements is None:
return False
try:
num_required = int(requirements['num_required'])
except KeyError:
......
"""
Signals for the workflow API.
See https://docs.djangoproject.com/en/1.4/topics/signals
"""
import django.dispatch
# Indicate that an assessment has completed
# You can fire this signal from asynchronous processes (such as AI grading)
# to notify receivers that an assessment is available.
assessment_complete_signal = django.dispatch.Signal(providing_args=['submission_uuid']) # pylint: disable=C0103
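A short sketch of how this signal is meant to be used; the function names here are illustrative, and the actual receiver added by this commit lives in openassessment/workflow/models.py (shown further down):

.. code:: python

    from django.dispatch import receiver

    from openassessment.assessment.signals import assessment_complete_signal


    @receiver(assessment_complete_signal)
    def handle_assessment_complete(sender, **kwargs):
        # The sender is unused; the submission UUID arrives as a keyword argument.
        submission_uuid = kwargs.get('submission_uuid')
        # ... update whatever depends on the completed assessment ...


    def notify_assessment_complete(submission_uuid):
        # An asynchronous process (such as AI grading) fires the signal like this;
        # receivers are responsible for catching and logging their own exceptions.
        assessment_complete_signal.send(sender=None, submission_uuid=submission_uuid)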
......@@ -1095,11 +1095,11 @@ class TestPeerApi(CacheResetTest):
1
)
@patch.object(Assessment.objects, 'filter')
@raises(peer_api.PeerAssessmentInternalError)
def test_max_score_db_error(self, mock_filter):
mock_filter.side_effect = DatabaseError("Bad things happened")
def test_max_score_db_error(self):
tim, _ = self._create_student_and_submission("Tim", "Tim's answer")
with patch.object(Assessment.objects, 'filter') as mock_filter:
mock_filter.side_effect = DatabaseError("Bad things happened")
peer_api.get_rubric_max_scores(tim["uuid"])
@patch.object(PeerWorkflow.objects, 'get')
......
......@@ -23,7 +23,7 @@ from .errors import (
logger = logging.getLogger(__name__)
def create_workflow(submission_uuid, steps, rubric=None, algorithm_id=None):
def create_workflow(submission_uuid, steps, on_init_params=None):
"""Begins a new assessment workflow.
Create a new workflow that other assessments will record themselves against.
......@@ -31,14 +31,13 @@ def create_workflow(submission_uuid, steps, rubric=None, algorithm_id=None):
Args:
submission_uuid (str): The UUID for the submission that all our
assessments will be evaluating.
rubric (dict): The rubric that will be used for grading in this workflow.
The rubric is only used when Example Based Assessment is configured.
algorithm_id (str): The version of the AI that will be used for evaluating
submissions, if Example Based Assessments are configured for this
location.
steps (list): List of steps that are part of the workflow, in the order
that the user must complete them. Example: `["peer", "self"]`
Kwargs:
on_init_params (dict): The parameters to pass to each assessment module
on init. Keys are the assessment step names.
Returns:
dict: Assessment workflow information with the following
`uuid` = UUID of this `AssessmentWorkflow`
......@@ -68,8 +67,12 @@ def create_workflow(submission_uuid, steps, rubric=None, algorithm_id=None):
.format(submission_uuid, specific_err_msg)
)
if on_init_params is None:
on_init_params = dict()
try:
submission_dict = sub_api.get_submission_and_student(submission_uuid)
workflow = AssessmentWorkflow.start_workflow(submission_uuid, steps, on_init_params)
return AssessmentWorkflowSerializer(workflow).data
except sub_api.SubmissionNotFoundError:
err_msg = sub_err_msg("submission not found")
logger.error(err_msg)
......@@ -84,79 +87,18 @@ def create_workflow(submission_uuid, steps, rubric=None, algorithm_id=None):
u"retrieving submission {} failed with unknown error: {}"
.format(submission_uuid, err)
)
# Raise an error if they specify a step we don't recognize...
invalid_steps = set(steps) - set(AssessmentWorkflow.STEPS)
if invalid_steps:
raise AssessmentWorkflowRequestError(
u"The following steps were not recognized: {}; Must be one of {}".format(
invalid_steps, AssessmentWorkflow.STEPS
)
)
# We're not using a serializer to deserialize this because the only variable
# we're getting from the outside is the submission_uuid, which is already
# validated by this point.
status = AssessmentWorkflow.STATUS.waiting
step = steps[0]
# AI will not set the Workflow Status, since it will immediately replace the
# status with the next step, or "waiting".
if step == "ai":
step = steps[1] if len(steps) > 1 else None
if not rubric or not algorithm_id:
err_msg = u"Rubric and Algorithm ID must be configured for Example Based Assessment."
raise AssessmentWorkflowInternalError(err_msg)
try:
ai_api.submit(submission_uuid, rubric, algorithm_id)
except AIError as err:
err_msg = u"Could not submit submission for Example Based Grading: {}".format(err)
except DatabaseError:
err_msg = u"Could not create assessment workflow for submission UUID: {}".format(submission_uuid)
logger.exception(err_msg)
raise AssessmentWorkflowInternalError(err_msg)
if step == "peer":
status = AssessmentWorkflow.STATUS.peer
try:
peer_api.on_start(submission_uuid)
except PeerAssessmentError as err:
err_msg = u"Could not create assessment workflow: {}".format(err)
logger.exception(err_msg)
raise AssessmentWorkflowInternalError(err_msg)
elif step == "self":
status = AssessmentWorkflow.STATUS.self
elif step == "training":
status = AssessmentWorkflow.STATUS.training
try:
training_api.on_start(submission_uuid)
except StudentTrainingInternalError as err:
err_msg = u"Could not create assessment workflow: {}".format(err)
logger.exception(err_msg)
raise AssessmentWorkflowInternalError(err_msg)
try:
workflow = AssessmentWorkflow.objects.create(
submission_uuid=submission_uuid,
status=status,
course_id=submission_dict['student_item']['course_id'],
item_id=submission_dict['student_item']['item_id'],
)
workflow_steps = [
AssessmentWorkflowStep(
workflow=workflow, name=step, order_num=i
)
for i, step in enumerate(steps)
]
workflow.steps.add(*workflow_steps)
except (
DatabaseError,
sub_api.SubmissionError
) as err:
err_msg = u"Could not create assessment workflow: {}".format(err)
except:
err_msg = (
u"An unexpected error occurred while creating "
u"the workflow for submission UUID {}"
).format(submission_uuid)
logger.exception(err_msg)
raise AssessmentWorkflowInternalError(err_msg)
return AssessmentWorkflowSerializer(workflow).data
def get_workflow_for_submission(submission_uuid, assessment_requirements):
"""Returns Assessment Workflow information
......
......@@ -12,12 +12,14 @@ need to then generate a matching migration for it using:
import logging
import importlib
from django.conf import settings
from django.db import models
from django.db import models, transaction, DatabaseError
from django.dispatch import receiver
from django_extensions.db.fields import UUIDField
from django.utils.timezone import now
from model_utils import Choices
from model_utils.models import StatusModel, TimeStampedModel
from submissions import api as sub_api
from openassessment.assessment.signals import assessment_complete_signal
from .errors import AssessmentApiLoadError
......@@ -33,6 +35,7 @@ DEFAULT_ASSESSMENT_API_DICT = {
'peer': 'openassessment.assessment.api.peer',
'self': 'openassessment.assessment.api.self',
'training': 'openassessment.assessment.api.student_training',
'ai': 'openassessment.assessment.api.ai',
}
ASSESSMENT_API_DICT = getattr(
settings, 'ORA2_ASSESSMENTS',
......@@ -46,7 +49,7 @@ ASSESSMENT_API_DICT = getattr(
# We then use that score as the student's overall score.
# This Django setting is a list of assessment steps (defined in `settings.ORA2_ASSESSMENTS`)
# in descending priority order.
DEFAULT_ASSESSMENT_SCORE_PRIORITY = ['peer', 'self']
DEFAULT_ASSESSMENT_SCORE_PRIORITY = ['peer', 'self', 'ai']
ASSESSMENT_SCORE_PRIORITY = getattr(
settings, 'ORA2_ASSESSMENT_SCORE_PRIORITY',
DEFAULT_ASSESSMENT_SCORE_PRIORITY
......@@ -93,6 +96,80 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
ordering = ["-created"]
# TODO: In migration, need a non-unique index on (course_id, item_id, status)
@classmethod
@transaction.commit_on_success
def start_workflow(cls, submission_uuid, step_names, on_init_params):
"""
Start a new workflow.
Args:
submission_uuid (str): The UUID of the submission associated with this workflow.
step_names (list): The names of the assessment steps in the workflow.
on_init_params (dict): The parameters to pass to each assessment module
on init. Keys are the assessment step names.
Returns:
AssessmentWorkflow
Raises:
SubmissionNotFoundError
SubmissionRequestError
SubmissionInternalError
DatabaseError
Assessment-module specific errors
"""
submission_dict = sub_api.get_submission_and_student(submission_uuid)
# Create the workflow and step models in the database
# For now, set the status to waiting; we'll modify it later
# based on the first step in the workflow.
workflow = cls.objects.create(
submission_uuid=submission_uuid,
status=AssessmentWorkflow.STATUS.waiting,
course_id=submission_dict['student_item']['course_id'],
item_id=submission_dict['student_item']['item_id']
)
workflow_steps = [
AssessmentWorkflowStep(
workflow=workflow, name=step, order_num=i
)
for i, step in enumerate(step_names)
]
workflow.steps.add(*workflow_steps)
# Initialize the assessment APIs
has_started_first_step = False
for step in workflow_steps:
api = step.api()
if api is not None:
# Initialize the assessment module
# We do this for every assessment module
on_init_func = getattr(api, 'on_init', lambda submission_uuid, **params: None)
on_init_func(submission_uuid, **on_init_params.get(step.name, {}))
# For the first valid step, update the workflow status
# and notify the assessment module that it's being started
if not has_started_first_step:
# Update the workflow
workflow.status = step.name
workflow.save()
# Notify the assessment module that it's being started
on_start_func = getattr(api, 'on_start', lambda submission_uuid: None)
on_start_func(submission_uuid)
# Remember that we've already started the first step
has_started_first_step = True
# Update the workflow (in case some of the assessment modules are automatically complete)
# We do NOT pass in requirements, on the assumption that any assessment module
# that accepts requirements would NOT automatically complete.
workflow.update_from_assessments(None)
# Return the newly created workflow
return workflow
@property
def score(self):
"""Latest score for the submission we're tracking.
......@@ -146,6 +223,14 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
this submission, then we record the score in the submissions API and move our
`status` to `done`.
By convention, if `assessment_requirements` is `None`, then assessment
modules that need requirements should automatically say that they're incomplete.
This allows us to update the workflow even when we don't know the
current state of the problem. For example, if we're updating the workflow
at the completion of an asynchronous call, we won't necessarily know the
current state of the problem, but we would still want to update assessments
that don't have any requirements.
Args:
assessment_requirements (dict): Dictionary passed to the assessment API.
This defines the requirements for each assessment step; the APIs
......@@ -202,6 +287,9 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
# Check if the assessment API defines a score function at all
get_score_func = getattr(assessment_step.api(), 'get_score', None)
if get_score_func is not None:
if assessment_requirements is None:
requirements = None
else:
requirements = assessment_requirements.get(assessment_step_name, {})
score = get_score_func(self.submission_uuid, requirements)
break
......@@ -304,7 +392,7 @@ class AssessmentWorkflowStep(models.Model):
msg = (
u"No assessment configured for '{name}'. "
u"Check the ORA2_ASSESSMENTS Django setting."
).format(self.name)
).format(name=self.name)
logger.warning(msg)
return None
......@@ -318,6 +406,9 @@ class AssessmentWorkflowStep(models.Model):
"""
# Once a step is completed, it will not be revisited based on updated requirements.
step_changed = False
if assessment_requirements is None:
step_reqs = None
else:
step_reqs = assessment_requirements.get(self.name, {})
default_finished = lambda submission_uuid, step_reqs: True
......@@ -336,3 +427,45 @@ class AssessmentWorkflowStep(models.Model):
if step_changed:
self.save()
@receiver(assessment_complete_signal)
def update_workflow_async(sender, **kwargs):
"""
Register a receiver for the update workflow signal
This allows asynchronous processes to update the workflow
Args:
sender (object): Not used
Kwargs:
submission_uuid (str): The UUID of the submission associated
with the workflow being updated.
Returns:
None
"""
submission_uuid = kwargs.get('submission_uuid')
if submission_uuid is None:
logger.error("Update workflow signal called without a submission UUID")
return
try:
workflow = AssessmentWorkflow.objects.get(submission_uuid=submission_uuid)
workflow.update_from_assessments(None)
except AssessmentWorkflow.DoesNotExist:
msg = u"Could not retrieve workflow for submission with UUID {}".format(submission_uuid)
logger.exception(msg)
except DatabaseError:
msg = (
u"Database error occurred while updating "
u"the workflow for submission UUID {}"
).format(submission_uuid)
logger.exception(msg)
except:
msg = (
u"Unexpected error occurred while updating the workflow "
u"for submission UUID {}"
).format(submission_uuid)
logger.exception(msg)
......@@ -16,7 +16,7 @@ import submissions.api as sub_api
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api
from openassessment.workflow.models import AssessmentWorkflow
from openassessment.workflow.errors import AssessmentApiLoadError
from openassessment.workflow.errors import AssessmentWorkflowInternalError
RUBRIC_DICT = {
......@@ -34,6 +34,13 @@ RUBRIC_DICT = {
ALGORITHM_ID = "Ease"
ON_INIT_PARAMS = {
'ai': {
'rubric': RUBRIC_DICT,
'algorithm_id': ALGORITHM_ID,
}
}
ITEM_1 = {
"student_id": "Optimus Prime 001",
"item_id": "Matrix of Leadership",
......@@ -51,7 +58,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
if "ai" in data["steps"]:
first_step = data["steps"][1] if len(data["steps"]) > 1 else "waiting"
submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
workflow = workflow_api.create_workflow(submission["uuid"], data["steps"], RUBRIC_DICT, ALGORITHM_ID)
workflow = workflow_api.create_workflow(submission["uuid"], data["steps"], ON_INIT_PARAMS)
workflow_keys = set(workflow.keys())
self.assertEqual(
......@@ -134,7 +141,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
def test_update_peer_workflow(self):
submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
workflow = workflow_api.create_workflow(submission["uuid"], ["training", "peer"], RUBRIC_DICT, ALGORITHM_ID)
workflow = workflow_api.create_workflow(submission["uuid"], ["training", "peer"], ON_INIT_PARAMS)
StudentTrainingWorkflow.create_workflow(submission_uuid=submission["uuid"])
requirements = {
"training": {
......@@ -177,20 +184,20 @@ class TestAssessmentWorkflowApi(CacheResetTest):
def test_need_valid_submission_uuid(self, data):
# submission doesn't exist
with self.assertRaises(workflow_api.AssessmentWorkflowRequestError):
workflow = workflow_api.create_workflow("xxxxxxxxxxx", data["steps"])
workflow = workflow_api.create_workflow("xxxxxxxxxxx", data["steps"], ON_INIT_PARAMS)
# submission_uuid is the wrong type
with self.assertRaises(workflow_api.AssessmentWorkflowRequestError):
workflow = workflow_api.create_workflow(123, data["steps"])
workflow = workflow_api.create_workflow(123, data["steps"], ON_INIT_PARAMS)
@patch.object(ai_api, 'assessment_is_finished')
@patch.object(ai_api, 'get_score')
def test_ai_score_set(self, mock_score, mock_is_finished):
submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
workflow_api.create_workflow(submission["uuid"], ["ai"], RUBRIC_DICT, ALGORITHM_ID)
mock_is_finished.return_value = True
score = {"points_earned": 7, "points_possible": 10}
mock_score.return_value = score
workflow_api.create_workflow(submission["uuid"], ["ai"], ON_INIT_PARAMS)
workflow = workflow_api.get_workflow_for_submission(submission["uuid"], {})
self.assertEquals(workflow["score"]["points_earned"], score["points_earned"])
self.assertEquals(workflow["score"]["points_possible"], score["points_possible"])
......@@ -200,21 +207,27 @@ class TestAssessmentWorkflowApi(CacheResetTest):
@raises(workflow_api.AssessmentWorkflowInternalError)
def test_create_ai_workflow_no_rubric(self, rubric, algorithm_id):
submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
workflow_api.create_workflow(submission["uuid"], ["ai"], rubric, algorithm_id)
on_init_params = {
'ai': {
'rubric': rubric,
'algorithm_id': algorithm_id,
}
}
workflow_api.create_workflow(submission["uuid"], ["ai"], on_init_params)
@patch.object(ai_api, 'submit')
@patch.object(ai_api, 'on_init')
@raises(workflow_api.AssessmentWorkflowInternalError)
def test_ai_submit_failures(self, mock_submit):
mock_submit.side_effect = AIError("Kaboom!")
def test_ai_on_init_failures(self, mock_on_init):
mock_on_init.side_effect = AIError("Kaboom!")
submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
workflow_api.create_workflow(submission["uuid"], ["ai"], RUBRIC_DICT, ALGORITHM_ID)
workflow_api.create_workflow(submission["uuid"], ["ai"], ON_INIT_PARAMS)
@patch.object(Submission.objects, 'get')
@ddt.file_data('data/assessments.json')
@raises(workflow_api.AssessmentWorkflowInternalError)
def test_unexpected_submissions_errors_wrapped(self, data, mock_get):
mock_get.side_effect = Exception("Kaboom!")
workflow_api.create_workflow("zzzzzzzzzzzzzzz", data["steps"])
workflow_api.create_workflow("zzzzzzzzzzzzzzz", data["steps"], ON_INIT_PARAMS)
@patch.object(AssessmentWorkflow.objects, 'create')
@ddt.file_data('data/assessments.json')
......@@ -222,14 +235,14 @@ class TestAssessmentWorkflowApi(CacheResetTest):
def test_unexpected_workflow_errors_wrapped(self, data, mock_create):
mock_create.side_effect = DatabaseError("Kaboom!")
submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
workflow_api.create_workflow(submission["uuid"], data["steps"])
workflow_api.create_workflow(submission["uuid"], data["steps"], ON_INIT_PARAMS)
@patch.object(PeerWorkflow.objects, 'get_or_create')
@raises(workflow_api.AssessmentWorkflowInternalError)
def test_unexpected_peer_workflow_errors_wrapped(self, mock_create):
mock_create.side_effect = DatabaseError("Kaboom!")
submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
workflow_api.create_workflow(submission["uuid"], ["peer", "self"])
workflow_api.create_workflow(submission["uuid"], ["peer", "self"], ON_INIT_PARAMS)
@patch.object(AssessmentWorkflow.objects, 'get')
@ddt.file_data('data/assessments.json')
......@@ -252,7 +265,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
def test_unexpected_workflow_get_errors_wrapped(self, data, mock_get):
mock_get.side_effect = Exception("Kaboom!")
submission = sub_api.create_submission(ITEM_1, "We talk TV!")
workflow = workflow_api.create_workflow(submission["uuid"], data["steps"])
workflow = workflow_api.create_workflow(submission["uuid"], data["steps"], ON_INIT_PARAMS)
workflow_api.get_workflow_for_submission(workflow["uuid"], {})
def test_get_status_counts(self):
......@@ -332,9 +345,10 @@ class TestAssessmentWorkflowApi(CacheResetTest):
"item_type": "openassessment",
}, "test answer")
workflow_api.create_workflow(submission['uuid'], ['self'])
with self.assertRaises(AssessmentWorkflowInternalError):
workflow_api.create_workflow(submission['uuid'], ['self'], ON_INIT_PARAMS)
with self.assertRaises(AssessmentApiLoadError):
with self.assertRaises(AssessmentWorkflowInternalError):
workflow_api.update_from_assessments(submission['uuid'], {})
def _create_workflow_with_status(
......@@ -368,7 +382,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
"item_type": "openassessment",
}, answer)
workflow = workflow_api.create_workflow(submission['uuid'], steps)
workflow = workflow_api.create_workflow(submission['uuid'], steps, ON_INIT_PARAMS)
workflow_model = AssessmentWorkflow.objects.get(uuid=workflow['uuid'])
workflow_model.status = status
workflow_model.save()
......
"""
Tests for Django signals and receivers defined by the workflow API.
"""
import mock
from django.db import DatabaseError
import ddt
from submissions import api as sub_api
from openassessment.test_utils import CacheResetTest
from openassessment.workflow import api as workflow_api
from openassessment.workflow.models import AssessmentWorkflow
from openassessment.assessment.signals import assessment_complete_signal
@ddt.ddt
class UpdateWorkflowSignalTest(CacheResetTest):
"""
Test for the update workflow signal.
"""
STUDENT_ITEM = {
"student_id": "test student",
"item_id": "test item",
"course_id": "test course",
"item_type": "openassessment",
}
def setUp(self):
"""
Create a submission.
"""
submission = sub_api.create_submission(self.STUDENT_ITEM, "test answer")
self.submission_uuid = submission['uuid']
def test_update_signal_no_workflow(self):
# Without defining a workflow, send the signal
# The receiver should catch and log the exception
assessment_complete_signal.send(sender=None, submission_uuid=self.submission_uuid)
def test_update_signal_no_submission_uuid(self):
# Try to send the signal without specifying a submission UUID
# The receiver should catch and log the exception
assessment_complete_signal.send(sender=None)
def test_update_signal_updates_workflow(self):
# Start a workflow for the submission
workflow_api.create_workflow(self.submission_uuid, ['self'])
# Spy on the workflow update call
with mock.patch.object(AssessmentWorkflow, 'update_from_assessments') as mock_update:
# Send a signal to update the workflow
assessment_complete_signal.send(sender=None, submission_uuid=self.submission_uuid)
# Verify that the workflow model update was called
mock_update.assert_called_once_with(None)
@ddt.data(DatabaseError, IOError)
@mock.patch.object(AssessmentWorkflow.objects, 'get')
def test_errors(self, error, mock_call):
# Start a workflow for the submission
workflow_api.create_workflow(self.submission_uuid, ['self'])
# The receiver should catch and log the error
mock_call.side_effect = error("OH NO!")
assessment_complete_signal.send(sender=None, submission_uuid=self.submission_uuid)
<openassessment>
<title>Open Assessment Test</title>
<prompt>Example-based assessment</prompt>
<rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion>
<name>Ideas</name>
<prompt>How good are the ideas?</prompt>
<option points="0">
<name>Poor</name>
<explanation>Poor job!</explanation>
</option>
<option points="1">
<name>Fair</name>
<explanation>Fair job</explanation>
</option>
<option points="3">
<name>Good</name>
<explanation>Good job</explanation>
</option>
</criterion>
<criterion>
<name>Content</name>
<prompt>How good is the content?</prompt>
<option points="0">
<name>Poor</name>
<explanation>Poor job!</explanation>
</option>
<option points="1">
<name>Fair</name>
<explanation>Fair job</explanation>
</option>
<option points="3">
<name>Good</name>
<explanation>Good job</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="example-based-assessment" algorithm_id="fake">
<example>
<answer>Example Answer One</answer>
<select criterion="Ideas" option="Poor" />
<select criterion="Content" option="Poor" />
</example>
<example>
<answer>Example Answer Two</answer>
<select criterion="Ideas" option="Fair" />
<select criterion="Content" option="Fair" />
</example>
<example>
<answer>Example Answer Three</answer>
<select criterion="Ideas" option="Fair" />
<select criterion="Content" option="Good" />
</example>
<example>
<answer>Example Answer Four</answer>
<select criterion="Ideas" option="Poor" />
<select criterion="Content" option="Good" />
</example>
</assessment>
</assessments>
</openassessment>
"""
Integration test for example-based assessment (AI).
"""
import json
import mock
from django.test.utils import override_settings
from submissions import api as sub_api
from openassessment.xblock.openassessmentblock import OpenAssessmentBlock
from .base import XBlockHandlerTestCase, scenario
class AIAssessmentIntegrationTest(XBlockHandlerTestCase):
"""
Integration test for example-based assessment (AI).
"""
SUBMISSION = json.dumps({'submission': 'This is a submission!'})
AI_ALGORITHMS = {
'fake': 'openassessment.assessment.worker.algorithm.FakeAIAlgorithm'
}
@mock.patch.object(OpenAssessmentBlock, 'is_admin', new_callable=mock.PropertyMock)
@override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
@scenario('data/example_based_only.xml', user_id='Bob')
def test_asynch_generate_score(self, xblock, mock_is_admin):
# Test that AI grading, which creates assessments asynchronously,
# updates the workflow so students can receive a score.
mock_is_admin.return_value = True
# Train classifiers for the problem
self.request(xblock, 'schedule_training', json.dumps({}), response_format='json')
# Submit a response
self.request(xblock, 'submit', self.SUBMISSION, response_format='json')
# BEFORE viewing the grade page, check that we get a score
score = sub_api.get_score(xblock.get_student_item_dict())
self.assertIsNot(score, None)
self.assertEqual(score['submission_uuid'], xblock.submission_uuid)
......@@ -51,12 +51,14 @@ class WorkflowMixin(object):
"""
steps = self._create_step_list()
rubric_dict = create_rubric_dict(self.prompt, self.rubric_criteria)
ai_module = self.get_assessment_module('example-based-assessment')
algorithm_id = ai_module["algorithm_id"] if ai_module else None
workflow_api.create_workflow(
submission_uuid, steps, rubric=rubric_dict, algorithm_id=algorithm_id
)
on_init_params = {
'ai': {
'rubric': create_rubric_dict(self.prompt, self.rubric_criteria),
'algorithm_id': ai_module["algorithm_id"] if ai_module else None
}
}
workflow_api.create_workflow(submission_uuid, steps, on_init_params=on_init_params)
def workflow_requirements(self):
"""
......