Commit b40204e2 by Will Daly

AI assessment API uses on_init

AI assessment updates workflow when assessment is created
parent 76e9dc56
@@ -75,7 +75,7 @@ To limit Python tests to a particular Django app:

 .. code:: bash

-    ./scripts/test-python.sh openassessment.xblock
+    ./scripts/test-python.sh openassessment/xblock

 To run just the JavaScript tests:
...
@@ -81,7 +81,7 @@ def get_score(submission_uuid, requirements):
     }


-def submit(submission_uuid, rubric, algorithm_id):
+def on_init(submission_uuid, rubric=None, algorithm_id=None):
     """
     Submit a response for AI assessment.

     This will:
@@ -90,6 +90,8 @@ def submit(submission_uuid, rubric, algorithm_id):
     Args:
         submission_uuid (str): The UUID of the submission to assess.

+    Kwargs:
         rubric (dict): Serialized rubric model.
         algorithm_id (unicode): Use only classifiers trained with the specified algorithm.

@@ -108,6 +110,12 @@ def submit(submission_uuid, rubric, algorithm_id):
        '10df7db776686822e501b05f452dc1e4b9141fe5'

    """
+    if rubric is None:
+        raise AIGradingRequestError(u'No rubric provided')
+
+    if algorithm_id is None:
+        raise AIGradingRequestError(u'No algorithm ID provided')
+
    try:
        workflow = AIGradingWorkflow.start_workflow(submission_uuid, rubric, algorithm_id)
    except (sub_api.SubmissionNotFoundError, sub_api.SubmissionRequestError) as ex:
...
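For reference, a minimal sketch of the new calling convention. It assumes a configured ora2/Django environment; the error-class import path, the rubric dict, the algorithm id, and the submission UUID are illustrative placeholders, not values from this commit.

```python
from openassessment.assessment.api import ai as ai_api
from openassessment.assessment.errors import AIGradingRequestError  # import path assumed

RUBRIC = {'criteria': []}            # placeholder serialized rubric
ALGORITHM_ID = 'ease'                # placeholder algorithm identifier
submission_uuid = 'not-a-real-uuid'  # placeholder submission UUID

try:
    # Both kwargs are optional in the signature, but on_init validates them
    # up front instead of failing later in the grading workflow.
    ai_api.on_init(submission_uuid, rubric=RUBRIC, algorithm_id=ALGORITHM_ID)
except AIGradingRequestError:
    # Raised when rubric or algorithm_id is None, or the submission is unknown/invalid.
    pass
```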
@@ -135,6 +135,14 @@ def create_assessment(grading_workflow_uuid, criterion_scores):
         logger.exception(msg)
         raise AIGradingInternalError(msg)

+    # Fire a signal to update the workflow API
+    # This will allow students to receive a score if they're
+    # waiting on an AI assessment.
+    # The signal receiver is responsible for catching and logging
+    # all exceptions that may occur when updating the workflow.
+    from openassessment.assessment.signals import assessment_complete_signal
+    assessment_complete_signal.send(sender=None, submission_uuid=workflow.submission_uuid)


 def get_training_task_params(training_workflow_uuid):
     """
...
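The comment above leans on Django signal semantics: `Signal.send()` propagates any exception a receiver raises back into the sender (here, the grading code), which is why the receiver is expected to catch and log its own errors. A self-contained sketch of that contract, using an illustrative signal rather than ora2's and the Django 1.x-era `providing_args` argument shown in this commit:

```python
import logging
import django.dispatch

logger = logging.getLogger(__name__)

# Illustrative stand-in for assessment_complete_signal.
example_signal = django.dispatch.Signal(providing_args=['submission_uuid'])


def defensive_receiver(sender, **kwargs):
    """Receivers must not raise: Signal.send() re-raises into the sender."""
    try:
        submission_uuid = kwargs['submission_uuid']  # may be absent
        logger.info("updating workflow for %s", submission_uuid)
    except Exception:
        # Swallow and log, so the grading task that fired the signal is never
        # failed by a workflow-update problem.
        logger.exception("workflow update failed")


example_signal.connect(defensive_receiver)
example_signal.send(sender=None, submission_uuid='abcd1234')
```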
@@ -31,6 +31,10 @@ def submitter_is_finished(submission_uuid, requirements):
     """
     Check whether the submitter has made the required number of assessments.

+    If the requirements dict is None (because we're being updated
+    asynchronously or when the workflow is first created),
+    then automatically return False.
+
     Args:
         submission_uuid (str): The UUID of the submission being tracked.
         requirements (dict): Dictionary with the key "must_grade" indicating
@@ -40,6 +44,9 @@ def submitter_is_finished(submission_uuid, requirements):
         bool
     """
+    if requirements is None:
+        return False
+
     try:
         workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
         if workflow.completed_at is not None:
@@ -58,6 +65,10 @@ def assessment_is_finished(submission_uuid, requirements):
     Check whether the submitter has received enough assessments
     to get a score.

+    If the requirements dict is None (because we're being updated
+    asynchronously or when the workflow is first created),
+    then automatically return False.
+
     Args:
         submission_uuid (str): The UUID of the submission being tracked.
         requirements (dict): Dictionary with the key "must_be_graded_by"
@@ -68,6 +79,8 @@ def assessment_is_finished(submission_uuid, requirements):
         bool
     """
+    if requirements is None:
+        return False
     return bool(get_score(submission_uuid, requirements))
@@ -126,6 +139,9 @@ def get_score(submission_uuid, requirements):
         dict with keys "points_earned" and "points_possible".
     """
+    if requirements is None:
+        return None
+
     # User hasn't completed their own submission yet
     if not submitter_is_finished(submission_uuid, requirements):
         return None
...
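The same `None` guard is applied to `submitter_is_finished`, `assessment_is_finished`, and `get_score` here, and to the student-training API further down. A tiny, self-contained sketch of the convention; the completion check itself is illustrative, not the peer API's real logic:

```python
def submitter_is_finished(submission_uuid, requirements):
    # Asynchronous updates pass requirements=None; report "not finished"
    # rather than guessing at the step's rules.
    if requirements is None:
        return False
    return requirements.get('must_grade', 0) == 0  # illustrative check only


print(submitter_is_finished('abcd1234', None))               # False
print(submitter_is_finished('abcd1234', {'must_grade': 0}))  # True
```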
@@ -69,9 +69,7 @@ def get_score(submission_uuid, requirements):
     Args:
         submission_uuid (str): The unique identifier for the submission
-        requirements (dict): Any attributes of the assessment module required
-            to determine if this assessment is complete. There are currently
-            no requirements for a self-assessment.
+        requirements (dict): Not used.

     Returns:
         A dict of points earned and points possible for the given submission.
         Returns None if no score can be determined yet.
...
@@ -41,6 +41,9 @@ def submitter_is_finished(submission_uuid, requirements):   # pylint:disable=W06
         StudentTrainingRequestError

     """
+    if requirements is None:
+        return False
+
     try:
         num_required = int(requirements['num_required'])
     except KeyError:
...
"""
Signals for the workflow API.
See https://docs.djangoproject.com/en/1.4/topics/signals
"""
import django.dispatch
# Indicate that an assessment has completed
# You can fire this signal from asynchronous processes (such as AI grading)
# to notify receivers that an assessment is available.
assessment_complete_signal = django.dispatch.Signal(providing_args=['submission_uuid']) # pylint: disable=C0103
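The receiver for this signal is added to the workflow models module later in this commit; a minimal sketch of how a consumer hooks in, assuming ora2 is installed and Django is configured:

```python
from django.dispatch import receiver
from openassessment.assessment.signals import assessment_complete_signal


@receiver(assessment_complete_signal)
def on_assessment_complete(sender, **kwargs):
    # kwargs carries the providing_args declared above.
    print("assessment complete for", kwargs.get('submission_uuid'))


# An asynchronous process (such as an AI grading task) fires it like this:
assessment_complete_signal.send(sender=None, submission_uuid='abcd1234')
```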
@@ -190,7 +190,7 @@ class AIGradingTest(CacheResetTest):
         # Schedule a grading task
         # Because Celery is configured in "always eager" mode, this will
         # be executed synchronously.
-        ai_api.submit(self.submission_uuid, RUBRIC, ALGORITHM_ID)
+        ai_api.on_init(self.submission_uuid, rubric=RUBRIC, algorithm_id=ALGORITHM_ID)

         # Verify that we got the scores we provided to the stub AI algorithm
         assessment = ai_api.get_latest_assessment(self.submission_uuid)
@@ -205,7 +205,7 @@ class AIGradingTest(CacheResetTest):
     @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
     def test_get_assessment_scores_by_criteria(self):
-        ai_api.submit(self.submission_uuid, RUBRIC, ALGORITHM_ID)
+        ai_api.on_init(self.submission_uuid, rubric=RUBRIC, algorithm_id=ALGORITHM_ID)

         # Verify that we got the scores we provided to the stub AI algorithm
         assessment = ai_api.get_latest_assessment(self.submission_uuid)
@@ -225,20 +225,20 @@ class AIGradingTest(CacheResetTest):
     @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
     def test_submit_submission_not_found(self):
         with self.assertRaises(AIGradingRequestError):
-            ai_api.submit("no_such_submission", RUBRIC, ALGORITHM_ID)
+            ai_api.on_init("no_such_submission", rubric=RUBRIC, algorithm_id=ALGORITHM_ID)

     @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
     def test_submit_invalid_rubric(self):
         invalid_rubric = {'not_valid': True}
         with self.assertRaises(AIGradingRequestError):
-            ai_api.submit(self.submission_uuid, invalid_rubric, ALGORITHM_ID)
+            ai_api.on_init(self.submission_uuid, rubric=invalid_rubric, algorithm_id=ALGORITHM_ID)

     @mock.patch.object(AIGradingWorkflow.objects, 'create')
     @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
-    def test_submit_database_error(self, mock_call):
+    def test_submit_database_error_create(self, mock_call):
         mock_call.side_effect = DatabaseError("KABOOM!")
         with self.assertRaises(AIGradingInternalError):
-            ai_api.submit(self.submission_uuid, RUBRIC, ALGORITHM_ID)
+            ai_api.on_init(self.submission_uuid, rubric=RUBRIC, algorithm_id=ALGORITHM_ID)

     @mock.patch.object(Assessment.objects, 'filter')
     def test_get_latest_assessment_database_error(self, mock_call):
@@ -251,21 +251,21 @@ class AIGradingTest(CacheResetTest):
         with mock.patch('openassessment.assessment.api.ai.grading_tasks.grade_essay.apply_async') as mock_grade:
             mock_grade.side_effect = NotConfigured
             with self.assertRaises(AIGradingInternalError):
-                ai_api.submit(self.submission_uuid, RUBRIC, ALGORITHM_ID)
+                ai_api.on_init(self.submission_uuid, rubric=RUBRIC, algorithm_id=ALGORITHM_ID)

     @mock.patch.object(AIClassifierSet.objects, 'filter')
     @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
-    def test_submit_database_error(self, mock_filter):
+    def test_submit_database_error_filter(self, mock_filter):
         mock_filter.side_effect = DatabaseError("rumble... ruMBLE, RUMBLE! BOOM!")
         with self.assertRaises(AIGradingInternalError):
-            ai_api.submit(self.submission_uuid, RUBRIC, ALGORITHM_ID)
+            ai_api.on_init(self.submission_uuid, rubric=RUBRIC, algorithm_id=ALGORITHM_ID)

     @mock.patch.object(AIClassifierSet.objects, 'filter')
     @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
     def test_submit_no_classifiers(self, mock_call):
         mock_call.return_value = []
         with mock.patch('openassessment.assessment.api.ai.logger.info') as mock_log:
-            ai_api.submit(self.submission_uuid, RUBRIC, ALGORITHM_ID)
+            ai_api.on_init(self.submission_uuid, rubric=RUBRIC, algorithm_id=ALGORITHM_ID)
             argument = mock_log.call_args[0][0]
             self.assertTrue(u"no classifiers are available" in argument)
@@ -274,19 +274,26 @@ class AIGradingTest(CacheResetTest):
         with mock.patch('openassessment.assessment.api.ai.AIGradingWorkflow.start_workflow') as mock_start:
             mock_start.side_effect = sub_api.SubmissionInternalError
             with self.assertRaises(AIGradingInternalError):
-                ai_api.submit(self.submission_uuid, RUBRIC, ALGORITHM_ID)
+                ai_api.on_init(self.submission_uuid, rubric=RUBRIC, algorithm_id=ALGORITHM_ID)

-class AIUntrainedGradingTest:
+class AIUntrainedGradingTest(CacheResetTest):
     """
     Tests that do not run the setup to train classifiers.
     """
+    def setUp(self):
+        """
+        Create a submission.
+        """
+        # Create a submission
+        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
+        self.submission_uuid = submission['uuid']

     @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
     def test_no_score(self):
         # Test that no score has been created, and get_score returns None.
-        ai_api.submit(self.submission_uuid, RUBRIC, ALGORITHM_ID)
+        ai_api.on_init(self.submission_uuid, rubric=RUBRIC, algorithm_id=ALGORITHM_ID)
         score = ai_api.get_score(self.submission_uuid, {})
         self.assertIsNone(score)
@@ -303,10 +310,10 @@ class AIReschedulingTest(CacheResetTest):
         Sets up each test so that it will have unfinished tasks of both types
         """
         # 1) Schedule Grading, have the scheduling succeeed but the grading fail because no classifiers exist
-        for i in range(0, 10):
+        for _ in range(0, 10):
             submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
             self.submission_uuid = submission['uuid']
-            ai_api.submit(self.submission_uuid, RUBRIC, ALGORITHM_ID)
+            ai_api.on_init(self.submission_uuid, rubric=RUBRIC, algorithm_id=ALGORITHM_ID)

         # 2) Schedule Training, have it INTENTIONALLY fail. Now we are a point where both parts need to be rescheduled
         patched_method = 'openassessment.assessment.api.ai.training_tasks.train_classifiers.apply_async'
@@ -409,7 +416,7 @@ class AIReschedulingTest(CacheResetTest):
         for i in range(0, 125):
             submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
             self.submission_uuid = submission['uuid']
-            ai_api.submit(self.submission_uuid, RUBRIC, ALGORITHM_ID)
+            ai_api.on_init(self.submission_uuid, rubric=RUBRIC, algorithm_id=ALGORITHM_ID)

         # Both training and grading should not be complete.
         self._assert_complete(grading_done=False, training_done=False)
@@ -460,6 +467,38 @@ class AIReschedulingTest(CacheResetTest):

 class AIAutomaticGradingTest(CacheResetTest):

+    @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
+    def test_automatic_grade(self):
+        # Create some submissions which will not succeed. No classifiers yet exist.
+        for _ in range(0, 10):
+            submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
+            ai_api.on_init(submission['uuid'], rubric=RUBRIC, algorithm_id=ALGORITHM_ID)
+
+        # Check that there are unresolved grading workflows
+        self._assert_complete(training_done=True, grading_done=False)
+
+        # Create and train a classifier set. This should set off automatic grading.
+        ai_api.train_classifiers(RUBRIC, EXAMPLES, COURSE_ID, ITEM_ID, ALGORITHM_ID)
+
+        # Check to make sure that all work is done.
+        self._assert_complete(training_done=True, grading_done=True)
+
+    @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
+    def test_automatic_grade_error(self):
+        # Create some submissions which will not succeed. No classifiers yet exist.
+        for _ in range(0, 10):
+            submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
+            ai_api.on_init(submission['uuid'], rubric=RUBRIC, algorithm_id=ALGORITHM_ID)
+
+        # Check that there are unresolved grading workflows
+        self._assert_complete(training_done=True, grading_done=False)
+
+        patched_method = 'openassessment.assessment.worker.training.reschedule_grading_tasks.apply_async'
+        with mock.patch(patched_method) as mocked_reschedule_grading:
+            mocked_reschedule_grading.side_effect = AIGradingInternalError("Kablewey.")
+            with self.assertRaises(AIGradingInternalError):
+                ai_api.train_classifiers(RUBRIC, EXAMPLES, COURSE_ID, ITEM_ID, ALGORITHM_ID)
+
     def _assert_complete(self, training_done=None, grading_done=None):
         """
         Asserts that the Training and Grading are of a given completion status
@@ -469,7 +508,7 @@ class AIAutomaticGradingTest(CacheResetTest):
            training_done (bool): whether the user expects there to be unfinished training workflows
            grading_done (bool): whether the user expects there to be unfinished grading workflows
         """
-        incomplete_training_workflows = AITrainingWorkflow.get_incomplete_workflows(course_id=COURSE_ID,item_id=ITEM_ID)
+        incomplete_training_workflows = AITrainingWorkflow.get_incomplete_workflows(course_id=COURSE_ID, item_id=ITEM_ID)
         incomplete_grading_workflows = AIGradingWorkflow.get_incomplete_workflows(course_id=COURSE_ID, item_id=ITEM_ID)
         if training_done is not None:
             self.assertEqual(self._is_empty_generator(incomplete_training_workflows), training_done)
@@ -492,38 +531,3 @@ class AIAutomaticGradingTest(CacheResetTest):
                 return False
         except StopIteration:
             return True
-
-    @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
-    def test_automatic_grade(self):
-        # Create some submissions which will not succeed. No classifiers yet exist.
-        for i in range(0, 10):
-            submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
-            self.submission_uuid = submission['uuid']
-            ai_api.submit(self.submission_uuid, RUBRIC, ALGORITHM_ID)
-
-        # Check that there are unresolved grading workflows
-        self._assert_complete(training_done=True, grading_done=False)
-
-        # Create and train a classifier set. This should set off automatic grading.
-        ai_api.train_classifiers(RUBRIC, EXAMPLES, COURSE_ID, ITEM_ID, ALGORITHM_ID)
-
-        # Check to make sure that all work is done.
-        self._assert_complete(training_done=True, grading_done=True)
-
-    @override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
-    def test_automatic_grade_error(self):
-        # Create some submissions which will not succeed. No classifiers yet exist.
-        for i in range(0, 10):
-            submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
-            self.submission_uuid = submission['uuid']
-            ai_api.submit(self.submission_uuid, RUBRIC, ALGORITHM_ID)
-
-        # Check that there are unresolved grading workflows
-        self._assert_complete(training_done=True, grading_done=False)
-
-        patched_method = 'openassessment.assessment.worker.training.reschedule_grading_tasks.apply_async'
-        with mock.patch(patched_method) as mocked_reschedule_grading:
-            mocked_reschedule_grading.side_effect = AIGradingInternalError("Kablewey.")
-            with self.assertRaises(AIGradingInternalError):
-                ai_api.train_classifiers(RUBRIC, EXAMPLES, COURSE_ID, ITEM_ID, ALGORITHM_ID)
@@ -1095,12 +1095,12 @@ class TestPeerApi(CacheResetTest):
             1
         )

-    @patch.object(Assessment.objects, 'filter')
     @raises(peer_api.PeerAssessmentInternalError)
-    def test_max_score_db_error(self, mock_filter):
-        mock_filter.side_effect = DatabaseError("Bad things happened")
+    def test_max_score_db_error(self):
         tim, _ = self._create_student_and_submission("Tim", "Tim's answer")
-        peer_api.get_rubric_max_scores(tim["uuid"])
+        with patch.object(Assessment.objects, 'filter') as mock_filter:
+            mock_filter.side_effect = DatabaseError("Bad things happened")
+            peer_api.get_rubric_max_scores(tim["uuid"])

     @patch.object(PeerWorkflow.objects, 'get')
     @raises(peer_api.PeerAssessmentInternalError)
...
@@ -23,7 +23,7 @@ from .errors import (
 logger = logging.getLogger(__name__)


-def create_workflow(submission_uuid, steps, rubric=None, algorithm_id=None):
+def create_workflow(submission_uuid, steps, on_init_params=None):
     """Begins a new assessment workflow.

     Create a new workflow that other assessments will record themselves against.
@@ -31,14 +31,13 @@ def create_workflow(submission_uuid, steps, rubric=None, algorithm_id=None):
     Args:
         submission_uuid (str): The UUID for the submission that all our
             assessments will be evaluating.
-        rubric (dict): The rubric that will be used for grading in this workflow.
-            The rubric is only used when Example Based Assessment is configured.
-        algorithm_id (str): The version of the AI that will be used for evaluating
-            submissions, if Example Based Assessments are configured for this
-            location.
         steps (list): List of steps that are part of the workflow, in the order
             that the user must complete them. Example: `["peer", "self"]`

+    Kwargs:
+        on_init_params (dict): The parameters to pass to each assessment module
+            on init. Keys are the assessment step names.
+
     Returns:
         dict: Assessment workflow information with the following
             `uuid` = UUID of this `AssessmentWorkflow`
@@ -68,8 +67,12 @@ def create_workflow(submission_uuid, steps, rubric=None, algorithm_id=None):
            .format(submission_uuid, specific_err_msg)
        )

+    if on_init_params is None:
+        on_init_params = dict()
+
    try:
-        submission_dict = sub_api.get_submission_and_student(submission_uuid)
+        workflow = AssessmentWorkflow.start_workflow(submission_uuid, steps, on_init_params)
+        return AssessmentWorkflowSerializer(workflow).data
    except sub_api.SubmissionNotFoundError:
        err_msg = sub_err_msg("submission not found")
        logger.error(err_msg)
@@ -84,78 +87,17 @@ def create_workflow(submission_uuid, steps, rubric=None, algorithm_id=None):
            u"retrieving submission {} failed with unknown error: {}"
            .format(submission_uuid, err)
        )
+    except DatabaseError:
+        err_msg = u"Could not create assessment workflow for submission UUID: {}".format(submission_uuid)
+        logger.exception(err_msg)
+        raise AssessmentWorkflowInternalError(err_msg)
+    except:
+        err_msg = (
+            u"An unexpected error occurred while creating "
+            u"the workflow for submission UUID {}"
+        ).format(submission_uuid)
+        logger.exception(err_msg)
+        raise AssessmentWorkflowInternalError(err_msg)
-
-    # Raise an error if they specify a step we don't recognize...
-    invalid_steps = set(steps) - set(AssessmentWorkflow.STEPS)
-    if invalid_steps:
-        raise AssessmentWorkflowRequestError(
-            u"The following steps were not recognized: {}; Must be one of {}".format(
-                invalid_steps, AssessmentWorkflow.STEPS
-            )
-        )
-
-    # We're not using a serializer to deserialize this because the only variable
-    # we're getting from the outside is the submission_uuid, which is already
-    # validated by this point.
-    status = AssessmentWorkflow.STATUS.waiting
-    step = steps[0]
-
-    # AI will not set the Workflow Status, since it will immediately replace the
-    # status with the next step, or "waiting".
-    if step == "ai":
-        step = steps[1] if len(steps) > 1 else None
-        if not rubric or not algorithm_id:
-            err_msg = u"Rubric and Algorithm ID must be configured for Example Based Assessment."
-            raise AssessmentWorkflowInternalError(err_msg)
-        try:
-            ai_api.submit(submission_uuid, rubric, algorithm_id)
-        except AIError as err:
-            err_msg = u"Could not submit submission for Example Based Grading: {}".format(err)
-            logger.exception(err_msg)
-            raise AssessmentWorkflowInternalError(err_msg)
-
-    if step == "peer":
-        status = AssessmentWorkflow.STATUS.peer
-        try:
-            peer_api.on_start(submission_uuid)
-        except PeerAssessmentError as err:
-            err_msg = u"Could not create assessment workflow: {}".format(err)
-            logger.exception(err_msg)
-            raise AssessmentWorkflowInternalError(err_msg)
-    elif step == "self":
-        status = AssessmentWorkflow.STATUS.self
-    elif step == "training":
-        status = AssessmentWorkflow.STATUS.training
-        try:
-            training_api.on_start(submission_uuid)
-        except StudentTrainingInternalError as err:
-            err_msg = u"Could not create assessment workflow: {}".format(err)
-            logger.exception(err_msg)
-            raise AssessmentWorkflowInternalError(err_msg)
-
-    try:
-        workflow = AssessmentWorkflow.objects.create(
-            submission_uuid=submission_uuid,
-            status=status,
-            course_id=submission_dict['student_item']['course_id'],
-            item_id=submission_dict['student_item']['item_id'],
-        )
-        workflow_steps = [
-            AssessmentWorkflowStep(
-                workflow=workflow, name=step, order_num=i
-            )
-            for i, step in enumerate(steps)
-        ]
-        workflow.steps.add(*workflow_steps)
-    except (
-        DatabaseError,
-        sub_api.SubmissionError
-    ) as err:
-        err_msg = u"Could not create assessment workflow: {}".format(err)
-        logger.exception(err_msg)
-        raise AssessmentWorkflowInternalError(err_msg)
-
-    return AssessmentWorkflowSerializer(workflow).data


 def get_workflow_for_submission(submission_uuid, assessment_requirements):
...
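A sketch of the new calling convention for the workflow API, assuming a configured ora2/Django environment and an existing submission; the rubric, algorithm id, and submission UUID values are placeholders:

```python
from openassessment.workflow import api as workflow_api

on_init_params = {
    'ai': {
        'rubric': {'criteria': []},  # placeholder serialized rubric
        'algorithm_id': 'ease',      # placeholder algorithm identifier
    }
    # Steps that take no init parameters simply have no entry here.
}

workflow = workflow_api.create_workflow(
    'not-a-real-uuid',               # placeholder submission UUID
    ['ai', 'self'],
    on_init_params=on_init_params,
)
# Per the handlers above, database and unexpected failures are wrapped in
# AssessmentWorkflowInternalError; bad submission lookups surface as
# workflow request errors.
```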
@@ -12,12 +12,14 @@ need to then generate a matching migration for it using:
 import logging
 import importlib

 from django.conf import settings
-from django.db import models
+from django.db import models, transaction, DatabaseError
+from django.dispatch import receiver
 from django_extensions.db.fields import UUIDField
 from django.utils.timezone import now
 from model_utils import Choices
 from model_utils.models import StatusModel, TimeStampedModel
 from submissions import api as sub_api
+from openassessment.assessment.signals import assessment_complete_signal

 from .errors import AssessmentApiLoadError
@@ -33,6 +35,7 @@ DEFAULT_ASSESSMENT_API_DICT = {
     'peer': 'openassessment.assessment.api.peer',
     'self': 'openassessment.assessment.api.self',
     'training': 'openassessment.assessment.api.student_training',
+    'ai': 'openassessment.assessment.api.ai',
 }
 ASSESSMENT_API_DICT = getattr(
     settings, 'ORA2_ASSESSMENTS',
@@ -46,7 +49,7 @@ ASSESSMENT_API_DICT = getattr(
 # We then use that score as the student's overall score.
 # This Django setting is a list of assessment steps (defined in `settings.ORA2_ASSESSMENTS`)
 # in descending priority order.
-DEFAULT_ASSESSMENT_SCORE_PRIORITY = ['peer', 'self']
+DEFAULT_ASSESSMENT_SCORE_PRIORITY = ['peer', 'self', 'ai']
 ASSESSMENT_SCORE_PRIORITY = getattr(
     settings, 'ORA2_ASSESSMENT_SCORE_PRIORITY',
     DEFAULT_ASSESSMENT_SCORE_PRIORITY
@@ -93,6 +96,80 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
         ordering = ["-created"]
         # TODO: In migration, need a non-unique index on (course_id, item_id, status)

+    @classmethod
+    @transaction.commit_on_success
+    def start_workflow(cls, submission_uuid, step_names, on_init_params):
+        """
+        Start a new workflow.
+
+        Args:
+            submission_uuid (str): The UUID of the submission associated with this workflow.
+            step_names (list): The names of the assessment steps in the workflow.
+            on_init_params (dict): The parameters to pass to each assessment module
+                on init. Keys are the assessment step names.
+
+        Returns:
+            AssessmentWorkflow
+
+        Raises:
+            SubmissionNotFoundError
+            SubmissionRequestError
+            SubmissionInternalError
+            DatabaseError
+            Assessment-module specific errors
+        """
+        submission_dict = sub_api.get_submission_and_student(submission_uuid)
+
+        # Create the workflow and step models in the database
+        # For now, set the status to waiting; we'll modify it later
+        # based on the first step in the workflow.
+        workflow = cls.objects.create(
+            submission_uuid=submission_uuid,
+            status=AssessmentWorkflow.STATUS.waiting,
+            course_id=submission_dict['student_item']['course_id'],
+            item_id=submission_dict['student_item']['item_id']
+        )
+        workflow_steps = [
+            AssessmentWorkflowStep(
+                workflow=workflow, name=step, order_num=i
+            )
+            for i, step in enumerate(step_names)
+        ]
+        workflow.steps.add(*workflow_steps)
+
+        # Initialize the assessment APIs
+        has_started_first_step = False
+        for step in workflow_steps:
+            api = step.api()
+
+            if api is not None:
+                # Initialize the assessment module
+                # We do this for every assessment module
+                on_init_func = getattr(api, 'on_init', lambda submission_uuid, **params: None)
+                on_init_func(submission_uuid, **on_init_params.get(step.name, {}))
+
+                # For the first valid step, update the workflow status
+                # and notify the assessment module that it's being started
+                if not has_started_first_step:
+                    # Update the workflow
+                    workflow.status = step.name
+                    workflow.save()
+
+                    # Notify the assessment module that it's being started
+                    on_start_func = getattr(api, 'on_start', lambda submission_uuid: None)
+                    on_start_func(submission_uuid)
+
+                    # Remember that we've already started the first step
+                    has_started_first_step = True
+
+        # Update the workflow (in case some of the assessment modules are automatically complete)
+        # We do NOT pass in requirements, on the assumption that any assessment module
+        # that accepts requirements would NOT automatically complete.
+        workflow.update_from_assessments(None)
+
+        # Return the newly created workflow
+        return workflow

     @property
     def score(self):
         """Latest score for the submission we're tracking.
@@ -146,6 +223,14 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
         this submission, then we record the score in the submissions API and move our
         `status` to `done`.

+        By convention, if `assessment_requirements` is `None`, then assessment
+        modules that need requirements should automatically say that they're incomplete.
+        This allows us to update the workflow even when we don't know the
+        current state of the problem. For example, if we're updating the workflow
+        at the completion of an asynchronous call, we won't necessarily know the
+        current state of the problem, but we would still want to update assessments
+        that don't have any requirements.
+
         Args:
             assessment_requirements (dict): Dictionary passed to the assessment API.
                 This defines the requirements for each assessment step; the APIs
@@ -202,7 +287,10 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
                 # Check if the assessment API defines a score function at all
                 get_score_func = getattr(assessment_step.api(), 'get_score', None)
                 if get_score_func is not None:
-                    requirements = assessment_requirements.get(assessment_step_name, {})
+                    if assessment_requirements is None:
+                        requirements = None
+                    else:
+                        requirements = assessment_requirements.get(assessment_step_name, {})
                     score = get_score_func(self.submission_uuid, requirements)
                     break
@@ -304,7 +392,7 @@ class AssessmentWorkflowStep(models.Model):
            msg = (
                u"No assessment configured for '{name}'. "
                u"Check the ORA2_ASSESSMENTS Django setting."
-            ).format(self.name)
+            ).format(name=self.name)
            logger.warning(msg)
            return None
@@ -318,7 +406,10 @@ class AssessmentWorkflowStep(models.Model):
        """
        # Once a step is completed, it will not be revisited based on updated requirements.
        step_changed = False
-        step_reqs = assessment_requirements.get(self.name, {})
+        if assessment_requirements is None:
+            step_reqs = None
+        else:
+            step_reqs = assessment_requirements.get(self.name, {})

        default_finished = lambda submission_uuid, step_reqs: True
        submitter_finished = getattr(self.api(), 'submitter_is_finished', default_finished)
@@ -336,3 +427,45 @@ class AssessmentWorkflowStep(models.Model):
        if step_changed:
            self.save()

+@receiver(assessment_complete_signal)
+def update_workflow_async(sender, **kwargs):
+    """
+    Register a receiver for the update workflow signal
+    This allows asynchronous processes to update the workflow
+
+    Args:
+        sender (object): Not used
+
+    Kwargs:
+        submission_uuid (str): The UUID of the submission associated
+            with the workflow being updated.
+
+    Returns:
+        None
+
+    """
+    submission_uuid = kwargs.get('submission_uuid')
+    if submission_uuid is None:
+        logger.error("Update workflow signal called without a submission UUID")
+        return
+
+    try:
+        workflow = AssessmentWorkflow.objects.get(submission_uuid=submission_uuid)
+        workflow.update_from_assessments(None)
+    except AssessmentWorkflow.DoesNotExist:
+        msg = u"Could not retrieve workflow for submission with UUID {}".format(submission_uuid)
+        logger.exception(msg)
+    except DatabaseError:
+        msg = (
+            u"Database error occurred while updating "
+            u"the workflow for submission UUID {}"
+        ).format(submission_uuid)
+        logger.exception(msg)
+    except:
+        msg = (
+            u"Unexpected error occurred while updating the workflow "
+            u"for submission UUID {}"
+        ).format(submission_uuid)
+        logger.exception(msg)
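`start_workflow` above treats `on_init` and `on_start` as optional hooks on each assessment API module, substituting no-op lambdas via `getattr` when a module does not define them. A self-contained sketch of that dispatch pattern with stand-in step modules (none of these names are ora2's):

```python
import types

# Stand-in "assessment API modules": one defines on_init, the other only on_start.
ai_api = types.SimpleNamespace(
    on_init=lambda submission_uuid, **params: print("ai init", submission_uuid, params)
)
peer_api = types.SimpleNamespace(
    on_start=lambda submission_uuid: print("peer started", submission_uuid)
)

STEP_APIS = {'ai': ai_api, 'peer': peer_api}
ON_INIT_PARAMS = {'ai': {'rubric': {}, 'algorithm_id': 'ease'}}


def start(submission_uuid, step_names):
    # Every step gets on_init (or a no-op); only the first step gets on_start.
    for name in step_names:
        api = STEP_APIS[name]
        on_init = getattr(api, 'on_init', lambda submission_uuid, **params: None)
        on_init(submission_uuid, **ON_INIT_PARAMS.get(name, {}))
    first_api = STEP_APIS[step_names[0]]
    on_start = getattr(first_api, 'on_start', lambda submission_uuid: None)
    on_start(submission_uuid)


start('abcd1234', ['ai', 'peer'])
```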
@@ -16,7 +16,7 @@ import submissions.api as sub_api
 from openassessment.assessment.api import peer as peer_api
 from openassessment.assessment.api import self as self_api
 from openassessment.workflow.models import AssessmentWorkflow
-from openassessment.workflow.errors import AssessmentApiLoadError
+from openassessment.workflow.errors import AssessmentWorkflowInternalError

 RUBRIC_DICT = {
@@ -34,6 +34,13 @@ RUBRIC_DICT = {
 ALGORITHM_ID = "Ease"

+ON_INIT_PARAMS = {
+    'ai': {
+        'rubric': RUBRIC_DICT,
+        'algorithm_id': ALGORITHM_ID,
+    }
+}
+
 ITEM_1 = {
     "student_id": "Optimus Prime 001",
     "item_id": "Matrix of Leadership",
@@ -51,7 +58,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
         if "ai" in data["steps"]:
             first_step = data["steps"][1] if len(data["steps"]) > 1 else "waiting"
         submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
-        workflow = workflow_api.create_workflow(submission["uuid"], data["steps"], RUBRIC_DICT, ALGORITHM_ID)
+        workflow = workflow_api.create_workflow(submission["uuid"], data["steps"], ON_INIT_PARAMS)

         workflow_keys = set(workflow.keys())
         self.assertEqual(
@@ -134,7 +141,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     def test_update_peer_workflow(self):
         submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
-        workflow = workflow_api.create_workflow(submission["uuid"], ["training", "peer"], RUBRIC_DICT, ALGORITHM_ID)
+        workflow = workflow_api.create_workflow(submission["uuid"], ["training", "peer"], ON_INIT_PARAMS)
         StudentTrainingWorkflow.create_workflow(submission_uuid=submission["uuid"])
         requirements = {
             "training": {
@@ -177,20 +184,20 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     def test_need_valid_submission_uuid(self, data):
         # submission doesn't exist
         with self.assertRaises(workflow_api.AssessmentWorkflowRequestError):
-            workflow = workflow_api.create_workflow("xxxxxxxxxxx", data["steps"])
+            workflow = workflow_api.create_workflow("xxxxxxxxxxx", data["steps"], ON_INIT_PARAMS)

         # submission_uuid is the wrong type
         with self.assertRaises(workflow_api.AssessmentWorkflowRequestError):
-            workflow = workflow_api.create_workflow(123, data["steps"])
+            workflow = workflow_api.create_workflow(123, data["steps"], ON_INIT_PARAMS)

     @patch.object(ai_api, 'assessment_is_finished')
     @patch.object(ai_api, 'get_score')
     def test_ai_score_set(self, mock_score, mock_is_finished):
         submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
-        workflow_api.create_workflow(submission["uuid"], ["ai"], RUBRIC_DICT, ALGORITHM_ID)
         mock_is_finished.return_value = True
         score = {"points_earned": 7, "points_possible": 10}
         mock_score.return_value = score
+        workflow_api.create_workflow(submission["uuid"], ["ai"], ON_INIT_PARAMS)
         workflow = workflow_api.get_workflow_for_submission(submission["uuid"], {})
         self.assertEquals(workflow["score"]["points_earned"], score["points_earned"])
         self.assertEquals(workflow["score"]["points_possible"], score["points_possible"])
@@ -200,21 +207,27 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     @raises(workflow_api.AssessmentWorkflowInternalError)
     def test_create_ai_workflow_no_rubric(self, rubric, algorithm_id):
         submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
-        workflow_api.create_workflow(submission["uuid"], ["ai"], rubric, algorithm_id)
+        on_init_params = {
+            'ai': {
+                'rubric': rubric,
+                'algorithm_id': algorithm_id,
+            }
+        }
+        workflow_api.create_workflow(submission["uuid"], ["ai"], on_init_params)

-    @patch.object(ai_api, 'submit')
+    @patch.object(ai_api, 'on_init')
     @raises(workflow_api.AssessmentWorkflowInternalError)
-    def test_ai_submit_failures(self, mock_submit):
-        mock_submit.side_effect = AIError("Kaboom!")
+    def test_ai_on_init_failures(self, mock_on_init):
+        mock_on_init.side_effect = AIError("Kaboom!")
         submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
-        workflow_api.create_workflow(submission["uuid"], ["ai"], RUBRIC_DICT, ALGORITHM_ID)
+        workflow_api.create_workflow(submission["uuid"], ["ai"], ON_INIT_PARAMS)

     @patch.object(Submission.objects, 'get')
     @ddt.file_data('data/assessments.json')
     @raises(workflow_api.AssessmentWorkflowInternalError)
     def test_unexpected_submissions_errors_wrapped(self, data, mock_get):
         mock_get.side_effect = Exception("Kaboom!")
-        workflow_api.create_workflow("zzzzzzzzzzzzzzz", data["steps"])
+        workflow_api.create_workflow("zzzzzzzzzzzzzzz", data["steps"], ON_INIT_PARAMS)

     @patch.object(AssessmentWorkflow.objects, 'create')
     @ddt.file_data('data/assessments.json')
@@ -222,14 +235,14 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     def test_unexpected_workflow_errors_wrapped(self, data, mock_create):
         mock_create.side_effect = DatabaseError("Kaboom!")
         submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
-        workflow_api.create_workflow(submission["uuid"], data["steps"])
+        workflow_api.create_workflow(submission["uuid"], data["steps"], ON_INIT_PARAMS)

     @patch.object(PeerWorkflow.objects, 'get_or_create')
     @raises(workflow_api.AssessmentWorkflowInternalError)
     def test_unexpected_peer_workflow_errors_wrapped(self, mock_create):
         mock_create.side_effect = DatabaseError("Kaboom!")
         submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
-        workflow_api.create_workflow(submission["uuid"], ["peer", "self"])
+        workflow_api.create_workflow(submission["uuid"], ["peer", "self"], ON_INIT_PARAMS)

     @patch.object(AssessmentWorkflow.objects, 'get')
     @ddt.file_data('data/assessments.json')
@@ -252,7 +265,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     def test_unexpected_workflow_get_errors_wrapped(self, data, mock_get):
         mock_get.side_effect = Exception("Kaboom!")
         submission = sub_api.create_submission(ITEM_1, "We talk TV!")
-        workflow = workflow_api.create_workflow(submission["uuid"], data["steps"])
+        workflow = workflow_api.create_workflow(submission["uuid"], data["steps"], ON_INIT_PARAMS)
         workflow_api.get_workflow_for_submission(workflow["uuid"], {})

     def test_get_status_counts(self):
@@ -332,9 +345,10 @@ class TestAssessmentWorkflowApi(CacheResetTest):
             "item_type": "openassessment",
         }, "test answer")

-        workflow_api.create_workflow(submission['uuid'], ['self'])
+        with self.assertRaises(AssessmentWorkflowInternalError):
+            workflow_api.create_workflow(submission['uuid'], ['self'], ON_INIT_PARAMS)

-        with self.assertRaises(AssessmentApiLoadError):
+        with self.assertRaises(AssessmentWorkflowInternalError):
             workflow_api.update_from_assessments(submission['uuid'], {})

     def _create_workflow_with_status(
@@ -368,7 +382,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
             "item_type": "openassessment",
         }, answer)

-        workflow = workflow_api.create_workflow(submission['uuid'], steps)
+        workflow = workflow_api.create_workflow(submission['uuid'], steps, ON_INIT_PARAMS)
         workflow_model = AssessmentWorkflow.objects.get(uuid=workflow['uuid'])
         workflow_model.status = status
         workflow_model.save()
...
"""
Tests for Django signals and receivers defined by the workflow API.
"""
import mock
from django.db import DatabaseError
import ddt
from submissions import api as sub_api
from openassessment.test_utils import CacheResetTest
from openassessment.workflow import api as workflow_api
from openassessment.workflow.models import AssessmentWorkflow
from openassessment.assessment.signals import assessment_complete_signal
@ddt.ddt
class UpdateWorkflowSignalTest(CacheResetTest):
"""
Test for the update workflow signal.
"""
STUDENT_ITEM = {
"student_id": "test student",
"item_id": "test item",
"course_id": "test course",
"item_type": "openassessment",
}
def setUp(self):
"""
Create a submission.
"""
submission = sub_api.create_submission(self.STUDENT_ITEM, "test answer")
self.submission_uuid = submission['uuid']
def test_update_signal_no_workflow(self):
# Without defining a workflow, send the signal
# The receiver should catch and log the exception
assessment_complete_signal.send(sender=None, submission_uuid=self.submission_uuid)
def test_update_signal_no_submission_uuid(self):
# Try to send the signal without specifying a submission UUID
# The receiver should catch and log the exception
assessment_complete_signal.send(sender=None)
def test_update_signal_updates_workflow(self):
# Start a workflow for the submission
workflow_api.create_workflow(self.submission_uuid, ['self'])
# Spy on the workflow update call
with mock.patch.object(AssessmentWorkflow, 'update_from_assessments') as mock_update:
# Send a signal to update the workflow
assessment_complete_signal.send(sender=None, submission_uuid=self.submission_uuid)
# Verify that the workflow model update was called
mock_update.assert_called_once_with(None)
@ddt.data(DatabaseError, IOError)
@mock.patch.object(AssessmentWorkflow.objects, 'get')
def test_errors(self, error, mock_call):
# Start a workflow for the submission
workflow_api.create_workflow(self.submission_uuid, ['self'])
# The receiver should catch and log the error
mock_call.side_effect = error("OH NO!")
assessment_complete_signal.send(sender=None, submission_uuid=self.submission_uuid)
<openassessment>
    <title>Open Assessment Test</title>
    <prompt>Example-based assessment</prompt>
    <rubric>
        <prompt>Read for conciseness, clarity of thought, and form.</prompt>
        <criterion>
            <name>Ideas</name>
            <prompt>How good are the ideas?</prompt>
            <option points="0">
                <name>Poor</name>
                <explanation>Poor job!</explanation>
            </option>
            <option points="1">
                <name>Fair</name>
                <explanation>Fair job</explanation>
            </option>
            <option points="3">
                <name>Good</name>
                <explanation>Good job</explanation>
            </option>
        </criterion>
        <criterion>
            <name>Content</name>
            <prompt>How good is the content?</prompt>
            <option points="0">
                <name>Poor</name>
                <explanation>Poor job!</explanation>
            </option>
            <option points="1">
                <name>Fair</name>
                <explanation>Fair job</explanation>
            </option>
            <option points="3">
                <name>Good</name>
                <explanation>Good job</explanation>
            </option>
        </criterion>
    </rubric>
    <assessments>
        <assessment name="example-based-assessment" algorithm_id="fake">
            <example>
                <answer>Example Answer One</answer>
                <select criterion="Ideas" option="Poor" />
                <select criterion="Content" option="Poor" />
            </example>
            <example>
                <answer>Example Answer Two</answer>
                <select criterion="Ideas" option="Fair" />
                <select criterion="Content" option="Fair" />
            </example>
            <example>
                <answer>Example Answer Three</answer>
                <select criterion="Ideas" option="Fair" />
                <select criterion="Content" option="Good" />
            </example>
            <example>
                <answer>Example Answer Four</answer>
                <select criterion="Ideas" option="Poor" />
                <select criterion="Content" option="Good" />
            </example>
        </assessment>
    </assessments>
</openassessment>
"""
Integration test for example-based assessment (AI).
"""
import json
import mock
from django.test.utils import override_settings
from submissions import api as sub_api
from openassessment.xblock.openassessmentblock import OpenAssessmentBlock
from .base import XBlockHandlerTestCase, scenario
class AIAssessmentIntegrationTest(XBlockHandlerTestCase):
"""
Integration test for example-based assessment (AI).
"""
SUBMISSION = json.dumps({'submission': 'This is a submission!'})
AI_ALGORITHMS = {
'fake': 'openassessment.assessment.worker.algorithm.FakeAIAlgorithm'
}
@mock.patch.object(OpenAssessmentBlock, 'is_admin', new_callable=mock.PropertyMock)
@override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
@scenario('data/example_based_only.xml', user_id='Bob')
def test_asynch_generate_score(self, xblock, mock_is_admin):
# Test that AI grading, which creates assessments asynchronously,
# updates the workflow so students can receive a score.
mock_is_admin.return_value = True
# Train classifiers for the problem
self.request(xblock, 'schedule_training', json.dumps({}), response_format='json')
# Submit a response
self.request(xblock, 'submit', self.SUBMISSION, response_format='json')
# BEFORE viewing the grade page, check that we get a score
score = sub_api.get_score(xblock.get_student_item_dict())
self.assertIsNot(score, None)
self.assertEqual(score['submission_uuid'], xblock.submission_uuid)
@@ -51,12 +51,14 @@ class WorkflowMixin(object):
         """
         steps = self._create_step_list()
-        rubric_dict = create_rubric_dict(self.prompt, self.rubric_criteria)
         ai_module = self.get_assessment_module('example-based-assessment')
-        algorithm_id = ai_module["algorithm_id"] if ai_module else None
-        workflow_api.create_workflow(
-            submission_uuid, steps, rubric=rubric_dict, algorithm_id=algorithm_id
-        )
+        on_init_params = {
+            'ai': {
+                'rubric': create_rubric_dict(self.prompt, self.rubric_criteria),
+                'algorithm_id': ai_module["algorithm_id"] if ai_module else None
+            }
+        }
+        workflow_api.create_workflow(submission_uuid, steps, on_init_params=on_init_params)

     def workflow_requirements(self):
         """
...
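A trivially runnable sketch of the parameter-building step above, pulled out of the XBlock for illustration; the helper name is made up and `create_rubric_dict` is replaced by a placeholder dict:

```python
def build_on_init_params(ai_module, rubric_dict):
    # The 'ai' entry is always built, but it only has an effect when "ai"
    # is one of the workflow steps configured for this problem.
    return {
        'ai': {
            'rubric': rubric_dict,
            'algorithm_id': ai_module["algorithm_id"] if ai_module else None,
        }
    }


print(build_on_init_params(None, {'criteria': []}))
print(build_on_init_params({'algorithm_id': 'ease'}, {'criteria': []}))
```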