Commit 6567fadd by Will Daly

Remove workflow step validity check (to support rollbacks)

Refactor workflow API to avoid hard-coding first step logic.
parent b108083b
@@ -31,6 +31,10 @@ def submitter_is_finished(submission_uuid, requirements):
"""
Check whether the submitter has made the required number of assessments.
If the requirements dict is None (because we're being updated
asynchronously or when the workflow is first created),
then automatically return False.
Args:
submission_uuid (str): The UUID of the submission being tracked.
requirements (dict): Dictionary with the key "must_grade" indicating
@@ -40,6 +44,9 @@ def submitter_is_finished(submission_uuid, requirements):
bool
"""
if requirements is None:
return False
try:
workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
if workflow.completed_at is not None:
@@ -58,6 +65,10 @@ def assessment_is_finished(submission_uuid, requirements):
Check whether the submitter has received enough assessments
to get a score.
If the requirements dict is None (because we're being updated
asynchronously or when the workflow is first created),
then automatically return False.
Args:
submission_uuid (str): The UUID of the submission being tracked.
requirements (dict): Dictionary with the key "must_be_graded_by"
@@ -68,6 +79,8 @@ def assessment_is_finished(submission_uuid, requirements):
bool
"""
if requirements is None:
return False
return bool(get_score(submission_uuid, requirements))
@@ -126,6 +139,7 @@ def get_score(submission_uuid, requirements):
dict with keys "points_earned" and "points_possible".
"""
# User hasn't completed their own submission yet
if not submitter_is_finished(submission_uuid, requirements):
return None
......
@@ -41,6 +41,9 @@ def submitter_is_finished(submission_uuid, requirements): # pylint:disable=W06
StudentTrainingRequestError
"""
if requirements is None:
return False
try:
num_required = int(requirements['num_required'])
except KeyError:
......
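Both hunks above (the peer API and the student-training API) add the same early return. Below is a rough, self-contained sketch of the convention; everything after the guard is a made-up stand-in for the real database check.

```python
def submitter_is_finished(submission_uuid, requirements):
    """Sketch of the None-requirements convention added in this commit."""
    # requirements is None when the workflow is being updated asynchronously
    # or has just been created, i.e. the caller does not yet know the
    # problem configuration.  Report the step as unfinished rather than guess.
    if requirements is None:
        return False
    # Stand-in for the real lookup against PeerWorkflow: pretend the
    # submitter has already graded two peers.
    num_graded = 2
    return num_graded >= requirements["must_grade"]


# During workflow creation the requirements are not known yet:
assert submitter_is_finished("some-uuid", None) is False
assert submitter_is_finished("some-uuid", {"must_grade": 2}) is True
```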
@@ -1095,12 +1095,12 @@ class TestPeerApi(CacheResetTest):
1
)
@patch.object(Assessment.objects, 'filter')
@raises(peer_api.PeerAssessmentInternalError)
def test_max_score_db_error(self, mock_filter):
mock_filter.side_effect = DatabaseError("Bad things happened")
def test_max_score_db_error(self):
tim, _ = self._create_student_and_submission("Tim", "Tim's answer")
peer_api.get_rubric_max_scores(tim["uuid"])
with patch.object(Assessment.objects, 'filter') as mock_filter:
mock_filter.side_effect = DatabaseError("Bad things happened")
peer_api.get_rubric_max_scores(tim["uuid"])
@patch.object(PeerWorkflow.objects, 'get')
@raises(peer_api.PeerAssessmentInternalError)
......
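The rewritten test narrows the scope of the mock: the student and submission are created against the real model manager, and only the call under test sees the failing Assessment.objects.filter. Here is a generic illustration of the same pattern, using the stdlib unittest.mock and invented Database/load_rows names (the original test uses the nose @raises decorator and the external mock package instead).

```python
import unittest
from unittest.mock import patch


class Database(object):
    """Toy stand-in for a Django model manager."""
    def filter(self, **kwargs):
        return ["row"]


db = Database()


def load_rows():
    return db.filter(flag=True)


class ScopedPatchTest(unittest.TestCase):
    def test_db_error(self):
        # Setup runs against the real object ...
        self.assertEqual(load_rows(), ["row"])
        # ... and only the call under test sees the failure.
        with patch.object(db, "filter") as mock_filter:
            mock_filter.side_effect = RuntimeError("Bad things happened")
            with self.assertRaises(RuntimeError):
                load_rows()


if __name__ == "__main__":
    unittest.main()
```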
@@ -63,7 +63,8 @@ def create_workflow(submission_uuid, steps):
)
try:
submission_dict = sub_api.get_submission_and_student(submission_uuid)
workflow = AssessmentWorkflow.start_workflow(submission_uuid, steps)
return AssessmentWorkflowSerializer(workflow).data
except sub_api.SubmissionNotFoundError:
err_msg = sub_err_msg("submission not found")
logger.error(err_msg)
@@ -78,61 +79,17 @@ def create_workflow(submission_uuid, steps):
u"retrieving submission {} failed with unknown error: {}"
.format(submission_uuid, err)
)
# Raise an error if they specify a step we don't recognize...
invalid_steps = set(steps) - set(AssessmentWorkflow.STEPS)
if invalid_steps:
raise AssessmentWorkflowRequestError(
u"The following steps were not recognized: {}; Must be one of {}".format(
invalid_steps, AssessmentWorkflow.STEPS
)
)
# We're not using a serializer to deserialize this because the only variable
# we're getting from the outside is the submission_uuid, which is already
# validated by this point.
status = AssessmentWorkflow.STATUS.peer
if steps[0] == "peer":
try:
peer_api.on_start(submission_uuid)
except PeerAssessmentError as err:
err_msg = u"Could not create assessment workflow: {}".format(err)
logger.exception(err_msg)
raise AssessmentWorkflowInternalError(err_msg)
elif steps[0] == "self":
status = AssessmentWorkflow.STATUS.self
elif steps[0] == "training":
status = AssessmentWorkflow.STATUS.training
try:
training_api.on_start(submission_uuid)
except StudentTrainingInternalError as err:
err_msg = u"Could not create assessment workflow: {}".format(err)
logger.exception(err_msg)
raise AssessmentWorkflowInternalError(err_msg)
try:
workflow = AssessmentWorkflow.objects.create(
submission_uuid=submission_uuid,
status=status,
course_id=submission_dict['student_item']['course_id'],
item_id=submission_dict['student_item']['item_id'],
)
workflow_steps = [
AssessmentWorkflowStep(
workflow=workflow, name=step, order_num=i
)
for i, step in enumerate(steps)
]
workflow.steps.add(*workflow_steps)
except (
DatabaseError,
sub_api.SubmissionError
) as err:
err_msg = u"Could not create assessment workflow: {}".format(err)
except DatabaseError:
err_msg = u"Could not create assessment workflow for submission UUID: {}".format(submission_uuid)
logger.exception(err_msg)
raise AssessmentWorkflowInternalError(err_msg)
except:
err_msg = (
u"An unexpected error occurred while creating "
u"the workflow for submission UUID {}"
).format(submission_uuid)
logger.exception(err_msg)
raise AssessmentWorkflowInternalError(err_msg)
return AssessmentWorkflowSerializer(workflow).data
def get_workflow_for_submission(submission_uuid, assessment_requirements):
......
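After this change create_workflow becomes a thin wrapper: step validation and per-step initialization move into AssessmentWorkflow.start_workflow, and the API layer only translates low-level failures into workflow errors. The following is a self-contained sketch of that wrapper shape; the WorkflowInternalError name and the start_func argument (standing in for the real model call and the steps list) are invented to keep the example runnable.

```python
import logging

logging.basicConfig()
logger = logging.getLogger(__name__)


class WorkflowInternalError(Exception):
    """Domain-level error surfaced by the API layer (invented for this sketch)."""


def create_workflow(submission_uuid, start_func):
    """Delegate to the model layer; convert any failure into a domain error."""
    try:
        return start_func(submission_uuid)
    except Exception:
        # Log the full traceback, but callers only ever see the domain error.
        msg = (
            u"An unexpected error occurred while creating "
            u"the workflow for submission UUID {}"
        ).format(submission_uuid)
        logger.exception(msg)
        raise WorkflowInternalError(msg)


# Usage: a failing model-layer call surfaces as WorkflowInternalError.
def broken_start(submission_uuid):
    raise RuntimeError("database went away")

try:
    create_workflow("abcd-1234", broken_start)
except WorkflowInternalError as err:
    print(err)
```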
@@ -12,7 +12,7 @@ need to then generate a matching migration for it using:
import logging
import importlib
from django.conf import settings
from django.db import models
from django.db import models, transaction
from django_extensions.db.fields import UUIDField
from django.utils.timezone import now
from model_utils import Choices
@@ -93,6 +93,78 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
ordering = ["-created"]
# TODO: In migration, need a non-unique index on (course_id, item_id, status)
@classmethod
@transaction.commit_on_success
def start_workflow(cls, submission_uuid, step_names):
"""
Start a new workflow.
Args:
submission_uuid (str): The UUID of the submission associated with this workflow.
step_names (list): The names of the assessment steps in the workflow.
Returns:
AssessmentWorkflow
Raises:
SubmissionNotFoundError
SubmissionRequestError
SubmissionInternalError
DatabaseError
Assessment-module specific errors
"""
submission_dict = sub_api.get_submission_and_student(submission_uuid)
# Create the workflow and step models in the database
# For now, set the status to waiting; we'll modify it later
# based on the first step in the workflow.
workflow = cls.objects.create(
submission_uuid=submission_uuid,
status=AssessmentWorkflow.STATUS.waiting,
course_id=submission_dict['student_item']['course_id'],
item_id=submission_dict['student_item']['item_id']
)
workflow_steps = [
AssessmentWorkflowStep(
workflow=workflow, name=step, order_num=i
)
for i, step in enumerate(step_names)
]
workflow.steps.add(*workflow_steps)
# Initialize the assessment APIs
has_started_first_step = False
for step in workflow_steps:
api = step.api()
if api is not None:
# Initialize the assessment module
# We do this for every assessment module
on_init_func = getattr(api, 'on_init', lambda submission_uuid: None)
on_init_func(submission_uuid)
# For the first valid step, update the workflow status
# and notify the assessment module that it's being started
if not has_started_first_step:
# Update the workflow
workflow.status = step.name
workflow.save()
# Notify the assessment module that it's being started
on_start_func = getattr(api, 'on_start', lambda submission_uuid: None)
on_start_func(submission_uuid)
# Remember that we've already started the first step
has_started_first_step = True
# Update the workflow (in case some of the assessment modules are automatically complete)
# We do NOT pass in requirements, on the assumption that any assessment module
# that accepts requirements would NOT automatically complete.
workflow.update_from_assessments(None)
# Return the newly created workflow
return workflow
@property
def score(self):
"""Latest score for the submission we're tracking.
@@ -132,6 +204,14 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
this submission, then we record the score in the submissions API and move our
`status` to `done`.
By convention, if `assessment_requirements` is `None`, then assessment
modules that need requirements should automatically say that they're incomplete.
This allows us to update the workflow even when we don't know the
current state of the problem. For example, if we're updating the workflow
at the completion of an asynchronous call, we won't necessarily know the
current state of the problem, but we would still want to update assessments
that don't have any requirements.
Args:
assessment_requirements (dict): Dictionary passed to the assessment API.
This defines the requirements for each assessment step; the APIs
@@ -290,7 +370,7 @@ class AssessmentWorkflowStep(models.Model):
msg = (
u"No assessment configured for '{name}'. "
u"Check the ORA2_ASSESSMENTS Django setting."
).format(self.name)
).format(name=self.name)
logger.warning(msg)
return None
@@ -304,7 +384,10 @@ class AssessmentWorkflowStep(models.Model):
"""
# Once a step is completed, it will not be revisited based on updated requirements.
step_changed = False
step_reqs = assessment_requirements.get(self.name, {})
if assessment_requirements is None:
step_reqs = None
else:
step_reqs = assessment_requirements.get(self.name, {})
default_finished = lambda submission_uuid, step_reqs: True
submitter_finished = getattr(self.api(), 'submitter_is_finished', default_finished)
......
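The heart of the refactor is the loop in start_workflow: every step whose assessment API is configured gets an on_init call, and only the first such step becomes the initial workflow status and receives on_start, replacing the old hard-coded branching on steps[0]. In the real model this runs under transaction.commit_on_success, so a failure in any hook rolls back the new workflow and step rows, and the closing update_from_assessments(None) call relies on the None-requirements convention sketched earlier (mirrored by the step_reqs handling in AssessmentWorkflowStep). Below is a stripped-down, Django-free sketch of the dispatch, with toy API classes standing in for the real assessment modules.

```python
class PeerAPI(object):
    """Toy assessment API exposing the optional hooks the workflow looks for."""
    def on_init(self, submission_uuid):
        print("peer on_init for {}".format(submission_uuid))

    def on_start(self, submission_uuid):
        print("peer on_start for {}".format(submission_uuid))


class SelfAPI(object):
    """A toy API with no hooks at all; getattr falls back to no-ops."""


APIS = {"peer": PeerAPI(), "self": SelfAPI()}


def start_workflow(submission_uuid, step_names):
    """Generic first-step handling, with no database or Django involved."""
    status = "waiting"
    has_started_first_step = False
    for name in step_names:
        api = APIS.get(name)
        # Steps with no configured API are skipped entirely.
        if api is None:
            continue
        # Every configured step gets on_init; a missing hook is a no-op.
        getattr(api, "on_init", lambda submission_uuid: None)(submission_uuid)
        if not has_started_first_step:
            # The first configured step sets the initial status and
            # is the only one notified via on_start.
            status = name
            getattr(api, "on_start", lambda submission_uuid: None)(submission_uuid)
            has_started_first_step = True
    return status


print(start_workflow("abcd-1234", ["peer", "self"]))  # peer hooks fire; returns "peer"
print(start_workflow("abcd-1234", ["self", "peer"]))  # returns "self"; peer still gets on_init
```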
@@ -14,7 +14,7 @@ import submissions.api as sub_api
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api
from openassessment.workflow.models import AssessmentWorkflow
from openassessment.workflow.errors import AssessmentApiLoadError
from openassessment.workflow.errors import AssessmentWorkflowInternalError
ITEM_1 = {
@@ -264,9 +264,10 @@ class TestAssessmentWorkflowApi(CacheResetTest):
"item_type": "openassessment",
}, "test answer")
workflow_api.create_workflow(submission['uuid'], ['self'])
with self.assertRaises(AssessmentWorkflowInternalError):
workflow_api.create_workflow(submission['uuid'], ['self'])
with self.assertRaises(AssessmentApiLoadError):
with self.assertRaises(AssessmentWorkflowInternalError):
workflow_api.update_from_assessments(submission['uuid'], {})
def _create_workflow_with_status(
......