Commit b0476980 by Will Daly

Merge pull request #415 from edx/will/workflow-api-refactor

Will/workflow api refactor
parents 041e5e43 768187ee
@@ -49,6 +49,8 @@ Isolation of Assessment types
a non `None` value has been returned by this function for a given
`submission_uuid`, repeated calls to this function should return the same
thing.
`on_start(submission_uuid)`
Notification to the API that the student has started the assessment step.
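For illustration only, a minimal sketch of an assessment API module exposing this kind of interface; the module name and docstrings are hypothetical, and only the function names and signatures are taken from this change:

    # example_assessment/api.py -- hypothetical assessment API module (sketch)

    def on_start(submission_uuid):
        """Notification that the student has started this assessment step."""
        # e.g. create a tracking record; calling this more than once for the
        # same submission should be safe.
        pass

    def submitter_is_finished(submission_uuid, requirements):
        """Return True once the submitter has met this step's requirements."""
        return True

    def assessment_is_finished(submission_uuid, requirements):
        """Return True once the submission has been fully assessed by this step."""
        return True

    def get_score(submission_uuid, requirements):
        """Return a dict with "points_earned" and "points_possible", or None if no score yet."""
        return None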
In the long run, it could be that `OpenAssessmentBlock` becomes a wrapper
that talks to child XBlocks via this kind of API, and that each child would
@@ -96,3 +98,15 @@ Handling Problem Definition Change
2. If the sequence of steps changes, we look at the new steps and advance to
the first step that the user has not completed (`is_submitter_done()`
returns `False`).
Django settings
Assessments in the workflow are configurable using Django settings.
This decouples the workflow API from the individual assessment modules.
The two settings are:
* `ORA2_ASSESSMENTS`: a `dict` mapping assessment names to the Python module path
of the corresponding assessment API.
* `ORA2_ASSESSMENT_SCORE_PRIORITY`: a `list` of assessment names, in priority order, that determines
which assessment type is used to generate a student's score.
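For example, the default configuration added in this change (see the `settings/base.py` diff below) is:

    ORA2_ASSESSMENTS = {
        'peer': 'openassessment.assessment.api.peer',
        'self': 'openassessment.assessment.api.self',
        'training': 'openassessment.assessment.api.student_training',
    }

    # If peer assessment provides a score, use it; otherwise fall back to self-assessment.
    ORA2_ASSESSMENT_SCORE_PRIORITY = ['peer', 'self']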
@@ -28,6 +28,18 @@ PEER_TYPE = "PE"
def submitter_is_finished(submission_uuid, requirements):
"""
Check whether the submitter has made the required number of assessments.
Args:
submission_uuid (str): The UUID of the submission being tracked.
requirements (dict): Dictionary with the key "must_grade" indicating
the required number of submissions the student must grade.
Returns:
bool
"""
try:
workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
if workflow.completed_at is not None:
@@ -41,17 +53,78 @@ def submitter_is_finished(submission_uuid, requirements):
return False
def assessment_is_finished(submission_uuid, requirements):
"""
Check whether the submitter has received enough assessments
to get a score.
Args:
submission_uuid (str): The UUID of the submission being tracked.
requirements (dict): Dictionary with the key "must_be_graded_by"
indicating the required number of assessments the student
must receive to get a score.
Returns:
bool
"""
return bool(get_score(submission_uuid, requirements))
def on_start(submission_uuid):
"""Create a new peer workflow for a student item and submission.
Creates a unique peer workflow for a student item, associated with a
submission.
Args:
submission_uuid (str): The submission associated with this workflow.
Returns:
None
Raises:
SubmissionError: There was an error retrieving the submission.
PeerAssessmentInternalError: Raised when there is an internal error
creating the Workflow.
"""
try:
submission = sub_api.get_submission_and_student(submission_uuid)
workflow, __ = PeerWorkflow.objects.get_or_create(
student_id=submission['student_item']['student_id'],
course_id=submission['student_item']['course_id'],
item_id=submission['student_item']['item_id'],
submission_uuid=submission_uuid
)
workflow.save()
except IntegrityError:
# If we get an integrity error, it means someone else has already
# created a workflow for this submission, so we don't need to do anything.
pass
except DatabaseError:
error_message = (
u"An internal error occurred while creating a new peer "
u"workflow for submission {}"
.format(submission_uuid)
)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
def get_score(submission_uuid, requirements):
"""
Retrieve a score for a submission if requirements have been satisfied.
Args:
submission_uuid (str): The UUID of the submission.
-requirements (dict): Description of requirements for receiving a score,
-    specific to the particular kind of submission (e.g. self or peer).
+requirements (dict): Dictionary with the key "must_be_graded_by"
+    indicating the required number of assessments the student
+    must receive to get a score.
Returns:
dict with keys "points_earned" and "points_possible".
"""
# User hasn't completed their own submission yet
if not submitter_is_finished(submission_uuid, requirements):
@@ -93,10 +166,6 @@ def get_score(submission_uuid, requirements):
}
def assessment_is_finished(submission_uuid, requirements):
return bool(get_score(submission_uuid, requirements))
def create_assessment(
scorer_submission_uuid,
scorer_id,
...
@@ -24,6 +24,74 @@ SELF_TYPE = "SE"
logger = logging.getLogger("openassessment.assessment.api.self")
def submitter_is_finished(submission_uuid, requirements):
"""
Check whether a self-assessment has been completed for a submission.
Args:
submission_uuid (str): The unique identifier of the submission.
requirements (dict): Any attributes of the assessment module required
to determine if this assessment is complete. There are currently
no requirements for a self-assessment.
Returns:
True if the submitter has assessed their answer
Examples:
>>> submitter_is_finished('222bdf3d-a88e-11e3-859e-040ccee02800', {})
True
"""
return Assessment.objects.filter(
score_type=SELF_TYPE, submission_uuid=submission_uuid
).exists()
def assessment_is_finished(submission_uuid, requirements):
"""
Check whether a self-assessment has been completed. For self-assessment,
this function is synonymous with submitter_is_finished.
Args:
submission_uuid (str): The unique identifier of the submission.
requirements (dict): Any attributes of the assessment module required
to determine if this assessment is complete. There are currently
no requirements for a self-assessment.
Returns:
True if the assessment is complete.
Examples:
>>> assessment_is_finished('222bdf3d-a88e-11e3-859e-040ccee02800', {})
True
"""
return submitter_is_finished(submission_uuid, requirements)
def get_score(submission_uuid, requirements):
"""
Get the score for this particular assessment.
Args:
submission_uuid (str): The unique identifier for the submission
requirements (dict): Any attributes of the assessment module required
to determine if this assessment is complete. There are currently
no requirements for a self-assessment.
Returns:
A dict of points earned and points possible for the given submission.
Returns None if no score can be determined yet.
Examples:
>>> get_score('222bdf3d-a88e-11e3-859e-040ccee02800', {})
{
'points_earned': 5,
'points_possible': 10
}
"""
assessment = get_assessment(submission_uuid)
if not assessment:
return None
return {
"points_earned": assessment["points_earned"],
"points_possible": assessment["points_possible"]
}
def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, scored_at=None):
"""
Create a self-assessment for a submission.
@@ -155,73 +223,6 @@ def get_assessment(submission_uuid):
return serialized_assessment
def submitter_is_finished(submission_uuid, requirements):
"""
Check whether a self-assessment has been completed for a submission.
Args:
submission_uuid (str): The unique identifier of the submission.
requirements (dict): Any attributes of the assessment module required
to determine if this assessment is complete. There are currently
no requirements for a self-assessment.
Returns:
True if the submitter has assessed their answer
Examples:
>>> submitter_is_finished('222bdf3d-a88e-11e3-859e-040ccee02800', {})
True
"""
return Assessment.objects.filter(
score_type=SELF_TYPE, submission_uuid=submission_uuid
).exists()
def assessment_is_finished(submission_uuid, requirements):
"""
Check whether a self-assessment has been completed. For self-assessment,
this function is synonymous with submitter_is_finished.
Args:
submission_uuid (str): The unique identifier of the submission.
requirements (dict): Any attributes of the assessment module required
to determine if this assessment is complete. There are currently
no requirements for a self-assessment.
Returns:
True if the assessment is complete.
Examples:
>>> assessment_is_finished('222bdf3d-a88e-11e3-859e-040ccee02800', {})
True
"""
return submitter_is_finished(submission_uuid, requirements)
def get_score(submission_uuid, requirements):
"""
Get the score for this particular assessment.
Args:
submission_uuid (str): The unique identifier for the submission
requirements (dict): Any attributes of the assessment module required
to determine if this assessment is complete. There are currently
no requirements for a self-assessment.
Returns:
A dict of points earned and points possible for the given submission.
Returns None if no score can be determined yet.
Examples:
>>> get_score('222bdf3d-a88e-11e3-859e-040ccee02800', {})
{
'points_earned': 5,
'points_possible': 10
}
"""
assessment = get_assessment(submission_uuid)
if not assessment:
return None
return {
"points_earned": assessment["points_earned"],
"points_possible": assessment["points_possible"]
}
def get_assessment_scores_by_criteria(submission_uuid):
"""Get the median score for each rubric criterion
...
@@ -56,19 +56,33 @@ def submitter_is_finished(submission_uuid, requirements): # pylint:disable=W06
return workflow.num_completed >= num_required
-def assessment_is_finished(submission_uuid, requirements):    # pylint:disable=W0613
-    """
-    Since the student is not being assessed by others,
-    this always returns true.
-    """
-    return True
-
-def get_score(submission_uuid, requirements):    # pylint:disable=W0613
-    """
-    Training is either complete or incomplete; there is no score.
-    """
-    return None
+def on_start(submission_uuid):
+    """
+    Creates a new student training workflow.
+
+    This function should be called to indicate that a submission has entered the
+    student training workflow part of the assessment process.
+
+    Args:
+        submission_uuid (str): The submission UUID for the student that is
+            initiating training.
+
+    Returns:
+        None
+
+    Raises:
+        StudentTrainingInternalError: Raised when an error occurs persisting the
+            Student Training Workflow
+    """
+    try:
+        StudentTrainingWorkflow.create_workflow(submission_uuid)
+    except Exception:
+        msg = (
+            u"An internal error has occurred while creating the student "
+            u"training workflow for submission UUID {}".format(submission_uuid)
+        )
+        logger.exception(msg)
+        raise StudentTrainingInternalError(msg)
def validate_training_examples(rubric, examples):
@@ -354,34 +368,6 @@ def get_training_example(submission_uuid, rubric, examples):
raise StudentTrainingInternalError(msg)
def create_student_training_workflow(submission_uuid):
"""
Creates a new student training workflow.
This function should be called to indicate that a submission has entered the
student training workflow part of the assessment process.
Args:
submission_uuid (str): The submission UUID for the student that is
initiating training.
Returns:
None
Raises:
StudentTrainingInternalError: Raised when an error occurs persisting the
Student Training Workflow
"""
try:
StudentTrainingWorkflow.create_workflow(submission_uuid)
except Exception:
msg = (
u"An internal error has occurred while creating the student "
u"training workflow for submission UUID {}".format(submission_uuid)
)
logger.exception(msg)
raise StudentTrainingInternalError(msg)
def assess_training_example(submission_uuid, options_selected, update_workflow=True):
"""
Assess a training example and update the workflow.
...
@@ -370,7 +370,7 @@ class TestPeerApi(CacheResetTest):
with patch.object(PeerWorkflow.objects, "get_or_create") as mock_peer:
mock_peer.side_effect = IntegrityError("Oh no!")
# This should not raise an exception
-peer_api.create_peer_workflow(tim_sub["uuid"])
+peer_api.on_start(tim_sub["uuid"])
@raises(peer_api.PeerAssessmentWorkflowError)
def test_no_submission_found_closing_assessment(self):
@@ -1121,7 +1121,7 @@ class TestPeerApi(CacheResetTest):
def test_error_on_assessment_creation(self, mock_filter):
mock_filter.side_effect = DatabaseError("Bad things happened")
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
-peer_api.create_peer_workflow(submission["uuid"])
+peer_api.on_start(submission["uuid"])
peer_api.create_assessment(
submission["uuid"], STUDENT_ITEM["student_id"],
ASSESSMENT_DICT['options_selected'],
@@ -1188,6 +1188,6 @@ class TestPeerApi(CacheResetTest):
new_student_item = STUDENT_ITEM.copy()
new_student_item["student_id"] = student
submission = sub_api.create_submission(new_student_item, answer, date)
-peer_api.create_peer_workflow(submission["uuid"])
+peer_api.on_start(submission["uuid"])
workflow_api.create_workflow(submission["uuid"], STEPS)
return submission, new_student_item
@@ -26,7 +26,7 @@ class StudentTrainingAssessmentTest(CacheResetTest):
Create a submission.
"""
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
-training_api.create_student_training_workflow(submission['uuid'])
+training_api.on_start(submission['uuid'])
self.submission_uuid = submission['uuid']
def test_training_workflow(self):
@@ -122,7 +122,7 @@ class StudentTrainingAssessmentTest(CacheResetTest):
def test_submitter_is_finished_num_queries(self):
# Complete the first training example
-training_api.create_student_training_workflow(self.submission_uuid)
+training_api.on_start(self.submission_uuid)
training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
training_api.assess_training_example(self.submission_uuid, EXAMPLES[0]['options_selected'])
@@ -160,9 +160,6 @@ class StudentTrainingAssessmentTest(CacheResetTest):
requirements = {'num_required': 1}
self.assertFalse(training_api.submitter_is_finished(self.submission_uuid, requirements))
# But since we're not being assessed by others, the "assessment" should be finished.
self.assertTrue(training_api.assessment_is_finished(self.submission_uuid, requirements))
def test_get_training_example_none_available(self):
for example in EXAMPLES:
training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
@@ -263,13 +260,6 @@ class StudentTrainingAssessmentTest(CacheResetTest):
is_finished = training_api.submitter_is_finished(submission_uuid, requirements)
self.assertEqual(is_finished, bool(num_completed >= num_required))
# Assessment is finished should always be true,
# since we're not being assessed by others.
self.assertTrue(training_api.assessment_is_finished(submission_uuid, requirements))
# At no point should we receive a score!
self.assertIs(training_api.get_score(submission_uuid, requirements), None)
def _expected_example(self, input_example, rubric):
"""
Return the training example we would expect to retrieve for an example.
@@ -323,7 +313,7 @@ class StudentTrainingAssessmentTest(CacheResetTest):
"""
pre_submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
-training_api.create_student_training_workflow(pre_submission['uuid'])
+training_api.on_start(pre_submission['uuid'])
for example in examples:
training_api.get_training_example(pre_submission['uuid'], rubric, examples)
training_api.assess_training_example(pre_submission['uuid'], example['options_selected'])
@@ -2,7 +2,6 @@
Public interface for the Assessment Workflow.
"""
import copy
import logging
from django.db import DatabaseError
@@ -15,53 +14,14 @@ from openassessment.assessment.errors import (
from submissions import api as sub_api
from .models import AssessmentWorkflow, AssessmentWorkflowStep
from .serializers import AssessmentWorkflowSerializer
from .errors import (
AssessmentWorkflowInternalError, AssessmentWorkflowRequestError,
AssessmentWorkflowNotFoundError
)
logger = logging.getLogger(__name__)
class AssessmentWorkflowError(Exception):
"""An error that occurs during workflow actions.
This error is raised when the Workflow API cannot perform a requested
action.
"""
pass
class AssessmentWorkflowInternalError(AssessmentWorkflowError):
"""An error internal to the Workflow API has occurred.
This error is raised when an error occurs that is not caused by incorrect
use of the API, but rather internal implementation of the underlying
services.
"""
pass
class AssessmentWorkflowRequestError(AssessmentWorkflowError):
"""This error is raised when there was a request-specific error
This error is reserved for problems specific to the use of the API.
"""
def __init__(self, field_errors):
Exception.__init__(self, repr(field_errors))
self.field_errors = copy.deepcopy(field_errors)
class AssessmentWorkflowNotFoundError(AssessmentWorkflowError):
"""This error is raised when no submission is found for the request.
If a state is specified in a call to the API that results in no matching
Submissions, this error may be raised.
"""
pass
def create_workflow(submission_uuid, steps):
"""Begins a new assessment workflow.
@@ -134,7 +94,7 @@ def create_workflow(submission_uuid, steps):
status = AssessmentWorkflow.STATUS.peer
if steps[0] == "peer":
try:
-peer_api.create_peer_workflow(submission_uuid)
+peer_api.on_start(submission_uuid)
except PeerAssessmentError as err:
err_msg = u"Could not create assessment workflow: {}".format(err)
logger.exception(err_msg)
@@ -144,7 +104,7 @@ def create_workflow(submission_uuid, steps):
elif steps[0] == "training":
status = AssessmentWorkflow.STATUS.training
try:
-training_api.create_student_training_workflow(submission_uuid)
+training_api.on_start(submission_uuid)
except StudentTrainingInternalError as err:
err_msg = u"Could not create assessment workflow: {}".format(err)
logger.exception(err_msg)
...
"""
Errors defined by the workflow API.
"""
import copy
class AssessmentWorkflowError(Exception):
"""An error that occurs during workflow actions.
This error is raised when the Workflow API cannot perform a requested
action.
"""
pass
class AssessmentWorkflowInternalError(AssessmentWorkflowError):
"""An error internal to the Workflow API has occurred.
This error is raised when an error occurs that is not caused by incorrect
use of the API, but rather internal implementation of the underlying
services.
"""
pass
class AssessmentWorkflowRequestError(AssessmentWorkflowError):
"""This error is raised when there was a request-specific error
This error is reserved for problems specific to the use of the API.
"""
def __init__(self, field_errors):
Exception.__init__(self, repr(field_errors))
self.field_errors = copy.deepcopy(field_errors)
class AssessmentWorkflowNotFoundError(AssessmentWorkflowError):
"""This error is raised when no submission is found for the request.
If a state is specified in a call to the API that results in no matching
Submissions, this error may be raised.
"""
pass
class AssessmentApiLoadError(AssessmentWorkflowInternalError):
"""
The assessment API could not be loaded.
"""
def __init__(self, assessment_name, api_path):
msg = u"Could not load assessment API for {} from {}".format(
assessment_name, api_path
)
super(AssessmentApiLoadError, self).__init__(msg)
@@ -9,29 +9,35 @@ need to then generate a matching migration for it using:
./manage.py schemamigration openassessment.workflow --auto
"""
from datetime import datetime
import logging
import importlib
from django.conf import settings
from django.db import models
from django_extensions.db.fields import UUIDField
from django.utils.timezone import now
from model_utils import Choices
from model_utils.models import StatusModel, TimeStampedModel
from submissions import api as sub_api
from .errors import AssessmentApiLoadError
logger = logging.getLogger('openassessment.workflow.models')
-# This will (hopefully soon) be replaced with calls to the event-tracking API:
-# https://github.com/edx/event-tracking
-if hasattr(settings, "EDX_ORA2") and "EVENT_LOGGER" in settings.EDX_ORA2:
-    func_path = settings.EDX_ORA2["EVENT_LOGGER"]
-    module_name, func_name = func_path.rsplit('.', 1)
-    emit_event = getattr(importlib.import_module(module_name), func_name)
-else:
-    emit_event = lambda event: logger.info("Event: " + unicode(event))
+# To encapsulate the workflow API from the assessment API,
+# we use dependency injection. The Django settings define
+# a dictionary mapping assessment step names to the Python module path
+# that implements the corresponding assessment API.
+ASSESSMENT_API_DICT = getattr(settings, 'ORA2_ASSESSMENTS', {})
+
+# For now, we use a simple scoring mechanism:
+# Once a student has completed all assessments, we search assessment APIs
+# in priority order until one of the APIs provides a score.
+# We then use that score as the student's overall score.
+# This Django setting is a list of assessment steps (defined in `settings.ORA2_ASSESSMENTS`)
+# in descending priority order.
ASSESSMENT_SCORE_PRIORITY = getattr(settings, 'ORA2_ASSESSMENT_SCORE_PRIORITY', [])
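As an informal illustration (not part of this diff) of how these two settings are consumed: a step name is resolved to its assessment API module with `importlib`, and scores are requested in priority order until one API reports a score. A rough sketch with hypothetical helper names, assuming the default settings shown elsewhere in this change:

    import importlib
    from django.conf import settings

    def api_for_step(step_name):
        # Resolve a step name (e.g. 'peer') to its assessment API module.
        api_path = settings.ORA2_ASSESSMENTS.get(step_name)
        return importlib.import_module(api_path) if api_path else None

    def first_available_score(submission_uuid, assessment_requirements):
        # Walk the priority list and return the first score an API reports.
        for step_name in settings.ORA2_ASSESSMENT_SCORE_PRIORITY:
            api = api_for_step(step_name)
            if api is not None and hasattr(api, 'get_score'):
                score = api.get_score(submission_uuid, assessment_requirements.get(step_name, {}))
                if score is not None:
                    return score
        return None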
class AssessmentWorkflow(TimeStampedModel, StatusModel):
@@ -47,11 +53,7 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
an after the fact recording of the last known state of that information so
we can search easily.
"""
-STEPS = [
-    "peer",        # User needs to assess peer submissions
-    "self",        # User needs to assess themselves
-    "training",    # User needs to practice grading using example essays
-]
+STEPS = ASSESSMENT_API_DICT.keys()
STATUSES = [
"waiting", # User has done all necessary assessment but hasn't been
@@ -100,39 +102,31 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
return status_dict
def update_from_assessments(self, assessment_requirements):
-"""Query self and peer APIs and change our status if appropriate.
+"""Query assessment APIs and change our status if appropriate.
If the status is done, we do nothing. Once something is done, we never
move back to any other status.
-By default, an `AssessmentWorkflow` starts with status `peer`.
-If the peer API says that our submitter's requirements are met -- that
-the submitter of the submission we're tracking has assessed the required
-number of other submissions -- then the status will move to `self`.
-If the self API says that the person who created the submission we're
-tracking has assessed themselves, then we move to `waiting`.
-If we're in the `waiting` status, and the peer API says it can score
-this submission (meaning other students have created enough assessments
-of it), then we record the score in the submissions API and move our
-`status` to `done`.
+If an assessment API says that our submitter's requirements are met,
+then move to the next assessment. For example, in peer assessment,
+if the submitter we're tracking has assessed the required number
+of submissions, they're allowed to continue.
+If the submitter has finished all the assessments, then we change
+their status to `waiting`.
+If we're in the `waiting` status, and an assessment API says it can score
+this submission, then we record the score in the submissions API and move our
+`status` to `done`.
Args:
-assessment_requirements (dict): Dictionary that currently looks like:
-    `{"peer": {"must_grade": <int>, "must_be_graded_by": <int>}}`
-    `must_grade` is the number of assessments a student must complete.
-    `must_be_graded_by` is the number of assessments a submission must
-    receive to be scored. `must_grade` should be greater than
-    `must_be_graded_by` to ensure that everyone will get scored.
-    The intention is to eventually pass in more assessment sequence
-    specific requirements in this dict.
+assessment_requirements (dict): Dictionary passed to the assessment API.
+    This defines the requirements for each assessment step; the APIs
+    can refer to this to decide whether the requirements have been
+    met. Note that the requirements could change if the author
+    updates the problem definition.
"""
-from openassessment.assessment.api import peer as peer_api
-from openassessment.assessment.api import self as self_api
# If we're done, we're done -- it doesn't matter if requirements have
# changed because we've already written a score.
if self.status == self.STATUS.done:
@@ -140,6 +134,7 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
# Update our AssessmentWorkflowStep models with the latest from our APIs
steps = self._get_steps()
+step_for_name = {step.name:step for step in steps}
# Go through each step and update its status.
for step in steps:
@@ -151,10 +146,13 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
self.STATUS.waiting # if nothing's left to complete, we're waiting
)
-# If the submitter is beginning peer assessment, add them to the queue
-# by creating a new peer workflow
-if new_status == "peer":
-    peer_api.create_peer_workflow(self.submission_uuid)
+# If the submitter is beginning the next assessment, notify the
+# appropriate assessment API.
+new_step = step_for_name.get(new_status)
+if new_step is not None:
+    on_start_func = getattr(new_step.api(), 'on_start', None)
+    if on_start_func is not None:
+        on_start_func(self.submission_uuid)
# If the submitter has done all they need to do, let's check to see if
# all steps have been fully assessed (i.e. we can score it).
@@ -162,20 +160,27 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
all(step.assessment_completed_at for step in steps)):
# At this point, we're trying to give a score. We currently have a
-# very simple rule for this -- if it has a peer step, use that for
-# scoring. If not, use the self step. Later on, we may put more
-# interesting rules here.
-step_names = [step.name for step in steps]
+# very simple rule for this -- we iterate through the
+# assessment APIs in priority order and use the first reported score.
score = None
-if self.STATUS.peer in step_names:
-    score = peer_api.get_score(
-        self.submission_uuid,
-        assessment_requirements[self.STATUS.peer]
-    )
-elif self.STATUS.self in step_names:
-    score = self_api.get_score(self.submission_uuid, {})
-if score:
+for assessment_step_name in ASSESSMENT_SCORE_PRIORITY:
+    # Check if the problem contains this assessment type
+    assessment_step = step_for_name.get(assessment_step_name)
+    # Query the corresponding assessment API for a score
+    # If we find one, then stop looking
+    if assessment_step is not None:
+        # Check if the assessment API defines a score function at all
+        get_score_func = getattr(assessment_step.api(), 'get_score', None)
+        if get_score_func is not None:
+            requirements = assessment_requirements.get(assessment_step_name, {})
+            score = get_score_func(self.submission_uuid, requirements)
+            break
+# If we found a score, then we're done
+if score is not None:
self.set_score(score)
new_status = self.STATUS.done
@@ -219,22 +224,6 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
score["points_possible"]
)
# This should be replaced by using the event tracking API, but
# that's not quite ready yet. So we're making this temp hack.
emit_event({
"context": {
"course_id": self.course_id
},
"event": {
"submission_uuid": self.submission_uuid,
"points_earned": score["points_earned"],
"points_possible": score["points_possible"],
},
"event_source": "server",
"event_type": "openassessment.workflow.score",
"time": datetime.utcnow(),
})
class AssessmentWorkflowStep(models.Model):
"""An individual step in the overall workflow process.
@@ -266,18 +255,26 @@ class AssessmentWorkflowStep(models.Model):
"""
Returns an API associated with this workflow step. If no API is
associated with this workflow step, None is returned.
This relies on Django settings to map step names to
the assessment API implementation.
""" """
from openassessment.assessment.api import peer as peer_api api_path = getattr(settings, 'ORA2_ASSESSMENTS', {}).get(self.name)
from openassessment.assessment.api import self as self_api if api_path is not None:
from openassessment.assessment.api import student_training try:
api = None return importlib.import_module(api_path)
if self.name == AssessmentWorkflow.STATUS.self: except (ImportError, ValueError):
api = self_api raise AssessmentApiLoadError(self.name, api_path)
elif self.name == AssessmentWorkflow.STATUS.peer: else:
api = peer_api # It's possible for the database to contain steps for APIs
elif self.name == AssessmentWorkflow.STATUS.training: # that are not configured -- for example, if a new assessment
api = student_training # type is added, then the code is rolled back.
return api msg = (
u"No assessment configured for '{name}'. "
u"Check the ORA2_ASSESSMENTS Django setting."
).format(self.name)
logger.warning(msg)
return None
def update(self, submission_uuid, assessment_requirements):
"""
@@ -287,33 +284,23 @@
Intended for internal use by update_from_assessments(). See
update_from_assessments() documentation for more details.
"""
-# Once a step is completed, it will not be revisited based on updated
-# requirements.
+# Once a step is completed, it will not be revisited based on updated requirements.
step_changed = False
step_reqs = assessment_requirements.get(self.name, {})
+default_finished = lambda submission_uuid, step_reqs: True
+submitter_finished = getattr(self.api(), 'submitter_is_finished', default_finished)
+assessment_finished = getattr(self.api(), 'assessment_is_finished', default_finished)
# Has the user completed their obligations for this step?
-if (not self.is_submitter_complete() and
-        self.api().submitter_is_finished(submission_uuid, step_reqs)):
+if (not self.is_submitter_complete() and submitter_finished(submission_uuid, step_reqs)):
self.submitter_completed_at = now()
step_changed = True
# Has the step received a score?
-if (not self.is_assessment_complete() and
-        self.api().assessment_is_finished(submission_uuid, step_reqs)):
+if (not self.is_assessment_complete() and assessment_finished(submission_uuid, step_reqs)):
self.assessment_completed_at = now()
step_changed = True
if step_changed:
self.save()
# Just here to record thoughts for later:
#
# class AssessmentWorkflowEvent(models.Model):
# workflow = models.ForeignKey(AssessmentWorkflow, related_name="events")
# app = models.CharField(max_length=50)
# event_type = models.CharField(max_length=255)
# event_data = models.TextField()
# description = models.TextField()
# created_at = models.DateTimeField(default=now, db_index=True)
"""
This is just a dummy event logger to test our ability to dynamically change this
value based on configuration. All this should go away when we start using the
edx-analytics approved library (once that's ready to be used on prod).
"""
def fake_event_logger(event):
print event
from django.db import DatabaseError
from django.test.utils import override_settings
import ddt
from mock import patch
from nose.tools import raises
@@ -6,13 +7,15 @@ from openassessment.assessment.models import PeerWorkflow
from openassessment.test_utils import CacheResetTest
from openassessment.workflow.models import AssessmentWorkflow
from submissions.models import Submission
import openassessment.workflow.api as workflow_api
from openassessment.assessment.models import StudentTrainingWorkflow
import submissions.api as sub_api
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api
from openassessment.workflow.models import AssessmentWorkflow
from openassessment.workflow.errors import AssessmentApiLoadError
ITEM_1 = {
"student_id": "Optimus Prime 001",
@@ -21,6 +24,7 @@ ITEM_1 = {
"item_type": "openassessment",
}
@ddt.ddt
class TestAssessmentWorkflowApi(CacheResetTest):
@@ -251,8 +255,24 @@ class TestAssessmentWorkflowApi(CacheResetTest):
updated_counts = workflow_api.get_status_counts("test/1/1", "peer-problem", ["peer", "self"])
self.assertEqual(counts, updated_counts)
-def _create_workflow_with_status(self, student_id, course_id, item_id,
-                                 status, answer="answer", steps=None):
+@override_settings(ORA2_ASSESSMENTS={'self': 'not.a.module'})
+def test_unable_to_load_api(self):
+    submission = sub_api.create_submission({
+        "student_id": "test student",
+        "course_id": "test course",
+        "item_id": "test item",
+        "item_type": "openassessment",
+    }, "test answer")
+    workflow_api.create_workflow(submission['uuid'], ['self'])
+    with self.assertRaises(AssessmentApiLoadError):
+        workflow_api.update_from_assessments(submission['uuid'], {})
+
+def _create_workflow_with_status(
+    self, student_id, course_id, item_id,
+    status, answer="answer", steps=None
+):
"""
Create a submission and workflow with a given status.
@@ -270,7 +290,8 @@ class TestAssessmentWorkflowApi(CacheResetTest):
Returns:
workflow, submission
"""
-if not steps: steps = ["peer", "self"]
+if not steps:
+    steps = ["peer", "self"]
submission = sub_api.create_submission({
"student_id": student_id,
...
from django.test import TestCase
from mock import patch
from nose.tools import raises
from openassessment.workflow.models import emit_event
from openassessment.workflow.test.events import fake_event_logger
class TestEmitEvent(TestCase):
def test_emit_wired_correctly(self):
self.assertEqual(emit_event, fake_event_logger)
@@ -26,6 +26,7 @@ from openassessment.xblock.xml import update_from_xml, serialize_content_to_xml
from openassessment.xblock.staff_info_mixin import StaffInfoMixin
from openassessment.xblock.workflow_mixin import WorkflowMixin
from openassessment.workflow import api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError
from openassessment.xblock.student_training_mixin import StudentTrainingMixin
from openassessment.xblock.validation import validator
from openassessment.xblock.resolve_dates import resolve_dates, DISTANT_PAST, DISTANT_FUTURE
@@ -216,7 +217,7 @@ class OpenAssessmentBlock(
# case we may have a score available.
try:
self.update_workflow_status()
-except workflow_api.AssessmentWorkflowError:
+except AssessmentWorkflowError:
# Log the exception, but continue loading the page
logger.exception('An error occurred while updating the workflow on page load.')
...
@@ -9,6 +9,8 @@ from openassessment.assessment.errors import (
PeerAssessmentRequestError, PeerAssessmentInternalError, PeerAssessmentWorkflowError
)
import openassessment.workflow.api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError
from .resolve_dates import DISTANT_FUTURE
logger = logging.getLogger(__name__)
@@ -98,7 +100,7 @@ class PeerAssessmentMixin(object):
if assessment:
self.update_workflow_status(submission_uuid=assessment['submission_uuid'])
self.update_workflow_status()
-except workflow_api.AssessmentWorkflowError:
+except AssessmentWorkflowError:
logger.exception(
u"Workflow error occurred when submitting peer assessment "
u"for submission {}".format(self.submission_uuid)
...
@@ -7,6 +7,7 @@ from webob import Response
from xblock.core import XBlock
from openassessment.assessment.api import student_training
import openassessment.workflow.api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError
from openassessment.xblock.data_conversion import convert_training_examples_list_to_dict
from .resolve_dates import DISTANT_FUTURE
@@ -182,12 +183,10 @@ class StudentTrainingMixin(object):
else:
try:
self.update_workflow_status()
-except workflow_api.AssessmentWorkflowError:
-    logger.exception(
-        u"Workflow error occurred when submitting peer assessment "
-        u"for submission {uuid}".format(uuid=self.submission_uuid)
-    )
-    return {'success': False, 'msg': _('Could not update workflow status.')}
+except AssessmentWorkflowError:
+    msg = _('Could not update workflow status.')
+    logger.exception(msg)
+    return {'success': False, 'msg': msg}
return {
'success': True,
'msg': u'',
...
@@ -6,6 +6,7 @@ from xblock.core import XBlock
from submissions import api
from openassessment.workflow import api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError
from .resolve_dates import DISTANT_FUTURE
@@ -73,7 +74,7 @@ class SubmissionMixin(object):
except api.SubmissionRequestError as err:
status_tag = 'EBADFORM'
status_text = unicode(err.field_errors)
-except (api.SubmissionError, workflow_api.AssessmentWorkflowError):
+except (api.SubmissionError, AssessmentWorkflowError):
logger.exception("This response was not submitted.")
status_tag = 'EUNKNOWN'
else:
...
@@ -9,6 +9,7 @@ from mock import Mock, patch
from openassessment.xblock import openassessmentblock
from openassessment.xblock.resolve_dates import DISTANT_PAST, DISTANT_FUTURE
from openassessment.workflow import api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError
from .base import XBlockHandlerTestCase, scenario
@@ -89,7 +90,7 @@ class TestOpenAssessment(XBlockHandlerTestCase):
# Simulate an error from updating the workflow
xblock.submission_uuid = 'test_submission'
with patch('openassessment.xblock.workflow_mixin.workflow_api') as mock_api:
-mock_api.update_from_assessments.side_effect = workflow_api.AssessmentWorkflowError
+mock_api.update_from_assessments.side_effect = AssessmentWorkflowError
xblock_fragment = self.runtime.render(xblock, "student_view")
# Expect that the page renders even if the update fails
...
@@ -148,14 +148,14 @@ class TestCourseStaff(XBlockHandlerTestCase):
bob_item["item_id"] = xblock.scope_ids.usage_id
# Create a submission for Bob, and corresponding workflow.
submission = sub_api.create_submission(bob_item, {'text':"Bob Answer"})
-peer_api.create_peer_workflow(submission["uuid"])
+peer_api.on_start(submission["uuid"])
workflow_api.create_workflow(submission["uuid"], ['peer'])
# Create a submission for Tim, and corresponding workflow.
tim_item = bob_item.copy()
tim_item["student_id"] = "Tim"
tim_sub = sub_api.create_submission(tim_item, "Tim Answer")
-peer_api.create_peer_workflow(tim_sub["uuid"])
+peer_api.on_start(tim_sub["uuid"])
workflow_api.create_workflow(tim_sub["uuid"], ['peer', 'self'])
# Bob assesses Tim.
@@ -188,7 +188,7 @@ class TestCourseStaff(XBlockHandlerTestCase):
bob_item["item_id"] = xblock.scope_ids.usage_id
# Create a submission for Bob, and corresponding workflow.
submission = sub_api.create_submission(bob_item, {'text':"Bob Answer"})
-peer_api.create_peer_workflow(submission["uuid"])
+peer_api.on_start(submission["uuid"])
workflow_api.create_workflow(submission["uuid"], ['self'])
# Bob assesses himself.
@@ -219,14 +219,14 @@ class TestCourseStaff(XBlockHandlerTestCase):
bob_item["item_id"] = xblock.scope_ids.usage_id
# Create a submission for Bob, and corresponding workflow.
submission = sub_api.create_submission(bob_item, {'text':"Bob Answer"})
-peer_api.create_peer_workflow(submission["uuid"])
+peer_api.on_start(submission["uuid"])
workflow_api.create_workflow(submission["uuid"], ['peer', 'self'])
# Create a submission for Tim, and corresponding workflow.
tim_item = bob_item.copy()
tim_item["student_id"] = "Tim"
tim_sub = sub_api.create_submission(tim_item, "Tim Answer")
-peer_api.create_peer_workflow(tim_sub["uuid"])
+peer_api.on_start(tim_sub["uuid"])
workflow_api.create_workflow(tim_sub["uuid"], ['peer', 'self'])
# Bob assesses Tim.
...
@@ -10,6 +10,7 @@ import pytz
from django.db import DatabaseError
from openassessment.assessment.models import StudentTrainingWorkflow
from openassessment.workflow import api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError
from .base import XBlockHandlerTestCase, scenario
@ddt.ddt
@@ -56,7 +57,7 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase):
}
}
with patch.object(workflow_api, "update_from_assessments") as mock_workflow_update:
-mock_workflow_update.side_effect = workflow_api.AssessmentWorkflowError("Oh no!")
+mock_workflow_update.side_effect = AssessmentWorkflowError("Oh no!")
resp = self.request(xblock, 'training_assess', json.dumps(data), response_format='json')
# Expect that we were not correct due to a workflow update error.
...
@@ -147,10 +147,20 @@ CACHES = {
},
}
-EDX_ORA2 = {
+# Configuration for the workflow API
+# We use dependency injection to tell the workflow API
+# which assessments to use and where to find the corresponding
+# assessment API Python modules.
+ORA2_ASSESSMENTS = {
+    'peer': 'openassessment.assessment.api.peer',
+    'self': 'openassessment.assessment.api.self',
+    'training': 'openassessment.assessment.api.student_training',
}
+
+# If peer-assessment provides a score, use that;
+# otherwise fall back to self-assessment.
+ORA2_ASSESSMENT_SCORE_PRIORITY = ['peer', 'self']
# Celery configuration
# Note: Version 3.1 of Celery includes Django support, but since we're using
# version 3.0 (same as edx-platform), we need to use an external library.
...
@@ -37,8 +37,6 @@ TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
# Install test-specific Django apps
INSTALLED_APPS += ('django_nose',)
EDX_ORA2["EVENT_LOGGER"] = "openassessment.workflow.test.events.fake_event_logger"
# We run Celery in "always eager" mode in the test suite,
# which executes tasks synchronously instead of using the task queue.
...