Commit 768187ee by Will Daly

Refactor assessment and workflow APIs

Moved workflow errors into a separate module.
parent 6f73835f
......@@ -49,6 +49,8 @@ Isolation of Assessment types
a non `None` value has been returned by this function for a given
`submission_uuid`, repeated calls to this function should return the same
thing.
`on_start(submission_uuid)`
Notification to the API that the student has started the assessment step.
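For illustration, a minimal sketch of an assessment API module that satisfies this interface (the function names appear in this change; the bodies here are placeholders, not the real implementations):

    def submitter_is_finished(submission_uuid, requirements):
        """Return True once the submitter has done everything this step requires."""
        return True

    def assessment_is_finished(submission_uuid, requirements):
        """Return True once this step has fully assessed the submission."""
        return True

    def on_start(submission_uuid):
        """Notification that the student has started this assessment step."""
        pass

    def get_score(submission_uuid, requirements):
        """Return {'points_earned': int, 'points_possible': int}, or None if
        no score is available yet.  Once non-None, repeated calls must return
        the same value."""
        return None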
In the long run, it could be that `OpenAssessmentBlock` becomes a wrapper
that talks to child XBlocks via this kind of API, and that each child would
......@@ -96,3 +98,15 @@ Handling Problem Definition Change
2. If the sequence of steps changes, we look at the new steps and advance to
the first step that the user has not completed (`is_submitter_done()`
returns `False`).
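As a sketch, step 2 amounts to scanning the new steps with the assessment APIs' completion checks (the `load_assessment_api` helper is hypothetical; see the Django settings section below):

    def first_incomplete_step(submission_uuid, new_steps, requirements):
        # Advance to the first step the user has not completed.
        for step_name in new_steps:
            api = load_assessment_api(step_name)  # hypothetical loader
            if not api.submitter_is_finished(submission_uuid, requirements.get(step_name, {})):
                return step_name
        return None  # every step is already complete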
Django settings
Assessments in the workflow are configurable using Django settings.
This keeps the workflow API decoupled from the assessment modules.
The two settings are:
* `ORA2_ASSESSMENTS`: a `dict` mapping assessment names to the Python module path
of the corresponding assessment API.
* `ORA2_ASSESSMENT_SCORE_PRIORITY`: a `list` of assessment names, in priority
order, that determines which assessment type is used to generate a student's score.
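A rough sketch of how the workflow API might consume these settings (only the setting names and `AssessmentApiLoadError` appear in this commit; the helper functions here are illustrative):

    import importlib

    from django.conf import settings

    from openassessment.workflow.errors import AssessmentApiLoadError


    def load_assessment_api(assessment_name):
        # Resolve an assessment name (e.g. 'peer') to its API module.
        api_path = settings.ORA2_ASSESSMENTS[assessment_name]
        try:
            return importlib.import_module(api_path)
        except ImportError:
            raise AssessmentApiLoadError(assessment_name, api_path)


    def score_for_submission(submission_uuid, requirements):
        # Use the highest-priority assessment that can produce a score.
        for name in settings.ORA2_ASSESSMENT_SCORE_PRIORITY:
            api = load_assessment_api(name)
            score = api.get_score(submission_uuid, requirements.get(name, {}))
            if score:
                return score
        return None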
......@@ -28,6 +28,18 @@ PEER_TYPE = "PE"
def submitter_is_finished(submission_uuid, requirements):
"""
Check whether the submitter has made the required number of assessments.
Args:
submission_uuid (str): The UUID of the submission being tracked.
requirements (dict): Dictionary with the key "must_grade" indicating
the required number of submissions the student must grade.
Returns:
bool
"""
try:
workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
if workflow.completed_at is not None:
......@@ -41,17 +53,78 @@ def submitter_is_finished(submission_uuid, requirements):
return False
def assessment_is_finished(submission_uuid, requirements):
"""
Check whether the submitter has received enough assessments
to get a score.
Args:
submission_uuid (str): The UUID of the submission being tracked.
requirements (dict): Dictionary with the key "must_be_graded_by"
indicating the required number of assessments the student
must receive to get a score.
Returns:
bool
"""
return bool(get_score(submission_uuid, requirements))
def on_start(submission_uuid):
"""Create a new peer workflow for a student item and submission.
Creates a unique peer workflow for a student item, associated with a
submission.
Args:
submission_uuid (str): The submission associated with this workflow.
Returns:
None
Raises:
SubmissionError: There was an error retrieving the submission.
PeerAssessmentInternalError: Raised when there is an internal error
creating the Workflow.
"""
try:
submission = sub_api.get_submission_and_student(submission_uuid)
workflow, __ = PeerWorkflow.objects.get_or_create(
student_id=submission['student_item']['student_id'],
course_id=submission['student_item']['course_id'],
item_id=submission['student_item']['item_id'],
submission_uuid=submission_uuid
)
workflow.save()
except IntegrityError:
# If we get an integrity error, it means someone else has already
# created a workflow for this submission, so we don't need to do anything.
pass
except DatabaseError:
error_message = (
u"An internal error occurred while creating a new peer "
u"workflow for submission {}"
.format(submission_uuid)
)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
def get_score(submission_uuid, requirements):
"""
Retrieve a score for a submission if requirements have been satisfied.
Args:
submission_uuid (str): The UUID of the submission.
requirements (dict): Dictionary with the key "must_be_graded_by"
indicating the required number of assessments the student
must receive to get a score.
Returns:
dict with keys "points_earned" and "points_possible".
"""
# User hasn't completed their own submission yet
if not submitter_is_finished(submission_uuid, requirements):
......@@ -93,10 +166,6 @@ def get_score(submission_uuid, requirements):
}
def create_assessment(
scorer_submission_uuid,
scorer_id,
......
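Taken together, a typical pass through the renamed peer API looks roughly like this (the requirements values are illustrative; `submission_uuid` is assumed to exist in the caller):

    from openassessment.assessment.api import peer as peer_api

    requirements = {"must_grade": 5, "must_be_graded_by": 3}

    # The student enters the peer step.
    peer_api.on_start(submission_uuid)

    # ... the student assesses peers via peer_api.create_assessment(...) ...

    # Once both sides of the requirements are met, a score is available.
    if (peer_api.submitter_is_finished(submission_uuid, requirements) and
            peer_api.assessment_is_finished(submission_uuid, requirements)):
        score = peer_api.get_score(submission_uuid, requirements)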
......@@ -24,6 +24,74 @@ SELF_TYPE = "SE"
logger = logging.getLogger("openassessment.assessment.api.self")
def submitter_is_finished(submission_uuid, requirements):
"""
Check whether a self-assessment has been completed for a submission.
Args:
submission_uuid (str): The unique identifier of the submission.
requirements (dict): Any attributes of the assessment module required
to determine if this assessment is complete. There are currently
no requirements for a self-assessment.
Returns:
True if the submitter has assessed their answer
Examples:
>>> submitter_is_finished('222bdf3d-a88e-11e3-859e-040ccee02800', {})
True
"""
return Assessment.objects.filter(
score_type=SELF_TYPE, submission_uuid=submission_uuid
).exists()
def assessment_is_finished(submission_uuid, requirements):
"""
Check whether a self-assessment has been completed. For self-assessment,
this function is synonymous with submitter_is_finished.
Args:
submission_uuid (str): The unique identifier of the submission.
requirements (dict): Any attributes of the assessment module required
to determine if this assessment is complete. There are currently
no requirements for a self-assessment.
Returns:
True if the assessment is complete.
Examples:
>>> assessment_is_finished('222bdf3d-a88e-11e3-859e-040ccee02800', {})
True
"""
return submitter_is_finished(submission_uuid, requirements)
def get_score(submission_uuid, requirements):
"""
Get the score for this particular assessment.
Args:
submission_uuid (str): The unique identifier for the submission
requirements (dict): Any attributes of the assessment module required
to determine if this assessment is complete. There are currently
no requirements for a self-assessment.
Returns:
A dict of points earned and points possible for the given submission.
Returns None if no score can be determined yet.
Examples:
>>> get_score('222bdf3d-a88e-11e3-859e-040ccee02800', {})
{
'points_earned': 5,
'points_possible': 10
}
"""
assessment = get_assessment(submission_uuid)
if not assessment:
return None
return {
"points_earned": assessment["points_earned"],
"points_possible": assessment["points_possible"]
}
def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, scored_at=None):
"""
Create a self-assessment for a submission.
......@@ -155,73 +223,6 @@ def get_assessment(submission_uuid):
return serialized_assessment
def get_assessment_scores_by_criteria(submission_uuid):
"""Get the median score for each rubric criterion
......
......@@ -56,19 +56,33 @@ def submitter_is_finished(submission_uuid, requirements): # pylint:disable=W06
return workflow.num_completed >= num_required
def on_start(submission_uuid):
    """
    Creates a new student training workflow.

    This function should be called to indicate that a submission has entered the
    student training workflow part of the assessment process.

    Args:
        submission_uuid (str): The submission UUID for the student that is
            initiating training.

    Returns:
        None

    Raises:
        StudentTrainingInternalError: Raised when an error occurs persisting the
            Student Training Workflow

    """
    try:
        StudentTrainingWorkflow.create_workflow(submission_uuid)
    except Exception:
        msg = (
            u"An internal error has occurred while creating the student "
            u"training workflow for submission UUID {}".format(submission_uuid)
        )
        logger.exception(msg)
        raise StudentTrainingInternalError(msg)
def validate_training_examples(rubric, examples):
......@@ -354,34 +368,6 @@ def get_training_example(submission_uuid, rubric, examples):
raise StudentTrainingInternalError(msg)
def assess_training_example(submission_uuid, options_selected, update_workflow=True):
"""
Assess a training example and update the workflow.
......
......@@ -370,7 +370,7 @@ class TestPeerApi(CacheResetTest):
with patch.object(PeerWorkflow.objects, "get_or_create") as mock_peer:
mock_peer.side_effect = IntegrityError("Oh no!")
# This should not raise an exception
peer_api.on_start(tim_sub["uuid"])
@raises(peer_api.PeerAssessmentWorkflowError)
def test_no_submission_found_closing_assessment(self):
......@@ -1121,7 +1121,7 @@ class TestPeerApi(CacheResetTest):
def test_error_on_assessment_creation(self, mock_filter):
mock_filter.side_effect = DatabaseError("Bad things happened")
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
peer_api.on_start(submission["uuid"])
peer_api.create_assessment(
submission["uuid"], STUDENT_ITEM["student_id"],
ASSESSMENT_DICT['options_selected'],
......@@ -1188,6 +1188,6 @@ class TestPeerApi(CacheResetTest):
new_student_item = STUDENT_ITEM.copy()
new_student_item["student_id"] = student
submission = sub_api.create_submission(new_student_item, answer, date)
peer_api.on_start(submission["uuid"])
workflow_api.create_workflow(submission["uuid"], STEPS)
return submission, new_student_item
......@@ -26,7 +26,7 @@ class StudentTrainingAssessmentTest(CacheResetTest):
Create a submission.
"""
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
training_api.on_start(submission['uuid'])
self.submission_uuid = submission['uuid']
def test_training_workflow(self):
......@@ -122,7 +122,7 @@ class StudentTrainingAssessmentTest(CacheResetTest):
def test_submitter_is_finished_num_queries(self):
# Complete the first training example
training_api.on_start(self.submission_uuid)
training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
training_api.assess_training_example(self.submission_uuid, EXAMPLES[0]['options_selected'])
......@@ -160,9 +160,6 @@ class StudentTrainingAssessmentTest(CacheResetTest):
requirements = {'num_required': 1}
self.assertFalse(training_api.submitter_is_finished(self.submission_uuid, requirements))
def test_get_training_example_none_available(self):
for example in EXAMPLES:
training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
......@@ -263,13 +260,6 @@ class StudentTrainingAssessmentTest(CacheResetTest):
is_finished = training_api.submitter_is_finished(submission_uuid, requirements)
self.assertEqual(is_finished, bool(num_completed >= num_required))
def _expected_example(self, input_example, rubric):
"""
Return the training example we would expect to retrieve for an example.
......@@ -323,7 +313,7 @@ class StudentTrainingAssessmentTest(CacheResetTest):
"""
pre_submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
training_api.on_start(pre_submission['uuid'])
for example in examples:
training_api.get_training_example(pre_submission['uuid'], rubric, examples)
training_api.assess_training_example(pre_submission['uuid'], example['options_selected'])
......@@ -2,7 +2,6 @@
Public interface for the Assessment Workflow.
"""
import logging
from django.db import DatabaseError
......@@ -15,53 +14,14 @@ from openassessment.assessment.errors import (
from submissions import api as sub_api
from .models import AssessmentWorkflow, AssessmentWorkflowStep
from .serializers import AssessmentWorkflowSerializer
from .errors import (
AssessmentWorkflowInternalError, AssessmentWorkflowRequestError,
AssessmentWorkflowNotFoundError
)
logger = logging.getLogger(__name__)
def create_workflow(submission_uuid, steps):
"""Begins a new assessment workflow.
......@@ -134,7 +94,7 @@ def create_workflow(submission_uuid, steps):
status = AssessmentWorkflow.STATUS.peer
if steps[0] == "peer":
try:
peer_api.on_start(submission_uuid)
except PeerAssessmentError as err:
err_msg = u"Could not create assessment workflow: {}".format(err)
logger.exception(err_msg)
......@@ -144,7 +104,7 @@ def create_workflow(submission_uuid, steps):
elif steps[0] == "training":
status = AssessmentWorkflow.STATUS.training
try:
training_api.on_start(submission_uuid)
except StudentTrainingInternalError as err:
err_msg = u"Could not create assessment workflow: {}".format(err)
logger.exception(err_msg)
......
"""
Errors defined by the workflow API.
"""
import copy
class AssessmentWorkflowError(Exception):
"""An error that occurs during workflow actions.
This error is raised when the Workflow API cannot perform a requested
action.
"""
pass
class AssessmentWorkflowInternalError(AssessmentWorkflowError):
"""An error internal to the Workflow API has occurred.
This error is raised when an error occurs that is not caused by incorrect
use of the API, but rather internal implementation of the underlying
services.
"""
pass
class AssessmentWorkflowRequestError(AssessmentWorkflowError):
"""This error is raised when there was a request-specific error
This error is reserved for problems specific to the use of the API.
"""
def __init__(self, field_errors):
Exception.__init__(self, repr(field_errors))
self.field_errors = copy.deepcopy(field_errors)
class AssessmentWorkflowNotFoundError(AssessmentWorkflowError):
"""This error is raised when no submission is found for the request.
If a state is specified in a call to the API that results in no matching
Submissions, this error may be raised.
"""
pass
class AssessmentApiLoadError(AssessmentWorkflowInternalError):
"""
The assessment API could not be loaded.
"""
def __init__(self, assessment_name, api_path):
msg = u"Could not load assessment API for {} from {}".format(
assessment_name, api_path
)
super(AssessmentApiLoadError, self).__init__(msg)
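For reference, a minimal sketch of how a caller might handle the relocated exceptions (mirroring the xblock changes below; `submission_uuid` and `logger` are assumed to exist in the caller, and the handler bodies are illustrative):

    from openassessment.workflow import api as workflow_api
    from openassessment.workflow.errors import (
        AssessmentApiLoadError, AssessmentWorkflowError
    )

    try:
        workflow_api.update_from_assessments(submission_uuid, {})
    except AssessmentApiLoadError:
        # A module path in ORA2_ASSESSMENTS could not be imported.
        raise
    except AssessmentWorkflowError:
        # Log the error, but let the page continue to render.
        logger.exception(u"Workflow error for submission {}".format(submission_uuid))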
from django.db import DatabaseError
from django.test.utils import override_settings
import ddt
from mock import patch
from nose.tools import raises
......@@ -6,13 +7,15 @@ from openassessment.assessment.models import PeerWorkflow
from openassessment.test_utils import CacheResetTest
from openassessment.workflow.models import AssessmentWorkflow
from submissions.models import Submission
import openassessment.workflow.api as workflow_api
from openassessment.assessment.models import StudentTrainingWorkflow
import submissions.api as sub_api
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api
from openassessment.workflow.errors import AssessmentApiLoadError
ITEM_1 = {
"student_id": "Optimus Prime 001",
......@@ -21,6 +24,7 @@ ITEM_1 = {
"item_type": "openassessment",
}
@ddt.ddt
class TestAssessmentWorkflowApi(CacheResetTest):
......@@ -251,8 +255,24 @@ class TestAssessmentWorkflowApi(CacheResetTest):
updated_counts = workflow_api.get_status_counts("test/1/1", "peer-problem", ["peer", "self"])
self.assertEqual(counts, updated_counts)
@override_settings(ORA2_ASSESSMENTS={'self': 'not.a.module'})
def test_unable_to_load_api(self):
submission = sub_api.create_submission({
"student_id": "test student",
"course_id": "test course",
"item_id": "test item",
"item_type": "openassessment",
}, "test answer")
workflow_api.create_workflow(submission['uuid'], ['self'])
with self.assertRaises(AssessmentApiLoadError):
workflow_api.update_from_assessments(submission['uuid'], {})
def _create_workflow_with_status(
self, student_id, course_id, item_id,
status, answer="answer", steps=None
):
"""
Create a submission and workflow with a given status.
......@@ -270,7 +290,8 @@ class TestAssessmentWorkflowApi(CacheResetTest):
Returns:
workflow, submission
"""
if not steps:
steps = ["peer", "self"]
submission = sub_api.create_submission({
"student_id": student_id,
......
......@@ -26,6 +26,7 @@ from openassessment.xblock.xml import update_from_xml, serialize_content_to_xml
from openassessment.xblock.staff_info_mixin import StaffInfoMixin
from openassessment.xblock.workflow_mixin import WorkflowMixin
from openassessment.workflow import api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError
from openassessment.xblock.student_training_mixin import StudentTrainingMixin
from openassessment.xblock.validation import validator
from openassessment.xblock.resolve_dates import resolve_dates, DISTANT_PAST, DISTANT_FUTURE
......@@ -216,7 +217,7 @@ class OpenAssessmentBlock(
# case we may have a score available.
try:
self.update_workflow_status()
except AssessmentWorkflowError:
# Log the exception, but continue loading the page
logger.exception('An error occurred while updating the workflow on page load.')
......
......@@ -9,6 +9,8 @@ from openassessment.assessment.errors import (
PeerAssessmentRequestError, PeerAssessmentInternalError, PeerAssessmentWorkflowError
)
import openassessment.workflow.api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError
from .resolve_dates import DISTANT_FUTURE
logger = logging.getLogger(__name__)
......@@ -98,7 +100,7 @@ class PeerAssessmentMixin(object):
if assessment:
self.update_workflow_status(submission_uuid=assessment['submission_uuid'])
self.update_workflow_status()
except AssessmentWorkflowError:
logger.exception(
u"Workflow error occurred when submitting peer assessment "
u"for submission {}".format(self.submission_uuid)
......
......@@ -7,6 +7,7 @@ from webob import Response
from xblock.core import XBlock
from openassessment.assessment.api import student_training
import openassessment.workflow.api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError
from openassessment.xblock.data_conversion import convert_training_examples_list_to_dict
from .resolve_dates import DISTANT_FUTURE
......@@ -182,12 +183,10 @@ class StudentTrainingMixin(object):
else:
try:
self.update_workflow_status()
except AssessmentWorkflowError:
msg = _('Could not update workflow status.')
logger.exception(msg)
return {'success': False, 'msg': msg}
return {
'success': True,
'msg': u'',
......
......@@ -6,6 +6,7 @@ from xblock.core import XBlock
from submissions import api
from openassessment.workflow import api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError
from .resolve_dates import DISTANT_FUTURE
......@@ -73,7 +74,7 @@ class SubmissionMixin(object):
except api.SubmissionRequestError as err:
status_tag = 'EBADFORM'
status_text = unicode(err.field_errors)
except (api.SubmissionError, AssessmentWorkflowError):
logger.exception("This response was not submitted.")
status_tag = 'EUNKNOWN'
else:
......
......@@ -9,6 +9,7 @@ from mock import Mock, patch
from openassessment.xblock import openassessmentblock
from openassessment.xblock.resolve_dates import DISTANT_PAST, DISTANT_FUTURE
from openassessment.workflow import api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError
from .base import XBlockHandlerTestCase, scenario
......@@ -89,7 +90,7 @@ class TestOpenAssessment(XBlockHandlerTestCase):
# Simulate an error from updating the workflow
xblock.submission_uuid = 'test_submission'
with patch('openassessment.xblock.workflow_mixin.workflow_api') as mock_api:
mock_api.update_from_assessments.side_effect = AssessmentWorkflowError
xblock_fragment = self.runtime.render(xblock, "student_view")
# Expect that the page renders even if the update fails
......
......@@ -148,14 +148,14 @@ class TestCourseStaff(XBlockHandlerTestCase):
bob_item["item_id"] = xblock.scope_ids.usage_id
# Create a submission for Bob, and corresponding workflow.
submission = sub_api.create_submission(bob_item, {'text':"Bob Answer"})
peer_api.on_start(submission["uuid"])
workflow_api.create_workflow(submission["uuid"], ['peer'])
# Create a submission for Tim, and corresponding workflow.
tim_item = bob_item.copy()
tim_item["student_id"] = "Tim"
tim_sub = sub_api.create_submission(tim_item, "Tim Answer")
peer_api.on_start(tim_sub["uuid"])
workflow_api.create_workflow(tim_sub["uuid"], ['peer', 'self'])
# Bob assesses Tim.
......@@ -188,7 +188,7 @@ class TestCourseStaff(XBlockHandlerTestCase):
bob_item["item_id"] = xblock.scope_ids.usage_id
# Create a submission for Bob, and corresponding workflow.
submission = sub_api.create_submission(bob_item, {'text':"Bob Answer"})
peer_api.on_start(submission["uuid"])
workflow_api.create_workflow(submission["uuid"], ['self'])
# Bob assesses himself.
......@@ -219,14 +219,14 @@ class TestCourseStaff(XBlockHandlerTestCase):
bob_item["item_id"] = xblock.scope_ids.usage_id
# Create a submission for Bob, and corresponding workflow.
submission = sub_api.create_submission(bob_item, {'text':"Bob Answer"})
peer_api.on_start(submission["uuid"])
workflow_api.create_workflow(submission["uuid"], ['peer', 'self'])
# Create a submission for Tim, and corresponding workflow.
tim_item = bob_item.copy()
tim_item["student_id"] = "Tim"
tim_sub = sub_api.create_submission(tim_item, "Tim Answer")
peer_api.on_start(tim_sub["uuid"])
workflow_api.create_workflow(tim_sub["uuid"], ['peer', 'self'])
# Bob assesses Tim.
......
......@@ -10,6 +10,7 @@ import pytz
from django.db import DatabaseError
from openassessment.assessment.models import StudentTrainingWorkflow
from openassessment.workflow import api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError
from .base import XBlockHandlerTestCase, scenario
@ddt.ddt
......@@ -56,7 +57,7 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase):
}
}
with patch.object(workflow_api, "update_from_assessments") as mock_workflow_update:
mock_workflow_update.side_effect = AssessmentWorkflowError("Oh no!")
resp = self.request(xblock, 'training_assess', json.dumps(data), response_format='json')
# Expect that we were not correct due to a workflow update error.
......
......@@ -147,6 +147,19 @@ CACHES = {
},
}
# Configuration for the workflow API
# We use dependency injection to tell the workflow API
# which assessments to use and where to find the corresponding
# assessment API Python modules.
ORA2_ASSESSMENTS = {
'peer': 'openassessment.assessment.api.peer',
'self': 'openassessment.assessment.api.self',
'training': 'openassessment.assessment.api.student_training',
}
# If peer-assessment provides a score, use that;
# otherwise fall back to self-assessment.
ORA2_ASSESSMENT_SCORE_PRIORITY = ['peer', 'self']
# Celery configuration
# Note: Version 3.1 of Celery includes Django support, but since we're using
......