Commit ab7cfa6b by Will Daly

Merge pull request #392 from edx/will/log-peer-request-error

Clean up logging in peer and self assessment mixins
parents 58e9384c d7240460
...@@ -525,9 +525,10 @@ def get_submission_to_assess(submission_uuid, graded_by): ...@@ -525,9 +525,10 @@ def get_submission_to_assess(submission_uuid, graded_by):
""" """
workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid) workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
if not workflow: if not workflow:
raise PeerAssessmentWorkflowError(_( raise PeerAssessmentWorkflowError(
u"A Peer Assessment Workflow does not exist for the specified " u"A Peer Assessment Workflow does not exist for the student "
u"student.")) u"with submission UUID {}".format(submission_uuid)
)
peer_submission_uuid = workflow.find_active_assessments() peer_submission_uuid = workflow.find_active_assessments()
# If there is an active assessment for this user, get that submission, # If there is an active assessment for this user, get that submission,
# otherwise, get the first assessment for review, otherwise, # otherwise, get the first assessment for review, otherwise,
......
...@@ -1169,6 +1169,12 @@ class TestPeerApi(CacheResetTest): ...@@ -1169,6 +1169,12 @@ class TestPeerApi(CacheResetTest):
MONDAY, MONDAY,
) )
def test_get_submission_to_assess_no_workflow(self):
    """
    Requesting a submission to assess must raise a workflow error
    when the student doing the assessing has no peer workflow
    (i.e. has not yet submitted anything themselves).
    """
    self.assertRaises(
        peer_api.PeerAssessmentWorkflowError,
        peer_api.get_submission_to_assess,
        "no_such_submission",
        "scorer ID",
    )
@staticmethod @staticmethod
def _create_student_and_submission(student, answer, date=None): def _create_student_and_submission(student, answer, date=None):
new_student_item = STUDENT_ITEM.copy() new_student_item = STUDENT_ITEM.copy()
......
...@@ -6,8 +6,7 @@ from xblock.core import XBlock ...@@ -6,8 +6,7 @@ from xblock.core import XBlock
from openassessment.assessment.api import peer as peer_api from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.errors import ( from openassessment.assessment.errors import (
PeerAssessmentInternalError, PeerAssessmentRequestError, PeerAssessmentRequestError, PeerAssessmentInternalError, PeerAssessmentWorkflowError
PeerAssessmentWorkflowError
) )
import openassessment.workflow.api as workflow_api import openassessment.workflow.api as workflow_api
from .resolve_dates import DISTANT_FUTURE from .resolve_dates import DISTANT_FUTURE
...@@ -80,11 +79,17 @@ class PeerAssessmentMixin(object): ...@@ -80,11 +79,17 @@ class PeerAssessmentMixin(object):
# Emit analytics event... # Emit analytics event...
self._publish_peer_assessment_event(assessment) self._publish_peer_assessment_event(assessment)
except PeerAssessmentRequestError: except (PeerAssessmentRequestError, PeerAssessmentWorkflowError):
logger.warning(
u"Peer API error for submission UUID {}".format(self.submission_uuid),
exc_info=True
)
return {'success': False, 'msg': _(u"Your peer assessment could not be submitted.")} return {'success': False, 'msg': _(u"Your peer assessment could not be submitted.")}
except PeerAssessmentInternalError: except PeerAssessmentInternalError:
msg = _("Internal error occurred while creating the assessment") logger.exception(
logger.exception(msg) u"Peer API internal error for submission UUID: {}".format(self.submission_uuid)
)
msg = _("Your peer assessment could not be submitted.")
return {'success': False, 'msg': msg} return {'success': False, 'msg': msg}
# Update both the workflow that the submission we're assessing # Update both the workflow that the submission we're assessing
...@@ -94,8 +99,11 @@ class PeerAssessmentMixin(object): ...@@ -94,8 +99,11 @@ class PeerAssessmentMixin(object):
self.update_workflow_status(submission_uuid=assessment['submission_uuid']) self.update_workflow_status(submission_uuid=assessment['submission_uuid'])
self.update_workflow_status() self.update_workflow_status()
except workflow_api.AssessmentWorkflowError: except workflow_api.AssessmentWorkflowError:
logger.exception(
u"Workflow error occurred when submitting peer assessment "
u"for submission {}".format(self.submission_uuid)
)
msg = _('Could not update workflow status.') msg = _('Could not update workflow status.')
logger.exception(msg)
return {'success': False, 'msg': msg} return {'success': False, 'msg': msg}
# Temp kludge until we fix JSON serialization for datetime # Temp kludge until we fix JSON serialization for datetime
......
...@@ -141,11 +141,20 @@ class SelfAssessmentMixin(object): ...@@ -141,11 +141,20 @@ class SelfAssessmentMixin(object):
) )
# After we've created the self-assessment, we need to update the workflow. # After we've created the self-assessment, we need to update the workflow.
self.update_workflow_status() self.update_workflow_status()
except self_api.SelfAssessmentRequestError as ex: except (self_api.SelfAssessmentRequestError, workflow_api.AssessmentWorkflowRequestError):
msg = _(u"Could not create self assessment: {error}").format(error=ex) logger.warning(
u"An error occurred while submitting a self assessment "
u"for the submission {}".format(self.submission_uuid),
exc_info=True
)
msg = _(u"Your self assessment could not be submitted.")
return {'success': False, 'msg': msg} return {'success': False, 'msg': msg}
except workflow_api.AssessmentWorkflowError as ex: except (self_api.SelfAssessmentInternalError, workflow_api.AssessmentWorkflowInternalError):
msg = _(u"Could not update workflow: {error}").format(error=ex) logger.exception(
u"An error occurred while submitting a self assessment "
u"for the submission {}".format(self.submission_uuid),
)
msg = _(u"Your self assessment could not be submitted.")
return {'success': False, 'msg': msg} return {'success': False, 'msg': msg}
else: else:
return {'success': True, 'msg': u""} return {'success': True, 'msg': u""}
...@@ -11,6 +11,7 @@ import datetime as dt ...@@ -11,6 +11,7 @@ import datetime as dt
import pytz import pytz
import ddt import ddt
from openassessment.assessment.api import peer as peer_api from openassessment.assessment.api import peer as peer_api
from openassessment.workflow import api as workflow_api
from .base import XBlockHandlerTestCase, scenario from .base import XBlockHandlerTestCase, scenario
...@@ -90,6 +91,18 @@ class TestPeerAssessment(XBlockHandlerTestCase): ...@@ -90,6 +91,18 @@ class TestPeerAssessment(XBlockHandlerTestCase):
self.assertGreater(len(resp['msg']), 0) self.assertGreater(len(resp['msg']), 0)
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_peer_assess_without_leasing_submission(self, xblock):
    """
    Submitting a peer assessment without having first leased a peer's
    submission (leasing normally happens when the peer assessment step
    is rendered) should fail gracefully with an error message.
    """
    # Make a submission of our own so we have a workflow.
    submission = xblock.create_submission(
        xblock.get_student_item_dict(), u"Bob's answer"
    )
    # Try to assess a peer without leasing their submission first.
    resp = self.request(
        xblock, 'peer_assess', json.dumps(self.ASSESSMENT),
        response_format='json'
    )
    self.assertEqual(resp['success'], False)
    self.assertGreater(len(resp['msg']), 0)
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_missing_keys_in_request(self, xblock): def test_missing_keys_in_request(self, xblock):
for missing in ['criterion_feedback', 'overall_feedback', 'options_selected']: for missing in ['criterion_feedback', 'overall_feedback', 'options_selected']:
assessment = copy.deepcopy(self.ASSESSMENT) assessment = copy.deepcopy(self.ASSESSMENT)
...@@ -613,6 +626,24 @@ class TestPeerAssessHandler(XBlockHandlerTestCase): ...@@ -613,6 +626,24 @@ class TestPeerAssessHandler(XBlockHandlerTestCase):
expect_failure=True expect_failure=True
) )
@mock.patch('openassessment.xblock.peer_assessment_mixin.peer_api')
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_peer_api_request_error(self, xblock, mock_api):
    """
    A PeerAssessmentRequestError raised by the peer API should be
    handled by the peer_assess handler and reported as a failure
    response rather than an unhandled exception.
    """
    # Every call to create_assessment raises the request error.
    mock_api.create_assessment.side_effect = peer_api.PeerAssessmentRequestError
    self._submit_peer_assessment(
        xblock, "Sally", "Bob", self.ASSESSMENT, expect_failure=True
    )
@mock.patch('openassessment.xblock.peer_assessment_mixin.peer_api')
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_peer_api_internal_error(self, xblock, mock_api):
    """
    A PeerAssessmentInternalError raised by the peer API should be
    handled by the peer_assess handler and reported as a failure
    response rather than an unhandled exception.
    """
    # Every call to create_assessment raises the internal error.
    mock_api.create_assessment.side_effect = peer_api.PeerAssessmentInternalError
    self._submit_peer_assessment(
        xblock, "Sally", "Bob", self.ASSESSMENT, expect_failure=True
    )
@mock.patch('openassessment.xblock.workflow_mixin.workflow_api.update_from_assessments')
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_peer_api_workflow_error(self, xblock, mock_call):
    """
    An AssessmentWorkflowInternalError raised while updating the
    workflow after a peer assessment should be handled and reported
    as a failure response rather than an unhandled exception.
    """
    # Make the workflow update blow up with an internal error.
    mock_call.side_effect = workflow_api.AssessmentWorkflowInternalError
    self._submit_peer_assessment(
        xblock, "Sally", "Bob", self.ASSESSMENT, expect_failure=True
    )
def _submit_peer_assessment(self, xblock, student_id, scorer_id, assessment, expect_failure=False): def _submit_peer_assessment(self, xblock, student_id, scorer_id, assessment, expect_failure=False):
""" """
Create submissions for a student and scorer, then create a peer assessment Create submissions for a student and scorer, then create a peer assessment
......
...@@ -87,14 +87,13 @@ class TestSelfAssessment(XBlockHandlerTestCase): ...@@ -87,14 +87,13 @@ class TestSelfAssessment(XBlockHandlerTestCase):
with mock.patch('openassessment.xblock.workflow_mixin.workflow_api') as mock_api: with mock.patch('openassessment.xblock.workflow_mixin.workflow_api') as mock_api:
# Simulate a workflow error # Simulate a workflow error
mock_api.update_from_assessments.side_effect = workflow_api.AssessmentWorkflowError mock_api.update_from_assessments.side_effect = workflow_api.AssessmentWorkflowInternalError
# Submit a self-assessment # Submit a self-assessment
resp = self.request(xblock, 'self_assess', json.dumps(self.ASSESSMENT), response_format='json') resp = self.request(xblock, 'self_assess', json.dumps(self.ASSESSMENT), response_format='json')
# Verify that the we get an error response # Verify that the we get an error response
self.assertFalse(resp['success']) self.assertFalse(resp['success'])
self.assertIn('workflow', resp['msg'].lower())
@scenario('data/self_assessment_scenario.xml', user_id='Bob') @scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_self_assess_handler_missing_keys(self, xblock): def test_self_assess_handler_missing_keys(self, xblock):
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment