Commit c1dbf128 by Will Daly

Merge pull request #179 from edx/will/score-integrity-error

Prevent workflow from trying to create multiple scores for the same user
parents 6dd9c89b 698203cc
"""An XBlock where students can read a question and compose their response"""
import datetime as dt
import logging
import pkg_resources
import pytz
@@ -26,6 +27,9 @@ from openassessment.xblock.validation import validator
from openassessment.xblock.resolve_dates import resolve_dates
logger = logging.getLogger(__name__)
DEFAULT_PROMPT = """
Censorship in the Libraries
@@ -266,6 +270,15 @@ class OpenAssessmentBlock(
(Fragment): The HTML Fragment for this XBlock, which determines the
general frame of the Open Ended Assessment Question.
"""
# On page load, update the workflow status.
# We need to do this here because peers may have graded us, in which
# case we may have a score available.
try:
self.update_workflow_status()
except workflow_api.AssessmentWorkflowError:
# Log the exception, but continue loading the page
logger.exception('An error occurred while updating the workflow on page load.')
ui_models = self._create_ui_models()
# All data we intend to pass to the front end.
context_dict = {
@@ -460,18 +473,25 @@ class OpenAssessmentBlock(
is_open, reason = self.is_open(step=step)
return is_published and (is_open or reason == 'due')
def update_workflow_status(self, submission_uuid):
assessment_ui_model = self.get_assessment_module('peer-assessment')
requirements = {
"peer": {
"must_grade": assessment_ui_model["must_grade"],
"must_be_graded_by": assessment_ui_model["must_be_graded_by"]
}
}
return workflow_api.update_from_assessments(submission_uuid, requirements)
def get_assessment_module(self, mixin_name):
"""Get a configured assessment module by name.
"""
Get a configured assessment module by name.
Args:
mixin_name (str): The name of the mixin (e.g. "self-assessment" or "peer-assessment")
Returns:
dict
Example:
>>> self.get_assessment_module('peer-assessment')
{
"name": "peer-assessment",
"start": None,
"due": None,
"must_grade": 5,
"must_be_graded_by": 3,
}
"""
for assessment in self.rubric_assessments:
if assessment["name"] == mixin_name:
......
@@ -6,6 +6,7 @@ from openassessment.assessment.peer_api import (
PeerAssessmentInternalError, PeerAssessmentRequestError,
PeerAssessmentWorkflowError
)
import openassessment.workflow.api as workflow_api
logger = logging.getLogger(__name__)
@@ -78,8 +79,13 @@ class PeerAssessmentMixin(object):
# Update both the workflow that the submission we're assessing
# belongs to, as well as our own (e.g. have we evaluated enough?)
self.update_workflow_status(data["submission_uuid"])
self.update_workflow_status(self.submission_uuid)
try:
self.update_workflow_status(submission_uuid=data["submission_uuid"])
self.update_workflow_status()
except workflow_api.AssessmentWorkflowError:
msg = _('Could not update workflow status.')
logger.exception(msg)
return {'success': False, 'msg': msg}
# Temp kludge until we fix JSON serialization for datetime
assessment["scored_at"] = str(assessment["scored_at"])
......
@@ -3,6 +3,7 @@ from django.utils.translation import ugettext as _
from xblock.core import XBlock
from openassessment.assessment import self_api
from openassessment.workflow import api as workflow_api
from submissions import api as submission_api
logger = logging.getLogger(__name__)
@@ -79,8 +80,14 @@ class SelfAssessmentMixin(object):
data['options_selected'],
{"criteria": self.rubric_criteria}
)
# After we've created the self-assessment, we need to update the workflow.
self.update_workflow_status()
except self_api.SelfAssessmentRequestError as ex:
msg = _(u"Could not create self assessment: {error}").format(error=ex.message)
return {'success': False, 'msg': msg}
except workflow_api.AssessmentWorkflowError as ex:
msg = _(u"Could not update workflow: {error}").format(error=ex.message)
return {'success': False, 'msg': msg}
else:
return {'success': True, 'msg': u""}
@@ -7,6 +7,7 @@ import pytz
from mock import Mock, patch
from openassessment.xblock import openassessmentblock
from openassessment.workflow import api as workflow_api
from .base import XBlockHandlerTestCase, scenario
@@ -45,6 +46,37 @@ class TestOpenAssessment(XBlockHandlerTestCase):
self.assertIsNotNone(grade_response)
self.assertTrue(grade_response.body.find("openassessment__grade"))
@scenario('data/basic_scenario.xml')
def test_page_load_updates_workflow(self, xblock):
# No submission made, so don't update the workflow
with patch('openassessment.xblock.workflow_mixin.workflow_api') as mock_api:
self.runtime.render(xblock, "student_view")
self.assertEqual(mock_api.update_from_assessments.call_count, 0)
# Simulate one submission made (we have a submission ID)
xblock.submission_uuid = 'test_submission'
# Now that we have a submission, the workflow should get updated
with patch('openassessment.xblock.workflow_mixin.workflow_api') as mock_api:
self.runtime.render(xblock, "student_view")
expected_reqs = {
"peer": { "must_grade": 5, "must_be_graded_by": 3 }
}
mock_api.update_from_assessments.assert_called_once_with('test_submission', expected_reqs)
@scenario('data/basic_scenario.xml')
def test_student_view_workflow_error(self, xblock):
# Simulate an error from updating the workflow
xblock.submission_uuid = 'test_submission'
with patch('openassessment.xblock.workflow_mixin.workflow_api') as mock_api:
mock_api.update_from_assessments.side_effect = workflow_api.AssessmentWorkflowError
xblock_fragment = self.runtime.render(xblock, "student_view")
# Expect that the page renders even if the update fails
self.assertTrue(xblock_fragment.body_html().find("Openassessmentblock"))
@scenario('data/dates_scenario.xml')
def test_load_student_view_with_dates(self, xblock):
"""OA XBlock returns some HTML to the user.
......
@@ -6,6 +6,9 @@ from collections import namedtuple
import copy
import json
import mock
import submissions.api as sub_api
from openassessment.workflow import api as workflow_api
from openassessment.assessment import peer_api
from .base import XBlockHandlerTestCase, scenario
@@ -27,13 +30,11 @@ class TestPeerAssessment(XBlockHandlerTestCase):
sally_student_item = copy.deepcopy(student_item)
sally_student_item['student_id'] = "Sally"
sally_submission = xblock.create_submission(sally_student_item, u"Sally's answer")
xblock.get_workflow_info()
# Hal comes and submits a response.
hal_student_item = copy.deepcopy(student_item)
hal_student_item['student_id'] = "Hal"
hal_submission = xblock.create_submission(hal_student_item, u"Hal's answer")
xblock.get_workflow_info()
# Now Hal will assess Sally.
assessment = copy.deepcopy(self.ASSESSMENT)
@@ -73,19 +74,18 @@ class TestPeerAssessment(XBlockHandlerTestCase):
self.assertIn("Sally".encode('utf-8'), peer_response.body)
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_assess_handler(self, xblock):
def test_peer_assess_handler(self, xblock):
# Create a submission for this problem from another user
student_item = xblock.get_student_item_dict()
student_item['student_id'] = 'Sally'
submission = xblock.create_submission(student_item, self.SUBMISSION)
xblock.get_workflow_info()
# Create a submission for the scorer (required before assessing another student)
another_student = copy.deepcopy(student_item)
another_student['student_id'] = "Bob"
xblock.create_submission(another_student, self.SUBMISSION)
xblock.get_workflow_info()
peer_api.get_submission_to_assess(another_student, 3)
@@ -114,7 +114,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
self.assertEqual(actual[0]['feedback'], assessment['feedback'])
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_assess_rubric_option_mismatch(self, xblock):
def test_peer_assess_rubric_option_mismatch(self, xblock):
# Create a submission for this problem from another user
student_item = xblock.get_student_item_dict()
@@ -135,7 +135,6 @@ class TestPeerAssessment(XBlockHandlerTestCase):
# Expect an error response
self.assertFalse(resp['success'])
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_missing_keys_in_request(self, xblock):
for missing in ['feedback', 'submission_uuid', 'options_selected']:
......
@@ -5,8 +5,8 @@ Tests for self assessment handlers in Open Assessment XBlock.
import copy
import json
import mock
from submissions import api as submission_api
from openassessment.assessment import self_api
from openassessment.workflow import api as workflow_api
from .base import XBlockHandlerTestCase, scenario
@@ -26,7 +26,7 @@ class TestSelfAssessment(XBlockHandlerTestCase):
student_item = xblock.get_student_item_dict()
# Create a submission for the student
submission = submission_api.create_submission(student_item, self.SUBMISSION)
submission = xblock.create_submission(student_item, self.SUBMISSION)
# Submit a self-assessment
assessment = copy.deepcopy(self.ASSESSMENT)
@@ -51,6 +51,48 @@ class TestSelfAssessment(XBlockHandlerTestCase):
self.assertEqual(parts[1]['option']['name'], u'ﻉซƈﻉɭɭﻉกՇ')
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_self_assess_updates_workflow(self, xblock):
# Create a submission for the student
student_item = xblock.get_student_item_dict()
submission = xblock.create_submission(student_item, self.SUBMISSION)
with mock.patch('openassessment.xblock.workflow_mixin.workflow_api') as mock_api:
# Submit a self-assessment
assessment = copy.deepcopy(self.ASSESSMENT)
assessment['submission_uuid'] = submission['uuid']
resp = self.request(xblock, 'self_assess', json.dumps(assessment), response_format='json')
# Verify that the workflow is updated when we submit a self-assessment
self.assertTrue(resp['success'])
expected_reqs = {
"peer": { "must_grade": 5, "must_be_graded_by": 3 }
}
mock_api.update_from_assessments.assert_called_once_with(submission['uuid'], expected_reqs)
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_self_assess_workflow_error(self, xblock):
# Create a submission for the student
student_item = xblock.get_student_item_dict()
submission = xblock.create_submission(student_item, self.SUBMISSION)
with mock.patch('openassessment.xblock.workflow_mixin.workflow_api') as mock_api:
# Simulate a workflow error
mock_api.update_from_assessments.side_effect = workflow_api.AssessmentWorkflowError
# Submit a self-assessment
assessment = copy.deepcopy(self.ASSESSMENT)
assessment['submission_uuid'] = submission['uuid']
resp = self.request(xblock, 'self_assess', json.dumps(assessment), response_format='json')
# Verify that we get an error response
self.assertFalse(resp['success'])
self.assertIn('workflow', resp['msg'].lower())
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_self_assess_handler_missing_keys(self, xblock):
# Missing submission_uuid
assessment = copy.deepcopy(self.ASSESSMENT)
@@ -125,7 +167,7 @@ class TestSelfAssessment(XBlockHandlerTestCase):
def test_self_assess_api_error(self, xblock):
# Create a submission for the student
student_item = xblock.get_student_item_dict()
submission = submission_api.create_submission(student_item, self.SUBMISSION)
submission = xblock.create_submission(student_item, self.SUBMISSION)
# Submit a self-assessment
assessment = copy.deepcopy(self.ASSESSMENT)
......
from xblock.core import XBlock
from openassessment.workflow import api as workflow_api
class WorkflowMixin(object):
@XBlock.json_handler
def handle_workflow_info(self, data, suffix=''):
if not self.submission_uuid:
return None
return workflow_api.get_workflow_for_submission(
self.submission_uuid, self.workflow_requirements()
)
return self.get_workflow_info()
def workflow_requirements(self):
"""
Retrieve the requirements from each assessment module
so the workflow can decide whether the student can receive a score.
Returns:
dict
"""
assessment_ui_model = self.get_assessment_module('peer-assessment')
if not assessment_ui_model:
@@ -24,7 +28,40 @@ class WorkflowMixin(object):
}
}
def update_workflow_status(self, submission_uuid=None):
"""
Update the status of a workflow. For example, change the status
from peer-assessment to self-assessment. Creates a score
if the student has completed all requirements.
Kwargs:
submission_uuid (str): The submission associated with the workflow to update.
Defaults to the submission created by the current student.
Returns:
None
Raises:
AssessmentWorkflowError
"""
if submission_uuid is None:
submission_uuid = self.submission_uuid
if submission_uuid:
requirements = self.workflow_requirements()
workflow_api.update_from_assessments(submission_uuid, requirements)
def get_workflow_info(self):
"""
Retrieve a description of the student's progress in a workflow.
Note that this *may* update the workflow status if it's changed.
Returns:
dict
Raises:
AssessmentWorkflowError
"""
if not self.submission_uuid:
return {}
return workflow_api.get_workflow_for_submission(
......
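For reference, a minimal usage sketch of the workflow API this mixin wraps (the requirements shape is taken from the tests above; the standalone helper and its name are illustrative, not part of this change):

import openassessment.workflow.api as workflow_api

def refresh_workflow(submission_uuid):
    """Illustrative helper: advance a workflow and report the student's progress."""
    # Peer-assessment requirements, matching the scenario used in the tests above.
    requirements = {
        "peer": {"must_grade": 5, "must_be_graded_by": 3}
    }
    # May advance the workflow step (e.g. from peer to self assessment) and
    # creates a score once all requirements have been satisfied.
    workflow_api.update_from_assessments(submission_uuid, requirements)
    # Returns a dict describing where the student is in the workflow.
    return workflow_api.get_workflow_for_submission(submission_uuid, requirements)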
@@ -6,7 +6,7 @@ import copy
import logging
from django.core.cache import cache
from django.db import DatabaseError
from django.db import IntegrityError, DatabaseError
from django.utils.encoding import force_unicode
from submissions.serializers import (
@@ -410,7 +410,7 @@ def set_score(submission_uuid, score, points_possible):
student item.
Returns:
(dict): The dictionary representation of the saved score.
None
Raises:
SubmissionInternalError: Thrown if there was an internal error while
@@ -453,8 +453,18 @@ def set_score(submission_uuid, score, points_possible):
if not score.is_valid():
logger.exception(score.errors)
raise SubmissionInternalError(score.errors)
score.save()
return score.data
# When we save the score, a score summary will be created if
# it does not already exist.
# When the database's isolation level is set to repeatable-read,
# it's possible for a score summary to exist for this student item,
# even though we cannot retrieve it.
# In this case, we assume that someone else has already created
# a score summary and ignore the error.
try:
score.save()
except IntegrityError:
pass
def _get_or_create_student_item(student_item_dict):
......
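Note that the hunk above intentionally ignores the conflict instead of re-reading the row: under repeatable-read isolation, a SELECT issued inside the same transaction may still not see the score summary the competing request committed, so a get-or-create fallback would not help. A hedged sketch of the pattern (the helper and its argument are hypothetical, not the exact code):

from django.db import IntegrityError

def save_score_tolerating_duplicates(score):
    """Illustrative: save a score, tolerating a concurrently created summary."""
    try:
        score.save()
    except IntegrityError:
        # Another request already created the score summary; accept theirs.
        # We do not try to fetch it here, because this transaction's snapshot
        # may not include the conflicting row.
        pass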
@@ -211,7 +211,8 @@ class TestSubmissionsApi(TestCase):
student_item = self._get_student_item(STUDENT_ITEM)
self._assert_submission(submission, ANSWER_ONE, student_item.pk, 1)
score = api.set_score(submission["uuid"], 11, 12)
api.set_score(submission["uuid"], 11, 12)
score = api.get_latest_score_for_submission(submission["uuid"])
self._assert_score(score, 11, 12)
def test_get_score(self):
......