Commit e601a79d by Eric Fischer, committed by Andy Armstrong

Clean up update_from_assessments() codepath

Previously, api.update_from_assessments() would call
model.update_from_assessments(), then api._serialized_with_details(reqs),
which would duplicate the update work we *just* did as part of getting the
status. Now, update() and get_details() are totally separate operations, and
_serialized_with_details() is a read-only method that does not need to have
requirements passed in.
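
In sketch form, the new codepath separates the write from the read
(simplified, not the exact code; see the diff below):

    def update_from_assessments(submission_uuid, assessment_requirements):
        workflow = _get_workflow_model(submission_uuid)
        # Write side: advance the workflow against the current requirements.
        workflow.update_from_assessments(assessment_requirements)
        # Read side: serialize what was just stored; no requirements needed.
        return _serialized_with_details(workflow)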

I've also standardized student_training and peer, the two assessment types
that do *not* auto-complete on None requirements: both now return False if
passed None, and raise an error if passed {}. This is to allow staff_assess
to pass None to the other step types to indicate "Update thyself, if you
don't depend on requirements".

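Concretely, each of those two steps now follows this convention (a trimmed
sketch, not the real peer implementation; _num_peers_graded() is a
hypothetical stand-in for the actual completion check):

    def submitter_is_finished(submission_uuid, requirements):
        if requirements is None:
            # The "update thyself" ping: this step *does* depend on
            # requirements, so it never auto-completes on None.
            return False
        try:
            must_grade = requirements["must_grade"]  # {} lands here...
        except KeyError:
            # ...and errors out rather than silently completing.
            raise PeerAssessmentRequestError(u'Requirements dict must contain "must_grade" key')
        return _num_peers_graded(submission_uuid) >= must_grade  # hypothetical helper
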
Also adding test_requirements_changed(), to confirm that workflows are updated
when requirements change, even though those requirements are no longer
explicitly passed to the getter.
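
For reference, status_details() still returns the same shape as before,
e.g. (made-up values):

    {
        "peer": {"complete": True, "graded": False},
        "self": {"complete": False, "graded": False},
    }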
parent c19eb221
@@ -45,7 +45,7 @@ def submitter_is_finished(submission_uuid, requirements):
         bool
 
     """
-    if not requirements:
+    if requirements is None:
         return False
 
     try:
@@ -59,6 +59,8 @@ def submitter_is_finished(submission_uuid, requirements):
            return False
    except PeerWorkflow.DoesNotExist:
        return False
+    except KeyError:
+        raise PeerAssessmentRequestError(u'Requirements dict must contain "must_grade" key')
 
 def assessment_is_finished(submission_uuid, requirements):
......
@@ -264,7 +264,7 @@ def update_from_assessments(submission_uuid, assessment_requirements):
             u"Updated workflow for submission UUID {uuid} "
             u"with requirements {reqs}"
         ).format(uuid=submission_uuid, reqs=assessment_requirements))
-        return _serialized_with_details(workflow, assessment_requirements)
+        return _serialized_with_details(workflow)
     except PeerAssessmentError as err:
         err_msg = u"Could not update assessment workflow: {}".format(err)
         logger.exception(err_msg)
@@ -362,13 +362,12 @@ def _get_workflow_model(submission_uuid):
     return workflow
 
-def _serialized_with_details(workflow, assessment_requirements):
-    """Given a workflow and assessment requirements, return the serialized
-    version of an `AssessmentWorkflow` and add in the status details. See
-    `update_from_assessments()` for details on params and return values.
+def _serialized_with_details(workflow):
+    """
+    Given a workflow, return its serialized version with added status details.
     """
     data_dict = AssessmentWorkflowSerializer(workflow).data
-    data_dict["status_details"] = workflow.status_details(assessment_requirements)
+    data_dict["status_details"] = workflow.status_details()
     return data_dict
......
@@ -201,30 +201,14 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
         score = sub_api.get_latest_score_for_submission(self.submission_uuid)
         return score
 
-    def status_details(self, assessment_requirements):
+    def status_details(self):
         status_dict = {}
         steps = self._get_steps()
         for step in steps:
-            api = step.api()
-            if api is not None:
-                # If an assessment module does not define these functions,
-                # default to True -- that is, automatically assume that the user has
-                # met the requirements. This prevents students from getting "stuck"
-                # in the workflow in the event of a rollback that removes a step
-                # from the problem definition.
-                submitter_finished_func = getattr(api, 'submitter_is_finished', lambda submission_uuid, reqs: True)
-                assessment_finished_func = getattr(api, 'assessment_is_finished', lambda submission_uuid, reqs: True)
-                status_dict[step.name] = {
-                    "complete": submitter_finished_func(
-                        self.submission_uuid,
-                        assessment_requirements.get(step.name, {})
-                    ),
-                    "graded": assessment_finished_func(
-                        self.submission_uuid,
-                        assessment_requirements.get(step.name, {})
-                    ),
-                }
+            status_dict[step.name] = {
+                "complete": step.is_submitter_complete(),
+                "graded": step.is_assessment_complete(),
+            }
         return status_dict
 
     def get_score(self, assessment_requirements, step_for_name):
......
@@ -52,7 +52,7 @@ class StaffAssessmentMixin(object):
                 create_rubric_dict(self.prompts, self.rubric_criteria_with_labels)
             )
             self.publish_assessment_event("openassessmentblock.staff_assessment", assessment)
-            workflow_api.update_from_assessments(assessment["submission_uuid"], {})
+            workflow_api.update_from_assessments(assessment["submission_uuid"], None)
         except StaffAssessmentRequestError:
             logger.warning(
......@@ -71,7 +71,7 @@ class StaffAssessmentMixin(object):
return {'success': False, 'msg': msg}
else:
return {'success': True, 'msg': u""}
@XBlock.handler
def render_staff_assessment(self, data, suffix=''):
"""
@@ -85,7 +85,7 @@ class StaffAssessmentMixin(object):
         path, context_dict = self.staff_path_and_context()
         return self.render_assessment(path, context_dict)
 
     def staff_path_and_context(self):
         """
         Retrieve the correct template path and template context for the handler to render.
......
@@ -33,8 +33,54 @@ class TestPeerAssessment(XBlockHandlerTestCase):
     @scenario('data/over_grade_scenario.xml', user_id='Bob')
     def test_load_peer_student_view_with_dates(self, xblock):
+        self._sally_and_hal_grade_each_other_helper(xblock)
+
+        # If Over Grading is on, this should now return Sally or Hal's response to Bob.
+        student_item = xblock.get_student_item_dict()
+        submission = xblock.create_submission(student_item, (u"Bob's answer 1", u"Bob's answer 2"))
+        workflow_info = xblock.get_workflow_info()
+        self.assertEqual(workflow_info["status"], u'peer')
+
+        # Validate Submission Rendering.
+        request = namedtuple('Request', 'params')
+        request.params = {}
+        peer_response = xblock.render_peer_assessment(request)
+        self.assertIsNotNone(peer_response)
+        self.assertNotIn(submission["answer"]["parts"][0]["text"].encode('utf-8'), peer_response.body)
+        self.assertNotIn(submission["answer"]["parts"][1]["text"].encode('utf-8'), peer_response.body)
+
+        # Validate Peer Rendering.
+        self.assertTrue("Sally".encode('utf-8') in peer_response.body or
+                        "Hal".encode('utf-8') in peer_response.body)
+
+    @mock.patch('openassessment.xblock.workflow_mixin.WorkflowMixin.workflow_requirements')
+    @scenario('data/peer_assessment_scenario.xml', user_id='Sally')
+    def test_requirements_changed(self, xblock, mock_requirements):
+        """
+        Test to verify that if requirements change, student workflows are immediately updated to
+        reflect their done status with regards to the new requirements.
+        """
+        # Setup the peer grading scenario, using the default requirements
+        self._sally_and_hal_grade_each_other_helper(xblock)
+
+        # Verify that Sally's workflow is not marked done, as the requirements are higher than 1.
+        mock_requirements.return_value = {"peer": {"must_grade":2, "must_be_graded_by":2}}
+        workflow_info = xblock.get_workflow_info()
+        self.assertEqual(workflow_info["status"], u'peer')
+
+        # Now, change the requirements and verify that Sally's workflow updates to 'self' status.
+        mock_requirements.return_value = {"peer": {"must_grade":1, "must_be_graded_by":1}}
+        workflow_info = xblock.get_workflow_info()
+        self.assertEqual(workflow_info["status"], u'self')
+
+    def _sally_and_hal_grade_each_other_helper(self, xblock):
+        """
+        A helper method to set up 2 submissions, one for each of Sally and Hal, and then have each assess the other.
+        """
         student_item = xblock.get_student_item_dict()
 
         # Sally submits a response.
         sally_student_item = copy.deepcopy(student_item)
         sally_student_item['student_id'] = "Sally"
         sally_submission = xblock.create_submission(sally_student_item, (u"Sally's answer 1", u"Sally's answer 2"))
@@ -70,23 +116,6 @@ class TestPeerAssessment(XBlockHandlerTestCase):
             1
         )
 
-        # If Over Grading is on, this should now return Sally or Hal's response to Bob.
-        submission = xblock.create_submission(student_item, (u"Bob's answer 1", u"Bob's answer 2"))
-        workflow_info = xblock.get_workflow_info()
-        self.assertEqual(workflow_info["status"], u'peer')
-
-        # Validate Submission Rendering.
-        request = namedtuple('Request', 'params')
-        request.params = {}
-        peer_response = xblock.render_peer_assessment(request)
-        self.assertIsNotNone(peer_response)
-        self.assertNotIn(submission["answer"]["parts"][0]["text"].encode('utf-8'), peer_response.body)
-        self.assertNotIn(submission["answer"]["parts"][1]["text"].encode('utf-8'), peer_response.body)
-
-        # Validate Peer Rendering.
-        self.assertTrue("Sally".encode('utf-8') in peer_response.body or
-                        "Hal".encode('utf-8') in peer_response.body)
-
     @scenario('data/peer_assessment_scenario.xml', user_id='Bob')
     def test_peer_assess_before_submission(self, xblock):
         # Submit a peer assessment without a submission
......