Commit 1d52e869 authored by Will Daly, committed by gradyward

Update self assessment XBlock handler

parent 260cfe7a
@@ -89,7 +89,15 @@ def get_score(submission_uuid, requirements):
}
def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, scored_at=None):
def create_assessment(
submission_uuid,
user_id,
options_selected,
criterion_feedback,
overall_feedback,
rubric_dict,
scored_at=None
):
"""
Create a self-assessment for a submission.
@@ -97,6 +105,11 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s
submission_uuid (str): The unique identifier for the submission being assessed.
user_id (str): The ID of the user creating the assessment. This must match the ID of the user who made the submission.
options_selected (dict): Mapping of rubric criterion names to option values selected.
criterion_feedback (dict): Dictionary mapping criterion names to the
free-form text feedback the user gave for the criterion.
Since criterion feedback is optional, some criteria may not appear
in the dictionary.
overall_feedback (unicode): Free-form text feedback on the submission overall.
rubric_dict (dict): Serialized Rubric model.
Kwargs:
......
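For context, the two new arguments slot in between options_selected and rubric_dict. A minimal sketch of a call against the updated signature; the UUID, student ID, and rubric values are placeholders, and actually running it requires a configured edx-ora2 environment.

from openassessment.assessment.api import self as self_api

# Placeholder inputs; not values taken from this commit.
submission_uuid = "00000000-0000-0000-0000-000000000000"
rubric_dict = {
    "prompt": "Summarize the article.",
    "criteria": [],  # a real call needs the serialized rubric criteria
}

assessment = self_api.create_assessment(
    submission_uuid,
    "student_1",                            # must match the submitter's ID
    {"Form": "Fair"},                       # options_selected
    {"Form": "Grammar could be tighter."},  # criterion_feedback, optional per criterion
    "Solid response overall.",              # overall_feedback
    rubric_dict,
)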
@@ -73,3 +73,23 @@ def create_rubric_dict(prompt, criteria):
"prompt": prompt,
"criteria": criteria
}
def clean_criterion_feedback(rubric_criteria, criterion_feedback):
"""
Remove per-criterion feedback for criteria with feedback disabled
in the rubric.
Args:
rubric_criteria (list): The rubric criteria from the problem definition.
criterion_feedback (dict): Mapping of criterion names to feedback text.
Returns:
dict
"""
return {
criterion['name']: criterion_feedback[criterion['name']]
for criterion in rubric_criteria
if criterion['name'] in criterion_feedback
and criterion.get('feedback', 'disabled') in ['optional', 'required']
}
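To make the filtering rule concrete, here is a self-contained sketch of the helper with made-up rubric data; feedback for criteria whose feedback setting is missing or 'disabled' is dropped.

def clean_criterion_feedback(rubric_criteria, criterion_feedback):
    """Keep feedback only for criteria whose feedback setting allows it."""
    return {
        criterion['name']: criterion_feedback[criterion['name']]
        for criterion in rubric_criteria
        if criterion['name'] in criterion_feedback
        and criterion.get('feedback', 'disabled') in ['optional', 'required']
    }

# Made-up rubric and feedback for illustration.
rubric_criteria = [
    {'name': 'Form', 'feedback': 'optional'},
    {'name': 'Concise'},  # no 'feedback' key, so it defaults to 'disabled'
]
feedback = {'Form': 'Tighten the grammar.', 'Concise': 'Too wordy.'}

print(clean_criterion_feedback(rubric_criteria, feedback))
# {'Form': 'Tighten the grammar.'}  (feedback for 'Concise' is dropped)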
@@ -9,10 +9,8 @@ from openassessment.assessment.errors import (
PeerAssessmentRequestError, PeerAssessmentInternalError, PeerAssessmentWorkflowError
)
from openassessment.workflow.errors import AssessmentWorkflowError
from openassessment.fileupload import api as file_upload_api
from openassessment.fileupload.api import FileUploadError
from .resolve_dates import DISTANT_FUTURE
from .data_conversion import create_rubric_dict, clean_criterion_feedback
logger = logging.getLogger(__name__)
@@ -64,19 +62,15 @@ class PeerAssessmentMixin(object):
assessment_ui_model = self.get_assessment_module('peer-assessment')
if assessment_ui_model:
rubric_dict = {
'criteria': self.rubric_criteria
}
try:
# Create the assessment
assessment = peer_api.create_assessment(
self.submission_uuid,
self.get_student_item_dict()["student_id"],
data['options_selected'],
self._clean_criterion_feedback(data['criterion_feedback']),
clean_criterion_feedback(self.rubric_criteria, data['criterion_feedback']),
data['overall_feedback'],
rubric_dict,
create_rubric_dict(self.prompt, self.rubric_criteria),
assessment_ui_model['must_be_graded_by']
)
@@ -268,22 +262,3 @@ class PeerAssessmentMixin(object):
logger.exception(err)
return peer_submission
def _clean_criterion_feedback(self, criterion_feedback):
"""
Remove per-criterion feedback for criteria with feedback disabled
in the rubric.
Args:
criterion_feedback (dict): Mapping of criterion names to feedback text.
Returns:
dict
"""
return {
criterion['name']: criterion_feedback[criterion['name']]
for criterion in self.rubric_criteria
if criterion['name'] in criterion_feedback
and criterion.get('feedback', 'disabled') in ['optional', 'required']
}
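The peer handler now leans on the shared data_conversion helpers instead of a locally built rubric dict and a private method. A rough sketch of the new argument assembly, with placeholder rubric data; create_rubric_dict is reconstructed from the hunk above.

def create_rubric_dict(prompt, criteria):
    # Reconstructed from the data_conversion hunk above.
    return {
        "prompt": prompt,
        "criteria": criteria,
    }

# Placeholders standing in for self.prompt and self.rubric_criteria.
prompt = "Summarize the article in your own words."
rubric_criteria = [{"name": "Form", "feedback": "optional", "options": []}]

rubric_dict = create_rubric_dict(prompt, rubric_criteria)
print(rubric_dict["prompt"])  # the serialized rubric now carries the prompt too

# rubric_dict and the output of clean_criterion_feedback(...) are then passed
# to peer_api.create_assessment together with options_selected,
# overall_feedback, and must_be_graded_by.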
@@ -8,6 +8,7 @@ from openassessment.assessment.api import self as self_api
from openassessment.workflow import api as workflow_api
from submissions import api as submission_api
from .resolve_dates import DISTANT_FUTURE
from .data_conversion import create_rubric_dict, clean_criterion_feedback
logger = logging.getLogger(__name__)
@@ -112,6 +113,12 @@ class SelfAssessmentMixin(object):
if 'options_selected' not in data:
return {'success': False, 'msg': _(u"Missing options_selected key in request")}
if 'overall_feedback' not in data:
return {'success': False, 'msg': _('Must provide overall feedback in the assessment')}
if 'criterion_feedback' not in data:
return {'success': False, 'msg': _('Must provide feedback for criteria in the assessment')}
if self.submission_uuid is None:
return {'success': False, 'msg': _(u"You must submit a response before you can perform a self-assessment.")}
@@ -120,7 +127,9 @@ class SelfAssessmentMixin(object):
self.submission_uuid,
self.get_student_item_dict()['student_id'],
data['options_selected'],
{"criteria": self.rubric_criteria}
clean_criterion_feedback(self.rubric_criteria, data['criterion_feedback']),
data['overall_feedback'],
create_rubric_dict(self.prompt, self.rubric_criteria)
)
self.publish_assessment_event("openassessmentblock.self_assess", assessment)
......
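With these checks in place, a self_assess request must carry criterion_feedback and overall_feedback alongside options_selected. A hypothetical payload; the criterion names and feedback strings are placeholders.

import json

payload = json.dumps({
    "options_selected": {"Form": "Fair"},
    "criterion_feedback": {"Form": "Watch the comma splices."},  # may be {}
    "overall_feedback": "Good effort overall.",
})
# Submitted to the handler much like the tests below do, e.g.
# self.request(xblock, 'self_assess', payload, response_format='json')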
@@ -9,6 +9,7 @@ import mock
import pytz
from openassessment.assessment.api import self as self_api
from openassessment.workflow import api as workflow_api
from openassessment.xblock.data_conversion import create_rubric_dict
from .base import XBlockHandlerTestCase, scenario
@@ -23,6 +24,8 @@ class TestSelfAssessment(XBlockHandlerTestCase):
ASSESSMENT = {
'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
'criterion_feedback': {},
'overall_feedback': ""
}
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
@@ -87,6 +90,7 @@ class TestSelfAssessment(XBlockHandlerTestCase):
# Submit a self assessment for a rubric with a feedback-only criterion
assessment_dict = {
'options_selected': {u'vocabulary': u'good'},
'criterion_feedback': {u'vocabulary': 'Awesome job!'},
'overall_feedback': u''
}
resp = self.request(xblock, 'self_assess', json.dumps(assessment_dict), response_format='json')
@@ -99,10 +103,9 @@ class TestSelfAssessment(XBlockHandlerTestCase):
self.assertEqual(assessment['parts'][0]['option']['points'], 1)
# Check the feedback-only criterion score/feedback
# The written feedback should default to an empty string
self.assertEqual(assessment['parts'][1]['criterion']['name'], u'𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞')
self.assertIs(assessment['parts'][1]['option'], None)
self.assertEqual(assessment['parts'][1]['feedback'], u'')
self.assertEqual(assessment['parts'][1]['feedback'], u'Awesome job!')
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_self_assess_workflow_error(self, xblock):
@@ -267,7 +270,8 @@ class TestSelfAssessmentRender(XBlockHandlerTestCase):
submission['uuid'],
xblock.get_student_item_dict()['student_id'],
{u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
{'criteria': xblock.rubric_criteria}
{}, "Good job!",
create_rubric_dict(xblock.prompt, xblock.rubric_criteria)
)
self._assert_path_and_context(
xblock, 'openassessmentblock/self/oa_self_complete.html', {},
@@ -302,7 +306,8 @@ class TestSelfAssessmentRender(XBlockHandlerTestCase):
submission['uuid'],
xblock.get_student_item_dict()['student_id'],
{u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
{'criteria': xblock.rubric_criteria}
{}, "Good job!",
create_rubric_dict(xblock.prompt, xblock.rubric_criteria)
)
# This case probably isn't possible, because presumably when we create
......
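Because clean_criterion_feedback now lives in data_conversion rather than on the mixin, it can also be unit-tested directly. An illustrative sketch, not part of this commit; it assumes the updated openassessment.xblock.data_conversion module is importable.

import unittest

from openassessment.xblock.data_conversion import clean_criterion_feedback


class CleanCriterionFeedbackTest(unittest.TestCase):
    """Illustrative test for the relocated helper."""

    def test_drops_feedback_for_disabled_criteria(self):
        criteria = [
            {'name': 'Form', 'feedback': 'optional'},
            {'name': 'Concise'},  # feedback defaults to 'disabled'
        ]
        feedback = {'Form': 'Good structure.', 'Concise': 'Too long.'}
        self.assertEqual(
            clean_criterion_feedback(criteria, feedback),
            {'Form': 'Good structure.'},
        )


if __name__ == '__main__':
    unittest.main()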