Commit 1d52e869 authored by Will Daly, committed by gradyward

Update self assessment XBlock handler

parent 260cfe7a
@@ -89,7 +89,15 @@ def get_score(submission_uuid, requirements):
     }
 
 
-def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, scored_at=None):
+def create_assessment(
+        submission_uuid,
+        user_id,
+        options_selected,
+        criterion_feedback,
+        overall_feedback,
+        rubric_dict,
+        scored_at=None
+):
     """
     Create a self-assessment for a submission.
 
@@ -97,6 +105,11 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, scored_at=None):
         submission_uuid (str): The unique identifier for the submission being assessed.
         user_id (str): The ID of the user creating the assessment. This must match the ID of the user who made the submission.
         options_selected (dict): Mapping of rubric criterion names to option values selected.
+        criterion_feedback (dict): Dictionary mapping criterion names to the
+            free-form text feedback the user gave for the criterion.
+            Since criterion feedback is optional, some criteria may not appear
+            in the dictionary.
+        overall_feedback (unicode): Free-form text feedback on the submission overall.
         rubric_dict (dict): Serialized Rubric model.
 
     Kwargs:
...
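For orientation, a call to the updated create_assessment API might look like the sketch below. The argument values are placeholders; submission_uuid, student_id, and rubric_dict are assumed to come from the surrounding XBlock code, as in the mixin changes further down.

    from openassessment.assessment.api import self as self_api

    # Hypothetical values, for illustration only.
    assessment = self_api.create_assessment(
        submission_uuid,                          # submission being self-assessed
        student_id,                               # must match the submitter's ID
        {'Concise': 'Excellent'},                 # options_selected
        {'Concise': 'Clear and to the point.'},   # criterion_feedback (optional per criterion)
        'Good work overall.',                     # overall_feedback
        rubric_dict                               # serialized Rubric model
    )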
@@ -73,3 +73,23 @@ def create_rubric_dict(prompt, criteria):
         "prompt": prompt,
         "criteria": criteria
     }
+
+
+def clean_criterion_feedback(rubric_criteria, criterion_feedback):
+    """
+    Remove per-criterion feedback for criteria with feedback disabled
+    in the rubric.
+
+    Args:
+        rubric_criteria (list): The rubric criteria from the problem definition.
+        criterion_feedback (dict): Mapping of criterion names to feedback text.
+
+    Returns:
+        dict
+
+    """
+    return {
+        criterion['name']: criterion_feedback[criterion['name']]
+        for criterion in rubric_criteria
+        if criterion['name'] in criterion_feedback
+        and criterion.get('feedback', 'disabled') in ['optional', 'required']
+    }
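As a quick, hypothetical illustration of what this helper filters out (the criterion names and feedback strings below are made up):

    rubric_criteria = [
        {'name': 'vocabulary', 'feedback': 'optional'},
        {'name': 'grammar'}   # no 'feedback' key, so it is treated as 'disabled'
    ]
    criterion_feedback = {'vocabulary': 'Nice word choice.', 'grammar': 'A few typos.'}

    # Feedback survives only for criteria whose feedback is 'optional' or 'required'.
    clean_criterion_feedback(rubric_criteria, criterion_feedback)
    # -> {'vocabulary': 'Nice word choice.'}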
@@ -9,10 +9,8 @@ from openassessment.assessment.errors import (
     PeerAssessmentRequestError, PeerAssessmentInternalError, PeerAssessmentWorkflowError
 )
 from openassessment.workflow.errors import AssessmentWorkflowError
-from openassessment.fileupload import api as file_upload_api
-from openassessment.fileupload.api import FileUploadError
 from .resolve_dates import DISTANT_FUTURE
+from .data_conversion import create_rubric_dict, clean_criterion_feedback
 
 logger = logging.getLogger(__name__)

@@ -64,19 +62,15 @@ class PeerAssessmentMixin(object):
         assessment_ui_model = self.get_assessment_module('peer-assessment')
         if assessment_ui_model:
-            rubric_dict = {
-                'criteria': self.rubric_criteria
-            }
             try:
                 # Create the assessment
                 assessment = peer_api.create_assessment(
                     self.submission_uuid,
                     self.get_student_item_dict()["student_id"],
                     data['options_selected'],
-                    self._clean_criterion_feedback(data['criterion_feedback']),
+                    clean_criterion_feedback(self.rubric_criteria, data['criterion_feedback']),
                     data['overall_feedback'],
-                    rubric_dict,
+                    create_rubric_dict(self.prompt, self.rubric_criteria),
                     assessment_ui_model['must_be_graded_by']
                 )

@@ -268,22 +262,3 @@ class PeerAssessmentMixin(object):
             logger.exception(err)
 
         return peer_submission
-
-    def _clean_criterion_feedback(self, criterion_feedback):
-        """
-        Remove per-criterion feedback for criteria with feedback disabled
-        in the rubric.
-
-        Args:
-            criterion_feedback (dict): Mapping of criterion names to feedback text.
-
-        Returns:
-            dict
-
-        """
-        return {
-            criterion['name']: criterion_feedback[criterion['name']]
-            for criterion in self.rubric_criteria
-            if criterion['name'] in criterion_feedback
-            and criterion.get('feedback', 'disabled') in ['optional', 'required']
-        }
@@ -8,6 +8,7 @@ from openassessment.assessment.api import self as self_api
 from openassessment.workflow import api as workflow_api
 from submissions import api as submission_api
 from .resolve_dates import DISTANT_FUTURE
+from .data_conversion import create_rubric_dict, clean_criterion_feedback
 
 logger = logging.getLogger(__name__)

@@ -112,6 +113,12 @@ class SelfAssessmentMixin(object):
         if 'options_selected' not in data:
             return {'success': False, 'msg': _(u"Missing options_selected key in request")}
+        if 'overall_feedback' not in data:
+            return {'success': False, 'msg': _('Must provide overall feedback in the assessment')}
+        if 'criterion_feedback' not in data:
+            return {'success': False, 'msg': _('Must provide feedback for criteria in the assessment')}
+
         if self.submission_uuid is None:
             return {'success': False, 'msg': _(u"You must submit a response before you can perform a self-assessment.")}

@@ -120,7 +127,9 @@
                 self.submission_uuid,
                 self.get_student_item_dict()['student_id'],
                 data['options_selected'],
-                {"criteria": self.rubric_criteria}
+                clean_criterion_feedback(self.rubric_criteria, data['criterion_feedback']),
+                data['overall_feedback'],
+                create_rubric_dict(self.prompt, self.rubric_criteria)
             )
             self.publish_assessment_event("openassessmentblock.self_assess", assessment)
...
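Putting the validation and the API call together, a self_assess request body now needs all three keys. A minimal sketch, with placeholder values mirroring the test fixture below:

    payload = {
        'options_selected': {'Concise': 'Excellent', 'Form': 'Fair'},
        'criterion_feedback': {'Concise': 'Straight to the point.'},  # may omit criteria
        'overall_feedback': 'Good work overall.'
    }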
@@ -9,6 +9,7 @@ import mock
 import pytz
 
 from openassessment.assessment.api import self as self_api
 from openassessment.workflow import api as workflow_api
+from openassessment.xblock.data_conversion import create_rubric_dict
 from .base import XBlockHandlerTestCase, scenario

@@ -23,6 +24,8 @@ class TestSelfAssessment(XBlockHandlerTestCase):
     ASSESSMENT = {
         'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
+        'criterion_feedback': {},
+        'overall_feedback': ""
     }
 
     @scenario('data/self_assessment_scenario.xml', user_id='Bob')

@@ -87,6 +90,7 @@ class TestSelfAssessment(XBlockHandlerTestCase):
         # Submit a self assessment for a rubric with a feedback-only criterion
         assessment_dict = {
             'options_selected': {u'vocabulary': u'good'},
+            'criterion_feedback': {u'vocabulary': 'Awesome job!'},
             'overall_feedback': u''
         }
         resp = self.request(xblock, 'self_assess', json.dumps(assessment_dict), response_format='json')

@@ -99,10 +103,9 @@
         self.assertEqual(assessment['parts'][0]['option']['points'], 1)
 
         # Check the feedback-only criterion score/feedback
-        # The written feedback should default to an empty string
         self.assertEqual(assessment['parts'][1]['criterion']['name'], u'𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞')
         self.assertIs(assessment['parts'][1]['option'], None)
-        self.assertEqual(assessment['parts'][1]['feedback'], u'')
+        self.assertEqual(assessment['parts'][1]['feedback'], u'Awesome job!')
 
     @scenario('data/self_assessment_scenario.xml', user_id='Bob')
     def test_self_assess_workflow_error(self, xblock):

@@ -267,7 +270,8 @@ class TestSelfAssessmentRender(XBlockHandlerTestCase):
             submission['uuid'],
             xblock.get_student_item_dict()['student_id'],
             {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
-            {'criteria': xblock.rubric_criteria}
+            {}, "Good job!",
+            create_rubric_dict(xblock.prompt, xblock.rubric_criteria)
         )
         self._assert_path_and_context(
             xblock, 'openassessmentblock/self/oa_self_complete.html', {},

@@ -302,7 +306,8 @@ class TestSelfAssessmentRender(XBlockHandlerTestCase):
             submission['uuid'],
             xblock.get_student_item_dict()['student_id'],
             {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
-            {'criteria': xblock.rubric_criteria}
+            {}, "Good job!",
+            create_rubric_dict(xblock.prompt, xblock.rubric_criteria)
         )
 
         # This case probably isn't possible, because presumably when we create
...