Commit f7543120 by Eric Fischer Committed by Andy Armstrong

TNL-3714: Add backend support for staff scoring

Changes include:
    -modifies requirements to get an updated version of edx-submissions
    -adds set_staff_score to the AssessmentWorkflow model, which will record
        a new annotated score using edx-submissions functionality (sketched below)
    -prevents recording non-staff scores if a staff score exists
    -modifies update_from_assessment to call set_staff_score as needed
        -this includes changes to both the workflow model and its api
    -modifies get_score to optionally allow staff overrides
    -modifies the assessment serializer to include id information
    -adds this information to the get_score method in the self, ai, peer, and staff
        apis, to expose contributing_assessments where needed
    -fixes a small bug regarding None vs {} in the peer api
    -adds staff-assessment to the xblock, and makes it always available
    -uses the new force_update_score parameter on the workflow api when
        recording a staff assessment
parent 53afc52b
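For orientation, here is a minimal sketch of what the new set_staff_score described above might look like. It assumes the updated edx-submissions exposes a set_score call with annotation keyword arguments; the kwarg names and the annotation label below are assumptions, not confirmed by this diff.

# Hypothetical sketch, not the committed implementation.
from submissions import api as sub_api

def set_staff_score(submission_uuid, score, reason=None):
    """
    Record a staff-annotated score via edx-submissions.

    `score` is assumed to be the dict returned by the staff API's
    get_score: "points_earned", "points_possible", and "staff_id" keys.
    """
    if reason is None:
        reason = u"A staff member has defined the score for this submission"
    sub_api.set_score(
        submission_uuid,
        score["points_earned"],
        score["points_possible"],
        annotation_creator=score["staff_id"],  # assumed kwarg name
        annotation_type=u"staff_defined",      # assumed annotation label
        annotation_reason=reason,
    )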
......@@ -67,7 +67,8 @@ def get_score(submission_uuid, requirements):
requirements (dict): Not used.
Returns:
A dictionary with the points earned and points possible.
A dictionary with the points earned, points possible, and
contributing_assessments information, along with a None staff_id.
"""
assessment = get_latest_assessment(submission_uuid)
......@@ -76,7 +77,9 @@ def get_score(submission_uuid, requirements):
return {
"points_earned": assessment["points_earned"],
"points_possible": assessment["points_possible"]
"points_possible": assessment["points_possible"],
"contributing_assessments": [assessment["id"]],
"staff_id": None,
}
......
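A short sketch of how a caller might consume the extended return value above (the surrounding workflow code is assumed, not shown in this diff):

score = get_score(submission_uuid, {})
if score is not None:
    points = (score["points_earned"], score["points_possible"])
    # New in this change: which assessments produced the score, and
    # which staff member (if any) assigned it.
    assessment_ids = score["contributing_assessments"]
    staff_id = score["staff_id"]  # None outside the staff API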
......@@ -45,7 +45,7 @@ def submitter_is_finished(submission_uuid, requirements):
bool
"""
if requirements is None:
if not requirements:
return False
try:
......@@ -80,7 +80,7 @@ def assessment_is_finished(submission_uuid, requirements):
bool
"""
if requirements is None:
if not requirements:
return False
workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
......@@ -146,7 +146,8 @@ def get_score(submission_uuid, requirements):
must receive to get a score.
Returns:
dict with keys "points_earned" and "points_possible".
A dictionary with the points earned, points possible, and
contributing_assessments information, along with a None staff_id.
"""
if requirements is None:
......@@ -183,12 +184,15 @@ def get_score(submission_uuid, requirements):
for scored_item in items[:requirements["must_be_graded_by"]]:
scored_item.scored = True
scored_item.save()
assessments = [item.assessment for item in items]
return {
"points_earned": sum(
get_assessment_median_scores(submission_uuid).values()
),
"points_possible": items[0].assessment.points_possible,
"points_possible": assessments[0].points_possible,
"contributing_assessments": [assessment.id for assessment in assessments],
"staff_id": None,
}
......
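To make the peer aggregation concrete: points_earned above is the sum of per-criterion medians across the contributing assessments. A self-contained illustration follows; the helper is illustrative only and ignores the library's exact rounding rules.

def median_scores(assessments):
    # Each assessment is a dict mapping criterion name -> points awarded.
    medians = {}
    for criterion in assessments[0]:
        points = sorted(a[criterion] for a in assessments)
        mid = len(points) // 2
        if len(points) % 2:
            medians[criterion] = points[mid]
        else:
            # Even count: average the two middle scores.
            medians[criterion] = (points[mid - 1] + points[mid]) / 2.0
    return medians

peer_scores = [{"clarity": 2, "accuracy": 1}, {"clarity": 3, "accuracy": 1}]
points_earned = sum(median_scores(peer_scores).values())  # 2.5 + 1.0 = 3.5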
......@@ -70,8 +70,8 @@ def get_score(submission_uuid, requirements):
submission_uuid (str): The unique identifier for the submission
requirements (dict): Not used.
Returns:
A dict of points earned and points possible for the given submission.
Returns None if no score can be determined yet.
A dictionary with the points earned, points possible, and
contributing_assessments information, along with a None staff_id.
Examples:
>>> get_score('222bdf3d-a88e-11e3-859e-040ccee02800', {})
{
......@@ -85,7 +85,9 @@ def get_score(submission_uuid, requirements):
return {
"points_earned": assessment["points_earned"],
"points_possible": assessment["points_possible"]
"points_possible": assessment["points_possible"],
"contributing_assessments": [assessment["id"]],
"staff_id": None,
}
......@@ -284,12 +286,15 @@ def get_assessment_scores_by_criteria(submission_uuid):
information to form the median scores, an error is raised.
"""
try:
# This will always create a list of length 1
assessments = list(
Assessment.objects.filter(
score_type=SELF_TYPE, submission_uuid=submission_uuid
).order_by('-scored_at')[:1]
)
scores = Assessment.scores_by_criterion(assessments)
# Since only one assessment is passed in, the median score will be
# the same as that single score.
return Assessment.get_median_score_dict(scores)
except DatabaseError:
error_message = (
......
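The comment above is worth unpacking: because the query slices to a single assessment, the per-criterion medians degenerate to that assessment's own scores. A toy illustration with hypothetical criteria:

# With exactly one assessment, the median equals the only value.
scores_by_criterion = {"clarity": [2], "accuracy": [1]}
medians = {name: points[0] for name, points in scores_by_criterion.items()}
assert medians == {"clarity": 2, "accuracy": 1}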
......@@ -17,11 +17,9 @@ from openassessment.assessment.serializers import (
from openassessment.assessment.errors import (
StaffAssessmentRequestError, StaffAssessmentInternalError
)
from submissions import api as sub_api
logger = logging.getLogger("openassessment.assessment.api.staff")
STAFF_TYPE = "ST"
......@@ -43,20 +41,18 @@ def submitter_is_finished(submission_uuid, requirements):
def assessment_is_finished(submission_uuid, requirements):
"""
Determine if the assessment of the given submission is completed. This
checks to see if staff have completed the assessment.
Determine if the staff assessment step of the given submission is completed.
This checks to see if staff have completed the assessment.
Args:
submission_uuid (str): The UUID of the submission being graded.
requirements (dict): Any variables that may affect this state.
Returns:
True if the assessment has been completed for this submission.
True if a staff assessment has been completed for this submission, or if one is not required.
"""
required = requirements.get('staff', {}).get('required', False)
if required:
return bool(get_latest_assessment(submission_uuid))
if requirements and requirements.get('staff', {}).get('required', False):
return bool(get_latest_staff_assessment(submission_uuid))
return True
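A quick sketch of how the guarded lookup behaves for different requirements values (illustrative calls):

assessment_is_finished(uuid, None)                            # True: no requirements
assessment_is_finished(uuid, {})                              # True: staff not required
assessment_is_finished(uuid, {"staff": {"required": False}})  # True: staff optional
assessment_is_finished(uuid, {"staff": {"required": True}})   # True only once a
                                                              # staff assessment exists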
......@@ -71,20 +67,23 @@ def get_score(submission_uuid, requirements):
requirements (dict): Not used.
Returns:
A dictionary with the points earned and points possible.
A dictionary with the points earned, points possible,
contributing_assessments, and staff_id information.
"""
assessment = get_latest_assessment(submission_uuid)
assessment = get_latest_staff_assessment(submission_uuid)
if not assessment:
return None
return {
"points_earned": assessment["points_earned"],
"points_possible": assessment["points_possible"]
"points_possible": assessment["points_possible"],
"contributing_assessments": [assessment["id"]],
"staff_id": assessment["scorer_id"],
}
def get_latest_assessment(submission_uuid):
def get_latest_staff_assessment(submission_uuid):
"""
Retrieve the latest staff assessment for a submission.
......@@ -96,11 +95,11 @@ def get_latest_assessment(submission_uuid):
or None if no assessments are available
Raises:
StaffAssessmentInternalError
StaffAssessmentInternalError if there are problems connecting to the database.
Example usage:
>>> get_latest_assessment('10df7db776686822e501b05f452dc1e4b9141fe5')
>>> get_latest_staff_assessment('10df7db776686822e501b05f452dc1e4b9141fe5')
{
'points_earned': 6,
'points_possible': 12,
......@@ -130,7 +129,7 @@ def get_latest_assessment(submission_uuid):
def get_assessment_scores_by_criteria(submission_uuid):
"""Get the score for each rubric criterion
"""Get the staff score for each rubric criterion
Args:
submission_uuid (str): The submission uuid is used to get the
......@@ -145,12 +144,15 @@ def get_assessment_scores_by_criteria(submission_uuid):
information from the scores, an error is raised.
"""
try:
# This will always create a list of length 1
assessments = list(
Assessment.objects.filter(
score_type=STAFF_TYPE, submission_uuid=submission_uuid
)[:1]
)
scores = Assessment.scores_by_criterion(assessments)
# Since only one assessment is passed in, the median score will be
# the same as that single score.
return Assessment.get_median_score_dict(scores)
except DatabaseError:
error_message = u"Error getting staff assessment scores for {}".format(submission_uuid)
......@@ -175,6 +177,8 @@ def create_assessment(
Assumes that the user creating the assessment has the permissions to do so.
Args:
submission_uuid (str): The submission uuid for the submission being
assessed.
scorer_id (str): The user ID for the user giving this assessment. This
is required to create an assessment on a submission.
options_selected (dict): Dictionary mapping criterion names to the
......@@ -184,6 +188,10 @@ def create_assessment(
Since criterion feedback is optional, some criteria may not appear
in the dictionary.
overall_feedback (unicode): Free-form text feedback on the submission overall.
rubric_dict (dict): The rubric model associated with this assessment.
Keyword Args:
scored_at (datetime): Optional argument to override the time in which
......@@ -219,13 +227,13 @@ def create_assessment(
return full_assessment_dict(assessment)
except InvalidRubric:
msg = u"Rubric definition was not valid"
logger.exception(msg)
raise StaffAssessmentRequestError(msg)
error_message = u"Rubric definition was not valid"
logger.exception(error_message)
raise StaffAssessmentRequestError(error_message)
except InvalidRubricSelection:
msg = u"Invalid options selected in the rubric"
logger.warning(msg, exc_info=True)
raise StaffAssessmentRequestError(msg)
error_message = u"Invalid options selected in the rubric"
logger.warning(error_message, exc_info=True)
raise StaffAssessmentRequestError(error_message)
except DatabaseError:
error_message = (
u"An error occurred while creating assessment by scorer with ID: {}"
......@@ -249,11 +257,10 @@ def _complete_assessment(
in a single transaction.
Args:
rubric_dict (dict): The rubric model associated with this assessment
scorer_id (str): The user ID for the user giving this assessment. This
is required to create an assessment on a submission.
submission_uuid (str): The submission uuid for the submission being
assessed.
scorer_id (str): The user ID for the user giving this assessment. This
is required to create an assessment on a submission.
options_selected (dict): Dictionary mapping criterion names to the
option names the user selected for that criterion.
criterion_feedback (dict): Dictionary mapping criterion names to the
......@@ -261,6 +268,7 @@ def _complete_assessment(
Since criterion feedback is optional, some criteria may not appear
in the dictionary.
overall_feedback (unicode): Free-form text feedback on the submission overall.
rubric_dict (dict): The rubric model associated with this assessment
scored_at (datetime): Optional argument to override the time in which
the assessment took place. If not specified, scored_at is set to
now.
......
""" Create generic errors that can be shared across different assessment types. """
class AssessmentError(Exception):
""" A generic error for errors that occur during assessment. """
pass
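One payoff of the shared base class: callers can now catch any assessment failure with a single handler. A minimal sketch; the import path mirrors this diff, while the calling code is hypothetical.

import logging

from openassessment.assessment.errors.base import AssessmentError

logger = logging.getLogger(__name__)

try:
    score = get_score(submission_uuid, requirements)
except AssessmentError:
    # Peer, self, and staff errors all derive from AssessmentError,
    # so one except clause covers every assessment type.
    logger.exception(u"Assessment failed for %s", submission_uuid)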
"""
Errors for the peer assessment.
"""
from .base import AssessmentError
class PeerAssessmentError(Exception):
class PeerAssessmentError(AssessmentError):
"""Generic Peer Assessment Error
Raised when an error occurs while processing a request related to the
......
"""
Errors for self-assessment
"""
from .base import AssessmentError
class SelfAssessmentError(Exception):
class SelfAssessmentError(AssessmentError):
"""Generic Self Assessment Error
Raised when an error occurs while processing a request related to the
......
......@@ -2,8 +2,10 @@
Errors for the staff assessment api.
"""
from .base import AssessmentError
class StaffAssessmentError(Exception):
class StaffAssessmentError(AssessmentError):
"""Generic Staff Assessment Error
Raised when an error occurs while processing a request related to
......
......@@ -232,6 +232,7 @@ def full_assessment_dict(assessment, rubric_dict=None):
for part_dict in parts
)
assessment_dict["points_possible"] = rubric_dict["points_possible"]
assessment_dict["id"] = assessment.id
cache.set(assessment_cache_key, assessment_dict)
......
......@@ -51,6 +51,34 @@ RUBRIC = {
]
}
RUBRIC_POSSIBLE_POINTS = sum(
max(
option["points"] for option in criterion["options"]
) for criterion in RUBRIC["criteria"]
)
# Used to generate OPTIONS_SELECTED_DICT. Indices refer to RUBRIC_OPTIONS.
OPTIONS_SELECTED_CHOICES = {
"none": [0, 0],
"few": [0, 1],
"most": [1, 2],
"all": [2, 2],
}
OPTIONS_SELECTED_DICT = {
# This dict is constructed from OPTIONS_SELECTED_CHOICES.
# 'key' is expected to be a string, such as 'none', 'all', etc.
# 'value' is a list, indicating the indices of the RUBRIC_OPTIONS selections that pertain to that key
key: {
"options": {
RUBRIC["criteria"][i]["name"]: RUBRIC_OPTIONS[j]["name"] for i, j in enumerate(value)
},
"expected_points": sum(
RUBRIC_OPTIONS[i]["points"] for i in value
)
} for key, value in OPTIONS_SELECTED_CHOICES.iteritems()
}
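Expanded by hand, the "few" entry of OPTIONS_SELECTED_DICT works out to the following (kept symbolic, since RUBRIC_OPTIONS is defined above this hunk):

OPTIONS_SELECTED_DICT["few"] == {
    "options": {
        # Criterion 0 pairs with option index 0, criterion 1 with option index 1.
        RUBRIC["criteria"][0]["name"]: RUBRIC_OPTIONS[0]["name"],
        RUBRIC["criteria"][1]["name"]: RUBRIC_OPTIONS[1]["name"],
    },
    "expected_points": RUBRIC_OPTIONS[0]["points"] + RUBRIC_OPTIONS[1]["points"],
}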
EXAMPLES = [
{
'answer': (
......
......@@ -183,7 +183,8 @@ def get_workflow_for_submission(submission_uuid, assessment_requirements):
def update_from_assessments(submission_uuid, assessment_requirements):
"""Update our workflow status based on the status of peer and self assessments.
"""
Update our workflow status based on the status of the underlying assessments.
We pass in the `assessment_requirements` each time we make the request
because the canonical requirements are stored in the `OpenAssessmentBlock`
......
"""
Data Conversion utility methods for handling ORA2 XBlock data transformations.
Data Conversion utility methods for handling ORA2 XBlock data transformations and validation.
"""
import json
......@@ -218,3 +218,30 @@ def make_django_template_key(key):
basestring
"""
return key.replace('-', '_')
def verify_assessment_parameters(func):
"""
Verify that the wrapped function receives the parameters required for an assessment.
Used by the staff_assess, self_assess, and peer_assess handlers, which all receive the same data format.
Args:
func - the function to be modified
Returns:
the modified function
"""
def verify_and_call(instance, data, suffix):
# Validate the request
if 'options_selected' not in data:
return {'success': False, 'msg': instance._('Must provide options selected in the assessment')}
if 'overall_feedback' not in data:
return {'success': False, 'msg': instance._('Must provide overall feedback in the assessment')}
if 'criterion_feedback' not in data:
return {'success': False, 'msg': instance._('Must provide feedback for criteria in the assessment')}
return func(instance, data, suffix)
return verify_and_call
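Usage matches the handler changes later in this commit: the decorator sits under @XBlock.json_handler, so the checks run before the handler body. A condensed sketch:

from xblock.core import XBlock

from .data_conversion import verify_assessment_parameters

class PeerAssessmentMixin(object):

    @XBlock.json_handler
    @verify_assessment_parameters
    def peer_assess(self, data, suffix=''):
        # Validation has already run: options_selected, overall_feedback,
        # and criterion_feedback are guaranteed present in `data`.
        return {'success': True, 'msg': u''}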
......@@ -138,10 +138,18 @@ DEFAULT_SELF_ASSESSMENT = {
"due": DEFAULT_DUE,
}
DEFAULT_STAFF_ASSESSMENT = {
"name": "staff-assessment",
"start": DEFAULT_START,
"due": DEFAULT_DUE,
"required": False,
}
DEFAULT_ASSESSMENT_MODULES = [
DEFAULT_STUDENT_TRAINING,
DEFAULT_PEER_ASSESSMENT,
DEFAULT_SELF_ASSESSMENT,
DEFAULT_STAFF_ASSESSMENT,
]
DEFAULT_EDITOR_ASSESSMENTS_ORDER = [
......
......@@ -67,7 +67,7 @@ UI_MODELS = {
"navigation_text": "Your assessment of your response",
"title": "Assess Your Response"
},
"self-assessment": {
"staff-assessment": {
"name": "staff-assessment",
"class_id": "openassessment__staff-assessment",
"navigation_text": "Staff assessment of your response",
......@@ -92,6 +92,7 @@ VALID_ASSESSMENT_TYPES = [
"example-based-assessment",
"peer-assessment",
"self-assessment",
"staff-assessment"
]
......@@ -456,9 +457,10 @@ class OpenAssessmentBlock(
ui_models = [UI_MODELS["submission"]]
for assessment in self.valid_assessments:
if assessment["name"] == "staff-assessment" and assessment["required"] == False:
# Check if staff have graded the assessment
# else
continue
# If we don't have a staff grade, and it's not required, hide
# this UI model.
if not self.staff_assessment_exists(self.submission_uuid):
continue
ui_model = UI_MODELS.get(assessment["name"])
if ui_model:
ui_models.append(dict(assessment, **ui_model))
......
......@@ -12,7 +12,7 @@ from openassessment.workflow.errors import AssessmentWorkflowError
from openassessment.xblock.defaults import DEFAULT_RUBRIC_FEEDBACK_TEXT
from .data_conversion import create_rubric_dict
from .resolve_dates import DISTANT_FUTURE
from .data_conversion import clean_criterion_feedback, create_submission_dict
from .data_conversion import clean_criterion_feedback, create_submission_dict, verify_assessment_parameters
logger = logging.getLogger(__name__)
......@@ -31,6 +31,7 @@ class PeerAssessmentMixin(object):
"""
@XBlock.json_handler
@verify_assessment_parameters
def peer_assess(self, data, suffix=''):
"""Place a peer assessment into OpenAssessment system
......@@ -50,16 +51,6 @@ class PeerAssessmentMixin(object):
and "msg" (unicode) containing additional information if an error occurs.
"""
# Validate the request
if 'options_selected' not in data:
return {'success': False, 'msg': self._('Must provide options selected in the assessment')}
if 'overall_feedback' not in data:
return {'success': False, 'msg': self._('Must provide overall feedback in the assessment')}
if 'criterion_feedback' not in data:
return {'success': False, 'msg': self._('Must provide feedback for criteria in the assessment')}
if self.submission_uuid is None:
return {'success': False, 'msg': self._('You must submit a response before you can peer-assess.')}
......
......@@ -6,9 +6,9 @@ from webob import Response
from openassessment.assessment.api import self as self_api
from openassessment.workflow import api as workflow_api
from submissions import api as submission_api
from .data_conversion import create_rubric_dict
from .resolve_dates import DISTANT_FUTURE
from .data_conversion import clean_criterion_feedback, create_submission_dict
from .data_conversion import (clean_criterion_feedback, create_submission_dict,
create_rubric_dict, verify_assessment_parameters)
logger = logging.getLogger(__name__)
......@@ -102,6 +102,7 @@ class SelfAssessmentMixin(object):
return path, context
@XBlock.json_handler
@verify_assessment_parameters
def self_assess(self, data, suffix=''):
"""
Create a self-assessment for a submission.
......@@ -114,14 +115,6 @@ class SelfAssessmentMixin(object):
Dict with keys "success" (bool) indicating success/failure
and "msg" (unicode) containing additional information if an error occurs.
"""
if 'options_selected' not in data:
return {'success': False, 'msg': self._(u"Missing options_selected key in request")}
if 'overall_feedback' not in data:
return {'success': False, 'msg': self._('Must provide overall feedback in the assessment')}
if 'criterion_feedback' not in data:
return {'success': False, 'msg': self._('Must provide feedback for criteria in the assessment')}
if self.submission_uuid is None:
return {'success': False, 'msg': self._(u"You must submit a response before you can perform a self-assessment.")}
......
......@@ -7,13 +7,13 @@ from staff_area_mixin import require_course_staff
from xblock.core import XBlock
from openassessment.assessment.api import staff as staff_api
from openassessment.workflow import api as workflow_api
from openassessment.assessment.errors import (
StaffAssessmentRequestError, StaffAssessmentInternalError
)
from .data_conversion import create_rubric_dict
from .resolve_dates import DISTANT_FUTURE
from .data_conversion import clean_criterion_feedback, create_submission_dict
from .data_conversion import clean_criterion_feedback, create_submission_dict, verify_assessment_parameters
logger = logging.getLogger(__name__)
......@@ -23,21 +23,20 @@ class StaffAssessmentMixin(object):
This mixin is for all staff-assessment related endpoints.
"""
def staff_assessment_exists(self, submission_uuid):
"""
Returns True if a staff assessment exists for the given submission uuid, False otherwise.
"""
return staff_api.get_latest_staff_assessment(submission_uuid) is not None
@XBlock.json_handler
@require_course_staff("STUDENT_INFO")
@verify_assessment_parameters
def staff_assess(self, data, suffix=''):
"""
Create a staff assessment from a staff submission.
"""
if 'options_selected' not in data:
return {'success': False, 'msg': self._(u"Missing options_selected key in request")}
if 'overall_feedback' not in data:
return {'success': False, 'msg': self._('Must provide overall feedback in the assessment')}
if 'criterion_feedback' not in data:
return {'success': False, 'msg': self._('Must provide feedback for criteria in the assessment')}
if 'submission_uuid' not in data:
return {'success': False, 'msg': self._(u"Missing the submission id of the submission being assessed.")}
......@@ -51,11 +50,12 @@ class StaffAssessmentMixin(object):
create_rubric_dict(self.prompts, self.rubric_criteria_with_labels)
)
self.publish_assessment_event("openassessmentblock.staff_assessment", assessment)
workflow_api.update_from_assessments(assessment["submission_uuid"], {})
except StaffAssessmentRequestError:
logger.warning(
u"An error occurred while submitting a staff assessment "
u"for the submission {}".format(self.submission_uuid),
u"for the submission {}".format(data['submission_uuid']),
exc_info=True
)
msg = self._(u"Your staff assessment could not be submitted.")
......@@ -63,48 +63,9 @@ class StaffAssessmentMixin(object):
except StaffAssessmentInternalError:
logger.exception(
u"An error occurred while submitting a staff assessment "
u"for the submission {}".format(self.submission_uuid),
u"for the submission {}".format(data['submission_uuid']),
)
msg = self._(u"Your staff assessment could not be submitted.")
return {'success': False, 'msg': msg}
else:
return {'success': True, 'msg': u""}
@XBlock.handler
@require_course_staff("STUDENT_INFO")
def render_staff_assessment(self, data, suffix=''):
"""
Render the staff assessment for the given student.
"""
try:
submission_uuid = data.get("submission_uuid")
path, context = self.self_path_and_context(submission_uuid)
except:
msg = u"Could not retrieve staff assessment for submission {}".format(self.submission_uuid)
logger.exception(msg)
return self.render_error(self._(u"An unexpected error occurred."))
else:
return self.render_assessment(path, context)
def staff_path_and_context(self, submission_uuid):
"""
Retrieve the correct template path and template context for the handler to render.
Args:
submission_uuid (str) -
"""
#TODO: add in the workflow for staff grading instead of assuming it's allowed.
submission = submission_api.get_submission(self.submission_uuid)
context = {'allow_latex': self.allow_latex}
context["rubric_criteria"] = self.rubric_criteria_with_labels
context["estimated_time"] = "20 minutes" # TODO: Need to configure this.
context["self_submission"] = create_submission_dict(submission, self.prompts)
# Determine if file upload is supported for this XBlock.
context["allow_file_upload"] = self.allow_file_upload
context['self_file_url'] = self.get_download_url_from_submission(submission)
#TODO: Replace with the staff assessment template when it's been built.
path = 'openassessmentblock/self/oa_self_assessment.html'
return path, context
......@@ -134,7 +134,7 @@ class TestSelfAssessment(XBlockHandlerTestCase):
del assessment['options_selected']
resp = self.request(xblock, 'self_assess', json.dumps(assessment), response_format='json')
self.assertFalse(resp['success'])
self.assertIn('options_selected', resp['msg'])
self.assertIn('options', resp['msg'])
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_self_assess_api_error(self, xblock):
......
......@@ -42,7 +42,7 @@ class TestStaffAssessment(StaffAssessmentTestBase):
self.assertTrue(resp['success'])
# Expect that a staff-assessment was created
assessment = staff_api.get_latest_assessment(submission['uuid'])
assessment = staff_api.get_latest_staff_assessment(submission['uuid'])
self.assertEqual(assessment['submission_uuid'], submission['uuid'])
self.assertEqual(assessment['points_earned'], 5)
self.assertEqual(assessment['points_possible'], 6)
......@@ -67,7 +67,7 @@ class TestStaffAssessment(StaffAssessmentTestBase):
self.assertEqual(assessment['points_possible'], score['points_possible'])
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_staff_assess_permission_error(self, xblock):
def test_permission_error(self, xblock):
# Create a submission for the student
student_item = xblock.get_student_item_dict()
xblock.create_submission(student_item, self.SUBMISSION)
......@@ -75,7 +75,7 @@ class TestStaffAssessment(StaffAssessmentTestBase):
self.assertIn("You do not have permission", resp)
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_staff_assess_invalid_options(self, xblock):
def test_invalid_options(self, xblock):
student_item = xblock.get_student_item_dict()
# Create a submission for the student
......@@ -92,7 +92,7 @@ class TestStaffAssessment(StaffAssessmentTestBase):
self.assertIn('msg', resp)
@scenario('data/self_assessment_scenario.xml', user_id='bob')
def test_staff_assess_assessment_error(self, xblock):
def test_assessment_error(self, xblock):
student_item = xblock.get_student_item_dict()
# Create a submission for the student
......@@ -112,15 +112,3 @@ class TestStaffAssessment(StaffAssessmentTestBase):
resp = self.request(xblock, 'staff_assess', json.dumps(self.ASSESSMENT), response_format='json')
self.assertFalse(resp['success'])
self.assertIn('msg', resp)
class TestStaffAssessmentRender(StaffAssessmentTestBase):
#TODO: test success when staff assessment template exists
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_render_staff_assessment_permission_error(self, xblock):
# Create a submission for the student
student_item = xblock.get_student_item_dict()
xblock.create_submission(student_item, self.SUBMISSION)
resp = self.request(xblock, 'render_staff_assessment', json.dumps(self.ASSESSMENT))
self.assertIn("You do not have permission", resp)
......@@ -19,6 +19,7 @@ class WorkflowMixin(object):
"self-assessment": "self",
"peer-assessment": "peer",
"student-training": "training",
"staff-assessment": "staff"
}
@XBlock.json_handler
......