Commit 98332648 by Usman Khalid, committed by Andy Armstrong

Created initial version of staff assessment step api.

TNL-1162
parent 154c9b80
@@ -67,7 +67,8 @@ def get_score(submission_uuid, requirements):
requirements (dict): Not used.
Returns:
A dictionary with the points earned and points possible.
A dictionary with the points earned, points possible, and
contributing_assessments information.
"""
assessment = get_latest_assessment(submission_uuid)
@@ -76,7 +77,9 @@ def get_score(submission_uuid, requirements):
return {
"points_earned": assessment["points_earned"],
"points_possible": assessment["points_possible"]
"points_possible": assessment["points_possible"],
"contributing_assessments": [assessment["id"]],
"staff_id": None,
}
......
@@ -45,7 +45,7 @@ def submitter_is_finished(submission_uuid, requirements):
bool
"""
if requirements is None:
if not requirements:
return False
try:
@@ -80,7 +80,7 @@ def assessment_is_finished(submission_uuid, requirements):
bool
"""
if requirements is None:
if not requirements:
return False
workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
@@ -146,7 +146,8 @@ def get_score(submission_uuid, requirements):
must receive to get a score.
Returns:
dict with keys "points_earned" and "points_possible".
A dictionary with the points earned, points possible, and
contributing_assessments information.
"""
if requirements is None:
@@ -183,12 +184,15 @@ def get_score(submission_uuid, requirements):
for scored_item in items[:requirements["must_be_graded_by"]]:
scored_item.scored = True
scored_item.save()
assessments = [item.assessment for item in items]
return {
"points_earned": sum(
get_assessment_median_scores(submission_uuid).values()
),
"points_possible": items[0].assessment.points_possible,
"points_possible": assessments[0].points_possible,
"contributing_assessments": [assessment.id for assessment in assessments],
"staff_id": None,
}
......
@@ -70,8 +70,8 @@ def get_score(submission_uuid, requirements):
submission_uuid (str): The unique identifier for the submission
requirements (dict): Not used.
Returns:
A dict of points earned and points possible for the given submission.
Returns None if no score can be determined yet.
A dictionary with the points earned, points possible, and
contributing_assessments information.
Examples:
>>> get_score('222bdf3d-a88e-11e3-859e-040ccee02800', {})
{
@@ -85,7 +85,9 @@ def get_score(submission_uuid, requirements):
return {
"points_earned": assessment["points_earned"],
"points_possible": assessment["points_possible"]
"points_possible": assessment["points_possible"],
"contributing_assessments": [assessment["id"]],
"staff_id": None,
}
......
"""
Public interface for staff grading, used by students/course staff.
"""
import logging
from django.db import DatabaseError, IntegrityError, transaction
from dogapi import dog_stats_api
from openassessment.assessment.models import (
Assessment, AssessmentFeedback, AssessmentPart,
InvalidRubricSelection
)
from openassessment.assessment.serializers import (
AssessmentFeedbackSerializer, RubricSerializer,
full_assessment_dict, rubric_from_dict, serialize_assessments,
InvalidRubric
)
from openassessment.assessment.errors import (
StaffAssessmentRequestError, StaffAssessmentInternalError
)
logger = logging.getLogger("openassessment.assessment.api.staff")
STAFF_TYPE = "ST"
def submitter_is_finished(submission_uuid, requirements):
"""
Determine if the submitter has finished their requirements for staff
assessment. Always returns True.
Args:
submission_uuid (str): Not used.
requirements (dict): Not used.
Returns:
True
"""
return True
def assessment_is_finished(submission_uuid, requirements):
"""
Determine if the assessment of the given submission is completed. This
checks to see if staff have completed the assessment.
Args:
submission_uuid (str): The UUID of the submission being graded.
requirements (dict): Any variables that may affect this state.
Returns:
True if the assessment has been completed for this submission.
"""
if requirements and requirements.get('staff', {}).get('required', False):
return bool(get_latest_staff_assessment(submission_uuid))
return True
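# Illustrative sketch (not part of this module): the workflow layer is
# expected to pass a requirements dict shaped like the one below; only the
# 'staff' key is consulted here. The UUID is a hypothetical placeholder.
#
#     >>> assessment_is_finished(
#     ...     '222bdf3d-a88e-11e3-859e-040ccee02800',
#     ...     {'staff': {'required': True}}
#     ... )
#     False  # becomes True once a staff assessment exists for the submission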
def get_score(submission_uuid, requirements):
"""
Generate a score based on a completed assessment for the given submission.
If no assessment has been completed for this submission, this will return
None.
Args:
submission_uuid (str): The UUID for the submission to get a score for.
requirements (dict): Not used.
Returns:
A dictionary with the points earned, points possible,
contributing_assessments, and staff_id information.
"""
assessment = get_latest_staff_assessment(submission_uuid)
if not assessment:
return None
return {
"points_earned": assessment["points_earned"],
"points_possible": assessment["points_possible"],
"contributing_assessments": [assessment["id"]],
"staff_id": assessment["scorer_id"],
}
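# Illustrative sketch of the returned score dict, assuming a staff assessment
# with hypothetical id 42 that scored 6 of 12 points (values mirror the
# example in get_latest_staff_assessment below):
#
#     >>> get_score('222bdf3d-a88e-11e3-859e-040ccee02800', {})
#     {
#         'points_earned': 6,
#         'points_possible': 12,
#         'contributing_assessments': [42],
#         'staff_id': u'staff',
#     }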
def get_latest_staff_assessment(submission_uuid):
"""
Retrieve the latest staff assessment for a submission.
Args:
submission_uuid (str): The UUID of the submission being assessed.
Returns:
dict: The serialized assessment model
or None if no assessments are available
Raises:
StaffAssessmentInternalError
Example usage:
>>> get_latest_staff_assessment('10df7db776686822e501b05f452dc1e4b9141fe5')
{
'points_earned': 6,
'points_possible': 12,
'scored_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 649284, tzinfo=<UTC>),
'scorer': u"staff",
'feedback': u''
}
"""
try:
assessments = Assessment.objects.filter(
submission_uuid=submission_uuid,
score_type=STAFF_TYPE,
)[:1]
except DatabaseError as ex:
msg = (
u"An error occurred while retrieving staff assessments "
u"for the submission with UUID {uuid}: {ex}"
).format(uuid=submission_uuid, ex=ex)
logger.exception(msg)
raise StaffAssessmentInternalError(msg)
if len(assessments) > 0:
return full_assessment_dict(assessments[0])
else:
return None
def get_assessment_scores_by_criteria(submission_uuid):
"""Get the staff score for each rubric criterion
Args:
submission_uuid (str): The submission uuid is used to get the
assessment used to score this submission.
Returns:
(dict): A dictionary mapping each rubric criterion name to the
median staff assessment score for that criterion.
Raises:
StaffAssessmentInternalError: Raised if an error occurs while
retrieving the assessment scores.
"""
try:
assessments = list(
Assessment.objects.filter(
score_type=STAFF_TYPE, submission_uuid=submission_uuid
)[:1]
)
scores = Assessment.scores_by_criterion(assessments)
return Assessment.get_median_score_dict(scores)
except DatabaseError:
error_message = u"Error getting staff assessment scores for {}".format(submission_uuid)
logger.exception(error_message)
raise StaffAssessmentInternalError(error_message)
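# Illustrative sketch, assuming a rubric with two hypothetical criteria
# ("clarity" and "form") and a single staff assessment: the result maps each
# criterion name to the median staff score for that criterion.
#
#     >>> get_assessment_scores_by_criteria('222bdf3d-a88e-11e3-859e-040ccee02800')
#     {u'clarity': 3, u'form': 2}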
def create_assessment(
submission_uuid,
scorer_id,
options_selected,
criterion_feedback,
overall_feedback,
rubric_dict,
scored_at=None
):
"""Creates an assessment on the given submission.
Assessments are created based on feedback associated with a particular
rubric.
Assumes that the user creating the assessment has the permissions to do so.
Args:
submission_uuid (str): The UUID of the submission being assessed.
scorer_id (str): The user ID for the user giving this assessment. This
is required to create an assessment on a submission.
options_selected (dict): Dictionary mapping criterion names to the
option names the user selected for that criterion.
criterion_feedback (dict): Dictionary mapping criterion names to the
free-form text feedback the user gave for the criterion.
Since criterion feedback is optional, some criteria may not appear
in the dictionary.
overall_feedback (unicode): Free-form text feedback on the submission overall.
rubric_dict (dict): The rubric model associated with this assessment.
Keyword Args:
scored_at (datetime): Optional argument to override the time at which
the assessment took place. If not specified, scored_at is set to
now.
Returns:
dict: the Assessment model, serialized as a dict.
Raises:
StaffAssessmentRequestError: Raised when the submission_uuid is invalid, or
the assessment_dict does not contain the required values to create
an assessment.
StaffAssessmentInternalError: Raised when there is an internal error
while creating a new assessment.
Examples:
>>> options_selected = {"clarity": "Very clear", "precision": "Somewhat precise"}
>>> criterion_feedback = {"clarity": "I thought this essay was very clear."}
>>> feedback = "Your submission was thrilling."
>>> create_assessment("Tim", options_selected, criterion_feedback, feedback, rubric_dict)
"""
try:
assessment = _complete_assessment(
submission_uuid,
scorer_id,
options_selected,
criterion_feedback,
overall_feedback,
rubric_dict,
scored_at
)
return full_assessment_dict(assessment)
except InvalidRubric:
error_message = u"Rubric definition was not valid"
logger.exception(error_message)
raise StaffAssessmentRequestError(error_message)
except InvalidRubricSelection:
error_message = u"Invalid options selected in the rubric"
logger.warning(error_message, exc_info=True)
raise StaffAssessmentRequestError(error_message)
except DatabaseError:
error_message = (
u"An error occurred while creating assessment by scorer with ID: {}"
).format(scorer_id)
logger.exception(error_message)
raise StaffAssessmentInternalError(error_message)
@transaction.commit_on_success
def _complete_assessment(
submission_uuid,
scorer_id,
options_selected,
criterion_feedback,
overall_feedback,
rubric_dict,
scored_at
):
"""
Internal function for atomic assessment creation. Creates a staff assessment
in a single transaction.
Args:
submission_uuid (str): The submission uuid for the submission being
assessed.
scorer_id (str): The user ID for the user giving this assessment. This
is required to create an assessment on a submission.
options_selected (dict): Dictionary mapping criterion names to the
option names the user selected for that criterion.
criterion_feedback (dict): Dictionary mapping criterion names to the
free-form text feedback the user gave for the criterion.
Since criterion feedback is optional, some criteria may not appear
in the dictionary.
overall_feedback (unicode): Free-form text feedback on the submission overall.
rubric_dict (dict): The rubric model associated with this assessment
scored_at (datetime): Optional argument to override the time at which
the assessment took place. If not specified, scored_at is set to
now.
Returns:
The Assessment model
"""
# Get or create the rubric
rubric = rubric_from_dict(rubric_dict)
# Create the staff assessment
assessment = Assessment.create(
rubric,
scorer_id,
submission_uuid,
STAFF_TYPE,
scored_at=scored_at,
feedback=overall_feedback
)
# Create assessment parts for each criterion in the rubric
# This will raise an `InvalidRubricSelection` if the selected options do not
# match the rubric.
AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)
return assessment
@@ -6,5 +6,6 @@ Export errors from all modules defined in this package.
from .peer import *
from .self import *
from .staff import *
from .student_training import *
from .ai import *
"""
Errors for the staff assessment api.
"""
class StaffAssessmentError(Exception):
"""Generic Staff Assessment Error
Raised when an error occurs while processing a request related to
staff assessment.
"""
pass
class StaffAssessmentRequestError(StaffAssessmentError):
"""Error indicating insufficient or incorrect parameters in the request.
Raised when the request does not contain enough information, or contains
incorrect information that prevents the request from being processed.
"""
pass
class StaffAssessmentInternalError(StaffAssessmentError):
"""Error indicating an internal problem independent of API use.
Raised when an internal error has occurred. This should be independent of
the actions or parameters given to the API.
"""
pass
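# Illustrative sketch (not part of this module): because both exceptions share
# StaffAssessmentError as a base class, callers that do not need to distinguish
# request errors from internal errors can catch the base class alone.
#
#     try:
#         staff_api.create_assessment(...)  # hypothetical call site
#     except StaffAssessmentError:
#         logger.exception("Staff assessment failed")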
@@ -232,6 +232,7 @@ def full_assessment_dict(assessment, rubric_dict=None):
for part_dict in parts
)
assessment_dict["points_possible"] = rubric_dict["points_possible"]
assessment_dict["id"] = assessment.id
cache.set(assessment_cache_key, assessment_dict)
......
@@ -177,8 +177,10 @@ def get_workflow_for_submission(submission_uuid, assessment_requirements):
return update_from_assessments(submission_uuid, assessment_requirements)
def update_from_assessments(submission_uuid, assessment_requirements):
"""Update our workflow status based on the status of peer and self assessments.
def update_from_assessments(submission_uuid, assessment_requirements, force_update_score=False):
"""
Update our workflow status based on the status of peer and self assessments,
or staff assessments if using the force_update_score parameter.
We pass in the `assessment_requirements` each time we make the request
because the canonical requirements are stored in the `OpenAssessmentBlock`
@@ -203,6 +205,9 @@ def update_from_assessments(submission_uuid, assessment_requirements):
`must_be_graded_by` to ensure that everyone will get scored.
The intention is to eventually pass in more assessment sequence
specific requirements in this dict.
force_update_score (bool): If True, find the latest staff assessment
and use its score as the submission's final grade, overriding any
other scores that may be present.
Returns:
dict: Assessment workflow information with the following
@@ -258,7 +263,7 @@ def update_from_assessments(submission_uuid, assessment_requirements):
workflow = _get_workflow_model(submission_uuid)
try:
workflow.update_from_assessments(assessment_requirements)
workflow.update_from_assessments(assessment_requirements, force_update_score=force_update_score)
logger.info((
u"Updated workflow for submission UUID {uuid} "
u"with requirements {reqs}"
......
@@ -34,6 +34,7 @@ logger = logging.getLogger('openassessment.workflow.models')
DEFAULT_ASSESSMENT_API_DICT = {
'peer': 'openassessment.assessment.api.peer',
'self': 'openassessment.assessment.api.self',
'staff': 'openassessment.assessment.api.staff',
'training': 'openassessment.assessment.api.student_training',
'ai': 'openassessment.assessment.api.ai',
}
@@ -49,12 +50,14 @@ ASSESSMENT_API_DICT = getattr(
# We then use that score as the student's overall score.
# This Django setting is a list of assessment steps (defined in `settings.ORA2_ASSESSMENTS`)
# in descending priority order.
DEFAULT_ASSESSMENT_SCORE_PRIORITY = ['peer', 'self', 'ai']
DEFAULT_ASSESSMENT_SCORE_PRIORITY = ['staff', 'peer', 'self', 'ai']
ASSESSMENT_SCORE_PRIORITY = getattr(
settings, 'ORA2_ASSESSMENT_SCORE_PRIORITY',
DEFAULT_ASSESSMENT_SCORE_PRIORITY
)
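# Illustrative sketch: a deployment could reorder score priority via Django
# settings; the value below is a hypothetical override, not a shipped default.
#
#     # settings.py
#     ORA2_ASSESSMENT_SCORE_PRIORITY = ['staff', 'self', 'peer', 'ai']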
STAFF_OVERRIDE_ANNOTATION_TYPE = "overridden_staff_score"
STAFF_REQUIRED_ANNOTATION_TYPE = "required_staff_score"
class AssessmentWorkflow(TimeStampedModel, StatusModel):
"""Tracks the open-ended assessment status of a student submission.
@@ -239,11 +242,15 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
else:
requirements = assessment_requirements.get(assessment_step_name, {})
score = get_score_func(self.submission_uuid, requirements)
if assessment_step_name == self.STATUS.staff and score is None:
if requirements and requirements.get(assessment_step_name, {}).get('required', False):
break # A staff score was not found, and one is required. Return None
continue # A staff score was not found, but it is not required, so try the next type of score
break
return score
def update_from_assessments(self, assessment_requirements):
def update_from_assessments(self, assessment_requirements, force_update_score=False):
"""Query assessment APIs and change our status if appropriate.
If the status is done, we do nothing. Once something is done, we never
@@ -277,9 +284,7 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
updates the problem definition.
"""
# If the status is done or cancelled, we're done -- it doesn't matter if requirements have
# changed because we've already written a score.
if self.status in (self.STATUS.done, self.STATUS.cancelled):
if self.status == self.STATUS.cancelled:
return
# Update our AssessmentWorkflowStep models with the latest from our APIs
@@ -287,6 +292,18 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
step_for_name = {step.name: step for step in steps}
if force_update_score:
new_score = self.get_score(assessment_requirements, {'staff': step_for_name.get('staff', None)})
self.set_staff_score(new_score, is_override=True)
self.save()
logger.info((
u"Workflow for submission UUID {uuid} has updated score."
).format(uuid=self.submission_uuid))
return
if self.status == self.STATUS.done:
return
# Go through each step and update its status.
for step in steps:
step.update(self.submission_uuid, assessment_requirements)
@@ -313,7 +330,10 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
score = self.get_score(assessment_requirements, step_for_name)
# If we found a score, then we're done
if score is not None:
self.set_score(score)
if score.get("staff_id", None) is not None:
self.set_staff_score(score)
else:
self.set_score(score)
new_status = self.STATUS.done
# Finally save our changes if the status has changed
@@ -335,12 +355,61 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
# If no steps exist for this AssessmentWorkflow, assume
# peer -> self for backwards compatibility
self.steps.add(
AssessmentWorkflowStep(name=self.STATUS.peer, order_num=0),
AssessmentWorkflowStep(name=self.STATUS.self, order_num=1)
AssessmentWorkflowStep(name=self.STATUS.peer, order_num=1),
AssessmentWorkflowStep(name=self.STATUS.self, order_num=2)
)
steps = list(self.steps.all())
# A staff step will always be available, to allow for staff overrides
try:
self.steps.get(name=self.STATUS.staff)
except AssessmentWorkflowStep.DoesNotExist:
self.steps.add(
AssessmentWorkflowStep(
name=self.STATUS.staff,
order_num=0,
assessment_completed_at=now(),
)
)
return steps
def set_staff_score(self, score, is_override=False, reason=None):
"""
Set a staff score for the workflow.
Allows for staff scores to be set on a submission, with annotations to provide an audit trail if needed.
This method can be used for both required staff grading and staff overrides.
Args:
score (dict): A dict containing 'points_earned', 'points_possible', and 'staff_id'.
is_override (bool): True if the staff score overrides a previous score.
reason (string): An optional explanation of why the staff grade was given.
If not provided, a default message is used.
"""
annotation_type = STAFF_REQUIRED_ANNOTATION_TYPE
if is_override:
annotation_type = STAFF_OVERRIDE_ANNOTATION_TYPE
if reason is None:
reason = "A staff member has overridden a previous score for this submission"
sub_dict = sub_api.get_submission_and_student(self.submission_uuid)
sub_api.reset_score(
sub_dict['student_item']['student_id'],
self.course_id,
self.item_id
)
else:
if reason is None:
reason = "A staff score was required for this submission"
sub_api.set_score(
self.submission_uuid,
score["points_earned"],
score["points_possible"],
annotation_creator=score["staff_id"],
annotation_type=annotation_type,
annotation_reason=reason
)
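# Illustrative sketch: recording a staff override once a score dict (in the
# shape returned by the staff api's get_score) is in hand. The values are
# hypothetical placeholders.
#
#     >>> workflow.set_staff_score(
#     ...     {'points_earned': 6, 'points_possible': 12, 'staff_id': u'staff'},
#     ...     is_override=True,
#     ... )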
def set_score(self, score):
"""
Set a score for the workflow.
@@ -353,11 +422,27 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
'points_possible'.
"""
sub_api.set_score(
self.submission_uuid,
score["points_earned"],
score["points_possible"]
)
if not self.staff_score_exists():
sub_api.set_score(
self.submission_uuid,
score["points_earned"],
score["points_possible"]
)
def staff_score_exists(self):
"""
Check if a staff score exists for this submission.
"""
steps = self._get_steps()
step_for_name = {step.name: step for step in steps}
staff_step = step_for_name.get("staff")
if staff_step is not None:
get_latest_func = getattr(staff_step.api(), 'get_latest_staff_assessment', None)
if get_latest_func is not None:
staff_assessment = get_latest_func(self.submission_uuid)
if staff_assessment is not None:
return True
return False
def cancel(self, assessment_requirements):
"""
@@ -557,6 +642,8 @@ class AssessmentWorkflowStep(models.Model):
step_changed = True
# Has the step received a score?
# If the staff assessment is optional, we mark the step's assessment as complete immediately.
# If staff later submit an assessment, this date will not be updated.
if (not self.is_assessment_complete() and assessment_finished(submission_uuid, step_reqs)):
self.assessment_completed_at = now()
step_changed = True
......
@@ -138,10 +138,18 @@ DEFAULT_SELF_ASSESSMENT = {
"due": DEFAULT_DUE,
}
DEFAULT_STAFF_ASSESSMENT = {
"name": "staff-assessment",
"start": DEFAULT_START,
"due": DEFAULT_DUE,
"required": False,
}
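# Illustrative sketch: a problem definition that makes staff grading mandatory
# would flip the "required" flag (hypothetical variant, not a shipped default):
#
#     REQUIRED_STAFF_ASSESSMENT = dict(DEFAULT_STAFF_ASSESSMENT, required=True)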
DEFAULT_ASSESSMENT_MODULES = [
DEFAULT_STUDENT_TRAINING,
DEFAULT_PEER_ASSESSMENT,
DEFAULT_SELF_ASSESSMENT,
DEFAULT_STAFF_ASSESSMENT,
]
DEFAULT_EDITOR_ASSESSMENTS_ORDER = [
......
@@ -31,6 +31,7 @@ from openassessment.xblock.studio_mixin import StudioMixin
from openassessment.xblock.xml import parse_from_xml, serialize_content_to_xml
from openassessment.xblock.staff_area_mixin import StaffAreaMixin
from openassessment.xblock.workflow_mixin import WorkflowMixin
from openassessment.xblock.staff_assessment_mixin import StaffAssessmentMixin
from openassessment.workflow.errors import AssessmentWorkflowError
from openassessment.xblock.student_training_mixin import StudentTrainingMixin
from openassessment.xblock.validation import validator
@@ -66,6 +67,12 @@ UI_MODELS = {
"navigation_text": "Your assessment of your response",
"title": "Assess Your Response"
},
"staff-assessment": {
"name": "staff-assessment",
"class_id": "openassessment__staff-assessment",
"navigation_text": "Staff assessment of your response",
"title": "Staff Assessment"
},
"grade": {
"name": "grade",
"class_id": "openassessment__grade",
@@ -85,6 +92,7 @@ VALID_ASSESSMENT_TYPES = [
"example-based-assessment",
"peer-assessment",
"self-assessment",
"staff-assessment"
]
@@ -100,6 +108,7 @@ class OpenAssessmentBlock(
SubmissionMixin,
PeerAssessmentMixin,
SelfAssessmentMixin,
StaffAssessmentMixin,
StudioMixin,
GradeMixin,
LeaderboardMixin,
@@ -447,6 +456,10 @@ class OpenAssessmentBlock(
"""
ui_models = [UI_MODELS["submission"]]
for assessment in self.valid_assessments:
if assessment["name"] == "staff-assessment" and assessment["required"] == False:
# Check if staff have graded the assessment
# else
continue
ui_model = UI_MODELS.get(assessment["name"])
if ui_model:
ui_models.append(dict(assessment, **ui_model))
......
"""
A mixin for staff grading.
"""
import logging
from .staff_area_mixin import require_course_staff
from xblock.core import XBlock
from openassessment.assessment.api import staff as staff_api
from openassessment.workflow import api as workflow_api
from openassessment.assessment.errors import (
StaffAssessmentRequestError, StaffAssessmentInternalError
)
from submissions import api as submission_api
from .data_conversion import clean_criterion_feedback, create_rubric_dict, create_submission_dict
logger = logging.getLogger(__name__)
class StaffAssessmentMixin(object):
"""
This mixin is for all staff-assessment related endpoints.
"""
@XBlock.json_handler
@require_course_staff("STUDENT_INFO")
def staff_assess(self, data, suffix=''):
"""
Create a staff assessment from a staff submission.
"""
if 'options_selected' not in data:
return {'success': False, 'msg': self._(u"Missing options_selected key in request")}
if 'overall_feedback' not in data:
return {'success': False, 'msg': self._('Must provide overall feedback in the assessment')}
if 'criterion_feedback' not in data:
return {'success': False, 'msg': self._('Must provide feedback for criteria in the assessment')}
if 'submission_uuid' not in data:
return {'success': False, 'msg': self._(u"Missing the submission id of the submission being assessed.")}
try:
assessment = staff_api.create_assessment(
data['submission_uuid'],
self.get_student_item_dict()["student_id"],
data['options_selected'],
clean_criterion_feedback(self.rubric_criteria, data['criterion_feedback']),
data['overall_feedback'],
create_rubric_dict(self.prompts, self.rubric_criteria_with_labels)
)
self.publish_assessment_event("openassessmentblock.staff_assessment", assessment)
workflow_api.update_from_assessments(assessment["submission_uuid"], {}, force_update_score=True)
except StaffAssessmentRequestError:
logger.warning(
u"An error occurred while submitting a staff assessment "
u"for the submission {}".format(self.submission_uuid),
exc_info=True
)
msg = self._(u"Your staff assessment could not be submitted.")
return {'success': False, 'msg': msg}
except StaffAssessmentInternalError:
logger.exception(
u"An error occurred while submitting a staff assessment "
u"for the submission {}".format(self.submission_uuid),
)
msg = self._(u"Your staff assessment could not be submitted.")
return {'success': False, 'msg': msg}
else:
return {'success': True, 'msg': u""}
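# Illustrative sketch of the JSON body a client would POST to this handler;
# criterion and option names are hypothetical.
#
#     {
#         "submission_uuid": "222bdf3d-a88e-11e3-859e-040ccee02800",
#         "options_selected": {"clarity": "Very clear"},
#         "criterion_feedback": {"clarity": "Nicely argued."},
#         "overall_feedback": "Good work."
#     }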
@XBlock.handler
@require_course_staff("STUDENT_INFO")
def render_staff_assessment(self, data, suffix=''):
"""
Render the staff assessment for the given student.
"""
try:
submission_uuid = data.get("submission_uuid")
path, context = self.staff_path_and_context(submission_uuid)
except Exception:
msg = u"Could not retrieve staff assessment for submission {}".format(self.submission_uuid)
logger.exception(msg)
return self.render_error(self._(u"An unexpected error occurred."))
else:
return self.render_assessment(path, context)
def staff_path_and_context(self, submission_uuid):
"""
Retrieve the correct template path and template context for the handler to render.
Args:
submission_uuid (str): The UUID of the submission to render.
"""
# TODO: add in the workflow for staff grading instead of assuming it's allowed.
submission = submission_api.get_submission(submission_uuid)
context = {'allow_latex': self.allow_latex}
context["rubric_criteria"] = self.rubric_criteria_with_labels
context["estimated_time"] = "20 minutes" # TODO: Need to configure this.
context["self_submission"] = create_submission_dict(submission, self.prompts)
# Determine if file upload is supported for this XBlock.
context["allow_file_upload"] = self.allow_file_upload
context['self_file_url'] = self.get_download_url_from_submission(submission)
# TODO: Replace with the staff assessment template when it's been built.
path = 'openassessmentblock/self/oa_self_assessment.html'
return path, context
# -*- coding: utf-8 -*-
"""
Tests for staff assessment handlers in Open Assessment XBlock.
"""
import json
import mock
import copy
from openassessment.assessment.api import staff as staff_api
from openassessment.xblock.data_conversion import create_rubric_dict
from .base import XBlockHandlerTestCase, scenario
class StaffAssessmentTestBase(XBlockHandlerTestCase):
maxDiff = None
SUBMISSION = (u'ՇﻉรՇ', u'รપ๒๓ٱรรٱѻก')
ASSESSMENT = {
'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
'criterion_feedback': {},
'overall_feedback': ""
}
def set_staff_access(self, xblock):
xblock.xmodule_runtime = mock.Mock(user_is_staff=True)
xblock.xmodule_runtime.anonymous_student_id = 'Bob'
class TestStaffAssessment(StaffAssessmentTestBase):
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_staff_assess_handler(self, xblock):
student_item = xblock.get_student_item_dict()
# Create a submission for the student
submission = xblock.create_submission(student_item, self.SUBMISSION)
# Submit a staff-assessment
self.set_staff_access(xblock)
self.ASSESSMENT['submission_uuid'] = submission['uuid']
resp = self.request(xblock, 'staff_assess', json.dumps(self.ASSESSMENT), response_format='json')
self.assertTrue(resp['success'])
# Expect that a staff-assessment was created
assessment = staff_api.get_latest_staff_assessment(submission['uuid'])
self.assertEqual(assessment['submission_uuid'], submission['uuid'])
self.assertEqual(assessment['points_earned'], 5)
self.assertEqual(assessment['points_possible'], 6)
self.assertEqual(assessment['scorer_id'], 'Bob')
self.assertEqual(assessment['score_type'], 'ST')
self.assertEqual(assessment['feedback'], u'')
parts = sorted(assessment['parts'])
self.assertEqual(len(parts), 2)
self.assertEqual(parts[0]['option']['criterion']['name'], u'Form')
self.assertEqual(parts[0]['option']['name'], 'Fair')
self.assertEqual(parts[1]['option']['criterion']['name'], u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮')
self.assertEqual(parts[1]['option']['name'], u'ﻉซƈﻉɭɭﻉกՇ')
# get the assessment scores by criteria
assessment_by_crit = staff_api.get_assessment_scores_by_criteria(submission["uuid"])
self.assertEqual(assessment_by_crit[u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮'], 3)
self.assertEqual(assessment_by_crit[u'Form'], 2)
score = staff_api.get_score(submission["uuid"], None)
self.assertEqual(assessment['points_earned'], score['points_earned'])
self.assertEqual(assessment['points_possible'], score['points_possible'])
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_permission_error(self, xblock):
# Create a submission for the student
student_item = xblock.get_student_item_dict()
xblock.create_submission(student_item, self.SUBMISSION)
resp = self.request(xblock, 'staff_assess', json.dumps(self.ASSESSMENT))
self.assertIn("You do not have permission", resp)
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_invalid_options(self, xblock):
student_item = xblock.get_student_item_dict()
# Create a submission for the student
submission = xblock.create_submission(student_item, self.SUBMISSION)
self.set_staff_access(xblock)
self.ASSESSMENT['submission_uuid'] = submission['uuid']
for key in self.ASSESSMENT:
assessment_copy = copy.copy(self.ASSESSMENT)
del assessment_copy[key]
resp = self.request(xblock, 'staff_assess', json.dumps(assessment_copy), response_format='json')
self.assertFalse(resp['success'])
self.assertIn('msg', resp)
@scenario('data/self_assessment_scenario.xml', user_id='bob')
def test_assessment_error(self, xblock):
student_item = xblock.get_student_item_dict()
# Create a submission for the student
submission = xblock.create_submission(student_item, self.SUBMISSION)
self.set_staff_access(xblock)
self.ASSESSMENT['submission_uuid'] = submission['uuid']
with mock.patch('openassessment.xblock.staff_assessment_mixin.staff_api') as mock_api:
# Simulate an error
mock_api.create_assessment.side_effect = staff_api.StaffAssessmentRequestError
resp = self.request(xblock, 'staff_assess', json.dumps(self.ASSESSMENT), response_format='json')
self.assertFalse(resp['success'])
self.assertIn('msg', resp)
# Simulate a different error
mock_api.create_assessment.side_effect = staff_api.StaffAssessmentInternalError
resp = self.request(xblock, 'staff_assess', json.dumps(self.ASSESSMENT), response_format='json')
self.assertFalse(resp['success'])
self.assertIn('msg', resp)
class TestStaffAssessmentRender(StaffAssessmentTestBase):
# TODO: test success when staff assessment template exists
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_render_staff_assessment_permission_error(self, xblock):
# Create a submission for the student
student_item = xblock.get_student_item_dict()
xblock.create_submission(student_item, self.SUBMISSION)
resp = self.request(xblock, 'render_staff_assessment', json.dumps(self.ASSESSMENT))
self.assertIn("You do not have permission", resp)
@@ -21,6 +21,7 @@ class WorkflowMixin(object):
"self-assessment": "self",
"peer-assessment": "peer",
"student-training": "training",
"staff-assessment": "staff"
}
@XBlock.json_handler
......
@@ -6,7 +6,7 @@
git+https://github.com/edx/XBlock.git@9c634481dfc85a17dcb3351ca232d7098a38e10e#egg=XBlock
# edx-submissions
git+https://github.com/edx/edx-submissions.git@14aeaa9e30f9a408b34ffaf6d78409dedaad015a#egg=edx-submissions==0.1.0
git+https://github.com/edx/edx-submissions.git@3d3ba2a4ba2d64f37dd1d908c8a1d78c1ac95523#egg=edx-submissions==0.1.1
# Third Party Requirements
boto>=2.32.1,<3.0.0
......