Commit f3871af3 by Eric Fischer

Override submitter requirements on staff override assessment

TNL-4696

When a staff override assessment is recorded, we now treat all of the submitter's
requirements as fulfilled. This allows ORA tooling to match the grade reported on
the student's progress page by placing the AssessmentWorkflow into a DONE state
(see the sketch below).

Acceptance tests have been updated, and are now also included in the quality checks.
parent 76acce95
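
For illustration only (not part of this commit): a minimal sketch of how a caller
exercises the new parameter. The import path and the submission UUID below are
assumptions based on the hunks in this diff.

    from openassessment.workflow import api as workflow_api

    # Hypothetical submission UUID, for illustration only.
    submission_uuid = "2df7b12a-..."

    # With override_submitter_requirements=True, recording the staff score also
    # stamps submitter_completed_at on every step, so the workflow can reach the
    # DONE state even if, say, required peer assessments were never given.
    workflow_api.update_from_assessments(
        submission_uuid,
        None,  # assessment_requirements are not consulted for a staff override
        override_submitter_requirements=True,
    )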
@@ -436,6 +436,8 @@ def get_assessment_median_scores(submission_uuid):
        assessments = [item.assessment for item in items]
        scores = Assessment.scores_by_criterion(assessments)
        return Assessment.get_median_score_dict(scores)
+    except PeerWorkflow.DoesNotExist:
+        return {}
    except DatabaseError:
        error_message = (
            u"Error getting assessment median scores for submission {uuid}"
@@ -177,7 +177,7 @@ def get_workflow_for_submission(submission_uuid, assessment_requirements):
    return update_from_assessments(submission_uuid, assessment_requirements)


-def update_from_assessments(submission_uuid, assessment_requirements):
+def update_from_assessments(submission_uuid, assessment_requirements, override_submitter_requirements=False):
    """
    Update our workflow status based on the status of the underlying assessments.
@@ -259,7 +259,7 @@ def update_from_assessments(submission_uuid, assessment_requirements):
    workflow = _get_workflow_model(submission_uuid)

    try:
-        workflow.update_from_assessments(assessment_requirements)
+        workflow.update_from_assessments(assessment_requirements, override_submitter_requirements)
        logger.info((
            u"Updated workflow for submission UUID {uuid} "
            u"with requirements {reqs}"
@@ -171,7 +171,7 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
            # If we auto-added a staff step, it is optional and should be marked complete immediately
            if step.name == "staff" and staff_auto_added:
-                step.assessment_completed_at=now()
+                step.assessment_completed_at = now()
                step.save()

        # For the first valid step, update the workflow status
@@ -209,6 +209,12 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
        return score

    def status_details(self):
+        """
+        Returns workflow status in the form of a dictionary. Each step in the
+        workflow is a key, and each key maps to a dictionary defining whether
+        the step is complete (submitter requirements fulfilled) and graded (the
+        submission has been assessed).
+        """
        status_dict = {}
        steps = self._get_steps()
        for step in steps:
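
For illustration (inferred from the docstring added above, not part of the diff),
status_details() for a problem with peer and staff steps would return a dict
shaped roughly like this; actual step names depend on the problem configuration.

    # Hypothetical return value of workflow.status_details().
    {
        "peer": {"complete": False, "graded": False},
        "staff": {"complete": True, "graded": True},
    }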
@@ -259,7 +265,7 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
        return score

-    def update_from_assessments(self, assessment_requirements):
+    def update_from_assessments(self, assessment_requirements, override_submitter_requirements=False):
        """Query assessment APIs and change our status if appropriate.

        If the status is done, we do nothing. Once something is done, we never
@@ -320,7 +326,9 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
            # Update the assessment_completed_at field for all steps
            # All steps are considered "assessment complete", as the staff score will override all
            for step in steps:
-                step.assessment_completed_at=now()
+                step.assessment_completed_at = now()
+                if override_submitter_requirements:
+                    step.submitter_completed_at = now()
                step.save()

        if self.status == self.STATUS.done:
@@ -346,8 +354,10 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
        # If the submitter has done all they need to do, let's check to see if
        # all steps have been fully assessed (i.e. we can score it).
-        if (new_status == self.STATUS.waiting and
-                all(step.assessment_completed_at for step in steps)):
+        if (
+                new_status == self.STATUS.waiting and
+                all(step.assessment_completed_at for step in steps)
+        ):
            score = self.get_score(assessment_requirements, step_for_name)

            # If we found a score, then we're done
@@ -398,7 +408,7 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
        return steps

-    def set_staff_score(self, score, is_override=False, reason=None):
+    def set_staff_score(self, score, reason=None):
        """
        Set a staff score for the workflow.
@@ -425,9 +435,9 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
            self.submission_uuid,
            score["points_earned"],
            score["points_possible"],
-            annotation_creator = score["staff_id"],
-            annotation_type = annotation_type,
-            annotation_reason = reason
+            annotation_creator=score["staff_id"],
+            annotation_type=annotation_type,
+            annotation_reason=reason
        )

    def set_score(self, score):
@@ -602,9 +612,16 @@ class AssessmentWorkflowStep(models.Model):
        ordering = ["workflow", "order_num"]

    def is_submitter_complete(self):
+        """
+        Used to determine whether the submitter of the response has completed
+        their required actions.
+        """
        return self.submitter_completed_at is not None

    def is_assessment_complete(self):
+        """
+        Used to determine whether the response has been assessed at this step.
+        """
        return self.assessment_completed_at is not None

    def api(self):
@@ -660,12 +677,12 @@ class AssessmentWorkflowStep(models.Model):
        assessment_finished = getattr(self.api(), 'assessment_is_finished', default_finished)

        # Has the user completed their obligations for this step?
-        if (not self.is_submitter_complete() and submitter_finished(submission_uuid, step_reqs)):
+        if not self.is_submitter_complete() and submitter_finished(submission_uuid, step_reqs):
            self.submitter_completed_at = now()
            step_changed = True

        # Has the step received a score?
-        if (not self.is_assessment_complete() and assessment_finished(submission_uuid, step_reqs)):
+        if not self.is_assessment_complete() and assessment_finished(submission_uuid, step_reqs):
            self.assessment_completed_at = now()
            step_changed = True
@@ -567,15 +567,16 @@ class GradeMixin(object):
        # If criteria/options in the problem definition do NOT have a "label" field
        # (because they were created before this change),
        # we create a new label that has the same value as "name".
-        for part in assessment['parts']:
-            criterion_label_key = part['criterion']['name']
-            part['criterion']['label'] = criterion_labels.get(criterion_label_key, part['criterion']['name'])
-
-            # We need to be a little bit careful here: some assessment parts
-            # have only written feedback, so they're not associated with any options.
-            # If that's the case, we don't need to add the label field.
-            if part.get('option') is not None:
-                option_label_key = (part['criterion']['name'], part['option']['name'])
-                part['option']['label'] = option_labels.get(option_label_key, part['option']['name'])
+        if assessment is not None:
+            for part in assessment['parts']:
+                criterion_label_key = part['criterion']['name']
+                part['criterion']['label'] = criterion_labels.get(criterion_label_key, part['criterion']['name'])
+
+                # We need to be a little bit careful here: some assessment parts
+                # have only written feedback, so they're not associated with any options.
+                # If that's the case, we don't need to add the label field.
+                if part.get('option') is not None:
+                    option_label_key = (part['criterion']['name'], part['option']['name'])
+                    part['option']['label'] = option_labels.get(option_label_key, part['option']['name'])

        return assessment
@@ -53,7 +53,11 @@ class StaffAssessmentMixin(object):
            )

            assess_type = data.get('assess_type', 'regrade')
            self.publish_assessment_event("openassessmentblock.staff_assess", assessment, type=assess_type)
-            workflow_api.update_from_assessments(assessment["submission_uuid"], None)
+            workflow_api.update_from_assessments(
+                assessment["submission_uuid"],
+                None,
+                override_submitter_requirements=(assess_type == 'regrade')
+            )
        except StaffAssessmentRequestError:
            logger.warning(
#!/usr/bin/env bash

-MAX_PEP8_VIOLATIONS=110
+MAX_PEP8_VIOLATIONS=111

mkdir -p test/logs
PEP8_VIOLATIONS=test/logs/pep8.txt
touch $PEP8_VIOLATIONS

-pep8 --config=.pep8 openassessment > $PEP8_VIOLATIONS
+pep8 --config=.pep8 openassessment test > $PEP8_VIOLATIONS

NUM_PEP8_VIOLATIONS=$(cat $PEP8_VIOLATIONS | wc -l)
echo "Found" $NUM_PEP8_VIOLATIONS "pep8 violations, threshold is" $MAX_PEP8_VIOLATIONS
#!/usr/bin/env bash

-MAX_PYLINT_VIOLATIONS=519
+MAX_PYLINT_VIOLATIONS=504

mkdir -p test/logs
PYLINT_VIOLATIONS=test/logs/pylint.txt
touch $PYLINT_VIOLATIONS

-pylint --rcfile=pylintrc openassessment --msg-template='"{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}"'> $PYLINT_VIOLATIONS
+pylint --rcfile=pylintrc openassessment test --msg-template='"{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}"'> $PYLINT_VIOLATIONS

./scripts/run-pylint.py $PYLINT_VIOLATIONS $MAX_PYLINT_VIOLATIONS