Commit 01540db4 by Eric Fischer, committed by GitHub

Merge pull request #951 from edx/efischer/dont_push_master

Actually check if staff grade is staff grade
parents 9701adb4 553c924e
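
The substantive change is in AssessmentWorkflow: a previously recorded score is now
recognized as a staff score by its annotations, rather than by the mere presence of a
staff_id on the score. A minimal sketch of the new check, written here as a
hypothetical standalone helper (the actual logic is inline in the model, and the
old_score dict shape follows what sub_api.get_latest_score_for_submission returns in
the diff below):

STAFF_ANNOTATION_TYPE = "staff_defined"

def is_staff_score(old_score):
    # Sketch: a score counts as a staff score only if one of its
    # annotations was recorded with the staff annotation type.
    return any(
        annotation['annotation_type'] == STAFF_ANNOTATION_TYPE
        for annotation in old_score.get('annotations', [])
    )

The workflow then re-records the staff score only when no prior score exists, the
prior score is not a staff score, or the points earned differ.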
# coding=utf-8
"""
Tests for staff assessments.
"""
import copy
import mock
from datetime import timedelta
@@ -6,8 +9,7 @@ from datetime import timedelta
from django.db import DatabaseError
from django.test.utils import override_settings
from django.utils.timezone import now
-from ddt import ddt, data, file_data, unpack
-from nose.tools import raises
+from ddt import ddt, data, unpack
from .constants import OPTIONS_SELECTED_DICT, RUBRIC, RUBRIC_OPTIONS, RUBRIC_POSSIBLE_POINTS, STUDENT_ITEM
from openassessment.assessment.test.test_ai import (
@@ -20,7 +22,7 @@ from openassessment.test_utils import CacheResetTest
from openassessment.assessment.api import staff as staff_api, ai as ai_api, peer as peer_api
from openassessment.assessment.api.self import create_assessment as self_assess
from openassessment.assessment.api.peer import create_assessment as peer_assess
-from openassessment.assessment.models import Assessment, PeerWorkflow, StaffWorkflow
+from openassessment.assessment.models import Assessment, StaffWorkflow
from openassessment.assessment.errors import StaffAssessmentRequestError, StaffAssessmentInternalError
from openassessment.workflow import api as workflow_api
from submissions import api as sub_api
@@ -41,21 +43,30 @@ class TestStaffAssessment(CacheResetTest):
@staticmethod
@override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
def _ai_assess(sub):
"""
Helper to fulfill ai assessment requirements.
"""
# Note that CLASSIFIER_SCORE_OVERRIDES matches OPTIONS_SELECTED_DICT['most'] scores
train_classifiers(RUBRIC, AIGradingTest.CLASSIFIER_SCORE_OVERRIDES)
ai_api.on_init(sub, rubric=RUBRIC, algorithm_id=ALGORITHM_ID)
return ai_api.get_latest_assessment(sub)
@staticmethod
-def _peer_assess(sub, scorer_id, scores):
+def _peer_assess(scores):
"""
Helper to fulfill peer assessment requirements.
"""
bob_sub, bob = TestStaffAssessment._create_student_and_submission("Bob", "Bob's answer", problem_steps=['peer'])
peer_api.get_submission_to_assess(bob_sub["uuid"], 1)
return peer_assess(bob_sub["uuid"], bob["student_id"], scores, dict(), "", RUBRIC, 1)
ASSESSMENT_TYPES_DDT = [
('self', lambda sub, scorer_id, scores: self_assess(sub, scorer_id, scores, dict(), "", RUBRIC)),
-('peer', lambda sub, scorer_id, scores: TestStaffAssessment._peer_assess(sub, scorer_id, scores)),
-('staff', lambda sub, scorer_id, scores: staff_api.create_assessment(sub, scorer_id, scores, dict(), "", RUBRIC)),
+('peer', lambda sub, scorer_id, scores: TestStaffAssessment._peer_assess(scores)),
+(
+'staff',
+lambda sub, scorer_id, scores: staff_api.create_assessment(sub, scorer_id, scores, dict(), "", RUBRIC)
+),
('ai', lambda sub, scorer_id, scores: TestStaffAssessment._ai_assess(sub))
]
@@ -79,7 +90,7 @@ class TestStaffAssessment(CacheResetTest):
when staff scores are not required.
"""
# Create assessment
-tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
+tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer")
# Staff assess it
assessment = staff_api.create_assessment(
@@ -103,7 +114,7 @@ class TestStaffAssessment(CacheResetTest):
when staff scores are required.
"""
# Create assessment
-tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer", problem_steps=['staff'])
+tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer", problem_steps=['staff'])
# Verify that we're still waiting on a staff assessment
self._verify_done_state(tim_sub["uuid"], self.STEP_REQUIREMENTS_WITH_STAFF, expect_done=False)
@@ -260,7 +271,7 @@ class TestStaffAssessment(CacheResetTest):
a bug that had been previously present.
"""
# Tim(student) makes a submission, for a problem that requires peer assessment
-tim_sub, tim = TestStaffAssessment._create_student_and_submission("Tim", "Tim's answer", problem_steps=['peer'])
+tim_sub, _ = TestStaffAssessment._create_student_and_submission("Tim", "Tim's answer", problem_steps=['peer'])
# Bob(student) also makes a submission for that problem
bob_sub, bob = TestStaffAssessment._create_student_and_submission("Bob", "Bob's answer", problem_steps=['peer'])
@@ -269,13 +280,13 @@ class TestStaffAssessment(CacheResetTest):
staff_score = "none"
# Dumbledore(staff) uses override ability to provide a score for both submissions
-tim_assessment = staff_api.create_assessment(
+staff_api.create_assessment(
tim_sub["uuid"],
"Dumbledore",
OPTIONS_SELECTED_DICT[staff_score]["options"], dict(), "",
RUBRIC,
)
-bob_assessment = staff_api.create_assessment(
+staff_api.create_assessment(
bob_sub["uuid"],
"Dumbledore",
OPTIONS_SELECTED_DICT[staff_score]["options"], dict(), "",
@@ -302,9 +313,27 @@ class TestStaffAssessment(CacheResetTest):
self.assertEqual(tim_workflow["score"], None)
self.assertNotEqual(tim_workflow["status"], "done")
+def test_update_with_override(self):
+"""
+Test that, when viewing a submission with a staff override present, the workflow is not updated repeatedly.
+See TNL-6092 for some historical context.
+"""
+tim_sub, _ = TestStaffAssessment._create_student_and_submission("Tim", "Tim's answer", problem_steps=['self'])
+staff_api.create_assessment(
+tim_sub["uuid"],
+"Dumbledore",
+OPTIONS_SELECTED_DICT["none"]["options"], dict(), "",
+RUBRIC,
+)
+workflow_api.get_workflow_for_submission(tim_sub["uuid"], {})
+with mock.patch('openassessment.workflow.models.sub_api.reset_score') as mock_reset:
+workflow_api.get_workflow_for_submission(tim_sub["uuid"], {})
+self.assertFalse(mock_reset.called)
def test_invalid_rubric_exception(self):
# Create a submission
-tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
+tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer")
# Define invalid rubric
invalid_rubric = copy.deepcopy(RUBRIC)
@@ -314,7 +343,7 @@ class TestStaffAssessment(CacheResetTest):
# Try to staff assess with invalid rubric
with self.assertRaises(StaffAssessmentRequestError) as context_manager:
-staff_assessment = staff_api.create_assessment(
+staff_api.create_assessment(
tim_sub["uuid"],
"Dumbledore",
OPTIONS_SELECTED_DICT["most"]["options"], dict(), "",
@@ -336,11 +365,11 @@ class TestStaffAssessment(CacheResetTest):
dict_to_use[RUBRIC["criteria"][0]["name"]] = None
# Create a submission
-tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
+tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer")
# Try to staff assess with invalid options selected
with self.assertRaises(StaffAssessmentRequestError) as context_manager:
-staff_assessment = staff_api.create_assessment(
+staff_api.create_assessment(
tim_sub["uuid"],
"Dumbledore",
dict_to_use, dict(), "",
@@ -351,7 +380,7 @@ class TestStaffAssessment(CacheResetTest):
@mock.patch.object(Assessment.objects, 'filter')
def test_database_filter_error_handling(self, mock_filter):
# Create a submission
-tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
+tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer")
# Note that we have to define this side effect *after* creating the submission
mock_filter.side_effect = DatabaseError("KABOOM!")
@@ -380,7 +409,7 @@ class TestStaffAssessment(CacheResetTest):
# Try to create a staff assessment, handle database errors
with self.assertRaises(StaffAssessmentInternalError) as context_manager:
-staff_assessment = staff_api.create_assessment(
+staff_api.create_assessment(
"000000",
"Dumbledore",
OPTIONS_SELECTED_DICT['most']['options'], dict(), "",
@@ -392,8 +421,8 @@ class TestStaffAssessment(CacheResetTest):
)
def test_fetch_next_submission(self):
-bob_sub, bob = self._create_student_and_submission("bob", "bob's answer")
-tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
+bob_sub, _ = self._create_student_and_submission("bob", "bob's answer")
+_, tim = self._create_student_and_submission("Tim", "Tim's answer")
submission = staff_api.get_submission_to_assess(tim['course_id'], tim['item_id'], tim['student_id'])
self.assertIsNotNone(submission)
self.assertEqual(bob_sub, submission)
@@ -429,30 +458,30 @@ class TestStaffAssessment(CacheResetTest):
self.assertEqual(tim_to_grade, bob_to_grade)
def test_next_submission_error(self):
-tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
+_, tim = self._create_student_and_submission("Tim", "Tim's answer")
with mock.patch('openassessment.assessment.api.staff.submissions_api.get_submission') as patched_get_submission:
patched_get_submission.side_effect = sub_api.SubmissionNotFoundError('Failed')
with self.assertRaises(staff_api.StaffAssessmentInternalError):
-submission = staff_api.get_submission_to_assess(tim['course_id'], tim['item_id'], tim['student_id'])
+staff_api.get_submission_to_assess(tim['course_id'], tim['item_id'], tim['student_id'])
def test_no_available_submissions(self):
-tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
+_, tim = self._create_student_and_submission("Tim", "Tim's answer")
# Use a non-existent course and non-existent item.
submission = staff_api.get_submission_to_assess('test_course_id', 'test_item_id', tim['student_id'])
self.assertIsNone(submission)
def test_cancel_staff_workflow(self):
-tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
+tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer")
workflow_api.cancel_workflow(tim_sub['uuid'], "Test Cancel", "Bob", {})
workflow = StaffWorkflow.objects.get(submission_uuid=tim_sub['uuid'])
self.assertTrue(workflow.is_cancelled)
def test_grading_statistics(self):
-bob_sub, bob = self._create_student_and_submission("bob", "bob's answer")
+_, bob = self._create_student_and_submission("bob", "bob's answer")
course_id = bob['course_id']
item_id = bob['item_id']
-tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
-sue_sub, sue = self._create_student_and_submission("Sue", "Sue's answer")
+_, tim = self._create_student_and_submission("Tim", "Tim's answer")
+self._create_student_and_submission("Sue", "Sue's answer")
stats = staff_api.get_staff_grading_statistics(course_id, item_id)
self.assertEqual(stats, {'graded': 0, 'ungraded': 3, 'in-progress': 0})
@@ -466,7 +495,7 @@ class TestStaffAssessment(CacheResetTest):
self.assertEqual(stats, {'graded': 0, 'ungraded': 1, 'in-progress': 2})
# Grade one of the submissions
-staff_assessment = staff_api.create_assessment(
+staff_api.create_assessment(
tim_to_grade["uuid"],
tim['student_id'],
OPTIONS_SELECTED_DICT["all"]["options"], dict(), "",
@@ -503,6 +532,6 @@ class TestStaffAssessment(CacheResetTest):
if 'peer' in steps:
peer_api.on_start(submission["uuid"])
if 'ai' in steps:
-init_params['ai'] = {'rubric':RUBRIC, 'algorithm_id':ALGORITHM_ID}
+init_params['ai'] = {'rubric': RUBRIC, 'algorithm_id': ALGORITHM_ID}
workflow_api.create_workflow(submission["uuid"], steps, init_params)
return submission, new_student_item
@@ -82,6 +82,8 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
DEFAULT_ASSESSMENT_SCORE_PRIORITY
)
+STAFF_ANNOTATION_TYPE = "staff_defined"
submission_uuid = models.CharField(max_length=36, db_index=True, unique=True)
uuid = UUIDField(version=1, db_index=True, unique=True)
@@ -315,9 +317,12 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
# new_staff_score is just the most recent staff score, it may already be recorded in sub_api
old_score = sub_api.get_latest_score_for_submission(self.submission_uuid)
if (
-not old_score or # There is no recorded score
-not old_score.get('staff_id') or # The recorded score is not a staff score
-old_score['points_earned'] != new_staff_score['points_earned'] # Previous staff score doesn't match
+# Does a prior score exist? Is it a staff score? Do the points earned match?
+not old_score or
+not self.STAFF_ANNOTATION_TYPE in [
+annotation['annotation_type'] for annotation in old_score['annotations']
+] or
+old_score['points_earned'] != new_staff_score['points_earned']
):
# Set the staff score using submissions api, and log that fact
self.set_staff_score(new_staff_score)
@@ -426,7 +431,6 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
will be used in the event that this parameter is not provided.
"""
-annotation_type = "staff_defined"
if reason is None:
reason = "A staff member has defined the score for this submission"
sub_dict = sub_api.get_submission_and_student(self.submission_uuid)
@@ -440,7 +444,7 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
score["points_earned"],
score["points_possible"],
annotation_creator=score["staff_id"],
-annotation_type=annotation_type,
+annotation_type=self.STAFF_ANNOTATION_TYPE,
annotation_reason=reason
)
......
@@ -6,7 +7,7 @@
git+https://github.com/edx/XBlock.git@xblock-0.4.12#egg=XBlock==0.4.12
# edx-submissions
-git+https://github.com/edx/edx-submissions.git@1.1.2#egg=edx-submissions==1.1.2
+git+https://github.com/edx/edx-submissions.git@1.1.4#egg=edx-submissions==1.1.4
# Third Party Requirements
boto>=2.32.1,<3.0.0
......
#!/usr/bin/env bash
-MAX_PYLINT_VIOLATIONS=504
+MAX_PYLINT_VIOLATIONS=472
mkdir -p test/logs
PYLINT_VIOLATIONS=test/logs/pylint.txt
......