Commit eff99610 by Eric Fischer

Merge pull request #788 from edx/ora-staff-grading

Feature branch: ORA staff grading
parents 1f342822 68ce06b6
@@ -37,5 +37,6 @@ script:
 branches:
   only:
     - master
+    - ora-staff-grading
 after_success:
   coveralls
@@ -16,6 +16,7 @@ if __name__ == "__main__":
     import logging
     logging.captureWarnings(True)
     sys.argv.append('--noinput')
+    sys.argv.append('--logging-clear-handlers')
     from django.core.management import execute_from_command_line
     execute_from_command_line(sys.argv)
@@ -24,14 +24,14 @@ from openassessment.assessment.worker import grading as grading_tasks
 logger = logging.getLogger(__name__)

-def submitter_is_finished(submission_uuid, requirements):
+def submitter_is_finished(submission_uuid, ai_requirements):
     """
     Determine if the submitter has finished their requirements for Example
     Based Assessment. Always returns True.

     Args:
         submission_uuid (str): Not used.
-        requirements (dict): Not used.
+        ai_requirements (dict): Not used.

     Returns:
         True
@@ -40,14 +40,14 @@ def submitter_is_finished(submission_uuid, requirements):
     return True

-def assessment_is_finished(submission_uuid, requirements):
+def assessment_is_finished(submission_uuid, ai_requirements):
     """
     Determine if the assessment of the given submission is completed. This
     checks to see if the AI has completed the assessment.

     Args:
         submission_uuid (str): The UUID of the submission being graded.
-        requirements (dict): Not used.
+        ai_requirements (dict): Not used.

     Returns:
         True if the assessment has been completed for this submission.
@@ -56,7 +56,7 @@ def assessment_is_finished(submission_uuid, requirements):
     return bool(get_latest_assessment(submission_uuid))

-def get_score(submission_uuid, requirements):
+def get_score(submission_uuid, ai_requirements):
     """
     Generate a score based on a completed assessment for the given submission.
     If no assessment has been completed for this submission, this will return
@@ -64,10 +64,11 @@ def get_score(submission_uuid, requirements):
     Args:
         submission_uuid (str): The UUID for the submission to get a score for.
-        requirements (dict): Not used.
+        ai_requirements (dict): Not used.

     Returns:
-        A dictionary with the points earned and points possible.
+        A dictionary with the points earned, points possible, and
+        contributing_assessments information, along with a None staff_id.

     """
     assessment = get_latest_assessment(submission_uuid)
@@ -76,7 +77,9 @@ def get_score(submission_uuid, requirements):
     return {
         "points_earned": assessment["points_earned"],
-        "points_possible": assessment["points_possible"]
+        "points_possible": assessment["points_possible"],
+        "contributing_assessments": [assessment["id"]],
+        "staff_id": None,
     }
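Note: every assessment type's get_score in this branch now returns the same dictionary shape, so the workflow layer can record which assessments contributed to a score and, for staff grading, who assigned it. A minimal sketch of that shape, with illustrative values only (the staff assessment API itself is not shown in this diff):

    score = {
        "points_earned": 10,               # illustrative value
        "points_possible": 14,             # illustrative value
        "contributing_assessments": [42],  # ids of the assessments the score came from
        "staff_id": None,                  # presumably set only by the staff assessment API
    }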
@@ -28,7 +28,7 @@ logger = logging.getLogger("openassessment.assessment.api.peer")
 PEER_TYPE = "PE"

-def submitter_is_finished(submission_uuid, requirements):
+def submitter_is_finished(submission_uuid, peer_requirements):
     """
     Check whether the submitter has made the required number of assessments.
@@ -38,30 +38,32 @@ def submitter_is_finished(submission_uuid, requirements):
     Args:
         submission_uuid (str): The UUID of the submission being tracked.
-        requirements (dict): Dictionary with the key "must_grade" indicating
+        peer_requirements (dict): Dictionary with the key "must_grade" indicating
             the required number of submissions the student must grade.

     Returns:
         bool

     """
-    if requirements is None:
+    if peer_requirements is None:
         return False

     try:
         workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
         if workflow.completed_at is not None:
             return True
-        elif workflow.num_peers_graded() >= requirements["must_grade"]:
+        elif workflow.num_peers_graded() >= peer_requirements["must_grade"]:
             workflow.completed_at = timezone.now()
             workflow.save()
             return True
         return False
     except PeerWorkflow.DoesNotExist:
         return False
+    except KeyError:
+        raise PeerAssessmentRequestError(u'Requirements dict must contain "must_grade" key')
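For illustration, a hedged sketch of how a caller such as the workflow API (not part of this diff) might use this; the values are made up, and a requirements dict missing "must_grade" now surfaces as PeerAssessmentRequestError rather than a bare KeyError:

    peer_requirements = {"must_grade": 5, "must_be_graded_by": 3}  # illustrative values
    if submitter_is_finished(submission_uuid, peer_requirements):
        pass  # the learner has graded enough peers to move on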
-def assessment_is_finished(submission_uuid, requirements):
+def assessment_is_finished(submission_uuid, peer_requirements):
     """
     Check whether the submitter has received enough assessments
     to get a score.
@@ -72,7 +74,7 @@ def assessment_is_finished(submission_uuid, requirements):
     Args:
         submission_uuid (str): The UUID of the submission being tracked.
-        requirements (dict): Dictionary with the key "must_be_graded_by"
+        peer_requirements (dict): Dictionary with the key "must_be_graded_by"
             indicating the required number of assessments the student
             must receive to get a score.
@@ -80,7 +82,7 @@ def assessment_is_finished(submission_uuid, requirements):
         bool

     """
-    if requirements is None:
+    if not peer_requirements:
         return False

     workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
@@ -91,7 +93,7 @@ def assessment_is_finished(submission_uuid, requirements):
         assessment__submission_uuid=submission_uuid,
         assessment__score_type=PEER_TYPE
     )
-    return scored_items.count() >= requirements["must_be_graded_by"]
+    return scored_items.count() >= peer_requirements["must_be_graded_by"]

 def on_start(submission_uuid):
@@ -135,7 +137,7 @@ def on_start(submission_uuid):
         raise PeerAssessmentInternalError(error_message)

-def get_score(submission_uuid, requirements):
+def get_score(submission_uuid, peer_requirements):
     """
     Retrieve a score for a submission if requirements have been satisfied.
@@ -146,14 +148,15 @@ def get_score(submission_uuid, requirements):
             must receive to get a score.

     Returns:
-        dict with keys "points_earned" and "points_possible".
+        A dictionary with the points earned, points possible, and
+        contributing_assessments information, along with a None staff_id.

     """
-    if requirements is None:
+    if peer_requirements is None:
         return None

     # User hasn't completed their own submission yet
-    if not submitter_is_finished(submission_uuid, requirements):
+    if not submitter_is_finished(submission_uuid, peer_requirements):
         return None

     workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
@@ -168,7 +171,7 @@ def get_score(submission_uuid, requirements):
         assessment__score_type=PEER_TYPE
     ).order_by('-assessment')

-    submission_finished = items.count() >= requirements["must_be_graded_by"]
+    submission_finished = items.count() >= peer_requirements["must_be_graded_by"]

     if not submission_finished:
         return None
@@ -180,15 +183,18 @@ def get_score(submission_uuid, requirements):
     # which is not supported by some versions of MySQL.
     # Although this approach generates more database queries, the number is likely to
     # be relatively small (at least 1 and very likely less than 5).
-    for scored_item in items[:requirements["must_be_graded_by"]]:
+    for scored_item in items[:peer_requirements["must_be_graded_by"]]:
         scored_item.scored = True
         scored_item.save()
+    assessments = [item.assessment for item in items]

     return {
         "points_earned": sum(
             get_assessment_median_scores(submission_uuid).values()
         ),
-        "points_possible": items[0].assessment.points_possible,
+        "points_possible": assessments[0].points_possible,
+        "contributing_assessments": [assessment.id for assessment in assessments],
+        "staff_id": None,
     }
@@ -281,16 +287,16 @@ def create_assessment(
         logger.exception(message)
         raise PeerAssessmentWorkflowError(message)
     except InvalidRubric:
-        msg = u"Rubric definition was not valid"
+        msg = u"The rubric definition is not valid."
         logger.exception(msg)
         raise PeerAssessmentRequestError(msg)
     except InvalidRubricSelection:
-        msg = u"Invalid options selected in the rubric"
+        msg = u"Invalid options were selected in the rubric."
         logger.warning(msg, exc_info=True)
         raise PeerAssessmentRequestError(msg)
     except DatabaseError:
         error_message = (
-            u"An error occurred while retrieving the peer workflow item by scorer with ID: {}"
+            u"An error occurred while creating an assessment by the scorer with this ID: {}"
         ).format(scorer_id)
         logger.exception(error_message)
         raise PeerAssessmentInternalError(error_message)
@@ -477,7 +483,7 @@ def has_finished_required_evaluating(submission_uuid, required_assessments):
     return done, peers_graded

-def get_assessments(submission_uuid, scored_only=True, limit=None):
+def get_assessments(submission_uuid, limit=None):
     """Retrieve the assessments for a submission.

     Retrieves all the assessments for a submissions. This API returns related
@@ -489,9 +495,6 @@ def get_assessments(submission_uuid, scored_only=True, limit=None):
             associated with. Required.

     Keyword Arguments:
-        scored (boolean): Only retrieve the assessments used to generate a score
-            for this submission.
         limit (int): Limit the returned assessments. If None, returns all.
@@ -507,7 +510,7 @@ def get_assessments(submission_uuid, scored_only=True, limit=None):
             while retrieving the assessments associated with this submission.

     Examples:
-        >>> get_assessments("1", scored_only=True, limit=2)
+        >>> get_assessments("1", limit=2)
         [
             {
                 'points_earned': 6,
@@ -527,15 +530,10 @@ def get_assessments(submission_uuid, scored_only=True, limit=None):
     """
     try:
-        if scored_only:
-            assessments = PeerWorkflowItem.get_scored_assessments(
-                submission_uuid
-            )[:limit]
-        else:
-            assessments = Assessment.objects.filter(
-                submission_uuid=submission_uuid,
-                score_type=PEER_TYPE
-            )[:limit]
+        assessments = Assessment.objects.filter(
+            submission_uuid=submission_uuid,
+            score_type=PEER_TYPE
+        )[:limit]
         return serialize_assessments(assessments)
     except DatabaseError:
         error_message = (
@@ -545,7 +543,7 @@ def get_assessments(submission_uuid, scored_only=True, limit=None):
         raise PeerAssessmentInternalError(error_message)

-def get_submitted_assessments(submission_uuid, scored_only=True, limit=None):
+def get_submitted_assessments(submission_uuid, limit=None):
     """Retrieve the assessments created by the given submission's author.

     Retrieves all the assessments created by the given submission's author. This
@@ -558,8 +556,6 @@ def get_submitted_assessments(submission_uuid, scored_only=True, limit=None):
             we are requesting. Required.

     Keyword Arguments:
-        scored (boolean): Only retrieve the assessments used to generate a score
-            for this submission.
         limit (int): Limit the returned assessments. If None, returns all.

     Returns:
@@ -575,7 +571,7 @@ def get_submitted_assessments(submission_uuid, scored_only=True, limit=None):
             while retrieving the assessments associated with this submission.

     Examples:
-        >>> get_submitted_assessments("1", scored_only=True, limit=2)
+        >>> get_submitted_assessments("1", limit=2)
         [
             {
                 'points_earned': 6,
@@ -602,8 +598,6 @@ def get_submitted_assessments(submission_uuid, scored_only=True, limit=None):
             scorer=workflow,
             assessment__isnull=False
         )
-        if scored_only:
-            items = items.exclude(scored=False)
         assessments = Assessment.objects.filter(
             pk__in=[item.assessment.pk for item in items])[:limit]
         return serialize_assessments(assessments)
@@ -23,13 +23,13 @@ SELF_TYPE = "SE"
 logger = logging.getLogger("openassessment.assessment.api.self")

-def submitter_is_finished(submission_uuid, requirements):
+def submitter_is_finished(submission_uuid, self_requirements):
     """
     Check whether a self-assessment has been completed for a submission.

     Args:
         submission_uuid (str): The unique identifier of the submission.
-        requirements (dict): Any attributes of the assessment module required
+        self_requirements (dict): Any attributes of the assessment module required
             to determine if this assessment is complete. There are currently
             no requirements for a self-assessment.

     Returns:
@@ -43,14 +43,14 @@ def submitter_is_finished(submission_uuid, requirements):
     ).exists()

-def assessment_is_finished(submission_uuid, requirements):
+def assessment_is_finished(submission_uuid, self_requirements):
     """
     Check whether a self-assessment has been completed. For self-assessment,
     this function is synonymous with submitter_is_finished.

     Args:
         submission_uuid (str): The unique identifier of the submission.
-        requirements (dict): Any attributes of the assessment module required
+        self_requirements (dict): Any attributes of the assessment module required
             to determine if this assessment is complete. There are currently
             no requirements for a self-assessment.

     Returns:
@@ -59,19 +59,19 @@ def assessment_is_finished(submission_uuid, requirements):
         >>> assessment_is_finished('222bdf3d-a88e-11e3-859e-040ccee02800', {})
         True

     """
-    return submitter_is_finished(submission_uuid, requirements)
+    return submitter_is_finished(submission_uuid, self_requirements)

-def get_score(submission_uuid, requirements):
+def get_score(submission_uuid, self_requirements):
     """
     Get the score for this particular assessment.

     Args:
         submission_uuid (str): The unique identifier for the submission
-        requirements (dict): Not used.
+        self_requirements (dict): Not used.

     Returns:
-        A dict of points earned and points possible for the given submission.
-        Returns None if no score can be determined yet.
+        A dictionary with the points earned, points possible, and
+        contributing_assessments information, along with a None staff_id.

     Examples:
         >>> get_score('222bdf3d-a88e-11e3-859e-040ccee02800', {})
         {
@@ -85,7 +85,9 @@ def get_score(submission_uuid, requirements):
     return {
         "points_earned": assessment["points_earned"],
-        "points_possible": assessment["points_possible"]
+        "points_possible": assessment["points_possible"],
+        "contributing_assessments": [assessment["id"]],
+        "staff_id": None,
     }
@@ -284,12 +286,15 @@ def get_assessment_scores_by_criteria(submission_uuid):
     information to form the median scores, an error is raised.
     """
     try:
+        # This will always create a list of length 1
         assessments = list(
             Assessment.objects.filter(
                 score_type=SELF_TYPE, submission_uuid=submission_uuid
             ).order_by('-scored_at')[:1]
         )
         scores = Assessment.scores_by_criterion(assessments)
+        # Since this is only being sent one score, the median score will be the
+        # same as the only score.
         return Assessment.get_median_score_dict(scores)
     except DatabaseError:
         error_message = (
@@ -24,14 +24,14 @@ from openassessment.assessment.errors import (
 logger = logging.getLogger(__name__)

-def submitter_is_finished(submission_uuid, requirements):   # pylint:disable=W0613
+def submitter_is_finished(submission_uuid, training_requirements):   # pylint:disable=W0613
     """
     Check whether the student has correctly assessed
     all the training example responses.

     Args:
         submission_uuid (str): The UUID of the student's submission.
-        requirements (dict): Must contain "num_required" indicating
+        training_requirements (dict): Must contain "num_required" indicating
             the number of examples the student must assess.

     Returns:
@@ -41,11 +41,11 @@ def submitter_is_finished(submission_uuid, requirements):   # pylint:disable=W0613
         StudentTrainingRequestError

     """
-    if requirements is None:
+    if training_requirements is None:
         return False

     try:
-        num_required = int(requirements['num_required'])
+        num_required = int(training_requirements['num_required'])
     except KeyError:
         raise StudentTrainingRequestError(u'Requirements dict must contain "num_required" key')
     except ValueError:
@@ -6,5 +6,6 @@ Export errors from all modules defined in this package.
 from .peer import *
 from .self import *
+from .staff import *
 from .student_training import *
 from .ai import *
""" Create generic errors that can be shared across different assessment types. """
class AssessmentError(Exception):
""" A generic error for errors that occur during assessment. """
pass
""" """
Errors for the peer assessment. Errors for the peer assessment.
""" """
from .base import AssessmentError
class PeerAssessmentError(Exception): class PeerAssessmentError(AssessmentError):
"""Generic Peer Assessment Error """Generic Peer Assessment Error
Raised when an error occurs while processing a request related to the Raised when an error occurs while processing a request related to the
......
""" """
Errors for self-assessment Errors for self-assessment
""" """
from .base import AssessmentError
class SelfAssessmentError(Exception):
class SelfAssessmentError(AssessmentError):
"""Generic Self Assessment Error """Generic Self Assessment Error
Raised when an error occurs while processing a request related to the Raised when an error occurs while processing a request related to the
......
"""
Errors for the staff assessment api.
"""
from .base import AssessmentError
class StaffAssessmentError(AssessmentError):
"""Generic Staff Assessment Error
Raised when an error occurs while processing a request related to
staff assessment.
"""
pass
class StaffAssessmentRequestError(StaffAssessmentError):
"""Error indicating insufficient or incorrect parameters in the request.
Raised when the request does not contain enough information, or incorrect
information which does not allow the request to be processed.
"""
pass
class StaffAssessmentInternalError(StaffAssessmentError):
"""Error indicating an internal problem independent of API use.
Raised when an internal error has occurred. This should be independent of
the actions or parameters given to the API.
"""
pass
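Because the peer, self, and staff error hierarchies now share the AssessmentError base class introduced in errors/base.py above, calling code can handle them uniformly. A minimal sketch under that assumption (the caller-side helpers here are hypothetical):

    from openassessment.assessment.errors.base import AssessmentError
    from openassessment.assessment.errors.staff import StaffAssessmentRequestError

    try:
        grade_submission()              # hypothetical caller-side helper
    except StaffAssessmentRequestError:
        show_validation_message()       # hypothetical: type-specific handling still works
    except AssessmentError:
        log_and_show_generic_error()    # hypothetical: one catch-all across assessment types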
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models
import django.utils.timezone


class Migration(migrations.Migration):

    dependencies = [
        ('assessment', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='StaffWorkflow',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('scorer_id', models.CharField(max_length=40, db_index=True)),
                ('course_id', models.CharField(max_length=40, db_index=True)),
                ('item_id', models.CharField(max_length=128, db_index=True)),
                ('submission_uuid', models.CharField(unique=True, max_length=128, db_index=True)),
                ('created_at', models.DateTimeField(default=django.utils.timezone.now, db_index=True)),
                ('grading_completed_at', models.DateTimeField(null=True, db_index=True)),
                ('grading_started_at', models.DateTimeField(null=True, db_index=True)),
                ('cancelled_at', models.DateTimeField(null=True, db_index=True)),
                ('assessment', models.CharField(max_length=128, null=True, db_index=True)),
            ],
            options={
                'ordering': ['created_at', 'id'],
            },
        ),
    ]
@@ -8,3 +8,4 @@ from .peer import *
 from .training import *
 from .student_training import *
 from .ai import *
+from .staff import *
@@ -573,7 +573,7 @@ class Assessment(models.Model):
         """
         assessments = list(assessments)  # Force us to read it all
         if not assessments:
-            return []
+            return {}

         # Generate a cache key that represents all the assessments we're being
         # asked to grab scores from (comma separated list of assessment IDs)
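A hedged illustration of why an empty dict is the safer fallback here: callers of scores_by_criterion treat the result as a criterion-name to scores mapping (for example when computing median scores), and an empty mapping keeps that code path working where an empty list would not behave like one:

    scores = Assessment.scores_by_criterion([])   # now returns {} for the empty case
    for criterion_name, criterion_scores in scores.items():
        pass  # simply skipped when there are no assessments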
"""
Models for managing staff assessments.
"""
from datetime import timedelta
from django.db import models, DatabaseError
from django.utils.timezone import now
from openassessment.assessment.models.base import Assessment
from openassessment.assessment.errors import StaffAssessmentInternalError
class StaffWorkflow(models.Model):
"""
Internal Model for tracking Staff Assessment Workflow
This model can be used to determine the following information required
throughout the Staff Assessment Workflow:
1) Get next submission that requires assessment.
2) Does a submission have a staff assessment?
3) Does this staff member already have a submission open for assessment?
4) Close open assessments when completed.
"""
# Amount of time before a lease on a submission expires
TIME_LIMIT = timedelta(hours=8)
scorer_id = models.CharField(max_length=40, db_index=True)
course_id = models.CharField(max_length=40, db_index=True)
item_id = models.CharField(max_length=128, db_index=True)
submission_uuid = models.CharField(max_length=128, db_index=True, unique=True)
created_at = models.DateTimeField(default=now, db_index=True)
grading_completed_at = models.DateTimeField(null=True, db_index=True)
grading_started_at = models.DateTimeField(null=True, db_index=True)
cancelled_at = models.DateTimeField(null=True, db_index=True)
assessment = models.CharField(max_length=128, db_index=True, null=True)
class Meta:
ordering = ["created_at", "id"]
app_label = "assessment"
@property
def is_cancelled(self):
"""
Check if the workflow is cancelled.
Returns:
True/False
"""
return bool(self.cancelled_at)
@classmethod
def get_workflow_statistics(cls, course_id, item_id):
"""
Returns the number of graded, ungraded, and in-progress submissions for staff grading.
Args:
course_id (str): The course that this problem belongs to
item_id (str): The student_item (problem) that we want to know statistics about.
Returns:
dict: a dictionary that contains the following keys: 'graded', 'ungraded', and 'in-progress'
"""
timeout = (now() - cls.TIME_LIMIT).strftime("%Y-%m-%d %H:%M:%S")
ungraded = cls.objects.filter(
models.Q(grading_started_at=None) | models.Q(grading_started_at__lte=timeout),
course_id=course_id, item_id=item_id, grading_completed_at=None, cancelled_at=None
).count()
in_progress = cls.objects.filter(
course_id=course_id, item_id=item_id, grading_completed_at=None, cancelled_at=None,
grading_started_at__gt=timeout
).count()
graded = cls.objects.filter(
course_id=course_id, item_id=item_id, cancelled_at=None
).exclude(grading_completed_at=None).count()
return {'ungraded': ungraded, 'in-progress': in_progress, 'graded': graded}
@classmethod
def get_submission_for_review(cls, course_id, item_id, scorer_id):
"""
Find a submission for staff assessment. This function will find the next
submission that requires assessment, excluding any submission that has been
completely graded, or is actively being reviewed by other staff members.
Args:
submission_uuid (str): The submission UUID from the student
requesting a submission for assessment. This is used to explicitly
avoid giving the student their own submission, and determines the
associated Peer Workflow.
item_id (str): The student_item that we would like to retrieve submissions for.
scorer_id (str): The user id of the staff member scoring this submission
Returns:
submission_uuid (str): The submission_uuid for the submission to review.
Raises:
StaffAssessmentInternalError: Raised when there is an error retrieving
the workflows for this request.
"""
timeout = (now() - cls.TIME_LIMIT).strftime("%Y-%m-%d %H:%M:%S")
try:
# Search for existing submissions that the scorer has worked on.
staff_workflows = StaffWorkflow.objects.filter(
course_id=course_id,
item_id=item_id,
scorer_id=scorer_id,
grading_completed_at=None,
cancelled_at=None,
)
# If no existing submissions exist, then get any other
# available workflows.
if not staff_workflows:
staff_workflows = StaffWorkflow.objects.filter(
models.Q(scorer_id='') | models.Q(grading_started_at__lte=timeout),
course_id=course_id,
item_id=item_id,
grading_completed_at=None,
cancelled_at=None,
)
if not staff_workflows:
return None
workflow = staff_workflows[0]
workflow.scorer_id = scorer_id
workflow.grading_started_at = now()
workflow.save()
return workflow.submission_uuid
except DatabaseError:
error_message = (
u"An internal error occurred while retrieving a submission for staff grading"
)
logger.exception(error_message)
raise StaffAssessmentInternalError(error_message)
def close_active_assessment(self, assessment, scorer_id):
"""
Assign assessment to workflow, and mark the grading as complete.
"""
self.assessment = assessment.id
self.scorer_id = scorer_id
self.grading_completed_at = now()
self.save()
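A minimal sketch of how the staff grading flow described in the class docstring might drive this model; only StaffWorkflow's own methods come from this diff, and the assessment-creation step is a hypothetical stand-in for the staff assessment API:

    from openassessment.assessment.models import StaffWorkflow

    def grade_next_submission(course_id, item_id, scorer_id, make_assessment):
        # make_assessment is a hypothetical callable that creates and returns an
        # Assessment for the given submission (that API layer is not in this diff).
        submission_uuid = StaffWorkflow.get_submission_for_review(course_id, item_id, scorer_id)
        if submission_uuid is None:
            return None  # nothing is waiting for staff grading
        assessment = make_assessment(submission_uuid)
        workflow = StaffWorkflow.objects.get(submission_uuid=submission_uuid)
        workflow.close_active_assessment(assessment, scorer_id)
        return assessment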
@@ -232,6 +232,7 @@ def full_assessment_dict(assessment, rubric_dict=None):
         for part_dict in parts
     )
     assessment_dict["points_possible"] = rubric_dict["points_possible"]
+    assessment_dict["id"] = assessment.id

     cache.set(assessment_cache_key, assessment_dict)
@@ -51,6 +51,34 @@ RUBRIC = {
     ]
 }

+RUBRIC_POSSIBLE_POINTS = sum(
+    max(
+        option["points"] for option in criterion["options"]
+    ) for criterion in RUBRIC["criteria"]
+)
+
+# Used to generate OPTIONS_SELECTED_DICT. Indices refer to RUBRIC_OPTIONS.
+OPTIONS_SELECTED_CHOICES = {
+    "none": [0, 0],
+    "few": [0, 1],
+    "most": [1, 2],
+    "all": [2, 2],
+}
+
+OPTIONS_SELECTED_DICT = {
+    # This dict is constructed from OPTIONS_SELECTED_CHOICES.
+    # 'key' is expected to be a string, such as 'none', 'all', etc.
+    # 'value' is a list, indicating the indices of the RUBRIC_OPTIONS selections that pertain to that key.
+    key: {
+        "options": {
+            RUBRIC["criteria"][i]["name"]: RUBRIC_OPTIONS[j]["name"] for i, j in enumerate(value)
+        },
+        "expected_points": sum(
+            RUBRIC_OPTIONS[i]["points"] for i in value
+        )
+    } for key, value in OPTIONS_SELECTED_CHOICES.iteritems()
+}
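As a worked example of the comprehension above, with a hypothetical rubric of two criteria and RUBRIC_OPTIONS worth 0, 1, and 2 points (the real constants are defined elsewhere in this test module and are not shown in the diff), the "most" entry (option indices [1, 2]) would expand to roughly:

    OPTIONS_SELECTED_DICT["most"] == {
        "options": {
            "criterion_one": "option_1",   # RUBRIC_OPTIONS[1]["name"], hypothetical names
            "criterion_two": "option_2",   # RUBRIC_OPTIONS[2]["name"], hypothetical names
        },
        "expected_points": 1 + 2,          # RUBRIC_OPTIONS[1]["points"] + RUBRIC_OPTIONS[2]["points"]
    }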
 EXAMPLES = [
     {
         'answer': (
@@ -104,7 +104,7 @@ ASSESSMENT_DICT_FAIL = {
     }
 }

-# Answers are against RUBRIC_DICT -- this is worth 12 points
+# Answers are against RUBRIC_DICT -- this is worth 14 points
 ASSESSMENT_DICT_PASS = {
     'overall_feedback': u"这是中国",
     'criterion_feedback': {},
@@ -116,7 +116,7 @@ ASSESSMENT_DICT_PASS = {
     }
 }

-# Answers are against RUBRIC_DICT -- this is worth 12 points
+# Answers are against RUBRIC_DICT -- this is worth 14 points
 # Feedback text is one character over the limit.
 LONG_FEEDBACK_TEXT = u"是" * Assessment.MAX_FEEDBACK_SIZE + "."
 ASSESSMENT_DICT_HUGE = {
@@ -322,7 +322,7 @@ class TestPeerApi(CacheResetTest):
             RUBRIC_DICT,
             REQUIRED_GRADED_BY,
         )
-        assessments = peer_api.get_assessments(sub["uuid"], scored_only=False)
+        assessments = peer_api.get_assessments(sub["uuid"])
         self.assertEqual(1, len(assessments))

     @file_data('data/valid_assessments.json')
@@ -340,7 +340,7 @@ class TestPeerApi(CacheResetTest):
             REQUIRED_GRADED_BY,
             MONDAY,
         )
-        assessments = peer_api.get_assessments(sub["uuid"], scored_only=False)
+        assessments = peer_api.get_assessments(sub["uuid"])
         self.assertEqual(1, len(assessments))
         self.assertEqual(assessments[0]["scored_at"], MONDAY)
@@ -859,14 +859,12 @@ class TestPeerApi(CacheResetTest):
         )
         self.assertEqual(assessment["points_earned"], 6)
         self.assertEqual(assessment["points_possible"], 14)

-        submitted_assessments = peer_api.get_submitted_assessments(bob_sub["uuid"], scored_only=True)
-        self.assertEqual(0, len(submitted_assessments))
-
-        submitted_assessments = peer_api.get_submitted_assessments(bob_sub["uuid"], scored_only=False)
+        submitted_assessments = peer_api.get_submitted_assessments(bob_sub["uuid"])
         self.assertEqual(1, len(submitted_assessments))

     def test_get_submitted_assessments_with_bad_submission(self):
-        submitted_assessments = peer_api.get_submitted_assessments("bad-uuid", scored_only=True)
+        submitted_assessments = peer_api.get_submitted_assessments("bad-uuid")
         self.assertEqual(0, len(submitted_assessments))

     def test_find_active_assessments(self):
@@ -1122,7 +1120,7 @@ class TestPeerApi(CacheResetTest):
         bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
         peer_api.get_submission_to_assess(bob_sub['uuid'], REQUIRED_GRADED_BY)
         mock_filter.side_effect = DatabaseError("Oh no.")
-        submitted_assessments = peer_api.get_submitted_assessments(bob_sub["uuid"], scored_only=False)
+        submitted_assessments = peer_api.get_submitted_assessments(bob_sub["uuid"])
         self.assertEqual(1, len(submitted_assessments))

     @patch.object(PeerWorkflow.objects, 'raw')
@@ -1253,11 +1251,11 @@ class TestPeerApi(CacheResetTest):
         tim, _ = self._create_student_and_submission("Tim", "Tim's answer")
         peer_api.get_assessment_median_scores(tim["uuid"])

-    @patch.object(PeerWorkflowItem, 'get_scored_assessments')
+    @patch.object(Assessment.objects, 'filter')
     @raises(peer_api.PeerAssessmentInternalError)
     def test_get_assessments_db_error(self, mock_filter):
-        mock_filter.side_effect = DatabaseError("Bad things happened")
         tim, _ = self._create_student_and_submission("Tim", "Tim's answer")
+        mock_filter.side_effect = DatabaseError("Bad things happened")
         peer_api.get_assessments(tim["uuid"])

     @patch.object(PeerWorkflow.objects, 'get_or_create')
@@ -1276,7 +1274,7 @@ class TestPeerApi(CacheResetTest):
             MONDAY,
         )

-    @patch.object(PeerWorkflowItem, 'get_scored_assessments')
+    @patch.object(Assessment.objects, 'filter')
     @raises(peer_api.PeerAssessmentInternalError)
     def test_error_on_get_assessment(self, mock_filter):
         self._create_student_and_submission("Tim", "Tim's answer")
@@ -1372,15 +1370,15 @@ class TestPeerApi(CacheResetTest):
         )

         # Make sure Tim has one assessment.
-        tim_assessments = peer_api.get_assessments(tim_sub['uuid'], scored_only=False)
+        tim_assessments = peer_api.get_assessments(tim_sub['uuid'])
         self.assertEqual(1, len(tim_assessments))

         # Make sure Sally has one assessment.
-        sally_assessments = peer_api.get_assessments(sally_sub['uuid'], scored_only=False)
+        sally_assessments = peer_api.get_assessments(sally_sub['uuid'])
         self.assertEqual(1, len(sally_assessments))

         # Make sure Jane has no assessment.
-        jane_assessments = peer_api.get_assessments(jane_sub['uuid'], scored_only=False)
+        jane_assessments = peer_api.get_assessments(jane_sub['uuid'])
         self.assertEqual(0, len(jane_assessments))

     def test_get_submission_to_assess_no_workflow(self):
@@ -1472,14 +1470,14 @@ class TestPeerApi(CacheResetTest):
             required_graded_by
         )

-        # Tim grades Bob, so now Bob has one assessment
+        # Tim grades Bob, so now Bob has one assessment with a good grade
         peer_api.get_submission_to_assess(tim_sub['uuid'], tim['student_id'])
         peer_api.create_assessment(
             tim_sub['uuid'],
             tim['student_id'],
-            ASSESSMENT_DICT['options_selected'],
-            ASSESSMENT_DICT['criterion_feedback'],
-            ASSESSMENT_DICT['overall_feedback'],
+            ASSESSMENT_DICT_PASS['options_selected'],
+            ASSESSMENT_DICT_PASS['criterion_feedback'],
+            ASSESSMENT_DICT_PASS['overall_feedback'],
             RUBRIC_DICT,
             required_graded_by
         )
@@ -1506,27 +1504,24 @@ class TestPeerApi(CacheResetTest):
             required_graded_by
         )

-        # Sue grades the only person she hasn't graded yet (Bob)
+        # Sue grades the only person she hasn't graded yet (Bob), with a failing grade
         peer_api.get_submission_to_assess(sue_sub['uuid'], sue['student_id'])
         peer_api.create_assessment(
             sue_sub['uuid'],
             sue['student_id'],
-            ASSESSMENT_DICT['options_selected'],
-            ASSESSMENT_DICT['criterion_feedback'],
-            ASSESSMENT_DICT['overall_feedback'],
+            ASSESSMENT_DICT_FAIL['options_selected'],
+            ASSESSMENT_DICT_FAIL['criterion_feedback'],
+            ASSESSMENT_DICT_FAIL['overall_feedback'],
             RUBRIC_DICT,
             required_graded_by
         )

         # This used to create a second assessment,
         # which was the bug.
-        peer_api.get_score(bob_sub['uuid'], requirements)
+        score = peer_api.get_score(bob_sub['uuid'], requirements)

-        # Get the assessments used to generate the score
-        # Only the first assessment should be used
-        scored_assessments = peer_api.get_assessments(bob_sub['uuid'], scored_only=True)
-        self.assertEqual(len(scored_assessments), 1)
-        self.assertEqual(scored_assessments[0]['scorer_id'], tim['student_id'])
+        # Verify that only the first assessment was used to generate the score
+        self.assertEqual(score['points_earned'], 14)

     @raises(peer_api.PeerAssessmentInternalError)
     def test_create_assessment_database_error(self):
The remaining source diffs in this merge could not be displayed because they are too large; view the blobs instead.