Commit 3da5e45f by Stephen Sanchez

Merge pull request #26 from edx/sanchez/workflow_api

WIP: Outlining the API intended for Peer Grading Workflow.
parents 5f0dd7a3 ddbc8775

Workflow related methods.

def create_evaluation(submission_id, score, rubric):
def get_evaluations(submission_id):
def get_submission_to_evaluate(student_item, scorer_student_id):

The only weird thing here would be that the XBlock needs to pass in a lot of
information about problem constraints. This could happen at either query time
or we could have it trigger on save in the studio/authoring view (though that
might be tricky with XML import based courses).

(There would be a lot more here.)
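For concreteness, one way the query-time option mentioned above could look from the caller's side. This is a hypothetical sketch only: the requirements dict, its keys, and a second parameter on get_submission_to_evaluate are not part of this commit.

# Hypothetical sketch: the XBlock packages its problem constraints and hands
# them to the peer API when asking for a submission to evaluate.
student_item_dict = dict(
    student_id="Bob",
    course_id="course_1",
    item_id="item_1",
    item_type="peer_assessment",
)
requirements = dict(
    must_grade=3,            # evaluations this student must complete
    must_be_graded_by=3,     # evaluations each submission must receive
)
# e.g. api.get_submission_to_evaluate(student_item_dict, requirements)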
"""Public interface managing the workflow for peer assessments.

The Peer Evaluation Workflow API exposes all public actions required to complete
the workflow for a given submission.

"""
import copy
import logging

from django.db import DatabaseError

from openassessment.peer.models import PeerEvaluation
from openassessment.peer.serializers import PeerEvaluationSerializer
from submissions.models import Submission, StudentItem
from submissions.serializers import SubmissionSerializer

logger = logging.getLogger(__name__)
PEER_TYPE = "PE"
class PeerEvaluationError(Exception):
"""Generic Peer Evaluation Error
Raised when an error occurs while processing a request related to the
Peer Evaluation Workflow.
"""
pass
class PeerEvaluationRequestError(PeerEvaluationError):
"""Error indicating insufficient or incorrect parameters in the request.
Raised when the request does not contain enough information, or contains
incorrect information, preventing the request from being processed.
"""
def __init__(self, field_errors):
Exception.__init__(self, repr(field_errors))
self.field_errors = copy.deepcopy(field_errors)
class PeerEvaluationWorkflowError(PeerEvaluationError):
"""Error indicating a step in the workflow cannot be completed,
Raised when the action taken cannot be completed in the workflow. This can
occur based on parameters specific to the Submission, User, or Peer Scorers.
"""
pass
class PeerEvaluationInternalError(PeerEvaluationError):
"""Error indicating an internal problem independent of API use.
Raised when an internal error has occurred. This should be independent of
the actions or parameters given to the API.
"""
pass
def create_evaluation(submission_uuid, scorer_id, assessment_dict,
scored_at=None):
"""Creates an evaluation on the given submission.
Evaluations are created based on feedback associated with a particular
rubric.
Args:
submission_uuid (str): The submission uuid this assessment is associated
with. The submission uuid is required and must already exist in the
Submission model.
scorer_id (str): The user ID for the user giving this assessment. This
is required to create an assessment on a submission.
assessment_dict (dict): All related information for the assessment. An
assessment contains points_earned, points_possible, and feedback.
scored_at (datetime): Optional argument to override the time in which
the evaluation took place. If not specified, scored_at is set to
now.
Returns:
dict: The dictionary representing the evaluation. This includes the
points earned, points possible, time scored, scorer id, score type,
and feedback.
Raises:
PeerEvaluationRequestError: Raised when the submission_id is invalid, or
the assessment_dict does not contain the required values to create
an assessment.
PeerEvaluationInternalError: Raised when there is an internal error
while creating a new evaluation.
Examples:
>>> assessment_dict = dict(
>>> points_earned=[1, 0, 3, 2],
>>> points_possible=12,
>>> feedback="Your submission was thrilling.",
>>> )
>>> create_evaluation("1", "Tim", assessment_dict)
{
'points_earned': 6,
'points_possible': 12,
'scored_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 649284, tzinfo=<UTC>),
'scorer_id': u"Tim",
'feedback': u'Your submission was thrilling.'
}
"""
try:
submission = Submission.objects.get(uuid=submission_uuid)
peer_evaluation = {
"scorer_id": scorer_id,
"submission": submission.pk,
"points_earned": sum(assessment_dict["points_earned"]),
"points_possible": assessment_dict["points_possible"],
"score_type": PEER_TYPE,
"feedback": assessment_dict["feedback"],
}
if scored_at:
peer_evaluation["scored_at"] = scored_at
peer_serializer = PeerEvaluationSerializer(data=peer_evaluation)
if not peer_serializer.is_valid():
raise PeerEvaluationRequestError(peer_serializer.errors)
peer_serializer.save()
return peer_serializer.data
except DatabaseError:
error_message = u"An error occurred while creating evaluation {} for submission: {} by: {}".format(
assessment_dict,
submission_uuid,
scorer_id
)
logger.exception(error_message)
raise PeerEvaluationInternalError(error_message)
def has_finished_required_evaluating(student_id, required_evaluations):
"""Check if a student still needs to evaluate more submissions
Per the contract of the peer assessment workflow, a student must evaluate a
number of peers before receiving feedback on their submission.
Args:
student_id (str): The student in the peer grading workflow to check for
peer workflow criteria. This argument is required.
required_evaluations (int): The number of evaluations a student has to
submit before receiving the feedback on their submission. This is a
required argument.
Returns:
bool: True if the student has evaluated enough peer submissions to move
through the peer assessment workflow. False if the student needs to
evaluate more peer submissions.
Raises:
PeerEvaluationRequestError: Raised when the student_id is invalid, or
the required_evaluations is not a positive integer.
PeerEvaluationInternalError: Raised when there is an internal error
while evaluating this workflow rule.
Examples:
>>> has_finished_required_evaluating("Tim", 3)
True
"""
if required_evaluations < 0:
raise PeerEvaluationRequestError(
"Required Evaluation count must be a positive integer.")
return PeerEvaluation.objects.filter(
scorer_id=student_id
).count() >= required_evaluations
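As an illustration of how a caller might use this rule, here is a hypothetical gate; the REQUIRED_EVALUATIONS constant and the helper are not part of this commit and would come from problem configuration.

# Hypothetical caller-side gate: only reveal peer feedback once the student
# has completed the required number of evaluations.
from openassessment.peer import api

REQUIRED_EVALUATIONS = 3  # illustrative value

def can_see_feedback(student_id):
    return api.has_finished_required_evaluating(student_id, REQUIRED_EVALUATIONS)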
def get_evaluations(submission_id):
"""Retrieve the evaluations for a submission.
Retrieves all the evaluations for a submission. This API returns related
feedback without making any assumptions about grading. Any outstanding
evaluations associated with this submission will not be returned.
Args:
submission_id (str): The submission all the requested evaluations are
associated with. Required.
Returns:
list(dict): A list of dictionaries, where each dictionary represents a
separate evaluation. Each evaluation contains points earned, points
possible, time scored, scorer id, score type, and feedback.
Raises:
PeerEvaluationRequestError: Raised when the submission_id is invalid.
PeerEvaluationInternalError: Raised when there is an internal error
while retrieving the evaluations associated with this submission.
Examples:
>>> get_evaluations("1")
[
{
'points_earned': 6,
'points_possible': 12,
'scored_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 649284, tzinfo=<UTC>),
'scorer_id': u"Tim",
'feedback': u'Your submission was thrilling.'
},
{
'points_earned': 11,
'points_possible': 12,
'scored_at': datetime.datetime(2014, 1, 31, 14, 10, 17, 544214, tzinfo=<UTC>),
'scorer_id': u"Bob",
'feedback': u'Great submission.'
}
]
"""
try:
submission = Submission.objects.get(uuid=submission_id)
evaluations = PeerEvaluation.objects.filter(submission=submission)
serializer = PeerEvaluationSerializer(evaluations, many=True)
return serializer.data
except DatabaseError:
error_message = (
u"Error getting evaluations for submission {}".format(submission_id)
)
logger.exception(error_message)
raise PeerEvaluationInternalError(error_message)
def get_submission_to_evaluate(student_item_dict):
"""Get a submission to peer evaluate.
Retrieves a submission for evaluation for the given student_item. This will
not return a submission submitted by the requesting scorer. The submission
returned is (TODO: will be) based on a priority queue: submissions with the
fewest evaluations, from the most active evaluators, are prioritized over
submissions from students who are less active in the evaluation process.
Args:
student_item_dict (dict): The student item information from the student
requesting a submission for evaluation. The dict contains an
item_id, course_id, and item_type, used to identify the unique
question for the review, while the student_id is used to explicitly
avoid giving the student their own submission.
Returns:
dict: A peer submission for evaluation. This contains a 'student_item',
'attempt_number', 'submitted_at', 'created_at', and 'answer' field to be
used for evaluation.
Raises:
PeerEvaluationRequestError: Raised when the request parameters are
invalid for the request.
PeerEvaluationInternalError:
PeerEvaluationWorkflowError:
Examples:
>>> student_item_dict = dict(
>>> item_id="item_1",
>>> course_id="course_1",
>>> item_type="type_one",
>>> student_id="Bob",
>>> )
>>> get_submission_to_evaluate(student_item_dict)
{
'student_item': 2,
'attempt_number': 1,
'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
'answer': u'The answer is 42.'
}
"""
student_items = StudentItem.objects.filter(
course_id=student_item_dict["course_id"],
item_id=student_item_dict["item_id"],
).exclude(student_id=student_item_dict["student_id"])
# TODO: We need a priority queue.
submission = Submission.objects.filter(student_item__in=student_items).order_by(
"submitted_at",
"-attempt_number")[:1]
if not submission:
raise PeerEvaluationWorkflowError(
"There are no submissions available for evaluation."
)
return SubmissionSerializer(submission[0]).data
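The prioritization the TODO above refers to could eventually look something like the following. This is a hedged sketch assuming "fewest evaluations first" is implemented with a count annotation on the reverse PeerEvaluation relation; it is not what this commit does, and it ignores the "most active students" part of the rule.

# Hypothetical replacement for the order_by above: prefer submissions that
# have received the fewest peer evaluations, oldest submissions first.
from django.db.models import Count

submission = Submission.objects.filter(
    student_item__in=student_items
).annotate(
    num_evaluations=Count("peerevaluation")
).order_by(
    "num_evaluations",
    "submitted_at",
)[:1]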
@@ -6,9 +6,11 @@ be a lot here, like rubrics and such.
from django.db import models
from django.utils.timezone import now
from submissions.models import Submission
class PeerEvaluation(models.Model):
- # submission = models.ForeignKey(Submission)
+ submission = models.ForeignKey(Submission)
points_earned = models.PositiveIntegerField(default=0)
points_possible = models.PositiveIntegerField(default=0)
scored_at = models.DateTimeField(default=now, db_index=True)
@@ -16,3 +18,16 @@ class PeerEvaluation(models.Model):
score_type = models.CharField(max_length=2)
feedback = models.TextField(max_length=10000, default="")
def __repr__(self):
return repr(dict(
submission=self.submission,
points_earned=self.points_earned,
points_possible=self.points_possible,
scored_at=self.scored_at,
scorer_id=self.scorer_id,
score_type=self.score_type,
feedback=self.feedback,
))
class Meta:
ordering = ["-scored_at"]
"""
Serializers are created to ensure models do not have to be accessed outside the
scope of the Tim APIs.
"""
from rest_framework import serializers
from openassessment.peer.models import PeerEvaluation
class PeerEvaluationSerializer(serializers.ModelSerializer):
class Meta:
model = PeerEvaluation
fields = (
'submission',
'points_earned',
'points_possible',
'scored_at',
'scorer_id',
'score_type',
'feedback',
)
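As a quick usage sketch of that boundary (illustrative only; it assumes at least one PeerEvaluation row exists and uses the imports above), callers receive plain serialized data rather than model instances:

# Illustrative only: the serializer hands back a plain dict, so PeerEvaluation
# instances never need to leak outside the peer API.
latest = PeerEvaluation.objects.latest("scored_at")
data = PeerEvaluationSerializer(latest).data
print(data["points_earned"], data["feedback"])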
from ddt import ddt, file_data
from django.db import DatabaseError
import pytz
import datetime
from django.test import TestCase
from nose.tools import raises
from mock import patch
from openassessment.peer import api
from openassessment.peer.models import PeerEvaluation
from submissions.api import create_submission
from submissions.models import Submission
from submissions.tests.test_api import STUDENT_ITEM, ANSWER_ONE
ASSESSMENT_DICT = dict(
points_earned=[1, 0, 3, 2],
points_possible=12,
feedback="Your submission was thrilling.",
)
MONDAY = datetime.datetime(2007, 9, 12, 0, 0, 0, 0, pytz.UTC)
TUESDAY = datetime.datetime(2007, 9, 13, 0, 0, 0, 0, pytz.UTC)
WEDNESDAY = datetime.datetime(2007, 9, 15, 0, 0, 0, 0, pytz.UTC)
THURSDAY = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC)
@ddt
class TestApi(TestCase):
def test_create_evaluation(self):
submission = create_submission(STUDENT_ITEM, ANSWER_ONE)
evaluation = api.create_evaluation(
submission["uuid"],
STUDENT_ITEM["student_id"],
ASSESSMENT_DICT
)
self._assert_evaluation(evaluation, **ASSESSMENT_DICT)
@file_data('test_valid_evaluations.json')
def test_get_evaluations(self, assessment_dict):
submission = create_submission(STUDENT_ITEM, ANSWER_ONE)
api.create_evaluation(
submission["uuid"],
STUDENT_ITEM["student_id"],
assessment_dict
)
evaluations = api.get_evaluations(submission["uuid"])
self.assertEqual(1, len(evaluations))
self._assert_evaluation(evaluations[0], **assessment_dict)
@file_data('test_valid_evaluations.json')
def test_get_evaluations_with_date(self, assessment_dict):
submission = create_submission(STUDENT_ITEM, ANSWER_ONE)
api.create_evaluation(
submission["uuid"],
STUDENT_ITEM["student_id"],
assessment_dict,
MONDAY
)
evaluations = api.get_evaluations(submission["uuid"])
self.assertEqual(1, len(evaluations))
self._assert_evaluation(evaluations[0], **assessment_dict)
self.assertEqual(evaluations[0]["scored_at"], MONDAY)
def test_student_finished_evaluating(self):
bob = self._create_student_and_submission("Bob", "Bob's answer")
sally = self._create_student_and_submission("Sally", "Sally's answer")
jim = self._create_student_and_submission("Jim", "Jim's answer")
self.assertFalse(api.has_finished_required_evaluating("Tim", 3))
api.create_evaluation(bob["uuid"], "Tim", ASSESSMENT_DICT)
api.create_evaluation(sally["uuid"], "Tim", ASSESSMENT_DICT)
self.assertFalse(api.has_finished_required_evaluating("Tim", 3))
api.create_evaluation(jim["uuid"], "Tim", ASSESSMENT_DICT)
self.assertTrue(api.has_finished_required_evaluating("Tim", 3))
@raises(api.PeerEvaluationRequestError)
def test_bad_configuration(self):
api.has_finished_required_evaluating("Tim", -1)
def test_get_submission_to_evaluate(self):
self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
self._create_student_and_submission("Bob", "Bob's answer", TUESDAY)
self._create_student_and_submission(
"Sally", "Sally's answer", WEDNESDAY
)
self._create_student_and_submission("Jim", "Jim's answer", THURSDAY)
submission = api.get_submission_to_evaluate(STUDENT_ITEM)
self.assertIsNotNone(submission)
self.assertEqual(submission["answer"], u"Bob's answer")
self.assertEqual(submission["student_item"], 2)
self.assertEqual(submission["attempt_number"], 1)
@raises(api.PeerEvaluationWorkflowError)
def test_no_submissions_to_evaluate_for_tim(self):
self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
api.get_submission_to_evaluate(STUDENT_ITEM)
"""
Some Error Checking Tests against DB failures.
"""
@patch.object(Submission.objects, 'get')
@raises(api.PeerEvaluationInternalError)
def test_error_on_evaluation_creation(self, mock_get):
mock_get.side_effect = DatabaseError("Bad things happened")
submission = create_submission(STUDENT_ITEM, ANSWER_ONE)
api.create_evaluation(
submission["uuid"],
STUDENT_ITEM["student_id"],
ASSESSMENT_DICT,
MONDAY
)
@patch.object(PeerEvaluation.objects, 'filter')
@raises(api.PeerEvaluationInternalError)
def test_error_on_get_evaluation(self, mock_filter):
submission = create_submission(STUDENT_ITEM, ANSWER_ONE)
api.create_evaluation(
submission["uuid"],
STUDENT_ITEM["student_id"],
ASSESSMENT_DICT,
MONDAY
)
mock_filter.side_effect = DatabaseError("Bad things happened")
api.get_evaluations(submission["uuid"])
@staticmethod
def _create_student_and_submission(student, answer, date=None):
new_student_item = STUDENT_ITEM.copy()
new_student_item["student_id"] = student
return create_submission(new_student_item, answer, date)
def _assert_evaluation(self, evaluation, points_earned, points_possible,
feedback):
self.assertIsNotNone(evaluation)
self.assertEqual(evaluation["points_earned"], sum(points_earned))
self.assertEqual(evaluation["points_possible"], points_possible)
self.assertEqual(evaluation["feedback"], feedback)
\ No newline at end of file
{
"unicode_evaluation": {
"points_earned": [10, 0, 24, 36],
"points_possible": 12,
"feedback": "这是中国"
},
"basic_evaluation": {
"points_earned": [1, 0, 3, 2],
"points_possible": 12,
"feedback": "Your submission was thrilling."
}
}
\ No newline at end of file
@@ -129,7 +129,7 @@ def create_submission(student_item_dict, answer, submitted_at=None,
u"Submission answer could not be properly decoded to unicode.")
model_kwargs = {
"student_item": student_item_model,
"student_item": student_item_model.pk,
"answer": answer,
"attempt_number": attempt_number,
}
@@ -137,12 +137,7 @@ def create_submission(student_item_dict, answer, submitted_at=None,
model_kwargs["submitted_at"] = submitted_at
try:
- # Serializer validation requires the student item primary key, rather
- # than the student item model itself. Create a copy of the submission
- # kwargs and replace the student item model with it's primary key.
- validation_data = model_kwargs.copy()
- validation_data["student_item"] = student_item_model.pk
- submission_serializer = SubmissionSerializer(data=validation_data)
+ submission_serializer = SubmissionSerializer(data=model_kwargs)
if not submission_serializer.is_valid():
raise SubmissionRequestError(submission_serializer.errors)
submission_serializer.save()
......
@@ -5,6 +5,7 @@ different problem types, and is therefore ignorant of ORA workflow.
"""
from django.db import models
from django.utils.timezone import now
from django_extensions.db.fields import UUIDField
class StudentItem(models.Model):
@@ -52,6 +53,8 @@ class Submission(models.Model):
because it makes caching trivial.
"""
uuid = UUIDField()
student_item = models.ForeignKey(StudentItem)
# Which attempt is this? Consecutive Submissions do not necessarily have
......
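The "makes caching trivial" remark in the Submission docstring above refers to the uuid being a stable identifier. A hypothetical illustration (none of this caching code is in the commit, and the cache key scheme is invented here):

# Hypothetical illustration: a stable submission uuid can serve directly as a
# cache key for serialized submission data.
from django.core.cache import cache
from submissions.models import Submission
from submissions.serializers import SubmissionSerializer

def get_submission_cached(submission_uuid):
    key = "submissions.submission.{}".format(submission_uuid)
    data = cache.get(key)
    if data is None:
        submission = Submission.objects.get(uuid=submission_uuid)
        data = SubmissionSerializer(submission).data
        cache.set(key, data)
    return data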
"""
- Serializers are created to ensure models do not have to be accessed outside the scope of the Tim APIs.
+ Serializers are created to ensure models do not have to be accessed outside the
+ scope of the Tim APIs.
"""
from rest_framework import serializers
from submissions.models import StudentItem, Submission, Score
@@ -14,7 +15,14 @@ class StudentItemSerializer(serializers.ModelSerializer):
class SubmissionSerializer(serializers.ModelSerializer):
class Meta:
model = Submission
- fields = ('student_item', 'attempt_number', 'submitted_at', 'created_at', 'answer')
+ fields = (
+     'uuid',
+     'student_item',
+     'attempt_number',
+     'submitted_at',
+     'created_at',
+     'answer'
+ )
class ScoreSerializer(serializers.ModelSerializer):
......
@@ -5,6 +5,7 @@ from django.db import DatabaseError
from django.test import TestCase
from nose.tools import raises
from mock import patch
import pytz
from submissions.api import create_submission, get_submissions, SubmissionRequestError, SubmissionInternalError
from submissions.models import Submission
@@ -62,8 +63,8 @@ class TestApi(TestCase):
self._assert_submission(submission, ANSWER_ONE, 1, 1)
def test_get_latest_submission(self):
- past_date = datetime.date(2007, 11, 23)
- more_recent_date = datetime.date(2011, 10, 15)
+ past_date = datetime.datetime(2007, 9, 12, 0, 0, 0, 0, pytz.UTC)
+ more_recent_date = datetime.datetime(2007, 9, 13, 0, 0, 0, 0, pytz.UTC)
create_submission(STUDENT_ITEM, ANSWER_ONE, more_recent_date)
create_submission(STUDENT_ITEM, ANSWER_TWO, past_date)
......
@@ -24,6 +24,8 @@ Submissions
Peer Assessment
***************
.. automodule:: openassessment.peer.api
:members:
Django Apps
-----------
......
@@ -7,3 +7,4 @@ django==1.4.8
django-extensions==1.3.3
djangorestframework==2.3.5
Mako==0.9.1
pytz==2013.9