Commit 0b717bd5 by Stephen Sanchez

First pass at creating the Peer Workflow appropriately.

Wiring all of the peer queue into place

Updating based on rebase to fix tests
parent 6bf500c3
from django.contrib import admin

-from openassessment.assessment.models import Assessment, AssessmentPart, Rubric, Criterion, CriterionOption
from openassessment.assessment.models import Assessment, AssessmentPart, Rubric, Criterion, CriterionOption, PeerWorkflow, PeerWorkflowItem

admin.site.register(Assessment)
admin.site.register(AssessmentPart)
admin.site.register(Rubric)
admin.site.register(Criterion)
admin.site.register(CriterionOption)
admin.site.register(PeerWorkflow)
admin.site.register(PeerWorkflowItem)
@@ -224,7 +224,7 @@ class Assessment(models.Model):
feedback = models.TextField(max_length=10000, default="", blank=True)

class Meta:
-ordering = ["-scored_at"]
ordering = ["-scored_at", "-id"]

@property
def points_earned(self):

@@ -365,3 +365,67 @@ class AssessmentPart(models.Model):
@property
def points_possible(self):
return self.option.criterion.points_possible
class PeerWorkflow(models.Model):
"""Internal Model for tracking Peer Assessment Workflow
This model can be used to determine the following information required
throughout the Peer Assessment Workflow:
1) Get next submission that requires assessment.
2) Does a submission have enough assessments?
3) Has a student completed enough assessments?
4) Does a student already have a submission open for assessment?
5) Close open assessments when completed.
6) Should 'over grading' be allowed for a submission?
The student item is the author of the submission. Peer Workflow Items are
created for each assessment made by this student.
"""
student_id = models.CharField(max_length=40, db_index=True)
item_id = models.CharField(max_length=128, db_index=True)
course_id = models.CharField(max_length=40, db_index=True)
submission_uuid = models.CharField(max_length=128, db_index=True, unique=True)
created_at = models.DateTimeField(default=now, db_index=True)
class Meta:
ordering = ["created_at", "id"]
def __repr__(self):
return (
"PeerWorkflow(student_id={0.student_id}, item_id={0.item_id}, "
"course_id={0.course_id}, submission_uuid={0.submission_uuid})"
"created_at={0.created_at}"
).format(self)
def __unicode__(self):
return repr(self)
class PeerWorkflowItem(models.Model):
"""Represents an assessment associated with a particular workflow
Created every time a submission is requested for peer assessment. The
associated workflow represents the scorer of the given submission, and the
assessment represents the completed assessment for this work item.
"""
scorer_id = models.ForeignKey(PeerWorkflow, related_name='items')
submission_uuid = models.CharField(max_length=128, db_index=True)
started_at = models.DateTimeField(default=now, db_index=True)
assessment = models.IntegerField(default=-1)
class Meta:
ordering = ["started_at", "id"]
def __repr__(self):
return (
"PeerWorkflowItem(scorer_id={0.scorer_id}, "
"submission_uuid={0.submission_uuid}, "
"started_at={0.started_at}, assessment={0.assessment})"
).format(self)
def __unicode__(self):
return repr(self)
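The two models above are the bookkeeping core of the queue: one PeerWorkflow row per submission, plus a PeerWorkflowItem for every submission a scorer pulls for grading. A minimal, illustrative sketch of reading them together (not part of this commit; the helper name is hypothetical, and the 8-hour window mirrors the TIME_LIMIT defined in the API module below):

from datetime import timedelta
from django.utils.timezone import now
from openassessment.assessment.models import PeerWorkflow

def open_items_for(student_id, course_id, item_id, time_limit=timedelta(hours=8)):
    """Return the submission uuids this student has pulled but not yet assessed."""
    workflows = PeerWorkflow.objects.filter(
        student_id=student_id, course_id=course_id, item_id=item_id
    ).order_by("-created_at", "-id")
    if not workflows:
        return []
    # assessment == -1 marks an item that is still open; anything older than the
    # time limit is treated as abandoned and can be handed to another scorer.
    cutoff = now() - time_limit
    return [
        item.submission_uuid
        for item in workflows[0].items.filter(assessment=-1, started_at__gt=cutoff)
    ]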
@@ -6,20 +6,23 @@ the workflow for a given submission.
"""
import copy
import logging
from datetime import datetime, timedelta

from django.utils.translation import ugettext as _
from django.db import DatabaseError
from django.db.models import Q
from pytz import UTC

-from openassessment.assessment.models import Assessment, InvalidOptionSelection
from openassessment.assessment.models import Assessment, InvalidOptionSelection, PeerWorkflow, PeerWorkflowItem
from openassessment.assessment.serializers import (
AssessmentSerializer, rubric_from_dict, get_assessment_review)
-from submissions import api as submission_api
-from submissions.models import Submission, StudentItem, Score
from submissions.models import Submission, StudentItem
from submissions.serializers import SubmissionSerializer, StudentItemSerializer

logger = logging.getLogger(__name__)

PEER_TYPE = "PE"
TIME_LIMIT = timedelta(hours=8)
class PeerAssessmentError(Exception):

@@ -66,12 +69,11 @@ class PeerAssessmentInternalError(PeerAssessmentError):
def is_complete(submission_uuid, requirements):
-submission = Submission.objects.get(uuid=submission_uuid)
-finished_evaluating, _count = has_finished_required_evaluating(
-StudentItemSerializer(submission.student_item).data,
-requirements["must_grade"]
-)
-return finished_evaluating
try:
workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
except PeerWorkflow.DoesNotExist:
return False
return _check_student_done_grading(workflow, requirements["must_grade"])

def get_score(submission_uuid, requirements):
@@ -80,8 +82,8 @@ def get_score(submission_uuid, requirements):
return None

submission = Submission.objects.get(uuid=submission_uuid)
-assessments = Assessment.objects.filter(submission=submission)
assessments = Assessment.objects.filter(submission=submission, score_type=PEER_TYPE)
-submission_finished = assessments.count() >= requirements["must_be_graded_by"]
submission_finished = _check_submission_graded(submission_uuid, requirements["must_be_graded_by"])
if not submission_finished:
return None

@@ -95,11 +97,10 @@ def get_score(submission_uuid, requirements):
"points_possible": assessments[0].points_possible,
}
def create_assessment(
submission_uuid,
scorer_id,
-must_grade,
-must_be_graded_by,
assessment_dict,
rubric_dict,
scored_at=None):
@@ -114,10 +115,6 @@ def create_assessment(
Submission model.
scorer_id (str): The user ID for the user giving this assessment. This
is required to create an assessment on a submission.
-must_grade (int): The number of assessments
-required for the student to receive a score for their submission.
-must_be_graded_by (int): The number of assessments
-required on the submission for it to be scored.
assessment_dict (dict): All related information for the assessment. An
assessment contains points_earned, points_possible, and feedback.
@@ -141,11 +138,10 @@ def create_assessment(
>>> options_selected={"clarity": "Very clear", "precision": "Somewhat precise"},
>>> feedback="Your submission was thrilling.",
>>> )
->>> create_assessment("1", "Tim", 3, 2, assessment_dict, rubric_dict)
>>> create_assessment("1", "Tim", assessment_dict, rubric_dict)
"""
try:
submission = Submission.objects.get(uuid=submission_uuid)
-student_item = submission.student_item
rubric = rubric_from_dict(rubric_dict)

# Validate that the selected options matched the rubric
@@ -156,19 +152,6 @@ def create_assessment(
msg = _("Selected options do not match the rubric: {error}").format(error=ex.message)
raise PeerAssessmentRequestError(msg)
-# Check if the grader has even submitted an answer themselves...
-try:
-scorer_item = StudentItem.objects.get(
-student_id=scorer_id,
-item_id=student_item.item_id,
-course_id=student_item.course_id,
-item_type=student_item.item_type
-)
-except StudentItem.DoesNotExist:
-raise PeerAssessmentWorkflowError(
-_("You must make a submission before assessing another student")
-)
feedback = assessment_dict.get('feedback', u'')
peer_assessment = {
"rubric": rubric.id,
@@ -186,49 +169,48 @@
if not peer_serializer.is_valid():
raise PeerAssessmentRequestError(peer_serializer.errors)
-peer_serializer.save()
assessment = peer_serializer.save()
student_item = submission.student_item
student_item_dict = StudentItemSerializer(student_item).data
try:
scorer_item = StudentItem.objects.get(
student_id=scorer_id,
item_id=student_item.item_id,
course_id=student_item.course_id,
item_type=student_item.item_type
)
except StudentItem.DoesNotExist:
raise PeerAssessmentWorkflowError(_(
"You must make a submission before assessing another student."))
scorer_item_dict = StudentItemSerializer(scorer_item).data
scorer_workflow = _get_latest_workflow(scorer_item_dict)
workflow = _get_latest_workflow(student_item_dict)
if not scorer_workflow:
raise PeerAssessmentWorkflowError(_(
"You must make a submission before assessing another student."))
if not workflow:
raise PeerAssessmentWorkflowError(_(
"The submission you reviewed is not in the peer workflow. This "
"assessment cannot be submitted unless the associated "
"submission came from the peer workflow."))
# Close the active assessment
_close_active_assessment(scorer_workflow, submission_uuid, assessment)
return peer_serializer.data
except DatabaseError:
-error_message = u"An error occurred while creating assessment {} for submission: {} by: {}".format(
-assessment_dict,
-submission_uuid,
-scorer_id
error_message = _(
u"An error occurred while creating assessment {} for submission: "
u"{} by: {}"
.format(assessment_dict, submission_uuid, scorer_id)
)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
-def _score_if_finished(student_item,
-submission,
-required_assessments_for_student,
-must_be_graded_by):
-"""Calculate final grade iff peer evaluation flow is satisfied.
-Checks if the student is finished with the peer assessment workflow. If the
-student already has a final grade calculated, there is no need to proceed.
-If they do not have a grade, the student has a final grade calculated.
-"""
-if Score.objects.filter(student_item=student_item):
-return
-finished_evaluating = has_finished_required_evaluating(
-StudentItemSerializer(student_item).data,
-required_assessments_for_student
-)
-assessments = Assessment.objects.filter(submission=submission, score_type=PEER_TYPE)
-submission_finished = assessments.count() >= must_be_graded_by
-if finished_evaluating and submission_finished:
-submission_api.set_score(
-StudentItemSerializer(student_item).data,
-SubmissionSerializer(submission).data,
-sum(get_assessment_median_scores(submission.uuid, must_be_graded_by).values()),
-assessments[0].points_possible
-)
def get_assessment_median_scores(submission_id, must_be_graded_by):
"""Get the median score for each rubric criterion
@@ -266,7 +248,7 @@ def get_assessment_median_scores(submission_id, must_be_graded_by):
scores = Assessment.scores_by_criterion(submission, must_be_graded_by)
return Assessment.get_median_score_dict(scores)
except DatabaseError:
-error_message = (
error_message = _(
u"Error getting assessment median scores {}".format(submission_id)
)
logger.exception(error_message)

@@ -311,22 +293,13 @@ def has_finished_required_evaluating(student_item_dict, required_assessments):
True, 3
"""
-if required_assessments < 0:
-raise PeerAssessmentRequestError(
-"Required Assessment count must be a positive integer.")
-student_items = StudentItem.objects.filter(
-item_id=student_item_dict["item_id"],
-course_id=student_item_dict["course_id"]
-)
-submissions = Submission.objects.filter(
-student_item__in=student_items
-)
-count = Assessment.objects.filter(
-submission__in=submissions,
-scorer_id=student_item_dict["student_id"],
-score_type=PEER_TYPE
-).count()
-return count >= required_assessments, count
workflow = _get_latest_workflow(student_item_dict)
done = False
count = 0
if workflow:
done = _check_student_done_grading(workflow, required_assessments)
count = workflow.items.all().count()
return done, count
def get_assessments(submission_id):
@@ -374,21 +347,26 @@ def get_assessments(submission_id):
submission = Submission.objects.get(uuid=submission_id)
return get_assessment_review(submission)
except DatabaseError:
-error_message = (
error_message = _(
u"Error getting assessments for submission {}".format(submission_id)
)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
-def get_submission_to_assess(student_item_dict, required_num_assessments):
def get_submission_to_assess(
student_item_dict,
graded_by,
over_grading=False):
"""Get a submission to peer evaluate.

Retrieves a submission for assessment for the given student_item. This will
-not return a submission submitted by the requesting scorer. The submission
-returned (TODO: will be) is based on a priority queue. Submissions with the
-fewest assessments and the most active students will be prioritized over
-submissions from students who are not as active in the assessment process.
not return a submission submitted by the requesting scorer. Submissions are
returned based on how many assessments are still required, and if there are
peers actively assessing a particular submission. If there are no
submissions requiring assessment, a submission may be returned that will be
'over graded', and the assessment will not be counted towards the overall
grade.

Args:
student_item_dict (dict): The student item information from the student
@@ -396,8 +374,12 @@ def get_submission_to_assess(student_item_dict, required_num_assessments):
item_id, course_id, and item_type, used to identify the unique
question for the review, while the student_id is used to explicitly
avoid giving the student their own submission.
-required_num_assessments (int): The number of assessments a submission
graded_by (int): The number of assessments a submission
requires before it has completed the peer assessment process.
over_grading (bool): Allows over grading to be performed if no submission
requires assessments. Over grading should only occur if the deadline
for submissions has passed, but there is still a window for peer
assessment. Defaults to False.

Returns:
dict: A peer submission for assessment. This contains a 'student_item',
@@ -407,8 +389,11 @@ def get_submission_to_assess(student_item_dict, required_num_assessments):

Raises:
PeerAssessmentRequestError: Raised when the request parameters are
invalid for the request.
-PeerAssessmentInternalError:
-PeerAssessmentWorkflowError:
PeerAssessmentInternalError: Raised when there is an internal error
retrieving peer workflow information.
PeerAssessmentWorkflowError: Raised when an error occurs because this
function, or the student item, is not in the proper workflow state
to retrieve a peer submission.

Examples:
>>> student_item_dict = dict(
@@ -426,37 +411,447 @@ def get_submission_to_assess(student_item_dict, required_num_assessments):
'answer': u'The answer is 42.'
}
"""
workflow = _get_latest_workflow(student_item_dict)
if not workflow:
raise PeerAssessmentWorkflowError(_(
u"A Peer Assessment Workflow does not exist for the specified "
u"student."))
submission_uuid = _find_active_assessments(workflow)
# If there is an active assessment for this user, get that submission,
# otherwise, get the first assessment for review, otherwise, if over grading
# is turned on, get the first submission available for over grading.
if submission_uuid is None:
submission_uuid = _get_submission_for_review(workflow, graded_by)
if submission_uuid is None and over_grading:
submission_uuid = _get_submission_for_over_grading(workflow)
if submission_uuid:
try:
submission = Submission.objects.get(uuid=submission_uuid)
_create_peer_workflow_item(workflow, submission_uuid)
return SubmissionSerializer(submission).data
except Submission.DoesNotExist:
error_message = _(
u"Could not find a submission with the uuid {} for student {} "
u"in the peer workflow."
.format(submission_uuid, student_item_dict)
)
logger.exception(error_message)
raise PeerAssessmentWorkflowError(error_message)
else:
return None
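# NOTE (editorial sketch, not part of this commit): the selection order implemented
# above is (1) a submission this scorer already has checked out and has not finished,
# (2) the next submission that still requires assessment and is not fully checked out
# by other scorers, and (3) only when over_grading is enabled, any other peer
# submission, whose extra assessment is not counted toward the grade.
# A hypothetical caller would use it roughly like this:
#
#   submission = get_submission_to_assess(student_item_dict, graded_by=3, over_grading=True)
#   if submission is None:
#       pass  # nothing to grade yet; the student waits for peers to submit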
def create_peer_workflow(submission_uuid):
"""Create a new peer workflow for a student item and submission.
Creates a unique peer workflow for a student item, associated with a
submission.
Args:
submission_uuid (str): The submission associated with this workflow.
Returns:
Workflow (PeerWorkflow): A PeerWorkflow item created based on the given
student item and submission.
Raises:
PeerAssessmentInternalError: Raised when there is an internal error
creating the Workflow.
Examples:
>>> create_peer_workflow("1")
"""
try:
submission = Submission.objects.get(uuid=submission_uuid)
workflow, __ = PeerWorkflow.objects.get_or_create(
student_id=submission.student_item.student_id,
course_id=submission.student_item.course_id,
item_id=submission.student_item.item_id,
submission_uuid=submission_uuid
)
return workflow
except DatabaseError:
error_message = _(
u"An internal error occurred while creating a new peer "
u"workflow for submission {}"
.format(submission_uuid)
)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
def _get_latest_workflow(student_item_dict):
"""Given a student item, return the current workflow for this student.
Given a student item, get the most recent workflow for the student.
TODO: API doesn't take in current submission; do we pass that in, or get
the latest workflow item? Currently using "latest".
Args:
student_item_dict (dict): Dictionary representation of a student item.
The most recent workflow associated with this student item is
returned.
Returns:
workflow (PeerWorkflow): The most recent peer workflow associated with
this student item.
Raises:
PeerAssessmentWorkflowError: Thrown when no workflow can be found for
the associated student item. This should always exist before a
student is allowed to request submissions for peer assessment.
Examples:
>>> student_item_dict = dict(
>>> item_id="item_1",
>>> course_id="course_1",
>>> item_type="type_one",
>>> student_id="Bob",
>>> )
>>> workflow = _get_latest_workflow(student_item_dict)
{
'student_id': u'Bob',
'item_id': u'type_one',
'course_id': u'course_1',
'submission_uuid': u'1',
'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>)
}
""" """
student_items = StudentItem.objects.filter( try:
course_id=student_item_dict["course_id"], workflows = PeerWorkflow.objects.filter(
student_id=student_item_dict["student_id"],
item_id=student_item_dict["item_id"], item_id=student_item_dict["item_id"],
).exclude(student_id=student_item_dict["student_id"]) course_id=student_item_dict["course_id"]
).order_by("-created_at", "-id")
return workflows[0] if workflows else None
except DatabaseError:
error_message = _(
u"Error finding workflow for student {}. Workflow must be created "
u"for student before beginning peer assessment."
.format(student_item_dict)
)
logger.exception(error_message)
raise PeerAssessmentWorkflowError(error_message)
def _create_peer_workflow_item(workflow, submission_uuid):
"""Create a new peer workflow for a student item and submission.
Creates a unique peer workflow for a student item, associated with a
submission.
Args:
workflow (PeerWorkflow): The peer workflow associated with the scorer.
submission_uuid (str): The submission associated with this workflow.
Raises:
PeerAssessmentInternalError: Raised when there is an internal error
creating the Workflow.
Examples:
>>> student_item_dict = dict(
>>> item_id="item_1",
>>> course_id="course_1",
>>> item_type="type_one",
>>> student_id="Bob",
>>> )
>>> workflow = _get_latest_workflow(student_item_dict)
>>> _create_peer_workflow_item(workflow, "1")
"""
try:
workflow_item, __ = PeerWorkflowItem.objects.get_or_create(
scorer_id=workflow,
submission_uuid=submission_uuid
)
return workflow_item
except DatabaseError:
error_message = _(
u"An internal error occurred while creating a new peer workflow "
u"item for workflow {}".format(workflow)
)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
def _find_active_assessments(workflow):
"""Given a student item, return an active assessment if one is found.
Before retrieving a new submission for a peer assessor, check to see if that
assessor already has a submission out for assessment. If an unfinished
assessment is found that has not expired, return the associated submission.
TODO: If a user begins an assessment, then resubmits, this will never find
the unfinished assessment. Is this OK?
Args:
workflow (PeerWorkflow): See if there is an associated active assessment
for this PeerWorkflow.
Returns:
submission_uuid (str): The submission_uuid for the submission that the
student has open for active assessment.
Examples:
>>> student_item_dict = dict(
>>> item_id="item_1",
>>> course_id="course_1",
>>> item_type="type_one",
>>> student_id="Bob",
>>> )
>>> workflow = _get_latest_workflow(student_item_dict)
>>> _find_active_assessments(workflow)
"1"
"""
-submission = _get_first_submission_not_evaluated(
-student_items,
-student_item_dict["student_id"],
-required_num_assessments
-)
-if not submission:
-raise PeerAssessmentWorkflowError(
-"There are no submissions available for assessment."
workflows = workflow.items.filter(
assessment=-1,
started_at__gt=datetime.utcnow().replace(tzinfo=UTC) - TIME_LIMIT
)
return workflows[0].submission_uuid if workflows else None
def _get_submission_for_review(workflow, graded_by, over_grading=False):
"""Get the next submission for peer assessment
Find a submission for peer assessment. This function will find the next
submission that requires assessment, excluding any submission that has been
completely graded, or is actively being reviewed by other students.
Args:
workflow (PeerWorkflow): Used to determine the next submission to get
for peer assessment. Iterates over all workflows that have the same
course_id and item_id as the student_item_dict, excluding any
workflow which has the same student_id.
Returns:
submission_uuid (str): The submission_uuid for the submission to review.
Raises:
PeerAssessmentInternalError: Raised when there is an error retrieving
the workflows or workflow items for this request.
Examples:
>>> student_item_dict = dict(
>>> item_id="item_1",
>>> course_id="course_1",
>>> item_type="type_one",
>>> student_id="Bob",
>>> )
>>> workflow = _get_latest_workflow(student_item_dict)
>>> _get_submission_for_review(workflow, 3)
"1"
"""
order = " having count(1) < %s order by pw.created_at, pw.id "
timeout = (datetime.utcnow().replace(tzinfo=UTC) - TIME_LIMIT).strftime("%Y-%m-%d %H:%M:%S")
sub = _get_next_submission(
order,
workflow,
workflow.item_id,
workflow.course_id,
workflow.student_id,
timeout,
graded_by
)
return sub
-)
-return SubmissionSerializer(submission).data
def _get_submission_for_over_grading(workflow):
"""Retrieve the next submission uuid for over grading
Gets the next submission uuid for over grading in peer assessment.
Specifically, this will construct a query that:
1) selects all the peer workflows for the current course and item,
excluding the current student
2) checks all the assessments associated with those workflows, excluding
the current student's assessments, and any workflows connected to them.
3) checks to see if any unfinished assessments are expired
4) Groups all the workflows with their collective assessments
5) Orders them by their total assessments
6) Returns the workflow with the fewest assessments.
"""
order = " order by c, pw.created_at, pw.id "
timeout = (datetime.utcnow().replace(tzinfo=UTC) - TIME_LIMIT).strftime("%Y-%m-%d %H:%M:%S")
return _get_next_submission(
order,
workflow,
workflow.item_id,
workflow.course_id,
workflow.student_id,
timeout
)

-def _get_first_submission_not_evaluated(student_items, student_id, required_num_assessments):
-# TODO: We need a priority queue.
-submissions = Submission.objects.filter(student_item__in=student_items).order_by(
-"submitted_at",
-"-attempt_number"
-)
-for submission in submissions:
-assessments = Assessment.objects.filter(submission=submission, score_type=PEER_TYPE)
-if assessments.count() < required_num_assessments:
-already_evaluated = False
-for assessment in assessments:
-already_evaluated = already_evaluated or assessment.scorer_id == student_id
-if not already_evaluated:
-return submission

def _get_next_submission(order, workflow, *args):
"""Constructs a raw SQL query for over grading or general peer review

Refactored function for retrieving the first submission that meets the
criteria of the query, which is altered based on the parameters passed
into the function.
For example, for a general peer assessment query, the following would be
the generated SQL query:
select pw.id, pw.submission_uuid , count(pwi.id) as c
from assessment_peerworkflow pw
left join assessment_peerworkflowitem pwi
on pw.submission_uuid=pwi.submission_uuid
where pw.item_id='item_one'
and pw.course_id='Demo_Course'
and pw.student_id<>'Buffy1'
and pw.submission_uuid<>'bc164f09-eb14-4b1d-9ba8-bb2c1c924fba'
and pw.submission_uuid<>'7c5e7db4-e82d-45e1-8fda-79c5deaa16d5'
and pw.submission_uuid<>'9ba64ff5-f18e-4794-b45b-cee26248a0a0'
and pw.submission_uuid<>'cdd6cf7a-2787-43ec-8d31-62fdb14d4e09'
and pw.submission_uuid<>'ebc7d4e1-1577-4443-ab58-2caad9a10837'
and (pwi.scorer_id_id is NULL or pwi.assessment<>-1 or pwi.started_at > '2014-03-04 20:09:04')
group by pw.submission_uuid having count(1) < 3
order by pw.created_at, pw.id
limit 1;
Args:
order (str): A piece of the query that is unique to over grading or
general peer review. This is inserted in the otherwise identical
query.
workflow (PeerWorkflow): The workflow associated with the student
requesting a submission for peer assessment. Used to parametrize
the query.
Returns:
A submission uuid for the submission that should be peer assessed.
"""
try:
exclude = ""
for item in workflow.items.all():
exclude += "and pw.submission_uuid<>'{}' ".format(item.submission_uuid)
raw_query = (
"select pw.id, pw.submission_uuid, pwi.scorer_id_id, count(pwi.id) as c "
"from assessment_peerworkflow pw "
"left join assessment_peerworkflowitem pwi "
"on pw.submission_uuid=pwi.submission_uuid "
"where pw.item_id=%s "
"and pw.course_id=%s "
"and pw.student_id<>%s "
"{} "
" and (pwi.scorer_id_id is NULL or pwi.assessment<>-1 or pwi.started_at > %s) "
"group by pw.submission_uuid "
"{} "
"limit 1; "
)
query = raw_query.format(exclude, order)
peer_workflows = PeerWorkflow.objects.raw(query, args)
if len(list(peer_workflows)) == 0:
return None
return peer_workflows[0].submission_uuid
except DatabaseError:
error_message = _(
u"An internal error occurred while retrieving a peer submission "
u"for student {}".format(workflow)
)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
def _assessors_count(peer_workflow):
return PeerWorkflowItem.objects.filter(
~Q(assessment=-1) |
Q(assessment=-1, started_at__gt=datetime.utcnow().replace(tzinfo=UTC) - TIME_LIMIT),
submission_uuid=peer_workflow.submission_uuid).count()
def _close_active_assessment(workflow, submission_uuid, assessment):
"""Associate the work item with a complete assessment.
Updates a workflow item on the student's workflow with the associated
assessment. When a workflow item has an assessment, it is considered
finished.
Args:
workflow (PeerWorkflow): The scorer's workflow
submission_uuid (str): The submission the scorer is grading.
assessment (PeerAssessment): The associated assessment for this action.
Examples:
>>> student_item_dict = dict(
>>> item_id="item_1",
>>> course_id="course_1",
>>> item_type="type_one",
>>> student_id="Bob",
>>> )
>>> workflow = _get_latest_workflow(student_item_dict)
>>> assessment = Assessment.objects.all()[0]
>>> _close_active_assessment(workflow, "1", assessment)
"""
try:
item = workflow.items.get(submission_uuid=submission_uuid)
item.assessment = assessment.id
item.save()
except (DatabaseError, PeerWorkflowItem.DoesNotExist):
error_message = _(
u"An internal error occurred while retrieving a workflow item for "
u"student {}. Workflow Items are created when submissions are "
u"pulled for assessment."
.format(workflow.student_id)
)
logger.exception(error_message)
raise PeerAssessmentWorkflowError(error_message)
def _check_student_done_grading(workflow, must_grade):
"""Checks if the student has graded enough peers.
Determines if the student has graded enough peers.
Args:
workflow (PeerWorkflow): The workflow associated with the current
student.
must_grade (int): The number of submissions the student has to peer
assess before they are finished.
Returns:
True if the student is done peer assessing, False if not.
Examples:
>>> student_item_dict = dict(
>>> item_id="item_1",
>>> course_id="course_1",
>>> item_type="type_one",
>>> student_id="Bob",
>>> )
>>> workflow = _get_latest_workflow(student_item_dict)
>>> _check_student_done_grading(workflow, 3)
True
"""
return workflow.items.all().exclude(assessment=-1).count() >= must_grade
def _check_submission_graded(submission_uuid, must_be_graded_by):
"""Checks to see if the submission has enough grades.
Determine if the given submission has enough grades.
Args:
submission_uuid (str): The submission we want to check to see if it has
been graded by enough peers.
must_be_graded_by (int): The number of peer assessments there should be.
Returns:
True if the submission is finished, False if not.
Examples:
>>> _check_submission_graded("1", 3)
True
"""
return PeerWorkflowItem.objects.filter(
submission_uuid=submission_uuid
).exclude(assessment=-1).count() >= must_be_graded_by
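Taken together, the new public entry points are meant to be driven in the order sketched below. This is a hedged, illustrative sketch rather than code from the commit: the student items, the rubric and assessment dicts, and the requirement counts are placeholders.

from openassessment.assessment import peer_api
from submissions import api as sub_api

# Each submission gets its own PeerWorkflow row up front.
tim_sub = sub_api.create_submission(TIM_ITEM, "Tim's answer")   # TIM_ITEM / BOB_ITEM are
bob_sub = sub_api.create_submission(BOB_ITEM, "Bob's answer")   # placeholder student items
peer_api.create_peer_workflow(tim_sub["uuid"])
peer_api.create_peer_workflow(bob_sub["uuid"])

# Bob pulls the next submission from the queue (Tim's) and assesses it;
# create_assessment() also closes Bob's matching PeerWorkflowItem.
sub = peer_api.get_submission_to_assess(BOB_ITEM, graded_by=1)
peer_api.create_assessment(
    sub["uuid"], BOB_ITEM["student_id"], ASSESSMENT_DICT, RUBRIC_DICT  # placeholder dicts
)

# Completion is now answered from the workflow tables rather than by counting Assessment rows.
requirements = {"must_grade": 1, "must_be_graded_by": 1}
peer_api.is_complete(bob_sub["uuid"], requirements)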
@@ -10,7 +10,7 @@ from mock import patch
from nose.tools import raises

from openassessment.assessment import peer_api
-from openassessment.assessment.models import Assessment
from openassessment.assessment.models import Assessment, PeerWorkflow, PeerWorkflowItem
from openassessment.workflow import api as workflow_api
from submissions import api as sub_api
from submissions.models import Submission
@@ -112,12 +112,12 @@ THURSDAY = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC)
@ddt
class TestPeerApi(TestCase):
def test_create_assessment(self):
-submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
self._create_student_and_submission("Tim", "Tim's answer")
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
sub = peer_api.get_submission_to_assess(bob, 3)
assessment = peer_api.create_assessment(
-submission["uuid"],
-STUDENT_ITEM["student_id"],
-REQUIRED_GRADED,
-REQUIRED_GRADED_BY,
sub["uuid"],
bob["student_id"],
ASSESSMENT_DICT,
RUBRIC_DICT,
)
@@ -127,41 +127,41 @@ class TestPeerApi(TestCase):
@file_data('valid_assessments.json')
def test_get_assessments(self, assessment_dict):
-submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
self._create_student_and_submission("Tim", "Tim's answer")
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
sub = peer_api.get_submission_to_assess(bob, 3)
peer_api.create_assessment(
-submission["uuid"],
-STUDENT_ITEM["student_id"],
-REQUIRED_GRADED,
-REQUIRED_GRADED_BY,
sub["uuid"],
bob["student_id"],
assessment_dict,
RUBRIC_DICT,
)
-assessments = peer_api.get_assessments(submission["uuid"])
assessments = peer_api.get_assessments(sub["uuid"])
self.assertEqual(1, len(assessments))

@file_data('valid_assessments.json')
def test_get_assessments_with_date(self, assessment_dict):
-submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
self._create_student_and_submission("Tim", "Tim's answer")
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
sub = peer_api.get_submission_to_assess(bob, 3)
peer_api.create_assessment(
-submission["uuid"],
-STUDENT_ITEM["student_id"],
-REQUIRED_GRADED,
-REQUIRED_GRADED_BY,
sub["uuid"],
bob["student_id"],
assessment_dict,
RUBRIC_DICT,
MONDAY
)
-assessments = peer_api.get_assessments(submission["uuid"])
assessments = peer_api.get_assessments(sub["uuid"])
self.assertEqual(1, len(assessments))
self.assertEqual(assessments[0]["scored_at"], MONDAY)
def test_peer_assessment_workflow(self):
-tim = self._create_student_and_submission("Tim", "Tim's answer")
-bob = self._create_student_and_submission("Bob", "Bob's answer")
-sally = self._create_student_and_submission("Sally", "Sally's answer")
-jim = self._create_student_and_submission("Jim", "Jim's answer")
-buffy = self._create_student_and_submission("Buffy", "Buffy's answer")
-xander = self._create_student_and_submission("Xander", "Xander's answer")
tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
sally_sub, sally = self._create_student_and_submission("Sally", "Sally's answer")
jim_sub, jim = self._create_student_and_submission("Jim", "Jim's answer")
self._create_student_and_submission("Buffy", "Buffy's answer")
self._create_student_and_submission("Xander", "Xander's answer")

# Tim should not have a score, because he has not evaluated enough
# peer submissions.
@@ -173,62 +173,347 @@ class TestPeerApi(TestCase):
}
# score = sub_api.get_score(STUDENT_ITEM)
score = workflow_api.get_workflow_for_submission(
-tim["uuid"], requirements
tim_sub["uuid"], requirements
)["score"]
self.assertIsNone(score)

-self.assertEquals((False, 0), peer_api.has_finished_required_evaluating(STUDENT_ITEM, REQUIRED_GRADED))
for i in range(5):
self.assertEquals((False, i), peer_api.has_finished_required_evaluating(STUDENT_ITEM, REQUIRED_GRADED))
sub = peer_api.get_submission_to_assess(tim, REQUIRED_GRADED)
peer_api.create_assessment(
-bob["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
sub["uuid"], tim["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
)
self.assertEquals((True, 5), peer_api.has_finished_required_evaluating(STUDENT_ITEM, REQUIRED_GRADED))

# Tim should not have a score, because his submission does not have
# enough assessments.
scores = sub_api.get_score(STUDENT_ITEM)
self.assertFalse(scores)

sub = peer_api.get_submission_to_assess(bob, REQUIRED_GRADED)
self.assertEqual(sub["uuid"], tim_sub["uuid"])
peer_api.create_assessment(
-sally["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
sub["uuid"], bob["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
)
-self.assertEquals((False, 2), peer_api.has_finished_required_evaluating(STUDENT_ITEM, REQUIRED_GRADED))
sub = peer_api.get_submission_to_assess(sally, REQUIRED_GRADED)
self.assertEqual(sub["uuid"], tim_sub["uuid"])
peer_api.create_assessment(
-jim["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
sub["uuid"], sally["student_id"], ASSESSMENT_DICT_FAIL, RUBRIC_DICT
)
-self.assertEquals((False, 3), peer_api.has_finished_required_evaluating(STUDENT_ITEM, REQUIRED_GRADED))
sub = peer_api.get_submission_to_assess(jim, REQUIRED_GRADED)
self.assertEqual(sub["uuid"], tim_sub["uuid"])
peer_api.create_assessment(
-buffy["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
sub["uuid"], jim["student_id"], ASSESSMENT_DICT_PASS, RUBRIC_DICT
)
-self.assertEquals((False, 4), peer_api.has_finished_required_evaluating(STUDENT_ITEM, REQUIRED_GRADED))

# Tim has met the critera, and should now be complete.
requirements = {
'must_grade': REQUIRED_GRADED,
'must_be_graded_by': REQUIRED_GRADED_BY
}
self.assertTrue(peer_api.is_complete(tim_sub["uuid"], requirements))
def test_complex_peer_assessment_workflow(self):
"""
Intended to mimic a more complicated scenario where people do not
necessarily always finish their assessments.
1) Angel submits
2) Angel waits for Peer Assessments
3) Bob submits and pulls Angel's submission but never reviews it.
4) Sally submits
5) Sally pulls Angel's Submission but never reviews it.
6) Jim submits
7) Jim also doesn't care about Angel and does not bother to review.
8) Buffy comes along and she submits
9) Buffy cares about Angel, but she won't get Angel's submission;
it's held by Bob, Sally, and Jim.
10) Buffy goes on to review Bob, Sally, and Jim, but needs two more.
11) Xander comes along and submits.
12) Xander means well, so Xander grades Bob, Sally, and Jim, but gets
lazy and doesn't grade Buffy when her submission comes along.
13) Buffy is waiting in the wings. She pulls Xander's submission and
grades it.
14) Spike submits.
15) Spike reviews Bob, Sally, Jim, Buffy, and Xander.
16) Buffy reviews Spike
17) Willow comes along and submits
18) Willow goes to grade, and should get Buffy
19) Xander comes back and gets Buffy's submission, and grades it.
20) Buffy should now have a grade.
"""
# Buffy should not have a score, because she has not evaluated enough
# peer submissions.
requirements = {
"peer": {
"must_grade": REQUIRED_GRADED,
"must_be_graded_by": REQUIRED_GRADED_BY,
}
}
# 1) Angel Submits
angel_sub, angel = self._create_student_and_submission("Angel", "Angel's answer")
# 2) Angel waits for peers
sub = peer_api.get_submission_to_assess(angel, REQUIRED_GRADED_BY)
self.assertIsNone(sub)
# 3) Bob submits
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
sub = peer_api.get_submission_to_assess(bob, REQUIRED_GRADED_BY)
self.assertEquals(angel_sub["uuid"], sub["uuid"])
# 4) Sally submits
sally_sub, sally = self._create_student_and_submission("Sally", "Sally's answer")
# 5) Sally pulls Angel's Submission but never reviews it.
sub = peer_api.get_submission_to_assess(sally, REQUIRED_GRADED_BY)
self.assertEquals(angel_sub["uuid"], sub["uuid"])
# 6) Jim submits
jim_sub, jim = self._create_student_and_submission("Jim", "Jim's answer")
# 7) Jim also doesn't care about Angel and does not bother to review.
sub = peer_api.get_submission_to_assess(jim, REQUIRED_GRADED_BY)
self.assertEquals(angel_sub["uuid"], sub["uuid"])
# 8) Buffy comes along and she submits
buffy_sub, buffy = self._create_student_and_submission("Buffy", "Buffy's answer")
# 9) Buffy cares about Angel, but she won't get Angel's submission;
# it's held by Bob, Sally, and Jim.
sub = peer_api.get_submission_to_assess(buffy, REQUIRED_GRADED_BY)
self.assertEquals(bob_sub["uuid"], sub["uuid"])
# 10) Buffy goes on to review Bob, Sally, and Jim, but needs two more.
peer_api.create_assessment(
-xander["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
)
-self.assertEquals((True, 5), peer_api.has_finished_required_evaluating(STUDENT_ITEM, REQUIRED_GRADED))
sub = peer_api.get_submission_to_assess(buffy, REQUIRED_GRADED_BY)
self.assertEquals(sally_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
)
sub = peer_api.get_submission_to_assess(buffy, REQUIRED_GRADED_BY)
self.assertEquals(jim_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
)
sub = peer_api.get_submission_to_assess(buffy, REQUIRED_GRADED_BY)
self.assertIsNone(sub)
-# Tim should not have a score, because his submission does not have
-# enough assessments.
-score = workflow_api.get_workflow_for_submission(
-tim["uuid"], requirements
-)["score"]
-self.assertIsNone(score)
# 11) Xander comes along and submits.
xander_sub, xander = self._create_student_and_submission("Xander", "Xander's answer")

# 12) Xander means well, so Xander grades Bob, Sally, and Jim, but gets
# lazy and doesn't grade Buffy when her submission comes along.
sub = peer_api.get_submission_to_assess(xander, REQUIRED_GRADED_BY)
self.assertEquals(bob_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
-tim["uuid"], "Bob", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
)
sub = peer_api.get_submission_to_assess(xander, REQUIRED_GRADED_BY)
self.assertEquals(sally_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
-tim["uuid"], "Sally", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT_FAIL, RUBRIC_DICT
sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
)
sub = peer_api.get_submission_to_assess(xander, REQUIRED_GRADED_BY)
self.assertEquals(jim_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
-tim["uuid"], "Jim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT_PASS, RUBRIC_DICT
sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
)

# Tim has met the critera, and should now have a score.
# We patch the call to `self_api.is_complete()` simulate having completed a self-assessment.
with patch('openassessment.workflow.models.self_api.is_complete') as mock_complete:
mock_complete.return_value = True
-score = workflow_api.get_workflow_for_submission(tim["uuid"], requirements)["score"]
score = workflow_api.get_workflow_for_submission(sub["uuid"], requirements)["score"]
-self.assertEqual(score["points_earned"], 6)
-self.assertEqual(score["points_possible"], 14)
# 13) Buffy is waiting in the wings. She pulls Xander's submission and
# grades it.
sub = peer_api.get_submission_to_assess(buffy, REQUIRED_GRADED_BY)
self.assertEquals(xander_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
)
# 14) Spike submits.
spike_sub, spike = self._create_student_and_submission("Spike", "Spike's answer")
-@raises(peer_api.PeerAssessmentRequestError)
-def test_bad_configuration(self):
-peer_api.has_finished_required_evaluating(STUDENT_ITEM, -1)
# 15) Spike reviews Bob, Sally, Jim, Buffy, and Xander.
sub = peer_api.get_submission_to_assess(spike, REQUIRED_GRADED_BY)
self.assertEquals(bob_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
)
sub = peer_api.get_submission_to_assess(spike, REQUIRED_GRADED_BY)
self.assertEquals(sally_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
)
sub = peer_api.get_submission_to_assess(spike, REQUIRED_GRADED_BY)
self.assertEquals(jim_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
)
sub = peer_api.get_submission_to_assess(spike, REQUIRED_GRADED_BY)
self.assertEquals(buffy_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
)
sub = peer_api.get_submission_to_assess(spike, REQUIRED_GRADED_BY)
self.assertEquals(xander_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
)
# 16) Buffy reviews Spike
sub = peer_api.get_submission_to_assess(buffy, REQUIRED_GRADED_BY)
self.assertEquals(spike_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
)
# 17) Willow comes along and submits
willow_sub, willow = self._create_student_and_submission("Willow", "Willow's answer")
# 18) Willow goes to grade, and should get Buffy
sub = peer_api.get_submission_to_assess(willow, REQUIRED_GRADED_BY)
self.assertEquals(buffy_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
sub["uuid"], willow["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
)
# 19) Xander comes back and gets Buffy's submission, and grades it.
sub = peer_api.get_submission_to_assess(xander, REQUIRED_GRADED_BY)
self.assertEquals(buffy_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
)
# 20) Buffy should now have a grade.
requirements = {
'must_grade': REQUIRED_GRADED,
'must_be_graded_by': REQUIRED_GRADED_BY
}
self.assertTrue(peer_api.is_complete(buffy_sub["uuid"], requirements))
def test_find_active_assessments(self):
buffy_answer, buffy = self._create_student_and_submission("Buffy", "Buffy's answer")
xander_answer, xander = self._create_student_and_submission("Xander", "Xander's answer")
# Check for a workflow for Buffy.
buffy_workflow = peer_api._get_latest_workflow(buffy)
self.assertIsNotNone(buffy_workflow)
# Check to see if Buffy is actively reviewing Xander's submission.
# She isn't so we should get back no uuid.
submission_uuid = peer_api._find_active_assessments(buffy_workflow)
self.assertIsNone(submission_uuid)
# Buffy is going to review Xander's submission, so create a workflow
# item for Buffy.
peer_api._create_peer_workflow_item(buffy_workflow, xander_answer["uuid"])
# Check to see if Buffy is still actively reviewing Xander's submission.
submission_uuid = peer_api._find_active_assessments(buffy_workflow)
self.assertEqual(xander_answer["uuid"], submission_uuid)
def test_get_latest_workflow(self):
buffy_answer, buffy = self._create_student_and_submission("Buffy", "Buffy's answer")
self._create_student_and_submission("Xander", "Xander's answer")
self._create_student_and_submission("Willow", "Willow's answer")
buffy_answer_two, buffy = self._create_student_and_submission("Buffy", "Buffy's answer")
workflow = peer_api._get_latest_workflow(buffy)
self.assertNotEqual(buffy_answer["uuid"], workflow.submission_uuid)
self.assertEqual(buffy_answer_two["uuid"], workflow.submission_uuid)
def test_get_submission_for_review(self):
buffy_answer, buffy = self._create_student_and_submission("Buffy", "Buffy's answer")
xander_answer, xander = self._create_student_and_submission("Xander", "Xander's answer")
self._create_student_and_submission("Willow", "Willow's answer")
buffy_workflow = peer_api._get_latest_workflow(buffy)
# Get the next submission for review
submission_uuid = peer_api._get_submission_for_review(buffy_workflow, 3)
self.assertEqual(xander_answer["uuid"], submission_uuid)
def test_get_submission_for_over_grading(self):
buffy_answer, buffy = self._create_student_and_submission("Buffy", "Buffy's answer")
xander_answer, xander = self._create_student_and_submission("Xander", "Xander's answer")
willow_answer, willow = self._create_student_and_submission("Willow", "Willow's answer")
buffy_workflow = peer_api._get_latest_workflow(buffy)
xander_workflow = peer_api._get_latest_workflow(xander)
willow_workflow = peer_api._get_latest_workflow(willow)
# Get a bunch of workflow items opened up.
peer_api._create_peer_workflow_item(buffy_workflow, xander_answer["uuid"])
peer_api._create_peer_workflow_item(willow_workflow, xander_answer["uuid"])
peer_api._create_peer_workflow_item(xander_workflow, xander_answer["uuid"])
peer_api._create_peer_workflow_item(buffy_workflow, willow_answer["uuid"])
peer_api._create_peer_workflow_item(xander_workflow, willow_answer["uuid"])
#Get the next submission for review
submission_uuid = peer_api._get_submission_for_over_grading(xander_workflow)
self.assertEqual(buffy_answer["uuid"], submission_uuid)
def test_close_active_assessment(self):
buffy_answer, buffy = self._create_student_and_submission("Buffy", "Buffy's answer")
xander_answer, xander = self._create_student_and_submission("Xander", "Xander's answer")
# Create a workflow for Buffy.
buffy_workflow = peer_api._get_latest_workflow(buffy)
# Get a workflow item opened up.
submission = peer_api.get_submission_to_assess(buffy, 3)
self.assertEqual(xander_answer["uuid"], submission["uuid"])
assessment_dict = peer_api.create_assessment(
xander_answer["uuid"], "Buffy", ASSESSMENT_DICT, RUBRIC_DICT
)
assessment = Assessment.objects.filter(
scorer_id=assessment_dict["scorer_id"],
scored_at=assessment_dict["scored_at"])[0]
peer_api._close_active_assessment(buffy_workflow, xander_answer["uuid"], assessment)
item = peer_api._create_peer_workflow_item(buffy_workflow, xander_answer["uuid"])
self.assertEqual(xander_answer["uuid"], submission["uuid"])
self.assertIsNotNone(item.assessment)
@patch.object(PeerWorkflow.objects, 'raw')
@raises(peer_api.PeerAssessmentInternalError)
def test_failure_to_get_review_submission(self, mock_filter):
tim_answer, tim = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
tim_workflow = peer_api._get_latest_workflow(tim)
mock_filter.side_effect = DatabaseError("Oh no.")
peer_api._get_submission_for_review(tim_workflow, 3)
@patch.object(PeerWorkflow.objects, 'filter')
@raises(peer_api.PeerAssessmentWorkflowError)
def test_failure_to_get_latest_workflow(self, mock_filter):
mock_filter.side_effect = DatabaseError("Oh no.")
tim_answer, tim = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
peer_api._get_latest_workflow(tim)
@patch.object(PeerWorkflow.objects, 'get_or_create')
@raises(peer_api.PeerAssessmentInternalError)
def test_create_workflow_error(self, mock_filter):
mock_filter.side_effect = DatabaseError("Oh no.")
self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
@patch.object(PeerWorkflowItem.objects, 'get_or_create')
@raises(peer_api.PeerAssessmentInternalError)
def test_create_workflow_item_error(self, mock_filter):
mock_filter.side_effect = DatabaseError("Oh no.")
tim_answer, tim = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
peer_api._create_peer_workflow_item(tim, "5")
def test_get_submission_to_evaluate(self):
self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
@@ -244,23 +529,23 @@ class TestPeerApi(TestCase):
self.assertEqual(submission["student_item"], 2)
self.assertEqual(submission["attempt_number"], 1)

-@raises(peer_api.PeerAssessmentWorkflowError)
def test_no_submissions_to_evaluate_for_tim(self):
self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
-peer_api.get_submission_to_assess(STUDENT_ITEM, 3)
submission = peer_api.get_submission_to_assess(STUDENT_ITEM, 3)
self.assertIsNone(submission)
@patch.object(Assessment.objects, 'filter')
@raises(peer_api.PeerAssessmentInternalError)
def test_median_score_db_error(self, mock_filter):
mock_filter.side_effect = DatabaseError("Bad things happened")
-tim = self._create_student_and_submission("Tim", "Tim's answer")
tim, _ = self._create_student_and_submission("Tim", "Tim's answer")
peer_api.get_assessment_median_scores(tim["uuid"], 3)

@patch.object(Assessment.objects, 'filter')
@raises(peer_api.PeerAssessmentInternalError)
def test_get_assessments_db_error(self, mock_filter):
mock_filter.side_effect = DatabaseError("Bad things happened")
-tim = self._create_student_and_submission("Tim", "Tim's answer")
tim, _ = self._create_student_and_submission("Tim", "Tim's answer")
peer_api.get_assessments(tim["uuid"])
     @patch.object(Submission.objects, 'get')
@@ -268,11 +553,10 @@ class TestPeerApi(TestCase):
     def test_error_on_assessment_creation(self, mock_filter):
         mock_filter.side_effect = DatabaseError("Bad things happened")
         submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
+        peer_api.create_peer_workflow(submission["uuid"])
         peer_api.create_assessment(
             submission["uuid"],
             STUDENT_ITEM["student_id"],
-            REQUIRED_GRADED,
-            REQUIRED_GRADED_BY,
             ASSESSMENT_DICT,
             RUBRIC_DICT,
             MONDAY
@@ -281,18 +565,18 @@ class TestPeerApi(TestCase):
     @patch.object(Assessment.objects, 'filter')
     @raises(peer_api.PeerAssessmentInternalError)
     def test_error_on_get_assessment(self, mock_filter):
-        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
+        self._create_student_and_submission("Tim", "Tim's answer")
+        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
+        sub = peer_api.get_submission_to_assess(bob, 3)
         peer_api.create_assessment(
-            submission["uuid"],
-            STUDENT_ITEM["student_id"],
-            REQUIRED_GRADED,
-            REQUIRED_GRADED_BY,
+            sub["uuid"],
+            bob["student_id"],
             ASSESSMENT_DICT,
             RUBRIC_DICT,
             MONDAY
         )
         mock_filter.side_effect = DatabaseError("Bad things happened")
-        peer_api.get_assessments(submission["uuid"])
+        peer_api.get_assessments(sub["uuid"])
     def test_choose_score(self):
         self.assertEqual(0, Assessment.get_median_score([]))
@@ -313,8 +597,6 @@ class TestPeerApi(TestCase):
         peer_api.create_assessment(
             submission["uuid"],
             "another_student",
-            REQUIRED_GRADED,
-            REQUIRED_GRADED_BY,
             ASSESSMENT_DICT,
             RUBRIC_DICT,
             MONDAY
@@ -325,5 +607,6 @@ class TestPeerApi(TestCase):
         new_student_item = STUDENT_ITEM.copy()
         new_student_item["student_id"] = student
         submission = sub_api.create_submission(new_student_item, answer, date)
+        peer_api.create_peer_workflow(submission["uuid"])
         workflow_api.create_workflow(submission["uuid"])
-        return submission
+        return submission, new_student_item
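Taken together, the test changes above capture the new calling convention for the peer API: a submission must be registered with peer_api.create_peer_workflow() before it can be served out for grading, get_submission_to_assess() may return None when nothing is available, and create_assessment() no longer receives the must_grade / must_be_graded_by counts. A minimal sketch of that sequence, reusing the fixture names from these tests (STUDENT_ITEM, ASSESSMENT_DICT, RUBRIC_DICT) and assuming the scored-at date is optional, as the XBlock handler further down suggests:

```python
from openassessment.assessment import peer_api
from submissions import api as sub_api

# The author submits an answer and is entered into the peer workflow.
submission = sub_api.create_submission(STUDENT_ITEM, "Tim's answer")
peer_api.create_peer_workflow(submission["uuid"])

# A scorer needs their own submission/workflow before pulling work to grade.
scorer_item = dict(STUDENT_ITEM, student_id="Bob")
scorer_sub = sub_api.create_submission(scorer_item, "Bob's answer")
peer_api.create_peer_workflow(scorer_sub["uuid"])

# May return None if there is nothing for this scorer to assess yet.
to_grade = peer_api.get_submission_to_assess(scorer_item, 3)
if to_grade is not None:
    # Note: no REQUIRED_GRADED / REQUIRED_GRADED_BY arguments any more.
    peer_api.create_assessment(
        to_grade["uuid"],
        scorer_item["student_id"],
        ASSESSMENT_DICT,
        RUBRIC_DICT,
    )
```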
@@ -4,7 +4,6 @@ assessment process. The submission state is not explicitly tracked because
 the assessment workflow only begins after a submission has been created.
 """
 from django.db import models
-from django.utils.timezone import now
 from django_extensions.db.fields import UUIDField
 from model_utils import Choices
 from model_utils.models import StatusModel, TimeStampedModel
@@ -82,6 +81,7 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
         else:
             # Default starting status is peer
             new_status = self.STATUS.peer
+            peer_api.create_peer_workflow(self.submission_uuid)

         # If we're at least waiting, let's check if we have a peer score and
         # can move all the way to done
......
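The one-line addition above means that whenever the workflow status machinery lands on the default "peer" status, the author's PeerWorkflow row is created as a side effect. A rough sketch of the effect this is meant to have, assuming PeerWorkflow is keyed by submission_uuid (as the peer API calls above suggest) and that the status code runs when the workflow is created or refreshed:

```python
from openassessment.assessment.models import PeerWorkflow
from openassessment.workflow import api as workflow_api
from submissions import api as sub_api

submission = sub_api.create_submission(STUDENT_ITEM, "an answer")
workflow_api.create_workflow(submission["uuid"])

# Once the workflow status has been evaluated and defaulted to "peer",
# a PeerWorkflow row should exist for the submission (sketch only).
assert PeerWorkflow.objects.filter(submission_uuid=submission["uuid"]).exists()
```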
@@ -3,6 +3,7 @@ from django.db import DatabaseError
 from django.test import TestCase
 from mock import patch
 from nose.tools import raises
+from openassessment.assessment import peer_api
 from openassessment.workflow.models import AssessmentWorkflow
 from submissions.models import Submission
......
@@ -67,16 +67,15 @@ class PeerAssessmentMixin(object):
             assessment = peer_api.create_assessment(
                 data["submission_uuid"],
                 self.get_student_item_dict()["student_id"],
-                int(assessment_ui_model["must_grade"]),
-                int(assessment_ui_model["must_be_graded_by"]),
                 assessment_dict,
                 rubric_dict,
             )
         except PeerAssessmentRequestError as ex:
             return {'success': False, 'msg': ex.message}
         except PeerAssessmentInternalError as ex:
-            logger.exception()
-            return {'success': False, 'msg': _("Internal error occurred while creating the assessment")}
+            msg = _("Internal error occurred while creating the assessment")
+            logger.exception(msg)
+            return {'success': False, 'msg': msg}

         # Update both the workflow that the submission we're assessing
         # belongs to, as well as our own (e.g. have we evaluated enough?)
......
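The logging change in this hunk fixes a genuine bug: logging.Logger.exception() requires a message argument, so the bare logger.exception() call would itself raise a TypeError inside the except block and the handler would never return its error payload. Building the message once and reusing it for both the log entry and the response is the corrected pattern. A self-contained illustration (the handler and helper names here are illustrative, not the XBlock's):

```python
import logging

logger = logging.getLogger(__name__)


def _create_assessment(data):
    # Stand-in for the real peer_api call; always fails to show the handling.
    raise RuntimeError("simulated database failure")


def submit_assessment(data):
    try:
        return _create_assessment(data)
    except Exception:
        msg = "Internal error occurred while creating the assessment"
        # logger.exception(msg) records the message plus the current traceback;
        # calling logger.exception() with no arguments raises a TypeError.
        logger.exception(msg)
        return {'success': False, 'msg': msg}


print(submit_assessment({}))  # -> {'success': False, 'msg': '...'}
```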
@@ -5,7 +5,6 @@ Tests for grade handlers in Open Assessment XBlock.
 import copy
 import json
 from openassessment.assessment import peer_api, self_api
-from submissions import api as sub_api
 from .base import XBlockHandlerTestCase, scenario
@@ -30,27 +29,33 @@ class TestGrade(XBlockHandlerTestCase):
         # Create a submission from the user
         student_item = xblock.get_student_item_dict()
         submission = xblock.create_submission(student_item, self.SUBMISSION)
+        xblock.get_workflow_info()

         scorer_submissions = []
         for scorer_name, assessment in zip(['McNulty', 'Freamon'], self.ASSESSMENTS):
             # Create a submission for each scorer
             scorer = copy.deepcopy(student_item)
             scorer['student_id'] = scorer_name
-            scorer_sub = sub_api.create_submission(scorer, self.SUBMISSION)
+            scorer_sub = xblock.create_submission(scorer, self.SUBMISSION)
+            xblock.get_workflow_info()
+            submission = peer_api.get_submission_to_assess(scorer, 2)

             # Store the scorer's submission so our user can assess it later
             scorer_submissions.append(scorer_sub)

             # Create an assessment of the user's submission
             peer_api.create_assessment(
-                submission['uuid'], scorer_name, 2, 2,
+                submission['uuid'], scorer_name,
                 assessment, {'criteria': xblock.rubric_criteria}
             )

+        # Since xblock.create_submission sets the xblock's submission_uuid,
+        # we need to set it back to the proper user for this test.
+        xblock.submission_uuid = submission["uuid"]

         # Have our user make assessments (so she can get a score)
-        for scorer_sub in scorer_submissions:
+        for _ in range(2):
+            new_submission = peer_api.get_submission_to_assess(student_item, 2)
             peer_api.create_assessment(
-                scorer_sub['uuid'], 'Greggs', 2, 2,
+                new_submission['uuid'], 'Greggs',
                 self.ASSESSMENTS[0], {'criteria': xblock.rubric_criteria}
             )
......
@@ -26,12 +26,16 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         # Create a submission for this problem from another user
         student_item = xblock.get_student_item_dict()
         student_item['student_id'] = 'Sally'
         submission = xblock.create_submission(student_item, self.SUBMISSION)
+        xblock.get_workflow_info()

         # Create a submission for the scorer (required before assessing another student)
         another_student = copy.deepcopy(student_item)
         another_student['student_id'] = "Bob"
         xblock.create_submission(another_student, self.SUBMISSION)
+        xblock.get_workflow_info()
+        peer_api.get_submission_to_assess(another_student, 3)

         # Submit an assessment and expect a successful response
         assessment = copy.deepcopy(self.ASSESSMENT)
......
@@ -350,6 +350,9 @@ def set_score(submission_uuid, score, points_possible):
         externally to the API.

     Args:
+        student_item (dict): The student item associated with this score. This
+            dictionary must contain a course_id, student_id, and item_id.
+        submission_uuid (str): The submission associated with this score.
         submission_uuid (str): UUID for the submission (must exist).
         score (int): The score to associate with the given submission and
             student item.
@@ -384,7 +387,7 @@ def set_score(submission_uuid, score, points_possible):
         )
     except DatabaseError:
         error_msg = u"Could not retrieve student item: {} or submission {}.".format(
-            student_item, submission
+            submission_uuid
         )
         logger.exception(error_msg)
         raise SubmissionRequestError(error_msg)
......
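For context, the corrected error message lines up with the function's current signature: set_score identifies the submission purely by its UUID, plus the points earned and points possible. A short usage sketch matching the test further down (the values are arbitrary):

```python
from submissions import api as sub_api

submission = sub_api.create_submission(STUDENT_ITEM, "an answer")

# Record 11 points earned out of 12 possible for this submission.
score = sub_api.set_score(submission["uuid"], 11, 12)
```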
@@ -85,7 +85,7 @@ class Submission(models.Model):
         ))

     class Meta:
-        ordering = ["-submitted_at"]
+        ordering = ["-submitted_at", "-id"]


 class Score(models.Model):
......
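Adding "-id" as a secondary sort key makes the default Submission ordering deterministic when two rows share the same submitted_at value, which happens easily in tests that pass an explicit timestamp: ties now resolve to the most recently inserted row first. A small sketch of the behaviour this is intended to guarantee, assuming (as in the tests below) that create_submission's third argument is the submitted-at time and that get_submissions follows the model's default ordering:

```python
import datetime

import pytz

from submissions import api as sub_api

same_time = datetime.datetime(2014, 2, 1, 0, 0, 0, 0, pytz.UTC)

# Two attempts recorded with an identical timestamp...
sub_api.create_submission(STUDENT_ITEM, "first answer", same_time)
sub_api.create_submission(STUDENT_ITEM, "second answer", same_time)

# ...still come back in a stable order: the newer row (higher id) first.
submissions = sub_api.get_submissions(STUDENT_ITEM)
assert submissions[0]["attempt_number"] == 2
```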
@@ -9,7 +9,7 @@ from mock import patch
 import pytz

 from submissions import api as api
-from submissions.models import Submission
+from submissions.models import Submission, StudentItem
 from submissions.serializers import StudentItemSerializer

 STUDENT_ITEM = dict(
@@ -39,7 +39,8 @@ class TestSubmissionsApi(TestCase):
     def test_create_submission(self):
         submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
-        self._assert_submission(submission, ANSWER_ONE, 1, 1)
+        student_item = self._get_student_item(STUDENT_ITEM)
+        self._assert_submission(submission, ANSWER_ONE, student_item.pk, 1)

     def test_get_submission_by_uuid(self):
         submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
@@ -57,8 +58,9 @@ class TestSubmissionsApi(TestCase):
         api.create_submission(STUDENT_ITEM, ANSWER_TWO)

         submissions = api.get_submissions(STUDENT_ITEM)
-        self._assert_submission(submissions[1], ANSWER_ONE, 1, 1)
-        self._assert_submission(submissions[0], ANSWER_TWO, 1, 2)
+        student_item = self._get_student_item(STUDENT_ITEM)
+        self._assert_submission(submissions[1], ANSWER_ONE, student_item.pk, 1)
+        self._assert_submission(submissions[0], ANSWER_TWO, student_item.pk, 2)

     def test_get_submission(self):
         # Test base case that we can create a submission and get it back
@@ -85,24 +87,28 @@ class TestSubmissionsApi(TestCase):
         mock_get.side_effect = DatabaseError("Kaboom!")
         api.get_submission("000000000000000")

     def test_two_students(self):
         api.create_submission(STUDENT_ITEM, ANSWER_ONE)
         api.create_submission(SECOND_STUDENT_ITEM, ANSWER_TWO)

         submissions = api.get_submissions(STUDENT_ITEM)
         self.assertEqual(1, len(submissions))
-        self._assert_submission(submissions[0], ANSWER_ONE, 1, 1)
+        student_item = self._get_student_item(STUDENT_ITEM)
+        self._assert_submission(submissions[0], ANSWER_ONE, student_item.pk, 1)

         submissions = api.get_submissions(SECOND_STUDENT_ITEM)
         self.assertEqual(1, len(submissions))
-        self._assert_submission(submissions[0], ANSWER_TWO, 2, 1)
+        student_item = self._get_student_item(SECOND_STUDENT_ITEM)
+        self._assert_submission(submissions[0], ANSWER_TWO, student_item.pk, 1)

     @file_data('test_valid_student_items.json')
     def test_various_student_items(self, valid_student_item):
         api.create_submission(valid_student_item, ANSWER_ONE)
+        student_item = self._get_student_item(valid_student_item)
         submission = api.get_submissions(valid_student_item)[0]
-        self._assert_submission(submission, ANSWER_ONE, 1, 1)
+        self._assert_submission(submission, ANSWER_ONE, student_item.pk, 1)

     def test_get_latest_submission(self):
         past_date = datetime.datetime(2007, 9, 12, 0, 0, 0, 0, pytz.UTC)
@@ -120,7 +126,8 @@ class TestSubmissionsApi(TestCase):
     def test_set_attempt_number(self):
         api.create_submission(STUDENT_ITEM, ANSWER_ONE, None, 2)
         submissions = api.get_submissions(STUDENT_ITEM)
-        self._assert_submission(submissions[0], ANSWER_ONE, 1, 2)
+        student_item = self._get_student_item(STUDENT_ITEM)
+        self._assert_submission(submissions[0], ANSWER_ONE, student_item.pk, 2)

     @raises(api.SubmissionRequestError)
     @file_data('test_bad_student_items.json')
@@ -155,13 +162,21 @@ class TestSubmissionsApi(TestCase):
         self.assertEqual(submission["student_item"], expected_item)
         self.assertEqual(submission["attempt_number"], expected_attempt)

+    def _get_student_item(self, student_item):
+        return StudentItem.objects.get(
+            student_id=student_item["student_id"],
+            course_id=student_item["course_id"],
+            item_id=student_item["item_id"]
+        )

     """
     Testing Scores
     """

     def test_create_score(self):
         submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
-        self._assert_submission(submission, ANSWER_ONE, 1, 1)
+        student_item = self._get_student_item(STUDENT_ITEM)
+        self._assert_submission(submission, ANSWER_ONE, student_item.pk, 1)

         score = api.set_score(submission["uuid"], 11, 12)
         self._assert_score(score, 11, 12)
......
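The new _get_student_item helper removes the tests' earlier assumption that the StudentItem row for STUDENT_ITEM always ends up with primary key 1, which presumably stops holding once other tests (including the new peer-workflow ones) create their own rows and the auto-increment counter moves on. Looking the row up by its natural key keeps the assertions order-independent; a condensed sketch of the pattern:

```python
from submissions import api as sub_api
from submissions.models import StudentItem

submission = sub_api.create_submission(STUDENT_ITEM, "an answer")

# Resolve the StudentItem by its natural key instead of assuming pk == 1.
student_item = StudentItem.objects.get(
    student_id=STUDENT_ITEM["student_id"],
    course_id=STUDENT_ITEM["course_id"],
    item_id=STUDENT_ITEM["item_id"],
)
assert submission["student_item"] == student_item.pk
```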
@@ -76,9 +76,9 @@ STATIC_URL = '/static/'

 # Additional locations of static files
 STATICFILES_DIRS = (
     # Put strings here, like "/home/html/static" or "C:/www/django/static".
     # Always use forward slashes, even on Windows.
     # Don't forget to use absolute paths, not relative paths.
 )

 # List of finder classes that know how to find static files in
@@ -86,7 +86,7 @@ STATICFILES_DIRS = (
 STATICFILES_FINDERS = (
     'django.contrib.staticfiles.finders.FileSystemFinder',
     'django.contrib.staticfiles.finders.AppDirectoriesFinder',
     # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
 )

 # Make this unique, and don't share it with anybody.
@@ -176,4 +176,4 @@ LOGGING = {

 # TODO: add config for XBLOCK_WORKBENCH { SCENARIO_CLASSES }
 WORKBENCH = {
     'reset_state_on_restart': False,
 }
@@ -18,7 +18,7 @@ NOSE_ARGS = [
     '--cover-package=' + ",".join(TEST_APPS),
     '--cover-branches',
     '--cover-erase',
 ]

 TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
......