Commit 0b717bd5 by Stephen Sanchez

First pass at creating the Peer Workflow appropriately.

Wiring the peer queue into place

Updating based on rebase to fix tests
parent 6bf500c3
from django.contrib import admin
from openassessment.assessment.models import Assessment, AssessmentPart, Rubric, Criterion, CriterionOption
from openassessment.assessment.models import Assessment, AssessmentPart, Rubric, Criterion, CriterionOption, PeerWorkflow, PeerWorkflowItem
admin.site.register(Assessment)
admin.site.register(AssessmentPart)
admin.site.register(Rubric)
admin.site.register(Criterion)
admin.site.register(CriterionOption)
admin.site.register(PeerWorkflow)
admin.site.register(PeerWorkflowItem)
......@@ -224,7 +224,7 @@ class Assessment(models.Model):
feedback = models.TextField(max_length=10000, default="", blank=True)
class Meta:
ordering = ["-scored_at"]
ordering = ["-scored_at", "-id"]
@property
def points_earned(self):
......@@ -365,3 +365,67 @@ class AssessmentPart(models.Model):
@property
def points_possible(self):
return self.option.criterion.points_possible
class PeerWorkflow(models.Model):
    """Internal Model for tracking Peer Assessment Workflow

    This model can be used to determine the following information required
    throughout the Peer Assessment Workflow:

    1) Get the next submission that requires assessment.
    2) Does a submission have enough assessments?
    3) Has a student completed enough assessments?
    4) Does a student already have a submission open for assessment?
    5) Close open assessments when completed.
    6) Should 'over grading' be allowed for a submission?

    The student item is the author of the submission. Peer Workflow Items are
    created for each assessment made by this student.

    """
    student_id = models.CharField(max_length=40, db_index=True)
    item_id = models.CharField(max_length=128, db_index=True)
    course_id = models.CharField(max_length=40, db_index=True)
    submission_uuid = models.CharField(max_length=128, db_index=True, unique=True)
    created_at = models.DateTimeField(default=now, db_index=True)

    class Meta:
        ordering = ["created_at", "id"]

    def __repr__(self):
        return (
            "PeerWorkflow(student_id={0.student_id}, item_id={0.item_id}, "
            "course_id={0.course_id}, submission_uuid={0.submission_uuid}, "
            "created_at={0.created_at})"
        ).format(self)

    def __unicode__(self):
        return repr(self)


class PeerWorkflowItem(models.Model):
    """Represents an assessment associated with a particular workflow

    Created every time a submission is requested for peer assessment. The
    associated workflow represents the scorer of the given submission, and the
    assessment represents the completed assessment for this work item.

    """
    scorer_id = models.ForeignKey(PeerWorkflow, related_name='items')
    submission_uuid = models.CharField(max_length=128, db_index=True)
    started_at = models.DateTimeField(default=now, db_index=True)
    assessment = models.IntegerField(default=-1)

    class Meta:
        ordering = ["started_at", "id"]

    def __repr__(self):
        return (
            "PeerWorkflowItem(scorer_id={0.scorer_id}, "
            "submission_uuid={0.submission_uuid}, "
            "started_at={0.started_at}, assessment={0.assessment})"
        ).format(self)

    def __unicode__(self):
        return repr(self)
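As a rough illustration of how these models support the queue checks listed in the docstring, here is a minimal sketch; the helper name and the must_be_graded_by default are assumptions for illustration only, not part of this commit:

from openassessment.assessment.models import PeerWorkflowItem

def has_enough_assessments(submission_uuid, must_be_graded_by=3):
    """Hypothetical helper: has this submission collected enough completed assessments?

    PeerWorkflowItem.assessment defaults to -1 until the scorer finishes, so
    completed items are the ones whose assessment id is no longer -1.
    """
    finished = PeerWorkflowItem.objects.filter(
        submission_uuid=submission_uuid
    ).exclude(assessment=-1).count()
    return finished >= must_be_graded_by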
......@@ -4,7 +4,6 @@ assessment process. The submission state is not explicitly tracked because
the assessment workflow only begins after a submission has been created.
"""
from django.db import models
from django.utils.timezone import now
from django_extensions.db.fields import UUIDField
from model_utils import Choices
from model_utils.models import StatusModel, TimeStampedModel
......@@ -82,6 +81,7 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
else:
# Default starting status is peer
new_status = self.STATUS.peer
peer_api.create_peer_workflow(self.submission_uuid)
# If we're at least waiting, let's check if we have a peer score and
# can move all the way to done
......
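The hunk above is where the peer queue gets wired in: when a new AssessmentWorkflow starts in the peer status, the submission is registered with the peer API. A minimal sketch of that ordering, with the start_workflow function name assumed for illustration:

from submissions import api as sub_api
from openassessment.assessment import peer_api

def start_workflow(student_item, answer):
    """Hypothetical driver: submit an answer, then register it in the peer queue."""
    submission = sub_api.create_submission(student_item, answer)
    # Mirrors the change above: entering the 'peer' status creates the
    # PeerWorkflow row keyed by submission_uuid, so the submission can be
    # served to other students for assessment.
    peer_api.create_peer_workflow(submission["uuid"])
    return submission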
......@@ -3,6 +3,7 @@ from django.db import DatabaseError
from django.test import TestCase
from mock import patch
from nose.tools import raises
from openassessment.assessment import peer_api
from openassessment.workflow.models import AssessmentWorkflow
from submissions.models import Submission
......
......@@ -67,16 +67,15 @@ class PeerAssessmentMixin(object):
assessment = peer_api.create_assessment(
data["submission_uuid"],
self.get_student_item_dict()["student_id"],
int(assessment_ui_model["must_grade"]),
int(assessment_ui_model["must_be_graded_by"]),
assessment_dict,
rubric_dict,
)
except PeerAssessmentRequestError as ex:
return {'success': False, 'msg': ex.message}
except PeerAssessmentInternalError as ex:
logger.exception()
return {'success': False, 'msg': _("Internal error occurred while creating the assessment")}
msg = _("Internal error occurred while creating the assessment")
logger.exception(msg)
return {'success': False, 'msg': msg}
# Update both the workflow that the submission we're assessing
# belongs to, as well as our own (e.g. have we evaluated enough?)
......
......@@ -5,7 +5,6 @@ Tests for grade handlers in Open Assessment XBlock.
import copy
import json
from openassessment.assessment import peer_api, self_api
from submissions import api as sub_api
from .base import XBlockHandlerTestCase, scenario
......@@ -30,27 +29,33 @@ class TestGrade(XBlockHandlerTestCase):
# Create a submission from the user
student_item = xblock.get_student_item_dict()
submission = xblock.create_submission(student_item, self.SUBMISSION)
xblock.get_workflow_info()
scorer_submissions = []
for scorer_name, assessment in zip(['McNulty', 'Freamon'], self.ASSESSMENTS):
# Create a submission for each scorer
scorer = copy.deepcopy(student_item)
scorer['student_id'] = scorer_name
scorer_sub = sub_api.create_submission(scorer, self.SUBMISSION)
scorer_sub = xblock.create_submission(scorer, self.SUBMISSION)
xblock.get_workflow_info()
submission = peer_api.get_submission_to_assess(scorer, 2)
# Store the scorer's submission so our user can assess it later
scorer_submissions.append(scorer_sub)
# Create an assessment of the user's submission
peer_api.create_assessment(
submission['uuid'], scorer_name, 2, 2,
submission['uuid'], scorer_name,
assessment, {'criteria': xblock.rubric_criteria}
)
# Since xblock.create_submission sets the xblock's submission_uuid,
# we need to set it back to the proper user for this test.
xblock.submission_uuid = submission["uuid"]
# Have our user make assessments (so she can get a score)
for scorer_sub in scorer_submissions:
for _ in range(2):
new_submission = peer_api.get_submission_to_assess(student_item, 2)
peer_api.create_assessment(
scorer_sub['uuid'], 'Greggs', 2, 2,
new_submission['uuid'], 'Greggs',
self.ASSESSMENTS[0], {'criteria': xblock.rubric_criteria}
)
......
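For reference, the flow the updated grade test exercises is roughly: the scorer submits, the workflow is refreshed (creating the PeerWorkflow), a submission is pulled from the queue, and an assessment is recorded. A condensed sketch, with the create_assessment argument order inferred from the test calls above:

# scorer_item, ANSWER, and assessment_dict stand in for the test fixtures above.
scorer_sub = xblock.create_submission(scorer_item, ANSWER)
xblock.get_workflow_info()
peer_sub = peer_api.get_submission_to_assess(scorer_item, 2)
peer_api.create_assessment(
    peer_sub["uuid"], scorer_item["student_id"],
    assessment_dict, {"criteria": xblock.rubric_criteria}
)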
......@@ -26,12 +26,16 @@ class TestPeerAssessment(XBlockHandlerTestCase):
# Create a submission for this problem from another user
student_item = xblock.get_student_item_dict()
student_item['student_id'] = 'Sally'
submission = xblock.create_submission(student_item, self.SUBMISSION)
submission = xblock.create_submission(student_item, self.SUBMISSION)
xblock.get_workflow_info()
# Create a submission for the scorer (required before assessing another student)
another_student = copy.deepcopy(student_item)
another_student['student_id'] = "Bob"
xblock.create_submission(another_student, self.SUBMISSION)
xblock.get_workflow_info()
peer_api.get_submission_to_assess(another_student, 3)
# Submit an assessment and expect a successful response
assessment = copy.deepcopy(self.ASSESSMENT)
......
......@@ -350,6 +350,9 @@ def set_score(submission_uuid, score, points_possible):
externally to the API.
Args:
student_item (dict): The student item associated with this score. This
dictionary must contain a course_id, student_id, and item_id.
submission_uuid (str): The submission associated with this score.
submission_uuid (str): UUID for the submission (must exist).
score (int): The score to associate with the given submission and
student item.
......@@ -384,7 +387,7 @@ def set_score(submission_uuid, score, points_possible):
)
except DatabaseError:
error_msg = u"Could not retrieve student item: {} or submission {}.".format(
student_item, submission
submission_uuid
)
logger.exception(error_msg)
raise SubmissionRequestError(error_msg)
......
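The corrected docstring and error message reflect that set_score is keyed on the submission UUID alone. A minimal usage sketch, reusing the STUDENT_ITEM and ANSWER_ONE fixtures from the submissions tests below:

from submissions import api as sub_api

submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
# Record 11 points earned out of 12 possible for this submission.
score = sub_api.set_score(submission["uuid"], 11, 12)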
......@@ -85,7 +85,7 @@ class Submission(models.Model):
))
class Meta:
ordering = ["-submitted_at"]
ordering = ["-submitted_at", "-id"]
class Score(models.Model):
......
......@@ -9,7 +9,7 @@ from mock import patch
import pytz
from submissions import api as api
from submissions.models import Submission
from submissions.models import Submission, StudentItem
from submissions.serializers import StudentItemSerializer
STUDENT_ITEM = dict(
......@@ -39,7 +39,8 @@ class TestSubmissionsApi(TestCase):
def test_create_submission(self):
submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
self._assert_submission(submission, ANSWER_ONE, 1, 1)
student_item = self._get_student_item(STUDENT_ITEM)
self._assert_submission(submission, ANSWER_ONE, student_item.pk, 1)
def test_get_submission_by_uuid(self):
submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
......@@ -57,8 +58,9 @@ class TestSubmissionsApi(TestCase):
api.create_submission(STUDENT_ITEM, ANSWER_TWO)
submissions = api.get_submissions(STUDENT_ITEM)
self._assert_submission(submissions[1], ANSWER_ONE, 1, 1)
self._assert_submission(submissions[0], ANSWER_TWO, 1, 2)
student_item = self._get_student_item(STUDENT_ITEM)
self._assert_submission(submissions[1], ANSWER_ONE, student_item.pk, 1)
self._assert_submission(submissions[0], ANSWER_TWO, student_item.pk, 2)
def test_get_submission(self):
# Test base case that we can create a submission and get it back
......@@ -85,24 +87,28 @@ class TestSubmissionsApi(TestCase):
mock_get.side_effect = DatabaseError("Kaboom!")
api.get_submission("000000000000000")
def test_two_students(self):
api.create_submission(STUDENT_ITEM, ANSWER_ONE)
api.create_submission(SECOND_STUDENT_ITEM, ANSWER_TWO)
submissions = api.get_submissions(STUDENT_ITEM)
self.assertEqual(1, len(submissions))
self._assert_submission(submissions[0], ANSWER_ONE, 1, 1)
student_item = self._get_student_item(STUDENT_ITEM)
self._assert_submission(submissions[0], ANSWER_ONE, student_item.pk, 1)
submissions = api.get_submissions(SECOND_STUDENT_ITEM)
self.assertEqual(1, len(submissions))
self._assert_submission(submissions[0], ANSWER_TWO, 2, 1)
student_item = self._get_student_item(SECOND_STUDENT_ITEM)
self._assert_submission(submissions[0], ANSWER_TWO, student_item.pk, 1)
@file_data('test_valid_student_items.json')
def test_various_student_items(self, valid_student_item):
api.create_submission(valid_student_item, ANSWER_ONE)
student_item = self._get_student_item(valid_student_item)
submission = api.get_submissions(valid_student_item)[0]
self._assert_submission(submission, ANSWER_ONE, 1, 1)
self._assert_submission(submission, ANSWER_ONE, student_item.pk, 1)
def test_get_latest_submission(self):
past_date = datetime.datetime(2007, 9, 12, 0, 0, 0, 0, pytz.UTC)
......@@ -120,7 +126,8 @@ class TestSubmissionsApi(TestCase):
def test_set_attempt_number(self):
api.create_submission(STUDENT_ITEM, ANSWER_ONE, None, 2)
submissions = api.get_submissions(STUDENT_ITEM)
self._assert_submission(submissions[0], ANSWER_ONE, 1, 2)
student_item = self._get_student_item(STUDENT_ITEM)
self._assert_submission(submissions[0], ANSWER_ONE, student_item.pk, 2)
@raises(api.SubmissionRequestError)
@file_data('test_bad_student_items.json')
......@@ -155,13 +162,21 @@ class TestSubmissionsApi(TestCase):
self.assertEqual(submission["student_item"], expected_item)
self.assertEqual(submission["attempt_number"], expected_attempt)
def _get_student_item(self, student_item):
return StudentItem.objects.get(
student_id=student_item["student_id"],
course_id=student_item["course_id"],
item_id=student_item["item_id"]
)
"""
Testing Scores
"""
def test_create_score(self):
submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
self._assert_submission(submission, ANSWER_ONE, 1, 1)
student_item = self._get_student_item(STUDENT_ITEM)
self._assert_submission(submission, ANSWER_ONE, student_item.pk, 1)
score = api.set_score(submission["uuid"], 11, 12)
self._assert_score(score, 11, 12)
......
......@@ -76,9 +76,9 @@ STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
......@@ -86,7 +86,7 @@ STATICFILES_DIRS = (
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
......@@ -169,11 +169,11 @@ LOGGING = {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
},
}
}
# TODO: add config for XBLOCK_WORKBENCH { SCENARIO_CLASSES }
WORKBENCH = {
'reset_state_on_restart': False,
}
}
......@@ -18,7 +18,7 @@ NOSE_ARGS = [
'--cover-package=' + ",".join(TEST_APPS),
'--cover-branches',
'--cover-erase',
]
]
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
......