Commit e830d852 by David Ormsbee

Introduces a basic workflow API for overall assessment state.

[TIM-202]
parent 32466299
...@@ -10,7 +10,6 @@ before_script:
   - "pip install coveralls"
 script:
   - "python manage.py test"
-  - "python manage.py harvest"
   - "python setup.py install"
 after_success:
   coveralls
...@@ -65,6 +65,35 @@ class PeerAssessmentInternalError(PeerAssessmentError):
     pass
+def is_complete(submission_uuid, requirements):
+    submission = Submission.objects.get(uuid=submission_uuid)
+    finished_evaluating, _count = has_finished_required_evaluating(
+        StudentItemSerializer(submission.student_item).data,
+        requirements["must_grade"]
+    )
+    return finished_evaluating
+
+
+def get_score(submission_uuid, requirements):
+    # User hasn't completed their own submission yet
+    if not is_complete(submission_uuid, requirements):
+        return None
+
+    submission = Submission.objects.get(uuid=submission_uuid)
+    assessments = Assessment.objects.filter(submission=submission)
+    submission_finished = assessments.count() >= requirements["must_be_graded_by"]
+    if not submission_finished:
+        return None
+
+    return {
+        "points_earned": sum(
+            get_assessment_median_scores(
+                submission.uuid, requirements["must_be_graded_by"]
+            ).values()
+        ),
+        "points_possible": assessments[0].points_possible,
+    }
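A minimal sketch of how a caller, such as the workflow app introduced later in this commit, might use these two new peer API functions; the requirement numbers and the UUID are illustrative placeholders:

    from openassessment.peer import api as peer_api

    # Same shape as the "peer" sub-dict of the workflow requirements.
    requirements = {"must_grade": 5, "must_be_graded_by": 3}
    submission_uuid = "e12bd3ee-9fb0-11e3-9f68-040ccee02800"  # placeholder

    # is_complete(): has this student assessed enough of their peers?
    if peer_api.is_complete(submission_uuid, requirements):
        # get_score() returns None until the submission itself has
        # received must_be_graded_by assessments.
        score = peer_api.get_score(submission_uuid, requirements)
        if score is not None:
            earned = score["points_earned"]
            possible = score["points_possible"]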
 def create_assessment(
         submission_uuid,
         scorer_id,
...@@ -114,7 +143,7 @@ def create_assessment(
     """
     try:
         submission = Submission.objects.get(uuid=submission_uuid)
+        student_item = submission.student_item
         rubric = rubric_from_dict(rubric_dict)
         option_ids = rubric.options_ids(assessment_dict["options_selected"])
...@@ -123,6 +152,19 @@ def create_assessment(
         if None in option_ids:
             raise PeerAssessmentRequestError(_("Selected options do not match the rubric options."))
+        # Check if the grader has even submitted an answer themselves...
+        try:
+            scorer_item = StudentItem.objects.get(
+                student_id=scorer_id,
+                item_id=student_item.item_id,
+                course_id=student_item.course_id,
+                item_type=student_item.item_type
+            )
+        except StudentItem.DoesNotExist:
+            raise PeerAssessmentWorkflowError(
+                _("You must make a submission before assessing another student")
+            )
         feedback = assessment_dict.get('feedback', u'')
         peer_assessment = {
             "rubric": rubric.id,
...@@ -142,44 +184,6 @@ def create_assessment(
             raise PeerAssessmentRequestError(peer_serializer.errors)
         peer_serializer.save()
-        # Check if the submission is finished and its Author has graded enough.
-        student_item = submission.student_item
-        _score_if_finished(
-            student_item,
-            submission,
-            must_grade,
-            must_be_graded_by
-        )
-
-        # Check if the grader is finished and has enough assessments
-        try:
-            scorer_item = StudentItem.objects.get(
-                student_id=scorer_id,
-                item_id=student_item.item_id,
-                course_id=student_item.course_id,
-                item_type=student_item.item_type
-            )
-        except StudentItem.DoesNotExist:
-            raise PeerAssessmentWorkflowError(_("You must make a submission before assessing another student"))
-
-        scorer_submissions = Submission.objects.filter(
-            student_item=scorer_item
-        ).order_by("-attempt_number")
-        if len(scorer_submissions) > 0:
-            _score_if_finished(
-                scorer_item,
-                scorer_submissions[0],
-                must_grade,
-                must_be_graded_by
-            )
-        # Currently, this condition is unreachable, since the only way to create a StudentItem is to
-        # create a submission for that student. We check anyway just in case this invariant changes.
-        else:
-            raise PeerAssessmentWorkflowError(_("You must make at least one submission before assessing another student"))
         return peer_serializer.data
     except DatabaseError:
         error_message = u"An error occurred while creating assessment {} for submission: {} by: {}".format(
...@@ -191,36 +195,6 @@ def create_assessment(
         raise PeerAssessmentInternalError(error_message)
-def _score_if_finished(student_item,
-                       submission,
-                       required_assessments_for_student,
-                       must_be_graded_by):
-    """Calculate final grade iff peer evaluation flow is satisfied.
-
-    Checks if the student is finished with the peer assessment workflow. If the
-    student already has a final grade calculated, there is no need to proceed.
-    If they do not have a grade, the student has a final grade calculated.
-
-    """
-    if Score.objects.filter(student_item=student_item):
-        return
-
-    finished_evaluating = has_finished_required_evaluating(
-        StudentItemSerializer(student_item).data,
-        required_assessments_for_student
-    )
-    assessments = Assessment.objects.filter(submission=submission)
-    submission_finished = assessments.count() >= must_be_graded_by
-    if finished_evaluating and submission_finished:
-        submission_api.set_score(
-            StudentItemSerializer(student_item).data,
-            SubmissionSerializer(submission).data,
-            sum(get_assessment_median_scores(submission.uuid, must_be_graded_by).values()),
-            assessments[0].points_possible
-        )
 def get_assessment_median_scores(submission_id, must_be_graded_by):
     """Get the median score for each rubric criterion
......
# Shameless stub since Will is writing this.
def is_complete(submission_uuid):
    return True
\ No newline at end of file
...@@ -11,6 +11,7 @@ from nose.tools import raises
 from openassessment.peer import api as peer_api
 from openassessment.peer.models import Assessment
+from openassessment.workflow import api as workflow_api
 from submissions import api as sub_api
 from submissions.models import Submission
 from submissions.tests.test_api import STUDENT_ITEM, ANSWER_ONE
...@@ -109,7 +110,7 @@ THURSDAY = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC)
 @ddt
-class TestApi(TestCase):
+class TestPeerApi(TestCase):
     def test_create_assessment(self):
         submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
         assessment = peer_api.create_assessment(
...@@ -164,8 +165,17 @@ class TestApi(TestCase):
         # Tim should not have a score, because he has not evaluated enough
         # peer submissions.
-        scores = sub_api.get_score(STUDENT_ITEM)
-        self.assertFalse(scores)
+        requirements = {
+            "peer": {
+                "must_grade": REQUIRED_GRADED,
+                "must_be_graded_by": REQUIRED_GRADED_BY,
+            }
+        }
+
+        # score = sub_api.get_score(STUDENT_ITEM)
+        score = workflow_api.get_workflow_for_submission(
+            tim["uuid"], requirements
+        )["score"]
+        self.assertIsNone(score)
         self.assertEquals((False, 0), peer_api.has_finished_required_evaluating(STUDENT_ITEM, REQUIRED_GRADED))

         peer_api.create_assessment(
...@@ -191,8 +201,10 @@ class TestApi(TestCase):
         # Tim should not have a score, because his submission does not have
         # enough assessments.
-        scores = sub_api.get_score(STUDENT_ITEM)
-        self.assertFalse(scores)
+        score = workflow_api.get_workflow_for_submission(
+            tim["uuid"], requirements
+        )["score"]
+        self.assertIsNone(score)
         peer_api.create_assessment(
             tim["uuid"], "Bob", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
...@@ -205,10 +217,11 @@ class TestApi(TestCase):
         )
         # Tim has met the criteria, and should now have a score.
-        scores = sub_api.get_score(STUDENT_ITEM)
-        self.assertTrue(scores)
-        self.assertEqual(6, scores[0]["points_earned"])
-        self.assertEqual(14, scores[0]["points_possible"])
+        score = workflow_api.get_workflow_for_submission(
+            tim["uuid"], requirements
+        )["score"]
+        self.assertEqual(score["points_earned"], 6)
+        self.assertEqual(score["points_possible"], 14)
     @raises(peer_api.PeerAssessmentRequestError)
...@@ -264,7 +277,7 @@ class TestApi(TestCase):
         )

     @patch.object(Assessment.objects, 'filter')
-    @raises(sub_api.SubmissionInternalError)
+    @raises(peer_api.PeerAssessmentInternalError)
     def test_error_on_get_assessment(self, mock_filter):
         submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
         peer_api.create_assessment(
...@@ -309,4 +322,6 @@ class TestApi(TestCase):
 def _create_student_and_submission(student, answer, date=None):
     new_student_item = STUDENT_ITEM.copy()
     new_student_item["student_id"] = student
-    return sub_api.create_submission(new_student_item, answer, date)
+    submission = sub_api.create_submission(new_student_item, answer, date)
+    workflow_api.create_workflow(submission["uuid"])
+    return submission
from django.contrib import admin

from .models import AssessmentWorkflow


class AssessmentWorkflowAdmin(admin.ModelAdmin):
    list_display = (
        'uuid', 'status', 'status_changed', 'submission_uuid', 'score'
    )

admin.site.register(AssessmentWorkflow, AssessmentWorkflowAdmin)
"""
Public interface for the Assessment Workflow.
"""
import copy
import logging
from django.db import DatabaseError
from submissions import api as sub_api
from .models import AssessmentWorkflow
from .serializers import AssessmentWorkflowSerializer
logger = logging.getLogger(__name__)
class AssessmentWorkflowError(Exception):
"""An error that occurs during workflow actions.
This error is raised when the Workflow API cannot perform a requested
action.
"""
pass
class AssessmentWorkflowInternalError(AssessmentWorkflowError):
"""An error internal to the Workflow API has occurred.
This error is raised when an error occurs that is not caused by incorrect
use of the API, but rather internal implementation of the underlying
services.
"""
pass
class AssessmentWorkflowRequestError(AssessmentWorkflowError):
"""This error is raised when there was a request-specific error
This error is reserved for problems specific to the use of the API.
"""
def __init__(self, field_errors):
Exception.__init__(self, repr(field_errors))
self.field_errors = copy.deepcopy(field_errors)
class AssessmentWorkflowNotFoundError(AssessmentWorkflowError):
"""This error is raised when no submission is found for the request.
If a state is specified in a call to the API that results in no matching
Submissions, this error may be raised.
"""
pass
def create_workflow(submission_uuid):
"""Begins a new assessment workflow.
Create a new workflow that other assessments will record themselves against.
Args:
submission_uuid (str): The UUID for the submission that all our
assessments will be evaluating.
Returns:
dict: Assessment workflow information containing the keys
`submission_uuid`, `uuid`, `status`, `created`, `modified`
Raises:
AssessmentWorkflowRequestError: If the `submission_uuid` passed in does
not exist or is of an invalid type.
AssessmentWorkflowInternalError: Unexpected internal error, such as the
submissions app not being available or a database configuation
problem.
Examples:
>>> create_assessment_workflow('e12bd3ee-9fb0-11e3-9f68-040ccee02800')
{
'submission_uuid': u'e12bd3ee-9fb0-11e3-9f68-040ccee02800',
'uuid': u'e12ef27a-9fb0-11e3-aad4-040ccee02800',
'status': u'peer',
'created': datetime.datetime(2014, 2, 27, 13, 12, 59, 225359, tzinfo=<UTC>),
'modified': datetime.datetime(2014, 2, 27, 13, 12, 59, 225675, tzinfo=<UTC>)
}
"""
def sub_err_msg(specific_err_msg):
return (
u"Could not create assessment workflow: "
u"retrieving submission {} failed: {}"
.format(submission_uuid, specific_err_msg)
)
try:
submission_dict = sub_api.get_submission(submission_uuid)
except sub_api.SubmissionNotFoundError as err:
err_msg = sub_err_msg("submission not found")
logger.error(err_msg)
raise AssessmentWorkflowRequestError(err_msg)
except sub_api.SubmissionRequestError as err:
err_msg = sub_err_msg(err)
logger.error(err_msg)
raise AssessmentWorkflowRequestError(err_msg)
except sub_api.SubmissionInternalError as err:
err_msg = sub_err_msg(err)
logger.error(err)
raise AssessmentWorkflowInternalError(
u"retrieving submission {} failed with unknown error: {}"
.format(submission_uuid, err)
)
# We're not using a serializer to deserialize this because the only variable
# we're getting from the outside is the submission_uuid, which is already
# validated by this point.
try:
workflow = AssessmentWorkflow.objects.create(
submission_uuid=submission_uuid,
status=AssessmentWorkflow.STATUS.peer
)
except DatabaseError as err:
err_msg = u"Could not create assessment workflow: {}".format(err)
logger.exception(err_msg)
raise AssessmentWorkflowInternalError(err_msg)
return AssessmentWorkflowSerializer(workflow).data
def get_workflow_for_submission(submission_uuid, assessment_requirements):
"""Returns Assessment Workflow information
This will implicitly call `update_from_assessments()` to make sure we
give the most current information.
Args:
student_item_dict (dict):
submission_uuid (str):
Returns:
dict: Assessment workflow information containing the keys
`submission_uuid`, `uuid`, `status`, `created`, `modified`
Raises:
AssessmentWorkflowRequestError: If the `workflow_uuid` passed in is not
a string type.
AssessmentWorkflowNotFoundError: No assessment workflow matching the
requested UUID exists.
AssessmentWorkflowInternalError: Unexpected internal error, such as the
submissions app not being available or a database configuation
problem.
Examples:
>>> get_assessment_workflow('e12ef27a-9fb0-11e3-aad4-040ccee02800')
{
'submission_uuid': u'e12bd3ee-9fb0-11e3-9f68-040ccee02800',
'uuid': u'e12ef27a-9fb0-11e3-aad4-040ccee02800',
'status': u'peer',
'created': datetime.datetime(2014, 2, 27, 13, 12, 59, 225359, tzinfo=<UTC>),
'modified': datetime.datetime(2014, 2, 27, 13, 12, 59, 225675, tzinfo=<UTC>)
}
"""
return update_from_assessments(submission_uuid, assessment_requirements)
def update_from_assessments(submission_uuid, assessment_requirements):
workflow = _get_workflow_model(submission_uuid)
workflow.update_from_assessments(assessment_requirements)
return _serialized_with_details(workflow, assessment_requirements)
def _get_workflow_model(submission_uuid):
if not isinstance(submission_uuid, basestring):
raise AssessmentWorkflowRequestError("submission_uuid must be a string type")
try:
workflow = AssessmentWorkflow.objects.get(submission_uuid=submission_uuid)
except AssessmentWorkflow.DoesNotExist:
raise AssessmentWorkflowNotFoundError(
u"No assessment workflow matching submission_uuid {}".format(submission_uuid)
)
except Exception as exc:
# Something very unexpected has just happened (like DB misconfig)
err_msg = (
"Could not get assessment workflow with submission_uuid {} due to error: {}"
.format(submission_uuid, exc)
)
logger.exception(err_msg)
raise AssessmentWorkflowInternalError(err_msg)
return workflow
def _serialized_with_details(workflow, assessment_requirements):
data_dict = AssessmentWorkflowSerializer(workflow).data
data_dict["status_details"] = workflow.status_details(assessment_requirements)
return data_dict
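Taken together, the intended call pattern for this API is roughly the following sketch (the requirement numbers are illustrative, and `submission` is assumed to come from `submissions.api.create_submission`):

    from openassessment.workflow import api as workflow_api

    # Called once, at submission time:
    workflow = workflow_api.create_workflow(submission["uuid"])
    assert workflow["status"] == "peer"

    # Called on every read, passing the *current* requirements so the
    # status is recomputed against the latest problem definition:
    requirements = {"peer": {"must_grade": 5, "must_be_graded_by": 3}}
    workflow = workflow_api.get_workflow_for_submission(
        submission["uuid"], requirements
    )
    if workflow["status"] == "done":
        points = workflow["score"]["points_earned"]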
"""
Workflow models are intended to track which step the student is in during the
assessment process. The submission state is not explicitly tracked because
the assessment workflow only begins after a submission has been created.
"""
from django.db import models
from django.utils.timezone import now
from django_extensions.db.fields import UUIDField
from model_utils import Choices
from model_utils.models import StatusModel, TimeStampedModel
from openassessment.peer import api as peer_api
from openassessment.peer import self_api
from submissions import api as sub_api
class AssessmentWorkflow(TimeStampedModel, StatusModel):
"""Tracks the open-ended assessment status of a student submission.
It's important to note that although we track the status as an explicit
field here, it is not the canonical status. This is because the
determination of what we need to do in order to be "done" is specified by
the OpenAssessmentBlock problem definition and can change. So every time
we are asked where the student is, we have to query the peer, self, and
later other assessment APIs with the latest requirements (e.g. "number of
submissions you have to assess = 5"). The "status" field on this model is
an after the fact recording of the last known state of that information so
we can search easily.
"""
STATUS = Choices( # implicit "status" field
"peer", # User needs to assess peer submissions
"self", # User needs to assess themselves
"waiting", # User has done all necessary assessment but hasn't been
# graded yet -- we're waiting for assessments of their
# submission by others.
"done", # Complete
)
submission_uuid = models.CharField(max_length=36, db_index=True, unique=True)
uuid = UUIDField(version=1, db_index=True, unique=True)
class Meta:
ordering = ["-created"]
# TODO: In migration, need a non-unique index on (course_id, item_id, status)
@property
def score(self):
return sub_api.get_latest_score_for_submission(self.submission_uuid)
def status_details(self, assessment_requirements):
return {
"peer": {
"complete": self._is_peer_complete(assessment_requirements),
},
"self": {
"complete": self._is_self_complete(),
},
}
def _is_peer_complete(self, assessment_requirements):
peer_requirements = assessment_requirements["peer"]
return peer_api.is_complete(self.submission_uuid, peer_requirements)
def _is_self_complete(self):
return self_api.is_complete(self.submission_uuid)
def update_from_assessments(self, assessment_requirements):
# If we're done, we're done -- it doesn't matter if requirements have
# changed because we've already written a score.
if self.status == self.STATUS.done:
return
# Have they completed the peer and self steps?
peer_complete = self._is_peer_complete(assessment_requirements)
self_complete = self._is_self_complete()
if peer_complete and self_complete:
# If they've completed both, they're at least waiting, possibly done
new_status = self.STATUS.waiting
elif peer_complete:
# If they haven't done self assessment yet, that's their status
new_status = self.STATUS.self
else:
# Default starting status is peer
new_status = self.STATUS.peer
# If we're at least waiting, let's check if we have a peer score and
# can move all the way to done
if new_status == self.STATUS.waiting:
score = peer_api.get_score(
self.submission_uuid, assessment_requirements["peer"]
)
if score:
sub_api.set_score(
self.submission_uuid,
score["points_earned"],
score["points_possible"]
)
new_status = self.STATUS.done
# Finally save our changes if the status has changed
if self.status != new_status:
self.status = new_status
self.save()
# Just here to record thoughts for later:
#
# class AssessmentWorkflowEvent(models.Model):
# workflow = models.ForeignKey(AssessmentWorkflow, related_name="events")
# app = models.CharField(max_length=50)
# event_type = models.CharField(max_length=255)
# event_data = models.TextField()
# description = models.TextField()
# created_at = models.DateTimeField(default=now, db_index=True)
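The transition rules implemented by `update_from_assessments()` can be restated compactly. This is a simplified sketch of the same decision logic, not a drop-in replacement (it ignores the sticky "done" short-circuit, persistence, and score writing, all handled above):

    def _next_status(peer_complete, self_complete, have_peer_score):
        # Derived purely from step completion; "done" additionally
        # requires that a peer score could be computed and written.
        if peer_complete and self_complete:
            return "done" if have_peer_score else "waiting"
        elif peer_complete:
            return "self"
        else:
            return "peer"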
"""
Serializers are created to ensure models do not have to be accessed outside the
scope of the Tim APIs.
"""
from rest_framework import serializers
from openassessment.workflow.models import AssessmentWorkflow
class AssessmentWorkflowSerializer(serializers.ModelSerializer):
score = serializers.Field(source='score')
class Meta:
model = AssessmentWorkflow
fields = (
'uuid',
'submission_uuid',
'status',
'created',
'modified',
# Computed
'score'
)
# Not implemented yet:
#
# class AssessmentWorkflowHistorySerializer(serializers.ModelSerializer):
# class Meta:
# model = AssessmentWorkflowHistory
# fields = (
# 'workflow',
# 'app',
# 'event_type',
# 'event_data',
# 'description',
# 'created_at'
# )
from django.db import DatabaseError
from django.test import TestCase
from mock import patch
from nose.tools import raises

from openassessment.workflow.models import AssessmentWorkflow
from submissions.models import Submission

import openassessment.workflow.api as workflow_api
import submissions.api as sub_api

ITEM_1 = {
    "student_id": "Optimus Prime 001",
    "item_id": "Matrix of Leadership",
    "course_id": "Advanced Auto Mechanics 200",
    "item_type": "openassessment",
}

REQUIREMENTS = {
    "peer": {
        "must_grade": 5,
        "must_be_graded_by": 3,
    }
}


class TestAssessmentWorkflowApi(TestCase):

    def test_create_workflow(self):
        submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
        workflow = workflow_api.create_workflow(submission["uuid"])

        workflow_keys = set(workflow.keys())
        self.assertEqual(
            workflow_keys,
            {
                'submission_uuid', 'uuid', 'status', 'created', 'modified', 'score'
            }
        )
        self.assertEqual(workflow["submission_uuid"], submission["uuid"])
        self.assertEqual(workflow["status"], "peer")

        workflow_from_get = workflow_api.get_workflow_for_submission(
            submission["uuid"], REQUIREMENTS
        )
        del workflow_from_get['status_details']
        self.assertEqual(workflow, workflow_from_get)

    def test_need_valid_submission_uuid(self):
        # submission doesn't exist
        with self.assertRaises(workflow_api.AssessmentWorkflowRequestError):
            workflow = workflow_api.create_workflow("xxxxxxxxxxx")

        # submission_uuid is the wrong type
        with self.assertRaises(workflow_api.AssessmentWorkflowRequestError):
            workflow = workflow_api.create_workflow(123)

    @patch.object(Submission.objects, 'get')
    @raises(workflow_api.AssessmentWorkflowInternalError)
    def test_unexpected_submissions_errors_wrapped(self, mock_get):
        mock_get.side_effect = Exception("Kaboom!")
        workflow_api.create_workflow("zzzzzzzzzzzzzzz")

    @patch.object(AssessmentWorkflow.objects, 'create')
    @raises(workflow_api.AssessmentWorkflowInternalError)
    def test_unexpected_workflow_errors_wrapped(self, mock_create):
        mock_create.side_effect = DatabaseError("Kaboom!")
        submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
        workflow_api.create_workflow(submission["uuid"])

    def test_get_assessment_workflow_expected_errors(self):
        with self.assertRaises(workflow_api.AssessmentWorkflowNotFoundError):
            workflow_api.get_workflow_for_submission("0000000000000", REQUIREMENTS)
        with self.assertRaises(workflow_api.AssessmentWorkflowRequestError):
            workflow_api.get_workflow_for_submission(123, REQUIREMENTS)

    @patch.object(Submission.objects, 'get')
    @raises(workflow_api.AssessmentWorkflowInternalError)
    def test_unexpected_workflow_get_errors_wrapped(self, mock_get):
        mock_get.side_effect = Exception("Kaboom!")
        submission = sub_api.create_submission(ITEM_1, "We talk TV!")
        workflow = workflow_api.create_workflow(submission["uuid"])
        workflow_api.get_workflow_for_submission(workflow["uuid"], REQUIREMENTS)
 from xblock.core import XBlock

-from openassessment.peer.api import get_assessments
-from submissions.api import get_score
+from openassessment.peer import api as peer_api
+from openassessment.workflow import api as workflow_api
 class GradeMixin(object):
...@@ -17,23 +17,19 @@ class GradeMixin(object):
     @XBlock.handler
     def render_grade(self, data, suffix=''):
         problem_open, date = self.is_open()
-        workflowstate = "complete"  # TODO hook in workflow.
+        workflow = self.get_workflow_info()
         context = {}
-        if workflowstate == "complete":
+        if workflow.get('status') == "done":
             path = 'openassessmentblock/grade/oa_grade_complete.html'
-            student_item = self.get_student_item_dict()
-            scores = get_score(student_item)
-            if scores:
-                context = {
-                    "score": scores[0],
-                    "assessments": [],
-                }
-
-                # Look up assessment feedback
-                for assessment in get_assessments(scores[0]['submission_uuid']):
-                    context['assessments'].append(assessment)
-            else:
-                path = 'openassessmentblock/grade/oa_grade_waiting.html'
+            context = {
+                "score": workflow["score"],
+                "assessments": [
+                    assessment
+                    for assessment in peer_api.get_assessments(self.submission_uuid)
+                ],
+            }
+        elif workflow.get('status') == "waiting":
+            path = 'openassessmentblock/grade/oa_grade_waiting.html'
         elif not problem_open and date == "due":
             path = 'openassessmentblock/grade/oa_grade_closed.html'
         else:
......
...@@ -19,6 +19,8 @@ from openassessment.xblock.self_assessment_mixin import SelfAssessmentMixin
 from openassessment.xblock.submission_mixin import SubmissionMixin
 from openassessment.xblock.studio_mixin import StudioMixin
 from openassessment.xblock.xml import update_from_xml
+from openassessment.xblock.workflow_mixin import WorkflowMixin
+from openassessment.workflow import api as workflow_api
 DEFAULT_PROMPT = """
...@@ -146,7 +148,14 @@ def load(path):
     return data.decode("utf8")
-class OpenAssessmentBlock(XBlock, SubmissionMixin, PeerAssessmentMixin, SelfAssessmentMixin, StudioMixin, GradeMixin):
+class OpenAssessmentBlock(
+        XBlock,
+        SubmissionMixin,
+        PeerAssessmentMixin,
+        SelfAssessmentMixin,
+        StudioMixin,
+        GradeMixin,
+        WorkflowMixin):
     """Displays a question and gives an area where students can compose a response."""
     start_datetime = String(
...@@ -190,6 +199,11 @@ class OpenAssessmentBlock(XBlock, SubmissionMixin, PeerAssessmentMixin, SelfAsse
         scope=Scope.content,
         help="The course_id associated with this prompt (until we can get it from runtime).",
     )
+    submission_uuid = String(
+        default=None,
+        scope=Scope.user_state,
+        help="The student's submission that others will be assessing."
+    )
     saved_response = String(
         default=u"",
...@@ -372,3 +386,13 @@ class OpenAssessmentBlock(XBlock, SubmissionMixin, PeerAssessmentMixin, SelfAsse
         if due < datetime.datetime.utcnow():
             return False, "due"
         return True, None
+
+    def update_workflow_status(self, submission_uuid):
+        assessment_ui_model = self.get_assessment_module('peer-assessment')
+        requirements = {
+            "peer": {
+                "must_grade": assessment_ui_model["must_grade"],
+                "must_be_graded_by": assessment_ui_model["must_be_graded_by"]
+            }
+        }
+        return workflow_api.update_from_assessments(submission_uuid, requirements)
...@@ -78,6 +78,11 @@ class PeerAssessmentMixin(object):
             logger.exception()
             return {'success': False, 'msg': _("Internal error occurred while creating the assessment")}

+        # Update both the workflow that the submission we're assessing
+        # belongs to, as well as our own (e.g. have we evaluated enough?)
+        self.update_workflow_status(data["submission_uuid"])
+        self.update_workflow_status(self.submission_uuid)
+
         # Temp kludge until we fix JSON serialization for datetime
         assessment["scored_at"] = str(assessment["scored_at"])
...@@ -142,15 +147,8 @@ class PeerAssessmentMixin(object):
             peer_submission = peer_api.get_submission_to_assess(
                 student_item_dict, assessment["must_be_graded_by"]
             )
-            peer_submission = peer_api.get_submission_to_assess(
-                student_item_dict,
-                assessment["must_be_graded_by"]
-            )
-        except PeerAssessmentWorkflowError:
-            # TODO: Log?
-            pass
+        except PeerAssessmentWorkflowError as err:
+            logger.exception(err)
         return peer_submission

     def get_assessment_module(self, mixin_name):
......
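To make the dual update in this mixin concrete: a single call to create_assessment can change two workflows at once, which is why both are refreshed. A restatement of the two calls above, annotated with the student names used in this commit's tests:

    # After Bob assesses Sally's submission:
    #   * Sally's workflow may now have enough assessments of her
    #     submission to satisfy must_be_graded_by, and
    #   * Bob's workflow may now satisfy his must_grade quota.
    self.update_workflow_status(data["submission_uuid"])  # Sally's workflow
    self.update_workflow_status(self.submission_uuid)     # Bob's workflow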
...@@ -2,6 +2,7 @@ from xblock.core import XBlock
 from submissions import api
 from django.utils.translation import ugettext as _
 from openassessment.peer import api as peer_api
+from openassessment.workflow import api as workflow_api


 class SubmissionMixin(object):
...@@ -54,16 +55,16 @@ class SubmissionMixin(object):
         if not prev_sub:
             status_tag = 'ENODATA'
             try:
-                response = api.create_submission(student_item_dict, student_sub)
-            except api.SubmissionRequestError, e:
+                submission = self.create_submission(student_item_dict, student_sub)
+            except api.SubmissionRequestError as err:
                 status_tag = 'EBADFORM'
-                status_text = unicode(e.field_errors)
-            except api.SubmissionError:
+                status_text = unicode(err.field_errors)
+            except (api.SubmissionError, workflow_api.AssessmentWorkflowError):
                 status_tag = 'EUNKNOWN'
             else:
                 status = True
-                status_tag = response.get('student_item')
-                status_text = response.get('attempt_number')
+                status_tag = submission.get('student_item')
+                status_text = submission.get('attempt_number')
         # relies on success being orthogonal to errors
         status_text = status_text if status_text else self.submit_errors[status_tag]
...@@ -93,6 +94,12 @@ class SubmissionMixin(object):
         else:
             return {'success': False, 'msg': _(u"Missing required key 'submission'")}

+    def create_submission(self, student_item_dict, student_sub):
+        submission = api.create_submission(student_item_dict, student_sub)
+        workflow = workflow_api.create_workflow(submission["uuid"])
+        self.submission_uuid = submission["uuid"]
+        return submission
+
     @staticmethod
     def _get_submission_score(student_item_dict):
         """Return the most recent score, if any, for student item
......
...@@ -4,8 +4,8 @@ Tests for grade handlers in Open Assessment XBlock.
 """
 import copy
 import json

-from submissions import api as submission_api
 from openassessment.peer import api as peer_api
+from submissions import api as sub_api

 from .base import XBlockHandlerTestCase, scenario
...@@ -29,14 +29,14 @@ class TestGrade(XBlockHandlerTestCase):
         # Create a submission from the user
         student_item = xblock.get_student_item_dict()
-        submission = submission_api.create_submission(student_item, self.SUBMISSION)
+        submission = xblock.create_submission(student_item, self.SUBMISSION)

         scorer_submissions = []
         for scorer_name, assessment in zip(['McNulty', 'Freamon'], self.ASSESSMENTS):
             # Create a submission for each scorer
             scorer = copy.deepcopy(student_item)
             scorer['student_id'] = scorer_name
-            scorer_sub = submission_api.create_submission(scorer, self.SUBMISSION)
+            scorer_sub = sub_api.create_submission(scorer, self.SUBMISSION)

             # Store the scorer's submission so our user can assess it later
             scorer_submissions.append(scorer_sub)
......
...@@ -5,7 +5,6 @@ Tests for peer assessment handlers in Open Assessment XBlock.
 import copy
 import json

-from submissions import api as submission_api
 from openassessment.peer import api as peer_api

 from .base import XBlockHandlerTestCase, scenario
...@@ -26,12 +25,12 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         # Create a submission for this problem from another user
         student_item = xblock.get_student_item_dict()
         student_item['student_id'] = 'Sally'
-        submission = submission_api.create_submission(student_item, self.SUBMISSION)
+        submission = xblock.create_submission(student_item, self.SUBMISSION)

         # Create a submission for the scorer (required before assessing another student)
         another_student = copy.deepcopy(student_item)
         another_student['student_id'] = "Bob"
-        submission_api.create_submission(another_student, self.SUBMISSION)
+        xblock.create_submission(another_student, self.SUBMISSION)

         # Submit an assessment and expect a successful response
         assessment = copy.deepcopy(self.ASSESSMENT)
...@@ -63,12 +62,12 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         # Create a submission for this problem from another user
         student_item = xblock.get_student_item_dict()
         student_item['student_id'] = 'Sally'
-        submission = submission_api.create_submission(student_item, self.SUBMISSION)
+        submission = xblock.create_submission(student_item, self.SUBMISSION)

         # Create a submission for the scorer (required before assessing another student)
         another_student = copy.deepcopy(student_item)
         another_student['student_id'] = "Bob"
-        submission_api.create_submission(another_student, self.SUBMISSION)
+        xblock.create_submission(another_student, self.SUBMISSION)

         # Submit an assessment, but mutate the options selected so they do NOT match the rubric
         assessment = copy.deepcopy(self.ASSESSMENT)
......
from xblock.core import XBlock
from openassessment.workflow import api as workflow_api


class WorkflowMixin(object):

    @XBlock.json_handler
    def handle_workflow_info(self, data, suffix=''):
        if not self.submission_uuid:
            return None
        return workflow_api.get_workflow_for_submission(
            self.submission_uuid, self.workflow_requirements()
        )

    def workflow_requirements(self):
        assessment_ui_model = self.get_assessment_module('peer-assessment')
        return {
            "peer": {
                "must_grade": assessment_ui_model["must_grade"],
                "must_be_graded_by": assessment_ui_model["must_be_graded_by"]
            }
        }

    def get_workflow_info(self):
        if not self.submission_uuid:
            return {}
        return workflow_api.get_workflow_for_submission(
            self.submission_uuid, self.workflow_requirements()
        )
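Other mixins consume this one the way render_grade does earlier in this commit. A short sketch of the expected pattern; the status strings come from AssessmentWorkflow.STATUS:

    workflow = self.get_workflow_info()  # {} until the student has submitted
    status = workflow.get("status")
    if status == "done":
        score = workflow["score"]  # has points_earned and points_possible
    elif status == "waiting":
        pass  # finished assessing peers; awaiting assessments of their work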
...@@ -4,8 +4,8 @@ from submissions.models import Score, StudentItem, Submission
 class SubmissionAdmin(admin.ModelAdmin):
     list_display = (
-        'student_item', 'attempt_number', 'submitted_at', 'created_at', 'answer',
-        'scores'
+        'student_item', 'uuid', 'attempt_number', 'submitted_at', 'created_at',
+        'answer', 'scores'
     )

     def scores(self, obj):
......
...@@ -141,6 +141,7 @@ def create_submission(student_item_dict, answer, submitted_at=None,
         if not submission_serializer.is_valid():
             raise SubmissionRequestError(submission_serializer.errors)
         submission_serializer.save()
         return submission_serializer.data
     except DatabaseError:
         error_message = u"An error occurred while creating submission {} for student item: {}".format(
...@@ -150,6 +151,46 @@ def create_submission(student_item_dict, answer, submitted_at=None,
         logger.exception(error_message)
         raise SubmissionInternalError(error_message)
+def get_submission(submission_uuid):
+    """Retrieves a single submission by uuid.
+
+    Args:
+        submission_uuid (str): Identifier for the submission.
+
+    Raises:
+        SubmissionNotFoundError: Raised if the submission does not exist.
+        SubmissionRequestError: Raised if the search parameter is not a string.
+        SubmissionInternalError: Raised for unknown errors.
+
+    Examples:
+        >>> get_submission("20b78e0f32df805d21064fc912f40e9ae5ab260d")
+        {
+            'student_item': 2,
+            'attempt_number': 1,
+            'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
+            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
+            'answer': u'The answer is 42.'
+        }
+
+    """
+    if not isinstance(submission_uuid, basestring):
+        raise SubmissionRequestError(
+            "submission_uuid ({!r}) must be a string type".format(submission_uuid)
+        )
+
+    try:
+        submission = Submission.objects.get(uuid=submission_uuid)
+    except Submission.DoesNotExist:
+        raise SubmissionNotFoundError(
+            u"No submission matching uuid {}".format(submission_uuid)
+        )
+    except Exception as exc:
+        # Something very unexpected has just happened (like a DB misconfig)
+        err_msg = "Could not get submission due to error: {}".format(exc)
+        logger.exception(err_msg)
+        raise SubmissionInternalError(err_msg)
+
+    return SubmissionSerializer(submission).data
 def get_submissions(student_item_dict, limit=None):
     """Retrieves the submissions for the specified student item,
...@@ -262,22 +303,31 @@ def get_score(student_item):
     return ScoreSerializer(scores, many=True).data
+def get_latest_score_for_submission(submission_uuid):
+    try:
+        submission = Submission.objects.get(uuid=submission_uuid)
+        score = Score.objects.filter(submission=submission).order_by("-id")[0]
+    except IndexError:
+        return None
+    except Submission.DoesNotExist:
+        raise SubmissionNotFoundError(
+            u"No submission matching uuid {}".format(submission_uuid)
+        )
+
+    return ScoreSerializer(score).data
 def get_scores(course_id, student_id, types=None):
     pass


-def set_score(student_item, submission, score, points_possible):
-    """Set a score for a particular student item, submission pair.
-
-    Sets the score for a particular student item and submission pair. This score
-    is calculated externally to the API.
+def set_score(submission_uuid, score, points_possible):
+    """Set a score for a particular submission.
+
+    Sets the score for a particular submission. This score is calculated
+    externally to the API.

     Args:
-        student_item (dict): The student item associated with this score. This
-            dictionary must contain a course_id, student_id, and item_id.
-        submission (dict): The submission associated with this score. This
-            dictionary must contain all submission fields to properly get a
-            unique submission item.
+        submission_uuid (str): UUID for the submission (must exist).
         score (int): The score to associate with the given submission and
             student item.
         points_possible (int): The total points possible for this particular
...@@ -293,21 +343,7 @@ def set_score(student_item, submission, score, points_possible):
             are not found.

     Examples:
-        >>> student_item_dict = dict(
-        >>>     student_id="Tim",
-        >>>     item_id="item_1",
-        >>>     course_id="course_1",
-        >>>     item_type="type_one"
-        >>> )
-        >>>
-        >>> submission_dict = dict(
-        >>>     student_item=2,
-        >>>     attempt_number=1,
-        >>>     submitted_at=datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
-        >>>     created_at=datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
-        >>>     answer=u'The answer is 42.'
-        >>> )
-        >>> set_score(student_item_dict, submission_dict, 11, 12)
+        >>> set_score("a778b933-9fb3-11e3-9c0f-040ccee02800", 11, 12)
         {
             'student_item': 2,
             'submission': 1,
...@@ -318,8 +354,11 @@ def set_score(student_item, submission, score, points_possible):
""" """
try: try:
student_item_model = StudentItem.objects.get(**student_item) submission_model = Submission.objects.get(uuid=submission_uuid)
submission_model = Submission.objects.get(**submission) except Submission.DoesNotExist:
raise SubmissionNotFoundError(
u"No submission matching uuid {}".format(submission_uuid)
)
except DatabaseError: except DatabaseError:
error_msg = u"Could not retrieve student item: {} or submission {}.".format( error_msg = u"Could not retrieve student item: {} or submission {}.".format(
student_item, submission student_item, submission
...@@ -329,7 +368,7 @@ def set_score(student_item, submission, score, points_possible): ...@@ -329,7 +368,7 @@ def set_score(student_item, submission, score, points_possible):
score = ScoreSerializer( score = ScoreSerializer(
data={ data={
"student_item": student_item_model.pk, "student_item": submission_model.student_item.pk,
"submission": submission_model.pk, "submission": submission_model.pk,
"points_earned": score, "points_earned": score,
"points_possible": points_possible, "points_possible": points_possible,
......
...@@ -56,7 +56,7 @@ class Submission(models.Model):
     because it makes caching trivial.
     """
-    uuid = UUIDField()
+    uuid = UUIDField(version=1, db_index=True)
     student_item = models.ForeignKey(StudentItem)
......
...@@ -31,7 +31,7 @@ ANSWER_TWO = u"this is my other answer!"
 @ddt
-class TestApi(TestCase):
+class TestSubmissionsApi(TestCase):
     """
     Testing Submissions
...@@ -49,6 +49,31 @@ class TestApi(TestCase):
         self._assert_submission(submissions[1], ANSWER_ONE, 1, 1)
         self._assert_submission(submissions[0], ANSWER_TWO, 1, 2)
+    def test_get_submission(self):
+        # Test base case that we can create a submission and get it back
+        sub_dict1 = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
+        sub_dict2 = api.get_submission(sub_dict1["uuid"])
+        self.assertEqual(sub_dict1, sub_dict2)
+
+        # Test invalid inputs
+        with self.assertRaises(api.SubmissionRequestError):
+            api.get_submission(20)
+        with self.assertRaises(api.SubmissionRequestError):
+            api.get_submission({})
+
+        # Test not found
+        with self.assertRaises(api.SubmissionNotFoundError):
+            api.get_submission("not a real uuid")
+        with self.assertRaises(api.SubmissionNotFoundError):
+            api.get_submission("0" * 50)  # This is bigger than our field size
+
+    @patch.object(Submission.objects, 'get')
+    @raises(api.SubmissionInternalError)
+    def test_get_submission_deep_error(self, mock_get):
+        # Test deep explosions are wrapped
+        mock_get.side_effect = DatabaseError("Kaboom!")
+        api.get_submission("000000000000000")
     def test_two_students(self):
         api.create_submission(STUDENT_ITEM, ANSWER_ONE)
         api.create_submission(SECOND_STUDENT_ITEM, ANSWER_TWO)
...@@ -127,12 +152,12 @@ class TestApi(TestCase):
         submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
         self._assert_submission(submission, ANSWER_ONE, 1, 1)

-        score = api.set_score(STUDENT_ITEM, submission, 11, 12)
+        score = api.set_score(submission["uuid"], 11, 12)
         self._assert_score(score, 11, 12)

     def test_get_score(self):
         submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
-        api.set_score(STUDENT_ITEM, submission, 11, 12)
+        api.set_score(submission["uuid"], 11, 12)
         scores = api.get_score(STUDENT_ITEM)
         self._assert_score(scores[0], 11, 12)
         self.assertEqual(scores[0]['submission_uuid'], submission['uuid'])
......
...@@ -6,6 +6,7 @@ git+https://github.com/ormsbee/xblock-sdk.git@4f62e508#egg=xblock-sdk
 defusedxml==0.4.1
 django==1.4.8
 django-extensions==1.2.5
+django-model-utils==1.4.0
 djangorestframework==2.3.5
 Mako==0.9.1
 python-dateutil==2.1
......
...@@ -36,7 +36,7 @@ DATABASES = {
 # timezone as the operating system.
 # If running in a Windows environment this must be set to the same as your
 # system time zone.
-TIME_ZONE = 'America/Chicago'
+TIME_ZONE = 'America/New_York'

 # Language code for this installation. All choices can be found here:
 # http://www.i18nguy.com/unicode/language-identifiers.html
...@@ -139,8 +139,9 @@ INSTALLED_APPS = (
     # edx-tim apps
     'submissions',
-    'openassessment.peer',
     'openassessment',
+    'openassessment.peer',
+    'openassessment.workflow',
 )

 # A sample logging configuration. The only tangible logging
......
...@@ -5,7 +5,12 @@ Test-specific Django settings.
 # Inherit from base settings
 from .base import *

-TEST_APPS = ('openassessment.peer', 'submissions', "openassessment.xblock")
+TEST_APPS = (
+    'openassessment.peer',
+    'openassessment.workflow',
+    'openassessment.xblock',
+    'submissions',
+)

 # Configure nose
 NOSE_ARGS = [
......