Commit a7293f32 by Will Daly

Merge pull request #92 from edx/will/self-assessment-workflow

Check for self-assessment completion
parents e10f5914 42833184
@@ -144,6 +144,16 @@ def get_submission_and_assessment(student_item_dict):
     return (submissions[0], None)


-# TODO: fill in this stub
 def is_complete(submission_uuid):
-    return True
+    """
+    Check whether a self-assessment has been completed for a submission.
+
+    Args:
+        submission_uuid (str): The unique identifier of the submission.
+
+    Returns:
+        bool
+    """
+    return Assessment.objects.filter(
+        score_type=SELF_TYPE, submission__uuid=submission_uuid
+    ).exists()
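
For callers outside the assessment app, this is reachable through the public `self_api` module (as the XBlock tests below import it). A minimal usage sketch, assuming a configured Django environment with the openassessment apps installed; the UUID value is a hypothetical placeholder:

    # Usage sketch: check completion through the public self-assessment API.
    # Assumes Django settings are configured and openassessment is on the path;
    # the UUID below is a hypothetical placeholder.
    from openassessment.assessment import self_api

    def self_assessment_done(submission_uuid):
        # True only if an Assessment with the self-assessment score type
        # exists for this submission.
        return self_api.is_complete(submission_uuid)

    print(self_assessment_done('hypothetical-submission-uuid'))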
@@ -217,9 +217,11 @@ class TestPeerApi(TestCase):
         )

         # Tim has met the criteria, and should now have a score.
-        score = workflow_api.get_workflow_for_submission(
-            tim["uuid"], requirements
-        )["score"]
+        # We patch the call to `self_api.is_complete()` to simulate having completed a self-assessment.
+        with patch('openassessment.workflow.models.self_api.is_complete') as mock_complete:
+            mock_complete.return_value = True
+            score = workflow_api.get_workflow_for_submission(tim["uuid"], requirements)["score"]
         self.assertEqual(score["points_earned"], 6)
         self.assertEqual(score["points_possible"], 14)
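
The patch target is worth noting: `patch` must replace the name where it is looked up, which here is the `self_api` reference used inside `openassessment.workflow.models`, not the definition site in `openassessment.assessment.self_api`. A standalone sketch of the same pattern, assuming the openassessment package is importable:

    # Sketch of the mocking pattern used in the test above.
    from mock import patch  # `from unittest.mock import patch` on Python 3

    with patch('openassessment.workflow.models.self_api.is_complete') as mock_complete:
        # Anything in this block that reaches self_api.is_complete() through
        # the workflow models module now sees the stubbed return value.
        mock_complete.return_value = True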
@@ -9,7 +9,7 @@ import pytz
 from django.test import TestCase
 from submissions.api import create_submission
 from openassessment.assessment.self_api import (
-    create_assessment, get_submission_and_assessment,
+    create_assessment, get_submission_and_assessment, is_complete,
     SelfAssessmentRequestError
 )

@@ -62,6 +62,7 @@ class TestSelfApi(TestCase):
         received_submission, assessment = get_submission_and_assessment(self.STUDENT_ITEM)
         self.assertItemsEqual(received_submission, submission)
         self.assertIs(assessment, None)
+        self.assertFalse(is_complete(submission['uuid']))

         # Create a self-assessment for the submission
         assessment = create_assessment(

@@ -70,6 +71,9 @@ class TestSelfApi(TestCase):
             scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
         )

+        # Self-assessment should be complete
+        self.assertTrue(is_complete(submission['uuid']))
+
         # Retrieve the self-assessment
         received_submission, retrieved = get_submission_and_assessment(self.STUDENT_ITEM)
         self.assertItemsEqual(received_submission, submission)

@@ -194,3 +198,7 @@ class TestSelfApi(TestCase):
         # Expect that we still have the original assessment
         _, retrieved = get_submission_and_assessment(self.STUDENT_ITEM)
         self.assertItemsEqual(assessment, retrieved)
+
+    def test_is_complete_no_submission(self):
+        # This submission uuid does not exist
+        self.assertFalse(is_complete('abc1234'))
\ No newline at end of file
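
The new negative test relies on `is_complete` degrading gracefully for unknown UUIDs: the filter simply matches no rows, so `.exists()` returns False rather than raising. A small illustration of that queryset behavior; the model and field names mirror the diff, but the import path is an assumption and the snippet presumes a configured test database:

    # Sketch: filtering on a UUID that matches nothing yields an empty
    # queryset, so .exists() is False and no exception is raised.
    from openassessment.assessment.models import Assessment

    queryset = Assessment.objects.filter(submission__uuid='abc1234')
    assert not queryset.exists()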
@@ -4,7 +4,7 @@ Tests for grade handlers in Open Assessment XBlock.
 """
 import copy
 import json
-from openassessment.assessment import peer_api
+from openassessment.assessment import peer_api, self_api
 from submissions import api as sub_api
 from .base import XBlockHandlerTestCase, scenario

@@ -48,12 +48,19 @@ class TestGrade(XBlockHandlerTestCase):
         )

         # Have our user make assessments (so she can get a score)
-        for submission in scorer_submissions:
+        for scorer_sub in scorer_submissions:
             peer_api.create_assessment(
-                submission['uuid'], 'Greggs', 2, 2,
+                scorer_sub['uuid'], 'Greggs', 2, 2,
                 self.ASSESSMENTS[0], {'criteria': xblock.rubric_criteria}
             )

+        # Have the user submit a self-assessment (so she can get a score)
+        self_api.create_assessment(
+            submission['uuid'], 'Greggs',
+            self.ASSESSMENTS[0]['options_selected'],
+            {'criteria': xblock.rubric_criteria}
+        )
+
         # Render the view
         resp = self.request(xblock, 'render_grade', json.dumps(dict()))
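
The rename of the loop variable from `submission` to `scorer_sub` keeps the outer `submission` (the learner's own submission, created earlier in the test) intact, so the new `self_api.create_assessment(submission['uuid'], ...)` call after the loop targets the intended submission. A generic illustration of the shadowing pitfall the rename avoids, using hypothetical data:

    # Illustration of the shadowing pitfall (hypothetical data).
    submission = {'uuid': 'learner-uuid'}
    scorer_submissions = [{'uuid': 'scorer-1'}, {'uuid': 'scorer-2'}]

    for submission in scorer_submissions:  # rebinds the outer name
        pass
    print(submission['uuid'])  # 'scorer-2', not 'learner-uuid'

    # With a distinct loop variable, the outer binding survives:
    submission = {'uuid': 'learner-uuid'}
    for scorer_sub in scorer_submissions:
        pass
    print(submission['uuid'])  # 'learner-uuid'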