Commit 00efb1f8 by Stephen Sanchez

Merge pull request #163 from edx/sanchez/TIM-277-track-scored-assessments

Setting additional metadata for completing a peer workflow
parents ad1a8f86 db615fab
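Note: beyond the schema change, this merge also revises several public call sites. A minimal sketch of the updated calls, taken from the tests and XBlock mixins further down in this diff (the UUID and argument values are illustrative only):

from openassessment.assessment import peer_api
from openassessment.assessment import self_api

submission_uuid = "0a600160-be7f-429d-a853-1283d49205e7"  # illustrative UUID

# Peer assessments can now be filtered on whether they counted toward the final score.
assessments = peer_api.get_assessments(submission_uuid, scored_only=False)

# Median scores no longer take a must_be_graded_by argument.
median_scores = peer_api.get_assessment_median_scores(submission_uuid)

# Assessment feedback no longer takes a must_grade argument.
peer_api.set_assessment_feedback({
    'submission_uuid': submission_uuid,
    'helpfulness': 0,
    'feedback': 'Feedback text',
})

# Self-assessment lookup now returns only the assessment, not a (submission, assessment) tuple.
assessment = self_api.get_assessment(submission_uuid)

The first file below is the new South schema migration that adds the two tracking columns.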
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models

class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding field 'PeerWorkflow.completed_at'
        db.add_column('assessment_peerworkflow', 'completed_at',
                      self.gf('django.db.models.fields.DateTimeField')(null=True),
                      keep_default=False)

        # Adding field 'PeerWorkflowItem.scored'
        db.add_column('assessment_peerworkflowitem', 'scored',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'PeerWorkflow.completed_at'
        db.delete_column('assessment_peerworkflow', 'completed_at')

        # Deleting field 'PeerWorkflowItem.scored'
        db.delete_column('assessment_peerworkflowitem', 'scored')

    models = {
        'assessment.assessment': {
            'Meta': {'ordering': "['-scored_at', '-id']", 'object_name': 'Assessment'},
            'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"}),
            'score_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'scored_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'scorer_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
        },
        'assessment.assessmentfeedback': {
            'Meta': {'object_name': 'AssessmentFeedback'},
            'assessments': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.Assessment']"}),
            'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
        },
        'assessment.assessmentpart': {
            'Meta': {'object_name': 'AssessmentPart'},
            'assessment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parts'", 'to': "orm['assessment.Assessment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.CriterionOption']"})
        },
        'assessment.criterion': {
            'Meta': {'ordering': "['rubric', 'order_num']", 'object_name': 'Criterion'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'prompt': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
            'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'criteria'", 'to': "orm['assessment.Rubric']"})
        },
        'assessment.criterionoption': {
            'Meta': {'ordering': "['criterion', 'order_num']", 'object_name': 'CriterionOption'},
            'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['assessment.Criterion']"}),
            'explanation': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'points': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        'assessment.peerworkflow': {
            'Meta': {'ordering': "['created_at', 'id']", 'object_name': 'PeerWorkflow'},
            'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
        },
        'assessment.peerworkflowitem': {
            'Meta': {'ordering': "['started_at', 'id']", 'object_name': 'PeerWorkflowItem'},
            'assessment': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'scored': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'scorer_id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['assessment.PeerWorkflow']"}),
            'started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
        },
        'assessment.rubric': {
            'Meta': {'object_name': 'Rubric'},
            'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        }
    }

    complete_apps = ['assessment']
\ No newline at end of file
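The migration above uses South (the pre-Django-1.7 migration tool). For readers more familiar with built-in Django migrations, the same two-column change would look roughly like the sketch below; this is not part of the commit, and the dependency name is hypothetical:

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        # Hypothetical name for the preceding migration in this app.
        ('assessment', '0003_previous'),
    ]

    operations = [
        migrations.AddField(
            model_name='peerworkflow',
            name='completed_at',
            field=models.DateTimeField(null=True),
        ),
        migrations.AddField(
            model_name='peerworkflowitem',
            name='scored',
            field=models.BooleanField(default=False),
        ),
    ]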
@@ -307,7 +307,7 @@ class Assessment(models.Model):
return median_score
@classmethod
def scores_by_criterion(cls, submission_uuid, must_be_graded_by):
def scores_by_criterion(cls, assessments):
"""Create a dictionary of lists for scores associated with criterion
Create a key value in a dict with a list of values, for every criterion
@@ -318,18 +318,17 @@ class Assessment(models.Model):
of scores.
Args:
submission_uuid (str): Obtain assessments associated with this submission.
must_be_graded_by (int): The number of assessments to include in this score analysis.
assessments (list): List of assessments to sort scores by their
associated criteria.
Examples:
>>> Assessment.scores_by_criterion('abcd', 3)
>>> assessments = Assessment.objects.all()
>>> Assessment.scores_by_criterion(assessments)
{
"foo": [1, 2, 3],
"bar": [6, 7, 8]
}
"""
assessments = cls.objects.filter(submission_uuid=submission_uuid).order_by("scored_at")[:must_be_graded_by]
scores = defaultdict(list)
for assessment in assessments:
for part in assessment.parts.all():
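The hunk above is truncated after the inner loop. Given the rubric models frozen in the migration earlier in this commit (AssessmentPart -> CriterionOption -> Criterion), the loop presumably accumulates each part's option points under its criterion name, along these lines (a sketch, not the verbatim file contents):

# defaultdict is already imported from collections in this module.
scores = defaultdict(list)
for assessment in assessments:
    for part in assessment.parts.all():
        criterion_name = part.option.criterion.name
        scores[criterion_name].append(part.option.points)
return scores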
@@ -401,6 +400,7 @@ class PeerWorkflow(models.Model):
course_id = models.CharField(max_length=40, db_index=True)
submission_uuid = models.CharField(max_length=128, db_index=True, unique=True)
created_at = models.DateTimeField(default=now, db_index=True)
completed_at = models.DateTimeField(null=True)
class Meta:
ordering = ["created_at", "id"]
@@ -433,6 +433,18 @@ class PeerWorkflowItem(models.Model):
started_at = models.DateTimeField(default=now, db_index=True)
assessment = models.IntegerField(default=-1)
# This WorkflowItem was used to determine the final score for the Workflow.
scored = models.BooleanField(default=False)
@classmethod
def get_scored_assessments(cls, submission_uuid):
workflow_items = PeerWorkflowItem.objects.filter(
submission_uuid=submission_uuid, scored=True
)
return Assessment.objects.filter(
pk__in=[item.assessment for item in workflow_items]
)
class Meta:
ordering = ["started_at", "id"]
......
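A usage sketch for the new PeerWorkflowItem.get_scored_assessments classmethod: it returns the Assessment rows that were actually counted toward a submission's final peer score (the UUID below is illustrative):

from openassessment.assessment.models import PeerWorkflowItem

scored = PeerWorkflowItem.get_scored_assessments(
    "0a600160-be7f-429d-a853-1283d49205e7"  # illustrative submission UUID
)
for assessment in scored:
    print("{} scored at {}".format(assessment.scorer_id, assessment.scored_at))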
@@ -92,35 +92,22 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s
return serializer.data
def get_submission_and_assessment(submission_uuid):
def get_assessment(submission_uuid):
"""
Retrieve a submission and self-assessment for a student item.
Retrieve a self-assessment for a submission_uuid.
Args:
submission_uuid (str): The submission uuid for we want information for
submission_uuid (str): The UUID of the submission for which we want self-assessment information.
Returns:
A tuple `(submission, assessment)` where:
submission (dict) is a serialized Submission model, or None (if the user has not yet made a submission)
assessment (dict) is a serialized Assessment model, or None (if the user has not yet self-assessed)
assessment (dict) is a serialized Assessment model, or None (if the user has not yet self-assessed)
If multiple submissions or self-assessments are found, returns the most recent one.
Raises:
SelfAssessmentRequestError: Student item dict was invalid.
SelfAssessmentRequestError: submission_uuid was invalid.
"""
# Look up the most recent submission from the student item
try:
submission = get_submission(submission_uuid)
if not submission:
return (None, None)
except SubmissionNotFoundError:
return (None, None)
except SubmissionRequestError:
raise SelfAssessmentRequestError(_('Could not retrieve submission'))
# Retrieve assessments for the submission
# Retrieve assessments for the submission UUID
# We weakly enforce that number of self-assessments per submission is <= 1,
# but not at the database level. Someone could take advantage of the race condition
# between checking the number of self-assessments and creating a new self-assessment.
@@ -131,9 +118,8 @@ def get_submission_and_assessment(submission_uuid):
if assessments.exists():
assessment_dict = full_assessment_dict(assessments[0])
return submission, assessment_dict
else:
return submission, None
return assessment_dict
return None
def is_complete(submission_uuid):
......
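Callers that previously unpacked the (submission, assessment) tuple from get_submission_and_assessment now fetch the submission separately through the submissions API; a sketch of the migrated pattern, mirroring the XBlock mixin changes later in this diff (submission_uuid is illustrative):

from openassessment.assessment import self_api
from submissions import api as submission_api

submission_uuid = "0a600160-be7f-429d-a853-1283d49205e7"  # illustrative UUID

try:
    submission = submission_api.get_submission(submission_uuid)
    assessment = self_api.get_assessment(submission_uuid)
except (submission_api.SubmissionError, self_api.SelfAssessmentRequestError):
    # The mixins below render an error message in this case.
    submission, assessment = None, None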
@@ -8,8 +8,8 @@ from copy import deepcopy
from django.utils.translation import ugettext as _
from rest_framework import serializers
from openassessment.assessment.models import (
Assessment, AssessmentFeedback, AssessmentPart, Criterion, CriterionOption, Rubric
)
Assessment, AssessmentFeedback, AssessmentPart, Criterion, CriterionOption, Rubric,
PeerWorkflowItem, PeerWorkflow)
class InvalidRubric(Exception):
@@ -132,61 +132,6 @@ class AssessmentSerializer(serializers.ModelSerializer):
)
def get_assessment_review(submission_uuid):
"""Get all information pertaining to an assessment for review.
Given an assessment serializer, return a serializable formatted model of
the assessment, all assessment parts, all criterion options, and the
associated rubric.
Args:
submission_uuid (str): The UUID of the submission whose assessment reviews we want to retrieve.
Returns:
(list): A list of assessment reviews, combining assessments with
rubrics and assessment parts, to allow a cohesive object for
rendering the complete peer grading workflow.
Examples:
>>> get_assessment_review(submission, score_type)
[{
'submission': 1,
'rubric': {
'id': 1,
'content_hash': u'45cc932c4da12a1c2b929018cd6f0785c1f8bc07',
'criteria': [{
'order_num': 0,
'name': u'Spelling',
'prompt': u'Did the student have spelling errors?',
'options': [{
'order_num': 0,
'points': 2,
'name': u'No spelling errors',
'explanation': u'No spelling errors were found in this submission.',
}]
}]
},
'scored_at': datetime.datetime(2014, 2, 25, 19, 50, 7, 290464, tzinfo=<UTC>),
'scorer_id': u'Bob',
'score_type': u'PE',
'parts': [{
'option': {
'order_num': 0,
'points': 2,
'name': u'No spelling errors',
'explanation': u'No spelling errors were found in this submission.'}
}],
'submission_uuid': u'0a600160-be7f-429d-a853-1283d49205e7',
'points_earned': 9,
'points_possible': 20,
}]
"""
return [
full_assessment_dict(assessment)
for assessment in Assessment.objects.filter(submission_uuid=submission_uuid)
]
def full_assessment_dict(assessment):
"""
Return a dict representation of the Assessment model,
@@ -278,10 +223,41 @@ class AssessmentFeedbackSerializer(serializers.ModelSerializer):
class Meta:
model = AssessmentFeedback
fields = ('submission_uuid', 'helpfulness', 'feedback', 'assessments',)
class PeerWorkflowSerializer(serializers.ModelSerializer):
"""Representation of the PeerWorkflow.
A PeerWorkflow should not be exposed to the front end of any question. This
model should only be exposed externally for administrative views, in order
to visualize the Peer Workflow.
"""
class Meta:
model = PeerWorkflow
fields = (
'student_id',
'item_id',
'course_id',
'submission_uuid',
'created_at',
'completed_at'
)
class PeerWorkflowItemSerializer(serializers.ModelSerializer):
"""Representation of the PeerWorkflowItem
As with the PeerWorkflow, this should not be exposed to the front end. This
should only be used to visualize the Peer Workflow in an administrative
view.
"""
class Meta:
model = PeerWorkflowItem
fields = (
'scorer_id', 'submission_uuid', 'started_at', 'assessment', 'scored'
)
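Per the docstrings above, these serializers are meant only for administrative visualization of the peer workflow. A usage sketch, assuming the serializers live in openassessment.assessment.serializers and that a hypothetical admin/debug view already has a submission UUID in hand:

from openassessment.assessment.models import PeerWorkflow
from openassessment.assessment.serializers import (
    PeerWorkflowSerializer, PeerWorkflowItemSerializer
)

workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)  # illustrative lookup
workflow_data = PeerWorkflowSerializer(workflow).data
# 'items' is the related_name on the PeerWorkflowItem.scorer_id foreign key (see the frozen models above).
item_data = PeerWorkflowItemSerializer(workflow.items.all(), many=True).data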
@@ -10,7 +10,7 @@ from mock import patch
from nose.tools import raises
from openassessment.assessment import peer_api
from openassessment.assessment.models import Assessment, PeerWorkflow, PeerWorkflowItem
from openassessment.assessment.models import Assessment, PeerWorkflow, PeerWorkflowItem, AssessmentFeedback
from openassessment.workflow import api as workflow_api
from submissions import api as sub_api
from submissions.models import Submission
@@ -136,7 +136,7 @@ class TestPeerApi(TestCase):
assessment_dict,
RUBRIC_DICT,
)
assessments = peer_api.get_assessments(sub["uuid"])
assessments = peer_api.get_assessments(sub["uuid"], scored_only=False)
self.assertEqual(1, len(assessments))
@file_data('valid_assessments.json')
@@ -151,7 +151,7 @@ class TestPeerApi(TestCase):
RUBRIC_DICT,
MONDAY
)
assessments = peer_api.get_assessments(sub["uuid"])
assessments = peer_api.get_assessments(sub["uuid"], scored_only=False)
self.assertEqual(1, len(assessments))
self.assertEqual(assessments[0]["scored_at"], MONDAY)
@@ -480,6 +480,45 @@ class TestPeerApi(TestCase):
submission_uuid = peer_api._get_submission_for_over_grading(xander_workflow)
self.assertEqual(buffy_answer["uuid"], submission_uuid)
def test_create_assessment_feedback(self):
tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
sub = peer_api.get_submission_to_assess(bob, 1)
assessment = peer_api.create_assessment(
sub["uuid"],
bob["student_id"],
ASSESSMENT_DICT,
RUBRIC_DICT,
)
sub = peer_api.get_submission_to_assess(tim, 1)
peer_api.create_assessment(
sub["uuid"],
tim["student_id"],
ASSESSMENT_DICT,
RUBRIC_DICT,
)
peer_api.get_score(
tim_sub["uuid"],
{
'must_grade': 1,
'must_be_graded_by': 1
}
)
feedback = peer_api.get_assessment_feedback(tim_sub['uuid'])
self.assertIsNone(feedback)
feedback = peer_api.set_assessment_feedback(
{
'submission_uuid': tim_sub['uuid'],
'helpfulness': 0,
'feedback': 'Bob is a jerk!'
}
)
self.assertIsNotNone(feedback)
self.assertEquals(feedback["assessments"][0]["submission_uuid"], assessment["submission_uuid"])
saved_feedback = peer_api.get_assessment_feedback(tim_sub['uuid'])
self.assertEquals(feedback, saved_feedback)
def test_close_active_assessment(self):
buffy_answer, buffy = self._create_student_and_submission("Buffy", "Buffy's answer")
xander_answer, xander = self._create_student_and_submission("Xander", "Xander's answer")
@@ -512,6 +551,33 @@ class TestPeerApi(TestCase):
mock_filter.side_effect = DatabaseError("Oh no.")
peer_api._get_submission_for_review(tim_workflow, 3)
@patch.object(AssessmentFeedback.objects, 'get')
@raises(peer_api.PeerAssessmentInternalError)
def test_get_assessment_feedback_error(self, mock_filter):
mock_filter.side_effect = DatabaseError("Oh no.")
tim_answer, tim = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
peer_api.get_assessment_feedback(tim_answer['uuid'])
@patch.object(PeerWorkflowItem, 'get_scored_assessments')
@raises(peer_api.PeerAssessmentInternalError)
def test_set_assessment_feedback_error(self, mock_filter):
mock_filter.side_effect = DatabaseError("Oh no.")
tim_answer, tim = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
peer_api.set_assessment_feedback({'submission_uuid': tim_answer['uuid']})
@patch.object(AssessmentFeedback, 'save')
@raises(peer_api.PeerAssessmentInternalError)
def test_set_assessment_feedback_error_on_save(self, mock_filter):
mock_filter.side_effect = DatabaseError("Oh no.")
tim_answer, tim = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
peer_api.set_assessment_feedback(
{
'submission_uuid': tim_answer['uuid'],
'helpfulness': 0,
'feedback': 'Boo',
}
)
@patch.object(PeerWorkflow.objects, 'filter')
@raises(peer_api.PeerAssessmentWorkflowError)
def test_failure_to_get_latest_workflow(self, mock_filter):
@@ -581,7 +647,7 @@ class TestPeerApi(TestCase):
def test_median_score_db_error(self, mock_filter):
mock_filter.side_effect = DatabaseError("Bad things happened")
tim, _ = self._create_student_and_submission("Tim", "Tim's answer")
peer_api.get_assessment_median_scores(tim["uuid"], 3)
peer_api.get_assessment_median_scores(tim["uuid"])
@patch.object(Assessment.objects, 'filter')
@raises(peer_api.PeerAssessmentInternalError)
......
@@ -9,8 +9,7 @@ import pytz
from django.test import TestCase
from submissions.api import create_submission
from openassessment.assessment.self_api import (
create_assessment, get_submission_and_assessment, is_complete,
SelfAssessmentRequestError
create_assessment, is_complete, SelfAssessmentRequestError, get_assessment
)
@@ -53,16 +52,13 @@ class TestSelfApi(TestCase):
def test_create_assessment(self):
# Initially, there should be no submission or self assessment
self.assertEqual(get_submission_and_assessment("5"), (None, None))
self.assertEqual(get_assessment("5"), None)
# Create a submission to self-assess
submission = create_submission(self.STUDENT_ITEM, "Test answer")
# Now there should be a submission, but no self-assessment
received_submission, assessment = get_submission_and_assessment(
submission["uuid"]
)
self.assertItemsEqual(received_submission, submission)
assessment = get_assessment(submission["uuid"])
self.assertIs(assessment, None)
self.assertFalse(is_complete(submission['uuid']))
@@ -77,10 +73,7 @@ class TestSelfApi(TestCase):
self.assertTrue(is_complete(submission['uuid']))
# Retrieve the self-assessment
received_submission, retrieved = get_submission_and_assessment(
submission["uuid"]
)
self.assertItemsEqual(received_submission, submission)
retrieved = get_assessment(submission["uuid"])
# Check that the assessment we created matches the assessment we retrieved
# and that both have the correct values
@@ -175,7 +168,7 @@ class TestSelfApi(TestCase):
)
# Retrieve the self-assessment
_, retrieved = get_submission_and_assessment(submission["uuid"])
retrieved = get_assessment(submission["uuid"])
# Expect that both the created and retrieved assessments have the same
# timestamp, and it's >= our recorded time.
@@ -200,7 +193,7 @@ class TestSelfApi(TestCase):
)
# Expect that we still have the original assessment
_, retrieved = get_submission_and_assessment(submission["uuid"])
retrieved = get_assessment(submission["uuid"])
self.assertItemsEqual(assessment, retrieved)
def test_is_complete_no_submission(self):
......
@@ -29,20 +29,17 @@ class CreateSubmissionsTest(TestCase):
self.assertGreater(len(submissions[0]['answer']), 0)
# Check that peer and self assessments were created
assessments = peer_api.get_assessments(submissions[0]['uuid'])
assessments = peer_api.get_assessments(submissions[0]['uuid'], scored_only=False)
# Verify that the assessments exist and have content
# TODO: currently peer_api.get_assessments() returns both peer- and self-assessments
# When this call gets split, we'll need to update the test
self.assertEqual(len(assessments), cmd.NUM_PEER_ASSESSMENTS + 1)
self.assertEqual(len(assessments), cmd.NUM_PEER_ASSESSMENTS)
for assessment in assessments:
self.assertGreater(assessment['points_possible'], 0)
# Check that a self-assessment was created
submission, assessment = self_api.get_submission_and_assessment(submissions[0]['uuid'])
assessment = self_api.get_assessment(submissions[0]['uuid'])
# Verify that the assessment exists and has content
self.assertIsNot(submission, None)
self.assertIsNot(assessment, None)
self.assertGreater(assessment['points_possible'], 0)
@@ -4,6 +4,8 @@ from django.utils.translation import ugettext as _
from xblock.core import XBlock
from openassessment.assessment import peer_api
from openassessment.assessment import self_api
from submissions import api as submission_api
class GradeMixin(object):
@@ -23,28 +25,24 @@ class GradeMixin(object):
status = workflow.get('status')
context = {}
if status == "done":
feedback = peer_api.get_assessment_feedback(self.submission_uuid)
feedback_text = feedback.get('feedback', '') if feedback else ''
max_scores = peer_api.get_rubric_max_scores(self.submission_uuid)
path = 'openassessmentblock/grade/oa_grade_complete.html'
assessment_ui_model = self.get_assessment_module('peer-assessment')
student_submission = self.get_user_submission(
workflow["submission_uuid"]
)
student_score = workflow["score"]
assessments = peer_api.get_assessments(student_submission["uuid"])
peer_assessments = []
self_assessment = None
for assessment in assessments:
if assessment["score_type"] == "PE":
peer_assessments.append(assessment)
else:
self_assessment = assessment
peer_assessments = peer_assessments[:assessment_ui_model["must_grade"]]
median_scores = peer_api.get_assessment_median_scores(
student_submission["uuid"],
assessment_ui_model["must_be_graded_by"]
)
try:
feedback = peer_api.get_assessment_feedback(self.submission_uuid)
feedback_text = feedback.get('feedback', '') if feedback else ''
max_scores = peer_api.get_rubric_max_scores(self.submission_uuid)
path = 'openassessmentblock/grade/oa_grade_complete.html'
student_submission = submission_api.get_submission(workflow["submission_uuid"])
student_score = workflow["score"]
peer_assessments = peer_api.get_assessments(student_submission["uuid"])
self_assessment = self_api.get_assessment(student_submission["uuid"])
median_scores = peer_api.get_assessment_median_scores(
student_submission["uuid"]
)
except (
submission_api.SubmissionError,
peer_api.PeerAssessmentError,
self_api.SelfAssessmentRequestError
):
return self.render_error(_(u"An unexpected error occurred."))
context["feedback_text"] = feedback_text
context["student_submission"] = student_submission
context["peer_assessments"] = peer_assessments
@@ -75,7 +73,6 @@ class GradeMixin(object):
@XBlock.json_handler
def feedback_submit(self, data, suffix=''):
"""Attach the Assessment Feedback text to some submission."""
assessment_ui_model = self.get_assessment_module('peer-assessment') or {}
assessment_feedback = data.get('feedback', '')
if not assessment_feedback:
return {
@@ -84,7 +81,6 @@ class GradeMixin(object):
}
try:
peer_api.set_assessment_feedback(
assessment_ui_model['must_grade'],
{
'submission_uuid': self.submission_uuid,
'feedback': assessment_feedback,
......
@@ -3,7 +3,7 @@ from django.utils.translation import ugettext as _
from xblock.core import XBlock
from openassessment.assessment import self_api
from submissions import api as submission_api
logger = logging.getLogger(__name__)
@@ -29,10 +29,11 @@ class SelfAssessmentMixin(object):
if not workflow:
return self.render_assessment(path, context)
try:
submission, assessment = self_api.get_submission_and_assessment(
submission = submission_api.get_submission(self.submission_uuid)
assessment = self_api.get_assessment(
workflow["submission_uuid"]
)
except self_api.SelfAssessmentRequestError:
except (submission_api.SubmissionError, self_api.SelfAssessmentRequestError):
logger.exception(
u"Could not retrieve self assessment for submission {}"
.format(workflow["submission_uuid"])
......
@@ -7,7 +7,6 @@ from collections import namedtuple
import copy
import json
from openassessment.assessment import peer_api
from submissions import api as submission_api
from .base import XBlockHandlerTestCase, scenario
@@ -97,7 +96,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
self.assertTrue(resp['success'])
# Retrieve the assessment and check that it matches what we sent
actual = peer_api.get_assessments(submission['uuid'])
actual = peer_api.get_assessments(submission['uuid'], scored_only=False)
self.assertEqual(len(actual), 1)
self.assertEqual(actual[0]['submission_uuid'], assessment['submission_uuid'])
self.assertEqual(actual[0]['points_earned'], 5)
......
@@ -35,7 +35,7 @@ class TestSelfAssessment(XBlockHandlerTestCase):
self.assertTrue(resp['success'])
# Expect that a self-assessment was created
_, assessment = self_api.get_submission_and_assessment(submission["uuid"])
assessment = self_api.get_assessment(submission["uuid"])
self.assertEqual(assessment['submission_uuid'], submission['uuid'])
self.assertEqual(assessment['points_earned'], 5)
self.assertEqual(assessment['points_possible'], 6)
@@ -117,7 +117,7 @@ class TestSelfAssessment(XBlockHandlerTestCase):
# Simulate an error and expect a failure response
with mock.patch('openassessment.xblock.self_assessment_mixin.self_api') as mock_api:
mock_api.SelfAssessmentRequestError = self_api.SelfAssessmentRequestError
mock_api.get_submission_and_assessment.side_effect = self_api.SelfAssessmentRequestError
mock_api.get_assessment.side_effect = self_api.SelfAssessmentRequestError
resp = self.request(xblock, 'render_self_assessment', json.dumps(dict()))
self.assertIn("error", resp.lower())
......