Commit 84dd9255 by Stephen Sanchez

Updating the Assessment Models

parent c2f6aceb
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Deleting field 'PeerWorkflow.graded_count'
        db.delete_column('assessment_peerworkflow', 'graded_count')

        # Adding field 'PeerWorkflow.grading_completed_at'
        db.add_column('assessment_peerworkflow', 'grading_completed_at',
                      self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True),
                      keep_default=False)

    def backwards(self, orm):
        # Adding field 'PeerWorkflow.graded_count'
        db.add_column('assessment_peerworkflow', 'graded_count',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0, db_index=True),
                      keep_default=False)

        # Deleting field 'PeerWorkflow.grading_completed_at'
        db.delete_column('assessment_peerworkflow', 'grading_completed_at')

    models = {
        'assessment.assessment': {
            'Meta': {'ordering': "['-scored_at', '-id']", 'object_name': 'Assessment'},
            'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"}),
            'score_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'scored_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'scorer_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
        },
        'assessment.assessmentfeedback': {
            'Meta': {'object_name': 'AssessmentFeedback'},
            'assessments': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.Assessment']"}),
            'feedback_text': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'options': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.AssessmentFeedbackOption']"}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
        },
        'assessment.assessmentfeedbackoption': {
            'Meta': {'object_name': 'AssessmentFeedbackOption'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'text': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        'assessment.assessmentpart': {
            'Meta': {'object_name': 'AssessmentPart'},
            'assessment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parts'", 'to': "orm['assessment.Assessment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.CriterionOption']"})
        },
        'assessment.criterion': {
            'Meta': {'ordering': "['rubric', 'order_num']", 'object_name': 'Criterion'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'prompt': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
            'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'criteria'", 'to': "orm['assessment.Rubric']"})
        },
        'assessment.criterionoption': {
            'Meta': {'ordering': "['criterion', 'order_num']", 'object_name': 'CriterionOption'},
            'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['assessment.Criterion']"}),
            'explanation': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'points': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        'assessment.peerworkflow': {
            'Meta': {'ordering': "['created_at', 'id']", 'object_name': 'PeerWorkflow'},
            'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'grading_completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
        },
        'assessment.peerworkflowitem': {
            'Meta': {'ordering': "['started_at', 'id']", 'object_name': 'PeerWorkflowItem'},
            'assessment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Assessment']", 'null': 'True'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded_by'", 'to': "orm['assessment.PeerWorkflow']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'scored': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'scorer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded'", 'to': "orm['assessment.PeerWorkflow']"}),
            'started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
        },
        'assessment.rubric': {
            'Meta': {'object_name': 'Rubric'},
            'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        }
    }

    complete_apps = ['assessment']
\ No newline at end of file

@@ -501,7 +501,7 @@ class PeerWorkflow(models.Model):
     submission_uuid = models.CharField(max_length=128, db_index=True, unique=True)
     created_at = models.DateTimeField(default=now, db_index=True)
     completed_at = models.DateTimeField(null=True, db_index=True)
-    graded_count = models.PositiveIntegerField(default=0, db_index=True)
+    grading_completed_at = models.DateTimeField(null=True, db_index=True)

     class Meta:
         ordering = ["created_at", "id"]
...
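
Assembled from the hunk above, the PeerWorkflow timestamp fields after this commit read as follows (other fields elided; the annotations are my reading of the diff, not text from the commit: completed_at records when this student has graded enough peers, while the new grading_completed_at records when the student's own submission has received enough peer grades):

class PeerWorkflow(models.Model):
    # ...student_id, item_id, course_id elided...
    submission_uuid = models.CharField(max_length=128, db_index=True, unique=True)
    created_at = models.DateTimeField(default=now, db_index=True)
    completed_at = models.DateTimeField(null=True, db_index=True)
    grading_completed_at = models.DateTimeField(null=True, db_index=True)  # replaces graded_count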

@@ -75,9 +75,15 @@ class PeerAssessmentInternalError(PeerAssessmentError):
 def is_complete(submission_uuid, requirements):
     try:
         workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
+        if workflow.completed_at is not None:
+            return True
+        elif _num_peers_graded(workflow) >= requirements["must_grade"]:
+            workflow.completed_at = timezone.now()
+            workflow.save()
+            return True
+        return False
     except PeerWorkflow.DoesNotExist:
         return False
-    return _num_peers_graded(workflow) >= requirements["must_grade"]
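
Read as a whole, is_complete now caches its answer: the first time the scorer has graded must_grade peers, completed_at is stamped and saved, so later calls return True without recounting. A usage sketch (the requirement values are assumptions suggested by the test constants, not stated in the commit):

# Illustrative only -- requirement values are assumptions.
requirements = {"must_grade": 5, "must_be_graded_by": 3}
if peer_api.is_complete(submission_uuid, requirements):
    # completed_at is now set on the PeerWorkflow; subsequent calls short-circuit.
    pass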

@@ -121,8 +127,6 @@ def get_score(submission_uuid, requirements):
         scored_item.scored = True
         scored_item.save()

-    workflow.completed_at = timezone.now()
-    workflow.save()
-
     return {
         "points_earned": sum(
             get_assessment_median_scores(submission_uuid).values()
@@ -131,7 +135,13 @@ def get_score(submission_uuid, requirements):
     }

-def create_assessment(submission_uuid, scorer_id, assessment_dict, rubric_dict, scored_at=None):
+def create_assessment(
+        submission_uuid,
+        scorer_id,
+        assessment_dict,
+        rubric_dict,
+        graded_by,
+        scored_at=None):
     """Creates an assessment on the given submission.

     Assessments are created based on feedback associated with a particular
@@ -145,6 +155,9 @@ def create_assessment(submission_uuid, scorer_id, assessment_dict, rubric_dict,
             is required to create an assessment on a submission.
         assessment_dict (dict): All related information for the assessment. An
             assessment contains points_earned, points_possible, and feedback.
+        graded_by (int): The required number of assessments a submission
+            requires before it is completed. If this number of assessments is
+            reached, the grading_completed_at timestamp is set for the Workflow.

     Kwargs:
         scored_at (datetime): Optional argument to override the time in which
@@ -220,7 +233,7 @@ def create_assessment(submission_uuid, scorer_id, assessment_dict, rubric_dict,
                 "assessment cannot be submitted unless the associated "
                 "submission came from the peer workflow."))
         # Close the active assessment
-        _close_active_assessment(scorer_workflow, submission_uuid, assessment)
+        _close_active_assessment(scorer_workflow, submission_uuid, assessment, graded_by)

         assessment_dict = full_assessment_dict(assessment)
         _log_assessment(assessment, student_item, scorer_item)
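
Every caller must now thread graded_by through as the fifth positional argument. A minimal sketch of the updated call, modeled on the test suite below (the constant names are the tests' own; their values are assumptions):

assessment = peer_api.create_assessment(
    sub["uuid"],           # submission being assessed
    bob["student_id"],     # the scorer
    ASSESSMENT_DICT,       # selected options plus feedback
    RUBRIC_DICT,           # rubric the options belong to
    REQUIRED_GRADED_BY,    # new: grades required before grading_completed_at is set
    MONDAY,                # optional scored_at override
)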

@@ -852,7 +865,7 @@ def _get_submission_for_review(workflow, graded_by, over_grading=False):
             "where pw.item_id=%s "
             "and pw.course_id=%s "
             "and pw.student_id<>%s "
-            "and pw.graded_count < %s "
+            "and pw.grading_completed_at is NULL "
             "and pw.id not in ("
             " select pwi.author_id "
             " from assessment_peerworkflowitem pwi "
@@ -870,7 +883,6 @@ def _get_submission_for_review(workflow, graded_by, over_grading=False):
             workflow.item_id,
             workflow.course_id,
             workflow.student_id,
-            graded_by,
             workflow.id,
             timeout,
             graded_by
@@ -949,7 +961,7 @@ def _get_submission_for_over_grading(workflow):
         raise PeerAssessmentInternalError(error_message)

-def _close_active_assessment(workflow, submission_uuid, assessment):
+def _close_active_assessment(workflow, submission_uuid, assessment, graded_by):
     """Associate the work item with a complete assessment.

     Updates a workflow item on the student's workflow with the associated
@@ -960,6 +972,8 @@ def _close_active_assessment(workflow, submission_uuid, assessment):
         workflow (PeerWorkflow): The scorer's workflow
         submission_uuid (str): The submission the scorer is grading.
         assessment (PeerAssessment): The associate assessment for this action.
+        graded_by (int): The required number of grades the peer workflow
+            requires to be considered complete.

     Examples:
         >>> student_item_dict = dict(
@@ -970,13 +984,14 @@ def _close_active_assessment(workflow, submission_uuid, assessment):
         >>> )
         >>> workflow = _get_latest_workflow(student_item_dict)
         >>> assessment = Assessment.objects.all()[0]
-        >>> _close_active_assessment(workflow, "1", assessment)
+        >>> _close_active_assessment(workflow, "1", assessment, 3)

     """
     try:
         item = workflow.graded.get(submission_uuid=submission_uuid)
         item.assessment = assessment
-        item.author.graded_count += 1
+        if item.author.graded_by.all().count() >= graded_by:
+            item.author.grading_completed_at = timezone.now()
         item.author.save()
         item.save()
     except (DatabaseError, PeerWorkflowItem.DoesNotExist):
...
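
One reading of the design choice (my inference, not stated in the commit): deriving completion from graded_by.all().count() plus a timestamp cannot drift the way a hand-incremented graded_count could, and open submissions become selectable with a plain NULL test, as the raw SQL above now does. The ORM equivalent would be roughly:

# Illustrative ORM equivalent of the new SQL predicate "pw.grading_completed_at is NULL".
open_workflows = PeerWorkflow.objects.filter(grading_completed_at__isnull=True)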
@@ -131,6 +131,7 @@ class TestPeerApi(CacheResetTest):
             bob["student_id"],
             ASSESSMENT_DICT,
             RUBRIC_DICT,
+            REQUIRED_GRADED_BY,
         )
         self.assertEqual(assessment["points_earned"], 6)
         self.assertEqual(assessment["points_possible"], 14)
@@ -146,6 +147,7 @@ class TestPeerApi(CacheResetTest):
             bob["student_id"],
             ASSESSMENT_DICT_PASS_HUGE,
             RUBRIC_DICT,
+            REQUIRED_GRADED_BY,
         )

     @file_data('valid_assessments.json')
@@ -158,6 +160,7 @@ class TestPeerApi(CacheResetTest):
             bob["student_id"],
             assessment_dict,
             RUBRIC_DICT,
+            REQUIRED_GRADED_BY,
         )
         assessments = peer_api.get_assessments(sub["uuid"], scored_only=False)
         self.assertEqual(1, len(assessments))
@@ -172,7 +175,8 @@ class TestPeerApi(CacheResetTest):
             bob["student_id"],
             assessment_dict,
             RUBRIC_DICT,
-            MONDAY
+            REQUIRED_GRADED_BY,
+            MONDAY,
         )
         assessments = peer_api.get_assessments(sub["uuid"], scored_only=False)
         self.assertEqual(1, len(assessments))
@@ -191,7 +195,8 @@ class TestPeerApi(CacheResetTest):
         self.assertFalse(finished)
         self.assertEqual(count, 0)
         peer_api.create_assessment(
-            sub["uuid"], bob["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
+            sub["uuid"], bob["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            REQUIRED_GRADED_BY,
         )
         finished, count = peer_api.has_finished_required_evaluating(bob, 1)
         self.assertTrue(finished)
@@ -222,7 +227,8 @@ class TestPeerApi(CacheResetTest):
             self.assertEquals((False, i), peer_api.has_finished_required_evaluating(STUDENT_ITEM, REQUIRED_GRADED))
             sub = peer_api.get_submission_to_assess(tim, REQUIRED_GRADED)
             peer_api.create_assessment(
-                sub["uuid"], tim["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
+                sub["uuid"], tim["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+                REQUIRED_GRADED_BY,
             )
         self.assertEquals((True, 5), peer_api.has_finished_required_evaluating(STUDENT_ITEM, REQUIRED_GRADED))
@@ -234,19 +240,22 @@ class TestPeerApi(CacheResetTest):
         sub = peer_api.get_submission_to_assess(bob, REQUIRED_GRADED)
         self.assertEqual(sub["uuid"], tim_sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], bob["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
+            sub["uuid"], bob["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            REQUIRED_GRADED_BY,
         )

         sub = peer_api.get_submission_to_assess(sally, REQUIRED_GRADED)
         self.assertEqual(sub["uuid"], tim_sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], sally["student_id"], ASSESSMENT_DICT_FAIL, RUBRIC_DICT
+            sub["uuid"], sally["student_id"], ASSESSMENT_DICT_FAIL, RUBRIC_DICT,
+            REQUIRED_GRADED_BY,
         )

         sub = peer_api.get_submission_to_assess(jim, REQUIRED_GRADED)
         self.assertEqual(sub["uuid"], tim_sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], jim["student_id"], ASSESSMENT_DICT_PASS, RUBRIC_DICT
+            sub["uuid"], jim["student_id"], ASSESSMENT_DICT_PASS, RUBRIC_DICT,
+            REQUIRED_GRADED_BY,
         )

         # Tim has met the critera, and should now be complete.
@@ -330,17 +339,21 @@ class TestPeerApi(CacheResetTest):
         # 10) Buffy goes on to review Bob, Sally, and Jim, but needs two more.
         peer_api.create_assessment(
-            sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
+            sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            REQUIRED_GRADED_BY,
         )
         sub = peer_api.get_submission_to_assess(buffy, REQUIRED_GRADED_BY)
         self.assertEquals(sally_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
+            sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            REQUIRED_GRADED_BY,
         )
         sub = peer_api.get_submission_to_assess(buffy, REQUIRED_GRADED_BY)
         self.assertEquals(jim_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
+            sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            REQUIRED_GRADED_BY,
         )
         sub = peer_api.get_submission_to_assess(buffy, REQUIRED_GRADED_BY)
         self.assertIsNone(sub)
@@ -353,17 +366,20 @@ class TestPeerApi(CacheResetTest):
         sub = peer_api.get_submission_to_assess(xander, REQUIRED_GRADED_BY)
         self.assertEquals(bob_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
+            sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            REQUIRED_GRADED_BY,
         )
         sub = peer_api.get_submission_to_assess(xander, REQUIRED_GRADED_BY)
         self.assertEquals(sally_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
+            sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            REQUIRED_GRADED_BY,
         )
         sub = peer_api.get_submission_to_assess(xander, REQUIRED_GRADED_BY)
         self.assertEquals(jim_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
+            sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            REQUIRED_GRADED_BY,
         )

         # Tim has met the critera, and should now have a score.
@@ -380,7 +396,8 @@ class TestPeerApi(CacheResetTest):
         sub = peer_api.get_submission_to_assess(buffy, REQUIRED_GRADED_BY)
         self.assertEquals(xander_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
+            sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            REQUIRED_GRADED_BY,
         )

         # 14) Spike submits.
@@ -390,34 +407,41 @@ class TestPeerApi(CacheResetTest):
         sub = peer_api.get_submission_to_assess(spike, REQUIRED_GRADED_BY)
         self.assertEquals(bob_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
+            sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            REQUIRED_GRADED_BY,
         )
         sub = peer_api.get_submission_to_assess(spike, REQUIRED_GRADED_BY)
         self.assertEquals(sally_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
+            sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            REQUIRED_GRADED_BY,
         )
         sub = peer_api.get_submission_to_assess(spike, REQUIRED_GRADED_BY)
         self.assertEquals(jim_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
+            sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            REQUIRED_GRADED_BY,
         )
         sub = peer_api.get_submission_to_assess(spike, REQUIRED_GRADED_BY)
         self.assertEquals(buffy_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
+            sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            REQUIRED_GRADED_BY,
         )
         sub = peer_api.get_submission_to_assess(spike, REQUIRED_GRADED_BY)
         self.assertEquals(xander_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
+            sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            REQUIRED_GRADED_BY,
        )

         # 16) Buffy reviews Spike
         sub = peer_api.get_submission_to_assess(buffy, REQUIRED_GRADED_BY)
         self.assertEquals(spike_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
+            sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            REQUIRED_GRADED_BY,
         )

         # 17) Willow comes along and submits
@@ -427,14 +451,16 @@ class TestPeerApi(CacheResetTest):
         sub = peer_api.get_submission_to_assess(willow, REQUIRED_GRADED_BY)
         self.assertEquals(buffy_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], willow["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
+            sub["uuid"], willow["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            REQUIRED_GRADED_BY,
         )

         # 19) Xander comes back and gets Buffy's submission, and grades it.
         sub = peer_api.get_submission_to_assess(xander, REQUIRED_GRADED_BY)
         self.assertEquals(buffy_sub["uuid"], sub["uuid"])
         peer_api.create_assessment(
-            sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT
+            sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
+            REQUIRED_GRADED_BY,
         )

         # 20) Buffy should now have a grade.
@@ -515,6 +541,7 @@ class TestPeerApi(CacheResetTest):
             bob["student_id"],
             ASSESSMENT_DICT,
             RUBRIC_DICT,
+            REQUIRED_GRADED_BY,
         )
         sub = peer_api.get_submission_to_assess(tim, 1)
         peer_api.create_assessment(
@@ -522,6 +549,7 @@ class TestPeerApi(CacheResetTest):
             tim["student_id"],
             ASSESSMENT_DICT,
             RUBRIC_DICT,
+            REQUIRED_GRADED_BY,
         )
         peer_api.get_score(
             tim_sub["uuid"],
@@ -565,12 +593,13 @@ class TestPeerApi(CacheResetTest):
         self.assertEqual(xander_answer["uuid"], submission["uuid"])

         assessment_dict = peer_api.create_assessment(
-            xander_answer["uuid"], "Buffy", ASSESSMENT_DICT, RUBRIC_DICT
+            xander_answer["uuid"], "Buffy", ASSESSMENT_DICT, RUBRIC_DICT,
+            REQUIRED_GRADED_BY,
         )
         assessment = Assessment.objects.filter(
             scorer_id=assessment_dict["scorer_id"],
             scored_at=assessment_dict["scored_at"])[0]
-        peer_api._close_active_assessment(buffy_workflow, xander_answer["uuid"], assessment)
+        peer_api._close_active_assessment(buffy_workflow, xander_answer["uuid"], assessment, REQUIRED_GRADED_BY)

         item = peer_api._create_peer_workflow_item(buffy_workflow, xander_answer["uuid"])
         self.assertEqual(xander_answer["uuid"], submission["uuid"])
@@ -669,6 +698,7 @@ class TestPeerApi(CacheResetTest):
             bob["student_id"],
             ASSESSMENT_DICT,
             RUBRIC_DICT,
+            1
         )
         self.assertEqual(assessment["points_earned"], 6)
         self.assertEqual(assessment["points_possible"], 14)
@@ -710,7 +740,8 @@ class TestPeerApi(CacheResetTest):
             STUDENT_ITEM["student_id"],
             ASSESSMENT_DICT,
             RUBRIC_DICT,
-            MONDAY
+            REQUIRED_GRADED_BY,
+            MONDAY,
         )

     @patch.object(PeerWorkflowItem, 'get_scored_assessments')
@@ -724,7 +755,8 @@ class TestPeerApi(CacheResetTest):
             bob["student_id"],
             ASSESSMENT_DICT,
             RUBRIC_DICT,
-            MONDAY
+            REQUIRED_GRADED_BY,
+            MONDAY,
         )
         mock_filter.side_effect = DatabaseError("Bad things happened")
         peer_api.get_assessments(sub["uuid"])
@@ -750,7 +782,8 @@ class TestPeerApi(CacheResetTest):
             "another_student",
             ASSESSMENT_DICT,
             RUBRIC_DICT,
-            MONDAY
+            REQUIRED_GRADED_BY,
+            MONDAY,
         )

     @staticmethod
...
@@ -97,7 +97,13 @@ class Command(BaseCommand):
                 'options_selected': options_selected,
                 'feedback': " ".join(loremipsum.get_paragraphs(2))
             }
-            peer_api.create_assessment(submission_uuid, scorer_id, assessment, rubric)
+            peer_api.create_assessment(
+                submission_uuid,
+                scorer_id,
+                assessment,
+                rubric,
+                self.NUM_PEER_ASSESSMENTS
+            )

             # Create a self-assessment
             print "-- Creating self assessment"
...
@@ -69,6 +69,7 @@ class PeerAssessmentMixin(object):
                 self.get_student_item_dict()["student_id"],
                 assessment_dict,
                 rubric_dict,
+                assessment_ui_model['must_be_graded_by']
             )

             # Emit analytics event...
             self.runtime.publish(
...
@@ -139,14 +139,16 @@ class TestGrade(XBlockHandlerTestCase):
             # Create an assessment of the user's submission
             peer_api.create_assessment(
                 submission['uuid'], scorer_name,
-                assessment, {'criteria': xblock.rubric_criteria}
+                assessment, {'criteria': xblock.rubric_criteria},
+                xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
             )

         # Have our user make assessments (so she can get a score)
         for asmnt in peer_assessments:
             new_submission = peer_api.get_submission_to_assess(student_item, len(peers))
             peer_api.create_assessment(
-                new_submission['uuid'], student_id, asmnt, {'criteria': xblock.rubric_criteria}
+                new_submission['uuid'], student_id, asmnt, {'criteria': xblock.rubric_criteria},
+                xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
             )

         # Have the user submit a self-assessment (so she can get a score)
...
@@ -44,7 +44,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
             sub['uuid'],
             hal_student_item['student_id'],
             assessment,
-            {'criteria': xblock.rubric_criteria}
+            {'criteria': xblock.rubric_criteria},
+            1
         )

         # Now Sally will assess Hal.
@@ -55,7 +56,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
             sub['uuid'],
             sally_student_item['student_id'],
             assessment,
-            {'criteria': xblock.rubric_criteria}
+            {'criteria': xblock.rubric_criteria},
+            1
         )

         # If Over Grading is on, this should now return Sally's response to Bob.
@@ -182,7 +184,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
             sally_sub['uuid'],
             hal_student_item['student_id'],
             assessment,
-            {'criteria': xblock.rubric_criteria}
+            {'criteria': xblock.rubric_criteria},
+            1
         )

         # Now Sally will assess Hal.
@@ -193,7 +196,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
             hal_sub['uuid'],
             sally_student_item['student_id'],
             assessment,
-            {'criteria': xblock.rubric_criteria}
+            {'criteria': xblock.rubric_criteria},
+            1
         )

         # If Over Grading is on, this should now return Sally's response to Bob.
@@ -214,7 +218,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
             sally_sub['uuid'],
             student_item['student_id'],
             assessment,
-            {'criteria': xblock.rubric_criteria}
+            {'criteria': xblock.rubric_criteria},
+            1
         )

         # Validate Submission Rendering.
@@ -230,7 +235,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
             hal_sub['uuid'],
             student_item['student_id'],
             assessment,
-            {'criteria': xblock.rubric_criteria}
+            {'criteria': xblock.rubric_criteria},
+            1
         )

         # A Final over grading will not return anything.
...