Commit 50b300f3 by Stephen Sanchez

Merge pull request #228 from edx/sanchez/update_peer_workflow_timestamps

Updating the Assessment Models
parents c2f6aceb acb51e34
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):

    def forwards(self, orm):
        # Deleting field 'PeerWorkflow.graded_count'
        db.delete_column('assessment_peerworkflow', 'graded_count')

        # Adding field 'PeerWorkflow.grading_completed_at'
        db.add_column('assessment_peerworkflow', 'grading_completed_at',
                      self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True),
                      keep_default=False)

    def backwards(self, orm):
        # Adding field 'PeerWorkflow.graded_count'
        db.add_column('assessment_peerworkflow', 'graded_count',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0, db_index=True),
                      keep_default=False)

        # Deleting field 'PeerWorkflow.grading_completed_at'
        db.delete_column('assessment_peerworkflow', 'grading_completed_at')
    models = {
        'assessment.assessment': {
            'Meta': {'ordering': "['-scored_at', '-id']", 'object_name': 'Assessment'},
            'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"}),
            'score_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'scored_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'scorer_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
        },
        'assessment.assessmentfeedback': {
            'Meta': {'object_name': 'AssessmentFeedback'},
            'assessments': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.Assessment']"}),
            'feedback_text': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'options': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.AssessmentFeedbackOption']"}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
        },
        'assessment.assessmentfeedbackoption': {
            'Meta': {'object_name': 'AssessmentFeedbackOption'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'text': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        'assessment.assessmentpart': {
            'Meta': {'object_name': 'AssessmentPart'},
            'assessment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parts'", 'to': "orm['assessment.Assessment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.CriterionOption']"})
        },
        'assessment.criterion': {
            'Meta': {'ordering': "['rubric', 'order_num']", 'object_name': 'Criterion'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'prompt': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
            'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'criteria'", 'to': "orm['assessment.Rubric']"})
        },
        'assessment.criterionoption': {
            'Meta': {'ordering': "['criterion', 'order_num']", 'object_name': 'CriterionOption'},
            'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['assessment.Criterion']"}),
            'explanation': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'points': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        'assessment.peerworkflow': {
            'Meta': {'ordering': "['created_at', 'id']", 'object_name': 'PeerWorkflow'},
            'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'grading_completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
        },
        'assessment.peerworkflowitem': {
            'Meta': {'ordering': "['started_at', 'id']", 'object_name': 'PeerWorkflowItem'},
            'assessment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Assessment']", 'null': 'True'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded_by'", 'to': "orm['assessment.PeerWorkflow']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'scored': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'scorer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded'", 'to': "orm['assessment.PeerWorkflow']"}),
            'started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
        },
        'assessment.rubric': {
            'Meta': {'object_name': 'Rubric'},
            'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        }
    }

    complete_apps = ['assessment']
\ No newline at end of file
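The migration above only changes the schema. A rough sketch of applying or rolling it back (not part of the commit; assumes a standard South-era Django project with the assessment app installed, and the settings module name is a placeholder):

import os
from django.core.management import call_command

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")  # placeholder settings module

# Forwards: drops PeerWorkflow.graded_count and adds the nullable, indexed
# grading_completed_at column.
call_command("migrate", "assessment")

# Migrating backwards to the previous migration number (placeholder below)
# restores graded_count with its default of 0; the old counts are not recoverable.
# call_command("migrate", "assessment", "0003")

Because the forward migration discards the per-workflow counts, the code changes below recompute completion from the PeerWorkflowItem rows instead of relying on a stored counter.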
@@ -501,7 +501,7 @@ class PeerWorkflow(models.Model):
     submission_uuid = models.CharField(max_length=128, db_index=True, unique=True)
     created_at = models.DateTimeField(default=now, db_index=True)
     completed_at = models.DateTimeField(null=True, db_index=True)
-    graded_count = models.PositiveIntegerField(default=0, db_index=True)
+    grading_completed_at = models.DateTimeField(null=True, db_index=True)
     class Meta:
         ordering = ["created_at", "id"]
...
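The model change replaces the integer counter with a timestamp: a workflow is "done being graded" once grading_completed_at is set, rather than when a count reaches a threshold. A hedged sketch of the idea (illustrative only; the helper name is invented, but it mirrors the _close_active_assessment change further down):

from django.utils import timezone

def _mark_grading_complete_if_needed(author_workflow, num_required_grades):
    # author_workflow.graded_by are the PeerWorkflowItem rows pointing at this
    # author; once enough of them exist, stamp the workflow instead of keeping
    # a running graded_count.
    if (author_workflow.grading_completed_at is None
            and author_workflow.graded_by.count() >= num_required_grades):
        author_workflow.grading_completed_at = timezone.now()
        author_workflow.save()

This is also why the raw SQL in _get_submission_for_review below changes from "pw.graded_count < %s" to "pw.grading_completed_at is NULL".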
@@ -11,6 +11,8 @@ from django.utils import timezone
 from django.utils.translation import ugettext as _
 from django.db import DatabaseError
 from dogapi import dog_stats_api
+from django.db.models import Q
+import random
 from openassessment.assessment.models import (
     Assessment, AssessmentFeedback, AssessmentPart,
@@ -75,9 +77,15 @@ class PeerAssessmentInternalError(PeerAssessmentError):
 def is_complete(submission_uuid, requirements):
     try:
         workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
+        if workflow.completed_at is not None:
+            return True
+        elif _num_peers_graded(workflow) >= requirements["must_grade"]:
+            workflow.completed_at = timezone.now()
+            workflow.save()
+            return True
+        return False
     except PeerWorkflow.DoesNotExist:
         return False
-    return _num_peers_graded(workflow) >= requirements["must_grade"]
 def get_score(submission_uuid, requirements):
@@ -121,8 +129,6 @@ def get_score(submission_uuid, requirements):
         scored_item.scored = True
         scored_item.save()
-    workflow.completed_at = timezone.now()
-    workflow.save()
     return {
         "points_earned": sum(
             get_assessment_median_scores(submission_uuid).values()
@@ -131,7 +137,13 @@ def get_score(submission_uuid, requirements):
     }
-def create_assessment(submission_uuid, scorer_id, assessment_dict, rubric_dict, scored_at=None):
+def create_assessment(
+        submission_uuid,
+        scorer_id,
+        assessment_dict,
+        rubric_dict,
+        num_required_grades,
+        scored_at=None):
     """Creates an assessment on the given submission.
     Assessments are created based on feedback associated with a particular
@@ -145,6 +157,10 @@ def create_assessment(submission_uuid, scorer_id, assessment_dict, rubric_dict,
             is required to create an assessment on a submission.
         assessment_dict (dict): All related information for the assessment. An
             assessment contains points_earned, points_possible, and feedback.
+        num_required_grades (int): The required number of assessments a
+            submission requires before it is completed. If this number of
+            assessments is reached, the grading_completed_at timestamp is set
+            for the Workflow.
     Kwargs:
         scored_at (datetime): Optional argument to override the time in which
@@ -220,7 +236,7 @@ def create_assessment(submission_uuid, scorer_id, assessment_dict, rubric_dict,
                 "assessment cannot be submitted unless the associated "
                 "submission came from the peer workflow."))
         # Close the active assessment
-        _close_active_assessment(scorer_workflow, submission_uuid, assessment)
+        _close_active_assessment(scorer_workflow, submission_uuid, assessment, num_required_grades)
         assessment_dict = full_assessment_dict(assessment)
         _log_assessment(assessment, student_item, scorer_item)
@@ -852,7 +868,7 @@ def _get_submission_for_review(workflow, graded_by, over_grading=False):
             "where pw.item_id=%s "
             "and pw.course_id=%s "
             "and pw.student_id<>%s "
-            "and pw.graded_count < %s "
+            "and pw.grading_completed_at is NULL "
             "and pw.id not in ("
             " select pwi.author_id "
             " from assessment_peerworkflowitem pwi "
@@ -870,7 +886,6 @@ def _get_submission_for_review(workflow, graded_by, over_grading=False):
                 workflow.item_id,
                 workflow.course_id,
                 workflow.student_id,
-                graded_by,
                 workflow.id,
                 timeout,
                 graded_by
@@ -893,53 +908,34 @@ def _get_submission_for_over_grading(workflow):
     """Retrieve the next submission uuid for over grading
     Gets the next submission uuid for over grading in peer assessment.
-    Specifically, this will construct a query that:
-    1) selects all the peer workflows for the current course and item,
-       excluding the current student
-    2) checks all the assessments associated with those workflows, excluding
-       the current student's assessments, and any workflows connected to them.
-    3) checks to see if any unfinished assessments are expired
-    4) Groups all the workflows with their collective assessments
-    5) Orders them but their total assessments
-    6) Returns the workflow with the fewest assessments.
     """
     # The follow query behaves as the Peer Assessment Over Grading Queue. This
-    # will find the next submission (via PeerWorkflow) in this course / question
+    # will find a random submission (via PeerWorkflow) in this course / question
     # that:
     # 1) Does not belong to you
     # 2) Is not something you have already scored
-    # 3) Has the fewest current assessments.
     try:
-        peer_workflows = list(PeerWorkflow.objects.raw(
-            "select pw.id, pw.submission_uuid, ("
-            " select count(pwi.id) as c "
-            " from assessment_peerworkflowitem pwi "
-            " where pwi.author_id=pw.id "
-            ") as c "
+        query = list(PeerWorkflow.objects.raw(
+            "select pw.id, pw.submission_uuid "
             "from assessment_peerworkflow pw "
-            "where pw.item_id=%s "
-            "and pw.course_id=%s "
-            "and pw.student_id<>%s "
-            "and pw.id not in ("
-            " select pwi.author_id "
-            " from assessment_peerworkflowitem pwi "
-            " where pwi.scorer_id=%s "
-            ") "
-            "order by c, pw.created_at, pw.id "
-            "limit 1; ",
-            [
-                workflow.item_id,
-                workflow.course_id,
-                workflow.student_id,
-                workflow.id
-            ]
+            "where course_id=%s "
+            "and item_id=%s "
+            "and student_id<>%s "
+            "and pw.id not in ( "
+            "select pwi.author_id "
+            "from assessment_peerworkflowitem pwi "
+            "where pwi.scorer_id=%s); ",
+            [workflow.course_id, workflow.item_id, workflow.student_id, workflow.id]
         ))
-        if not peer_workflows:
+        workflow_count = len(query)
+        if workflow_count < 1:
             return None
-        return peer_workflows[0].submission_uuid
+        random_int = random.randint(0, workflow_count - 1)
+        random_workflow = query[random_int]
+        return random_workflow.submission_uuid
     except DatabaseError:
         error_message = _(
             u"An internal error occurred while retrieving a peer submission "
@@ -949,7 +945,12 @@ def _get_submission_for_over_grading(workflow):
         raise PeerAssessmentInternalError(error_message)
-def _close_active_assessment(workflow, submission_uuid, assessment):
+def _close_active_assessment(
+        workflow,
+        submission_uuid,
+        assessment,
+        num_required_grades
+):
     """Associate the work item with a complete assessment.
     Updates a workflow item on the student's workflow with the associated
@@ -960,6 +961,8 @@ def _close_active_assessment(workflow, submission_uuid, assessment):
         workflow (PeerWorkflow): The scorer's workflow
         submission_uuid (str): The submission the scorer is grading.
         assessment (PeerAssessment): The associate assessment for this action.
+        graded_by (int): The required number of grades the peer workflow
+            requires to be considered complete.
     Examples:
         >>> student_item_dict = dict(
@@ -970,14 +973,16 @@ def _close_active_assessment(workflow, submission_uuid, assessment):
         >>> )
         >>> workflow = _get_latest_workflow(student_item_dict)
         >>> assessment = Assessment.objects.all()[0]
-        >>> _close_active_assessment(workflow, "1", assessment)
+        >>> _close_active_assessment(workflow, "1", assessment, 3)
     """
     try:
         item = workflow.graded.get(submission_uuid=submission_uuid)
         item.assessment = assessment
-        item.author.graded_count += 1
-        item.author.save()
+        if (not item.author.grading_completed_at
+                and item.author.graded_by.all().count() >= num_required_grades):
+            item.author.grading_completed_at = timezone.now()
+            item.author.save()
         item.save()
     except (DatabaseError, PeerWorkflowItem.DoesNotExist):
         error_message = _(
...
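Because the timestamp is now set at assessment time, every caller of create_assessment has to say how many grades a submission needs. A hedged usage sketch (placeholder values; must_be_graded_by normally comes from the problem definition, as the XBlock and test changes below show, and the import path is assumed from the rest of the repository):

from openassessment.assessment import peer_api

def assess_peer(submission_uuid, scorer_id, assessment_dict, rubric_dict, must_be_graded_by=3):
    # The new fifth positional argument: once the author's workflow has received
    # this many assessments, its grading_completed_at timestamp is set.
    return peer_api.create_assessment(
        submission_uuid,
        scorer_id,
        assessment_dict,
        rubric_dict,
        must_be_graded_by,
    )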
@@ -97,7 +97,13 @@ class Command(BaseCommand):
                 'options_selected': options_selected,
                 'feedback': " ".join(loremipsum.get_paragraphs(2))
             }
-            peer_api.create_assessment(submission_uuid, scorer_id, assessment, rubric)
+            peer_api.create_assessment(
+                submission_uuid,
+                scorer_id,
+                assessment,
+                rubric,
+                self.NUM_PEER_ASSESSMENTS
+            )
         # Create a self-assessment
         print "-- Creating self assessment"
...
@@ -69,6 +69,7 @@ class PeerAssessmentMixin(object):
                 self.get_student_item_dict()["student_id"],
                 assessment_dict,
                 rubric_dict,
+                assessment_ui_model['must_be_graded_by']
             )
             # Emit analytics event...
             self.runtime.publish(
@@ -183,14 +184,14 @@ class PeerAssessmentMixin(object):
             context_dict["peer_start"] = self.format_datetime_string(date)
             path = 'openassessmentblock/peer/oa_peer_unavailable.html'
         elif workflow.get("status") == "peer":
-            peer_sub = self.get_peer_submission(student_item, assessment, submissions_closed)
+            peer_sub = self.get_peer_submission(student_item, assessment)
             if peer_sub:
                 path = 'openassessmentblock/peer/oa_peer_assessment.html'
                 context_dict["peer_submission"] = peer_sub
             else:
                 path = 'openassessmentblock/peer/oa_peer_waiting.html'
         elif continue_grading and student_item:
-            peer_sub = self.get_peer_submission(student_item, assessment, continue_grading)
+            peer_sub = self.get_peer_submission(student_item, assessment)
             if peer_sub:
                 path = 'openassessmentblock/peer/oa_peer_turbo_mode.html'
                 context_dict["peer_submission"] = peer_sub
@@ -204,16 +205,14 @@ class PeerAssessmentMixin(object):
     def get_peer_submission(
             self,
             student_item_dict,
-            assessment,
-            over_grading
+            assessment
     ):
-        submissions_closed, __, __ = self.is_closed(step="submission")
         peer_submission = False
         try:
             peer_submission = peer_api.get_submission_to_assess(
                 student_item_dict,
                 assessment["must_be_graded_by"],
-                over_grading
+                True
             )
             self.runtime.publish(
                 self,
...
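With over-grading handled by the random queue in peer_api, the XBlock no longer threads a submissions_closed / continue_grading flag through get_peer_submission; it always allows the over-grading fallback. A hedged sketch of the resulting call (placeholder names for the student item and assessment configuration):

from openassessment.assessment import peer_api

def fetch_peer_submission(student_item_dict, assessment_ui_model):
    # over_grading=True: allow falling back to the over-grading queue
    # (the random selection shown above) when no submission still needs
    # its required number of assessments.
    return peer_api.get_submission_to_assess(
        student_item_dict,
        assessment_ui_model["must_be_graded_by"],
        True,
    )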
@@ -139,14 +139,16 @@ class TestGrade(XBlockHandlerTestCase):
             # Create an assessment of the user's submission
             peer_api.create_assessment(
                 submission['uuid'], scorer_name,
-                assessment, {'criteria': xblock.rubric_criteria}
+                assessment, {'criteria': xblock.rubric_criteria},
+                xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
             )
         # Have our user make assessments (so she can get a score)
         for asmnt in peer_assessments:
             new_submission = peer_api.get_submission_to_assess(student_item, len(peers))
             peer_api.create_assessment(
-                new_submission['uuid'], student_id, asmnt, {'criteria': xblock.rubric_criteria}
+                new_submission['uuid'], student_id, asmnt, {'criteria': xblock.rubric_criteria},
+                xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
             )
         # Have the user submit a self-assessment (so she can get a score)
...
@@ -44,7 +44,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
             sub['uuid'],
             hal_student_item['student_id'],
             assessment,
-            {'criteria': xblock.rubric_criteria}
+            {'criteria': xblock.rubric_criteria},
+            1
         )
         # Now Sally will assess Hal.
@@ -55,10 +56,12 @@ class TestPeerAssessment(XBlockHandlerTestCase):
             sub['uuid'],
             sally_student_item['student_id'],
             assessment,
-            {'criteria': xblock.rubric_criteria}
+            {'criteria': xblock.rubric_criteria},
+            1
         )
-        # If Over Grading is on, this should now return Sally's response to Bob.
+        # If Over Grading is on, this should now return Sally or Hal's response
+        # to Bob.
         submission = xblock.create_submission(student_item, u"Bob's answer")
         workflow_info = xblock.get_workflow_info()
         self.assertEqual(workflow_info["status"], u'peer')
@@ -71,7 +74,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)
         #Validate Peer Rendering.
-        self.assertIn("Sally".encode('utf-8'), peer_response.body)
+        self.assertTrue("Sally".encode('utf-8') in peer_response.body or
+                        "Hal".encode('utf-8') in peer_response.body)
     @scenario('data/peer_assessment_scenario.xml', user_id='Bob')
     def test_peer_assess_handler(self, xblock):
@@ -182,7 +186,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
             sally_sub['uuid'],
             hal_student_item['student_id'],
             assessment,
-            {'criteria': xblock.rubric_criteria}
+            {'criteria': xblock.rubric_criteria},
+            1
         )
         # Now Sally will assess Hal.
@@ -193,7 +198,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
             hal_sub['uuid'],
             sally_student_item['student_id'],
             assessment,
-            {'criteria': xblock.rubric_criteria}
+            {'criteria': xblock.rubric_criteria},
+            1
         )
         # If Over Grading is on, this should now return Sally's response to Bob.
@@ -208,13 +214,23 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         self.assertIsNotNone(peer_response)
         self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)
+        hal_response = "Hal".encode('utf-8') in peer_response.body
+        sally_response = "Sally".encode('utf-8') in peer_response.body
         # Validate Peer Rendering.
-        self.assertIn("Sally".encode('utf-8'), peer_response.body)
+        if hal_response:
+            peer_uuid = hal_sub['uuid']
+        elif sally_response:
+            peer_uuid = sally_sub['uuid']
+        else:
+            self.fail("Response was neither Hal or Sally's submission.")
         peer_api.create_assessment(
-            sally_sub['uuid'],
+            peer_uuid,
             student_item['student_id'],
             assessment,
-            {'criteria': xblock.rubric_criteria}
+            {'criteria': xblock.rubric_criteria},
+            1
         )
         # Validate Submission Rendering.
@@ -224,13 +240,21 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         self.assertIsNotNone(peer_response)
         self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)
-        # Validate Peer Rendering.
-        self.assertIn("Hal".encode('utf-8'), peer_response.body)
+        # Validate Peer Rendering. Check that if Sally or Hal were selected
+        # the first time around, the other is selected this time.
+        if not hal_response and "Hal".encode('utf-8') in peer_response.body:
+            peer_uuid = hal_sub['uuid']
+        elif not sally_response and "Sally".encode('utf-8') in peer_response.body:
+            peer_uuid = sally_sub['uuid']
+        else:
+            self.fail("Response was neither Hal or Sally's submission.")
         peer_api.create_assessment(
-            hal_sub['uuid'],
+            peer_uuid,
             student_item['student_id'],
             assessment,
-            {'criteria': xblock.rubric_criteria}
+            {'criteria': xblock.rubric_criteria},
+            1
         )
         # A Final over grading will not return anything.
@@ -239,4 +263,4 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         peer_response = xblock.render_peer_assessment(request)
         self.assertIsNotNone(peer_response)
         self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)
-        self.assertIn("Congratulations".encode('utf-8'), peer_response.body)
+        self.assertIn("Complete".encode('utf-8'), peer_response.body)
\ No newline at end of file