Commit 7d44ba8c by Will Daly

Implement feedback per criterion (XML, backend, JavaScript, and templates)

Remove unused start/due date args in XML parsing.
Use a textarea attribute to limit submission/feedback text length instead of JavaScript checks.
Truncate over-long submissions/feedback on the server instead of raising an exception.
Refactor the peer step JS into its own source file.
Move the JS namespace and gettext stub into a shared file.
Add scrollTo for turbo grade submission.
parent 570d587b
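For reference, a minimal sketch of the new `peer_api.create_assessment` call shape introduced by this commit, based on the updated docstring example further down. The UUID, rubric, and grading-count values are placeholders, not values from this diff:

    # Hypothetical call-site sketch (values are placeholders):
    options_selected = {"clarity": "Very clear", "precision": "Somewhat precise"}
    criterion_feedback = {"clarity": "I thought this essay was very clear."}  # optional, per criterion
    overall_feedback = "Your submission was thrilling."

    assessment = peer_api.create_assessment(
        scorer_submission_uuid,   # UUID of the scorer's own submission
        scorer_id,                # ID of the user giving this assessment
        options_selected,
        criterion_feedback,
        overall_feedback,
        rubric_dict,
        num_required_grades,
    )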
...@@ -86,11 +86,12 @@ class AssessmentAdmin(admin.ModelAdmin): ...@@ -86,11 +86,12 @@ class AssessmentAdmin(admin.ModelAdmin):
def parts_summary(self, assessment_obj): def parts_summary(self, assessment_obj):
return "<br/>".join( return "<br/>".join(
html.escape( html.escape(
u"{}/{} - {}: {}".format( u"{}/{} - {}: {} - {}".format(
part.points_earned, part.points_earned,
part.points_possible, part.points_possible,
part.option.criterion.name, part.option.criterion.name,
part.option.name, part.option.name,
part.feedback,
) )
) )
for part in assessment_obj.parts.all() for part in assessment_obj.parts.all()
......
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'AssessmentPart.feedback'
db.add_column('assessment_assessmentpart', 'feedback',
self.gf('django.db.models.fields.TextField')(default='', max_length=10000, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'AssessmentPart.feedback'
db.delete_column('assessment_assessmentpart', 'feedback')
models = {
'assessment.assessment': {
'Meta': {'ordering': "['-scored_at', '-id']", 'object_name': 'Assessment'},
'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"}),
'score_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'scored_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'scorer_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'assessment.assessmentfeedback': {
'Meta': {'object_name': 'AssessmentFeedback'},
'assessments': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.Assessment']"}),
'feedback_text': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.AssessmentFeedbackOption']"}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'assessment.assessmentfeedbackoption': {
'Meta': {'object_name': 'AssessmentFeedbackOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'assessment.assessmentpart': {
'Meta': {'object_name': 'AssessmentPart'},
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parts'", 'to': "orm['assessment.Assessment']"}),
'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['assessment.CriterionOption']"})
},
'assessment.criterion': {
'Meta': {'ordering': "['rubric', 'order_num']", 'object_name': 'Criterion'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'prompt': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'criteria'", 'to': "orm['assessment.Rubric']"})
},
'assessment.criterionoption': {
'Meta': {'ordering': "['criterion', 'order_num']", 'object_name': 'CriterionOption'},
'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['assessment.Criterion']"}),
'explanation': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'points': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'assessment.peerworkflow': {
'Meta': {'ordering': "['created_at', 'id']", 'object_name': 'PeerWorkflow'},
'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'grading_completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'assessment.peerworkflowitem': {
'Meta': {'ordering': "['started_at', 'id']", 'object_name': 'PeerWorkflowItem'},
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Assessment']", 'null': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded_by'", 'to': "orm['assessment.PeerWorkflow']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scored': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scorer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded'", 'to': "orm['assessment.PeerWorkflow']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'assessment.rubric': {
'Meta': {'object_name': 'Rubric'},
'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['assessment']
\ No newline at end of file
...@@ -398,10 +398,15 @@ class AssessmentPart(models.Model): ...@@ -398,10 +398,15 @@ class AssessmentPart(models.Model):
by this assessor for this `Criterion`. So basically, think of this class by this assessor for this `Criterion`. So basically, think of this class
as :class:`CriterionOption` + student state. as :class:`CriterionOption` + student state.
""" """
MAX_FEEDBACK_SIZE = 1024 * 100
assessment = models.ForeignKey(Assessment, related_name='parts') assessment = models.ForeignKey(Assessment, related_name='parts')
# criterion = models.ForeignKey(Criterion) ?
option = models.ForeignKey(CriterionOption)  # TODO: no reverse
option = models.ForeignKey(CriterionOption, related_name="+")

# Free-form text feedback for the specific criterion
# Note that the `Assessment` model also has a feedback field,
# which is feedback on the submission as a whole.
feedback = models.TextField(default="", blank=True)
@property @property
def points_earned(self): def points_earned(self):
...@@ -412,13 +417,36 @@ class AssessmentPart(models.Model): ...@@ -412,13 +417,36 @@ class AssessmentPart(models.Model):
return self.option.criterion.points_possible return self.option.criterion.points_possible
@classmethod @classmethod
def add_to_assessment(cls, assessment, option_ids): def add_to_assessment(cls, assessment, option_ids, criterion_feedback=None):
"""Creates AssessmentParts and adds them to `assessment`.""" """
Creates AssessmentParts and adds them to `assessment`.
Args:
assessment (Assessment): The assessment model we're adding parts to.
option_ids (list of int): List of primary keys for options the user selected.
Kwargs:
criterion_feedback (dict): Dictionary mapping criterion names
to free-form text feedback on the criterion.
You don't need to include all the rubric criteria,
and keys that don't match any criterion will be ignored.
Returns:
None
"""
cls.objects.bulk_create([ cls.objects.bulk_create([
cls(assessment=assessment, option_id=option_id) cls(assessment=assessment, option_id=option_id)
for option_id in option_ids for option_id in option_ids
]) ])
if criterion_feedback is not None:
for criterion_name, feedback in criterion_feedback.iteritems():
feedback = feedback[0:cls.MAX_FEEDBACK_SIZE]
assessment.parts.filter(
option__criterion__name=criterion_name
).update(feedback=feedback)
class AssessmentFeedbackOption(models.Model): class AssessmentFeedbackOption(models.Model):
""" """
...@@ -447,7 +475,7 @@ class AssessmentFeedback(models.Model): ...@@ -447,7 +475,7 @@ class AssessmentFeedback(models.Model):
as well as zero or more feedback options as well as zero or more feedback options
("Please select the statements below that reflect what you think of this peer grading experience") ("Please select the statements below that reflect what you think of this peer grading experience")
""" """
MAXSIZE = 1024*100 # 100KB MAXSIZE = 1024 * 100 # 100KB
submission_uuid = models.CharField(max_length=128, unique=True, db_index=True) submission_uuid = models.CharField(max_length=128, unique=True, db_index=True)
assessments = models.ManyToManyField(Assessment, related_name='assessment_feedback', default=None) assessments = models.ManyToManyField(Assessment, related_name='assessment_feedback', default=None)
......
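To illustrate the model change above: a hedged sketch of how `AssessmentPart.add_to_assessment` handles the new `criterion_feedback` argument, assuming `assessment` and `option_ids` come from the calling API code (the criterion names here are illustrative). Per the docstring, feedback is truncated to `MAX_FEEDBACK_SIZE` and names that don't match a rubric criterion are ignored:

    # Sketch only: assessment and option_ids are created by the peer API, not here.
    AssessmentPart.add_to_assessment(
        assessment,
        option_ids,
        criterion_feedback={
            "clarity": "Very clear thesis.",    # matches a criterion: saved, truncated to MAX_FEEDBACK_SIZE
            "unknown": "Not in the rubric.",    # no matching criterion: silently ignored
        },
    )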
...@@ -11,7 +11,6 @@ from django.utils import timezone ...@@ -11,7 +11,6 @@ from django.utils import timezone
from django.utils.translation import ugettext as _ from django.utils.translation import ugettext as _
from django.db import DatabaseError from django.db import DatabaseError
from dogapi import dog_stats_api from dogapi import dog_stats_api
from django.db.models import Q
import random import random
from openassessment.assessment.models import ( from openassessment.assessment.models import (
...@@ -139,7 +138,9 @@ def get_score(submission_uuid, requirements): ...@@ -139,7 +138,9 @@ def get_score(submission_uuid, requirements):
def create_assessment( def create_assessment(
scorer_submission_uuid, scorer_submission_uuid,
scorer_id, scorer_id,
assessment_dict, options_selected,
criterion_feedback,
overall_feedback,
rubric_dict, rubric_dict,
num_required_grades, num_required_grades,
scored_at=None): scored_at=None):
...@@ -154,8 +155,13 @@ def create_assessment( ...@@ -154,8 +155,13 @@ def create_assessment(
peer workflow of the grading student. peer workflow of the grading student.
scorer_id (str): The user ID for the user giving this assessment. This scorer_id (str): The user ID for the user giving this assessment. This
is required to create an assessment on a submission. is required to create an assessment on a submission.
assessment_dict (dict): All related information for the assessment. An
    assessment contains points_earned, points_possible, and feedback.
options_selected (dict): Dictionary mapping criterion names to the
    option names the user selected for that criterion.
criterion_feedback (dict): Dictionary mapping criterion names to the
free-form text feedback the user gave for the criterion.
Since criterion feedback is optional, some criteria may not appear
in the dictionary.
overall_feedback (unicode): Free-form text feedback on the submission overall.
num_required_grades (int): The required number of assessments a num_required_grades (int): The required number of assessments a
submission requires before it is completed. If this number of submission requires before it is completed. If this number of
assessments is reached, the grading_completed_at timestamp is set assessments is reached, the grading_completed_at timestamp is set
...@@ -177,11 +183,10 @@ def create_assessment( ...@@ -177,11 +183,10 @@ def create_assessment(
while creating a new assessment. while creating a new assessment.
Examples: Examples:
>>> assessment_dict = dict(
>>> options_selected={"clarity": "Very clear", "precision": "Somewhat precise"},
>>> feedback="Your submission was thrilling.",
>>> )
>>> create_assessment("1", "Tim", assessment_dict, rubric_dict)
>>> options_selected = {"clarity": "Very clear", "precision": "Somewhat precise"}
>>> criterion_feedback = {"clarity": "I thought this essay was very clear."}
>>> feedback = "Your submission was thrilling."
>>> create_assessment("1", "Tim", options_selected, criterion_feedback, feedback, rubric_dict)
""" """
try: try:
rubric = rubric_from_dict(rubric_dict) rubric = rubric_from_dict(rubric_dict)
...@@ -189,13 +194,12 @@ def create_assessment( ...@@ -189,13 +194,12 @@ def create_assessment(
# Validate that the selected options matched the rubric # Validate that the selected options matched the rubric
# and raise an error if this is not the case # and raise an error if this is not the case
try: try:
option_ids = rubric.options_ids(assessment_dict["options_selected"]) option_ids = rubric.options_ids(options_selected)
except InvalidOptionSelection as ex: except InvalidOptionSelection as ex:
msg = _("Selected options do not match the rubric: {error}").format(error=ex.message) msg = _("Selected options do not match the rubric: {error}").format(error=ex.message)
raise PeerAssessmentRequestError(msg) raise PeerAssessmentRequestError(msg)
scorer_workflow = PeerWorkflow.objects.get(submission_uuid=scorer_submission_uuid) scorer_workflow = PeerWorkflow.objects.get(submission_uuid=scorer_submission_uuid)
feedback = assessment_dict.get('feedback', u'')
peer_workflow_item = _get_latest_open_workflow_item(scorer_workflow) peer_workflow_item = _get_latest_open_workflow_item(scorer_workflow)
if peer_workflow_item is None: if peer_workflow_item is None:
...@@ -212,7 +216,7 @@ def create_assessment( ...@@ -212,7 +216,7 @@ def create_assessment(
"scorer_id": scorer_id, "scorer_id": scorer_id,
"submission_uuid": peer_submission_uuid, "submission_uuid": peer_submission_uuid,
"score_type": PEER_TYPE, "score_type": PEER_TYPE,
"feedback": feedback, "feedback": overall_feedback[0:Assessment.MAXSIZE],
} }
if scored_at is not None: if scored_at is not None:
...@@ -228,7 +232,7 @@ def create_assessment( ...@@ -228,7 +232,7 @@ def create_assessment(
# We do this to do a run around django-rest-framework serializer # We do this to do a run around django-rest-framework serializer
# validation, which would otherwise require two DB queries per # validation, which would otherwise require two DB queries per
# option to do validation. We already validated these options above. # option to do validation. We already validated these options above.
AssessmentPart.add_to_assessment(assessment, option_ids) AssessmentPart.add_to_assessment(assessment, option_ids, criterion_feedback=criterion_feedback)
# Close the active assessment # Close the active assessment
_close_active_assessment(scorer_workflow, peer_submission_uuid, assessment, num_required_grades) _close_active_assessment(scorer_workflow, peer_submission_uuid, assessment, num_required_grades)
......
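The net behavioral change in the hunk above is that over-long overall feedback is now truncated rather than rejected. Roughly, as a sketch using the constant from models.py:

    # Instead of raising PeerAssessmentRequestError for over-long feedback,
    # the API stores a truncated copy:
    stored_feedback = overall_feedback[0:Assessment.MAXSIZE]   # MAXSIZE == 1024 * 100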
...@@ -154,19 +154,12 @@ class AssessmentPartSerializer(serializers.ModelSerializer): ...@@ -154,19 +154,12 @@ class AssessmentPartSerializer(serializers.ModelSerializer):
class Meta: class Meta:
model = AssessmentPart model = AssessmentPart
fields = ('option',) # TODO: Direct link to Criterion? fields = ('option', 'feedback')
class AssessmentSerializer(serializers.ModelSerializer): class AssessmentSerializer(serializers.ModelSerializer):
"""Simplified serializer for :class:`Assessment` that's lighter on the DB.""" """Simplified serializer for :class:`Assessment` that's lighter on the DB."""
def validate_feedback(self, attrs, source):
"""Check that the feedback is within an acceptable size range."""
value = attrs[source]
if len(value) > Assessment.MAXSIZE:
raise serializers.ValidationError("Maximum feedback size exceeded.")
return attrs
class Meta: class Meta:
model = Assessment model = Assessment
fields = ( fields = (
...@@ -235,7 +228,8 @@ def full_assessment_dict(assessment, rubric_dict=None): ...@@ -235,7 +228,8 @@ def full_assessment_dict(assessment, rubric_dict=None):
options_dict = criterion_dict["options"][part.option.order_num] options_dict = criterion_dict["options"][part.option.order_num]
options_dict["criterion"] = criterion_dict options_dict["criterion"] = criterion_dict
parts.append({ parts.append({
"option": options_dict "option": options_dict,
"feedback": part.feedback
}) })
# Now manually built up the dynamically calculated values on the # Now manually built up the dynamically calculated values on the
......
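With the serializer and `full_assessment_dict` changes above, each part of a serialized assessment now carries its per-criterion feedback. A brief sketch of reading it back (the empty-string default for criteria without feedback matches the tests below):

    # Sketch: iterate the parts of a serialized assessment dict.
    for part in assessment["parts"]:
        criterion_name = part["option"]["criterion"]["name"]
        feedback_text = part["feedback"]   # "" when the assessor left no feedback for this criterion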
...@@ -10,7 +10,10 @@ from nose.tools import raises ...@@ -10,7 +10,10 @@ from nose.tools import raises
from openassessment.test_utils import CacheResetTest from openassessment.test_utils import CacheResetTest
from openassessment.assessment import peer_api from openassessment.assessment import peer_api
from openassessment.assessment.models import Assessment, PeerWorkflow, PeerWorkflowItem, AssessmentFeedback from openassessment.assessment.models import (
Assessment, AssessmentPart, AssessmentFeedback,
PeerWorkflow, PeerWorkflowItem
)
from openassessment.workflow import api as workflow_api from openassessment.workflow import api as workflow_api
from submissions import api as sub_api from submissions import api as sub_api
from submissions.tests.test_api import STUDENT_ITEM, ANSWER_ONE from submissions.tests.test_api import STUDENT_ITEM, ANSWER_ONE
...@@ -67,48 +70,61 @@ RUBRIC_DICT = { ...@@ -67,48 +70,61 @@ RUBRIC_DICT = {
} }
# Answers are against RUBRIC_DICT -- this is worth 6 points # Answers are against RUBRIC_DICT -- this is worth 6 points
ASSESSMENT_DICT = dict( ASSESSMENT_DICT = {
feedback=u"这是中国", 'overall_feedback': u"这是中国",
options_selected={ 'criterion_feedback': {
"giveup": u"𝓨𝓸𝓾 𝓼𝓱𝓸𝓾𝓵𝓭𝓷'𝓽 𝓰𝓲𝓿𝓮 𝓾𝓹!"
},
'options_selected': {
"secret": "yes", "secret": "yes",
u"ⓢⓐⓕⓔ": "no", u"ⓢⓐⓕⓔ": "no",
"giveup": "reluctant", "giveup": "reluctant",
"singing": "no", "singing": "no",
} },
) }
# Answers are against RUBRIC_DICT -- this is worth 0 points # Answers are against RUBRIC_DICT -- this is worth 0 points
ASSESSMENT_DICT_FAIL = dict( ASSESSMENT_DICT_FAIL = {
feedback=u"fail", 'overall_feedback': u"fail",
options_selected={ 'criterion_feedback': {},
'options_selected': {
"secret": "no", "secret": "no",
u"ⓢⓐⓕⓔ": "no", u"ⓢⓐⓕⓔ": "no",
"giveup": "unwilling", "giveup": "unwilling",
"singing": "yes", "singing": "yes",
} }
) }
# Answers are against RUBRIC_DICT -- this is worth 12 points # Answers are against RUBRIC_DICT -- this is worth 12 points
ASSESSMENT_DICT_PASS = dict( ASSESSMENT_DICT_PASS = {
feedback=u"这是中国", 'overall_feedback': u"这是中国",
options_selected={ 'criterion_feedback': {},
'options_selected': {
"secret": "yes", "secret": "yes",
u"ⓢⓐⓕⓔ": "yes", u"ⓢⓐⓕⓔ": "yes",
"giveup": "eager", "giveup": "eager",
"singing": "no", "singing": "no",
} }
) }
# Answers are against RUBRIC_DICT -- this is worth 12 points # Answers are against RUBRIC_DICT -- this is worth 12 points
ASSESSMENT_DICT_PASS_HUGE = dict( # Feedback text is one character over the limit.
feedback=u"这是中国" * Assessment.MAXSIZE, LONG_FEEDBACK_TEXT = u"是" * Assessment.MAXSIZE + "."
options_selected={ ASSESSMENT_DICT_HUGE = {
'overall_feedback': LONG_FEEDBACK_TEXT,
'criterion_feedback': {
"secret": LONG_FEEDBACK_TEXT,
u"ⓢⓐⓕⓔ": LONG_FEEDBACK_TEXT,
"giveup": LONG_FEEDBACK_TEXT,
"singing": LONG_FEEDBACK_TEXT,
},
'options_selected': {
"secret": "yes", "secret": "yes",
u"ⓢⓐⓕⓔ": "yes", u"ⓢⓐⓕⓔ": "yes",
"giveup": "eager", "giveup": "eager",
"singing": "no", "singing": "no",
} },
) }
REQUIRED_GRADED = 5 REQUIRED_GRADED = 5
REQUIRED_GRADED_BY = 3 REQUIRED_GRADED_BY = 3
...@@ -121,33 +137,124 @@ THURSDAY = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC) ...@@ -121,33 +137,124 @@ THURSDAY = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC)
@ddt @ddt
class TestPeerApi(CacheResetTest): class TestPeerApi(CacheResetTest):
def test_create_assessment(self): """
Tests for the peer assessment API functions.
"""
CREATE_ASSESSMENT_NUM_QUERIES = 60
def test_create_assessment_points(self):
self._create_student_and_submission("Tim", "Tim's answer") self._create_student_and_submission("Tim", "Tim's answer")
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer") bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 1) peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
assessment = peer_api.create_assessment(
bob_sub["uuid"], with self.assertNumQueries(self.CREATE_ASSESSMENT_NUM_QUERIES):
bob["student_id"], assessment = peer_api.create_assessment(
ASSESSMENT_DICT, bob_sub["uuid"],
RUBRIC_DICT, bob["student_id"],
REQUIRED_GRADED_BY, ASSESSMENT_DICT['options_selected'], dict(), "",
) RUBRIC_DICT,
REQUIRED_GRADED_BY,
)
self.assertEqual(assessment["points_earned"], 6) self.assertEqual(assessment["points_earned"], 6)
self.assertEqual(assessment["points_possible"], 14) self.assertEqual(assessment["points_possible"], 14)
self.assertEqual(assessment["feedback"], ASSESSMENT_DICT["feedback"])
def test_create_huge_assessment_fails(self): def test_create_assessment_with_feedback(self):
self._create_student_and_submission("Tim", "Tim's answer") self._create_student_and_submission("Tim", "Tim's answer")
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer") bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 1) peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
with self.assertRaises(peer_api.PeerAssessmentRequestError):
peer_api.create_assessment( # Creating feedback per criterion should need one additional query to update
# for each criterion that has feedback.
with self.assertNumQueries(self.CREATE_ASSESSMENT_NUM_QUERIES + 1):
assessment = peer_api.create_assessment(
bob_sub["uuid"], bob_sub["uuid"],
bob["student_id"], bob["student_id"],
ASSESSMENT_DICT_PASS_HUGE, ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT, RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
self.assertEqual(assessment["feedback"], ASSESSMENT_DICT["overall_feedback"])
# The parts are not guaranteed to be in any particular order,
# so we need to iterate through and check them by name.
# If we haven't explicitly set feedback for the criterion, expect
# that it defaults to an empty string.
for part in assessment['parts']:
criterion_name = part['option']['criterion']['name']
expected_feedback = ASSESSMENT_DICT['criterion_feedback'].get(criterion_name, "")
self.assertEqual(part['feedback'], expected_feedback)
def test_create_assessment_unknown_criterion_feedback(self):
self._create_student_and_submission("Tim", "Tim's answer")
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
# Create an assessment where the criterion feedback uses
# a criterion name that isn't in the rubric.
assessment = peer_api.create_assessment(
bob_sub["uuid"],
bob["student_id"],
ASSESSMENT_DICT['options_selected'],
{'unknown': 'Unknown criterion has feedback!'},
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY,
)
# The criterion feedback should be ignored
for part_num in range(3):
self.assertEqual(assessment["parts"][part_num]["feedback"], "")
def test_create_huge_overall_feedback_error(self):
self._create_student_and_submission("Tim", "Tim's answer")
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
# Huge overall feedback text
assessment_dict = peer_api.create_assessment(
bob_sub["uuid"],
bob["student_id"],
ASSESSMENT_DICT_HUGE['options_selected'],
dict(),
ASSESSMENT_DICT_HUGE['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY,
)
# The assessment feedback text should be truncated
self.assertEqual(len(assessment_dict['feedback']), Assessment.MAXSIZE)
# The length of the feedback text in the database should
# equal what we got from the API.
assessment = Assessment.objects.get()
self.assertEqual(len(assessment.feedback), Assessment.MAXSIZE)
def test_create_huge_per_criterion_feedback_error(self):
self._create_student_and_submission("Tim", "Tim's answer")
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
# Huge per-criterion feedback text
assessment = peer_api.create_assessment(
bob_sub["uuid"],
bob["student_id"],
ASSESSMENT_DICT_HUGE['options_selected'],
ASSESSMENT_DICT_HUGE['criterion_feedback'],
"",
RUBRIC_DICT,
REQUIRED_GRADED_BY,
)
# Verify that the feedback has been truncated
for part in assessment['parts']:
self.assertEqual(len(part['feedback']), Assessment.MAXSIZE)
# Verify that the feedback in the database matches what we got back from the API
for part in AssessmentPart.objects.all():
self.assertEqual(len(part.feedback), Assessment.MAXSIZE)
@file_data('valid_assessments.json') @file_data('valid_assessments.json')
def test_get_assessments(self, assessment_dict): def test_get_assessments(self, assessment_dict):
...@@ -157,7 +264,9 @@ class TestPeerApi(CacheResetTest): ...@@ -157,7 +264,9 @@ class TestPeerApi(CacheResetTest):
peer_api.create_assessment( peer_api.create_assessment(
bob_sub["uuid"], bob_sub["uuid"],
bob["student_id"], bob["student_id"],
assessment_dict, assessment_dict['options_selected'],
assessment_dict['criterion_feedback'],
assessment_dict['overall_feedback'],
RUBRIC_DICT, RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
...@@ -172,7 +281,9 @@ class TestPeerApi(CacheResetTest): ...@@ -172,7 +281,9 @@ class TestPeerApi(CacheResetTest):
peer_api.create_assessment( peer_api.create_assessment(
bob_sub["uuid"], bob_sub["uuid"],
bob["student_id"], bob["student_id"],
assessment_dict, assessment_dict['options_selected'],
assessment_dict['criterion_feedback'],
assessment_dict['overall_feedback'],
RUBRIC_DICT, RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
MONDAY, MONDAY,
...@@ -186,7 +297,7 @@ class TestPeerApi(CacheResetTest): ...@@ -186,7 +297,7 @@ class TestPeerApi(CacheResetTest):
Verify unfinished assessments do not get counted when determining a Verify unfinished assessments do not get counted when determining a
complete workflow. complete workflow.
""" """
tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer") tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer")
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer") bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 1) sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
self.assertEqual(sub["uuid"], tim_sub["uuid"]) self.assertEqual(sub["uuid"], tim_sub["uuid"])
...@@ -194,7 +305,11 @@ class TestPeerApi(CacheResetTest): ...@@ -194,7 +305,11 @@ class TestPeerApi(CacheResetTest):
self.assertFalse(finished) self.assertFalse(finished)
self.assertEqual(count, 0) self.assertEqual(count, 0)
peer_api.create_assessment( peer_api.create_assessment(
bob_sub["uuid"], bob["student_id"], ASSESSMENT_DICT, RUBRIC_DICT, bob_sub["uuid"], bob["student_id"],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
1, 1,
) )
finished, count = peer_api.has_finished_required_evaluating(bob_sub['uuid'], 1) finished, count = peer_api.has_finished_required_evaluating(bob_sub['uuid'], 1)
...@@ -229,7 +344,11 @@ class TestPeerApi(CacheResetTest): ...@@ -229,7 +344,11 @@ class TestPeerApi(CacheResetTest):
self.assertEqual(u"Bob's answer", sub['answer']) self.assertEqual(u"Bob's answer", sub['answer'])
peer_api.create_assessment( peer_api.create_assessment(
tim_sub["uuid"], tim["student_id"], ASSESSMENT_DICT, RUBRIC_DICT, tim_sub["uuid"], tim["student_id"],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
...@@ -245,7 +364,11 @@ class TestPeerApi(CacheResetTest): ...@@ -245,7 +364,11 @@ class TestPeerApi(CacheResetTest):
""" """
tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer", MONDAY) tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
peer_api.create_assessment( peer_api.create_assessment(
tim_sub["uuid"], tim["student_id"], ASSESSMENT_DICT, RUBRIC_DICT, tim_sub["uuid"], tim["student_id"],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
...@@ -274,7 +397,11 @@ class TestPeerApi(CacheResetTest): ...@@ -274,7 +397,11 @@ class TestPeerApi(CacheResetTest):
self.assertEquals((False, i), peer_api.has_finished_required_evaluating(tim_sub['uuid'], REQUIRED_GRADED)) self.assertEquals((False, i), peer_api.has_finished_required_evaluating(tim_sub['uuid'], REQUIRED_GRADED))
sub = peer_api.get_submission_to_assess(tim_sub['uuid'], REQUIRED_GRADED) sub = peer_api.get_submission_to_assess(tim_sub['uuid'], REQUIRED_GRADED)
peer_api.create_assessment( peer_api.create_assessment(
tim_sub["uuid"], tim["student_id"], ASSESSMENT_DICT, RUBRIC_DICT, tim_sub["uuid"], tim["student_id"],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
...@@ -287,21 +414,33 @@ class TestPeerApi(CacheResetTest): ...@@ -287,21 +414,33 @@ class TestPeerApi(CacheResetTest):
sub = peer_api.get_submission_to_assess(bob_sub['uuid'], REQUIRED_GRADED) sub = peer_api.get_submission_to_assess(bob_sub['uuid'], REQUIRED_GRADED)
self.assertEqual(sub["uuid"], tim_sub["uuid"]) self.assertEqual(sub["uuid"], tim_sub["uuid"])
peer_api.create_assessment( peer_api.create_assessment(
bob_sub["uuid"], bob["student_id"], ASSESSMENT_DICT, RUBRIC_DICT, bob_sub["uuid"], bob["student_id"],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
sub = peer_api.get_submission_to_assess(sally_sub['uuid'], REQUIRED_GRADED) sub = peer_api.get_submission_to_assess(sally_sub['uuid'], REQUIRED_GRADED)
self.assertEqual(sub["uuid"], tim_sub["uuid"]) self.assertEqual(sub["uuid"], tim_sub["uuid"])
peer_api.create_assessment( peer_api.create_assessment(
sally_sub["uuid"], sally["student_id"], ASSESSMENT_DICT_FAIL, RUBRIC_DICT, sally_sub["uuid"], sally["student_id"],
ASSESSMENT_DICT_FAIL['options_selected'],
ASSESSMENT_DICT_FAIL['criterion_feedback'],
ASSESSMENT_DICT_FAIL['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
sub = peer_api.get_submission_to_assess(jim_sub['uuid'], REQUIRED_GRADED) sub = peer_api.get_submission_to_assess(jim_sub['uuid'], REQUIRED_GRADED)
self.assertEqual(sub["uuid"], tim_sub["uuid"]) self.assertEqual(sub["uuid"], tim_sub["uuid"])
peer_api.create_assessment( peer_api.create_assessment(
jim_sub["uuid"], jim["student_id"], ASSESSMENT_DICT_PASS, RUBRIC_DICT, jim_sub["uuid"], jim["student_id"],
ASSESSMENT_DICT_PASS['options_selected'],
ASSESSMENT_DICT_PASS['criterion_feedback'],
ASSESSMENT_DICT_PASS['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
...@@ -351,7 +490,11 @@ class TestPeerApi(CacheResetTest): ...@@ -351,7 +490,11 @@ class TestPeerApi(CacheResetTest):
self.assertEqual(sub["uuid"], tim_sub["uuid"]) self.assertEqual(sub["uuid"], tim_sub["uuid"])
peer_api.create_assessment( peer_api.create_assessment(
jim_sub["uuid"], jim["student_id"], ASSESSMENT_DICT, RUBRIC_DICT, jim_sub["uuid"], jim["student_id"],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
...@@ -365,12 +508,20 @@ class TestPeerApi(CacheResetTest): ...@@ -365,12 +508,20 @@ class TestPeerApi(CacheResetTest):
self.assertIsNone(PeerWorkflow.objects.get(student_id=tim["student_id"]).grading_completed_at) self.assertIsNone(PeerWorkflow.objects.get(student_id=tim["student_id"]).grading_completed_at)
peer_api.create_assessment( peer_api.create_assessment(
bob_sub["uuid"], bob["student_id"], ASSESSMENT_DICT, RUBRIC_DICT, bob_sub["uuid"], bob["student_id"],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
peer_api.create_assessment( peer_api.create_assessment(
sally_sub["uuid"], sally["student_id"], ASSESSMENT_DICT, RUBRIC_DICT, sally_sub["uuid"], sally["student_id"],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
...@@ -420,29 +571,29 @@ class TestPeerApi(CacheResetTest): ...@@ -420,29 +571,29 @@ class TestPeerApi(CacheResetTest):
"peer": { "peer": {
"must_grade": REQUIRED_GRADED, "must_grade": REQUIRED_GRADED,
"must_be_graded_by": REQUIRED_GRADED_BY, "must_be_graded_by": REQUIRED_GRADED_BY,
} }
} }
# 1) Angel Submits # 1) Angel Submits
angel_sub, angel = self._create_student_and_submission("Angel", "Angel's answer") angel_sub, _ = self._create_student_and_submission("Angel", "Angel's answer")
# 2) Angel waits for peers # 2) Angel waits for peers
sub = peer_api.get_submission_to_assess(angel_sub['uuid'], REQUIRED_GRADED_BY) sub = peer_api.get_submission_to_assess(angel_sub['uuid'], REQUIRED_GRADED_BY)
self.assertIsNone(sub) self.assertIsNone(sub)
# 3) Bob submits # 3) Bob submits
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer") bob_sub, _ = self._create_student_and_submission("Bob", "Bob's answer")
sub = peer_api.get_submission_to_assess(bob_sub['uuid'], REQUIRED_GRADED_BY) sub = peer_api.get_submission_to_assess(bob_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(angel_sub["uuid"], sub["uuid"]) self.assertEquals(angel_sub["uuid"], sub["uuid"])
# 4) Sally submits # 4) Sally submits
sally_sub, sally = self._create_student_and_submission("Sally", "Sally's answer") sally_sub, _ = self._create_student_and_submission("Sally", "Sally's answer")
# 5) Sally pulls Angel's Submission but never reviews it. # 5) Sally pulls Angel's Submission but never reviews it.
sub = peer_api.get_submission_to_assess(sally_sub['uuid'], REQUIRED_GRADED_BY) sub = peer_api.get_submission_to_assess(sally_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(angel_sub["uuid"], sub["uuid"]) self.assertEquals(angel_sub["uuid"], sub["uuid"])
# 6) Jim submits # 6) Jim submits
jim_sub, jim = self._create_student_and_submission("Jim", "Jim's answer") jim_sub, _ = self._create_student_and_submission("Jim", "Jim's answer")
# 7) Jim also doesn't care about Angel and does not bother to review. # 7) Jim also doesn't care about Angel and does not bother to review.
sub = peer_api.get_submission_to_assess(jim_sub['uuid'], REQUIRED_GRADED_BY) sub = peer_api.get_submission_to_assess(jim_sub['uuid'], REQUIRED_GRADED_BY)
...@@ -458,21 +609,32 @@ class TestPeerApi(CacheResetTest): ...@@ -458,21 +609,32 @@ class TestPeerApi(CacheResetTest):
# 10) Buffy goes on to review Bob, Sally, and Jim, but needs two more. # 10) Buffy goes on to review Bob, Sally, and Jim, but needs two more.
peer_api.create_assessment( peer_api.create_assessment(
buffy_sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT, buffy_sub["uuid"], buffy["student_id"],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY) sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(sally_sub["uuid"], sub["uuid"]) self.assertEquals(sally_sub["uuid"], sub["uuid"])
peer_api.create_assessment( peer_api.create_assessment(
buffy_sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT, buffy_sub["uuid"], buffy["student_id"],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY) sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(jim_sub["uuid"], sub["uuid"]) self.assertEquals(jim_sub["uuid"], sub["uuid"])
peer_api.create_assessment( peer_api.create_assessment(
buffy_sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT, buffy_sub["uuid"], buffy["student_id"],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY) sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY)
self.assertIsNone(sub) self.assertIsNone(sub)
...@@ -485,37 +647,44 @@ class TestPeerApi(CacheResetTest): ...@@ -485,37 +647,44 @@ class TestPeerApi(CacheResetTest):
sub = peer_api.get_submission_to_assess(xander_sub['uuid'], REQUIRED_GRADED_BY) sub = peer_api.get_submission_to_assess(xander_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(bob_sub["uuid"], sub["uuid"]) self.assertEquals(bob_sub["uuid"], sub["uuid"])
peer_api.create_assessment( peer_api.create_assessment(
xander_sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT, xander_sub["uuid"], xander["student_id"],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
sub = peer_api.get_submission_to_assess(xander_sub['uuid'], REQUIRED_GRADED_BY) sub = peer_api.get_submission_to_assess(xander_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(sally_sub["uuid"], sub["uuid"]) self.assertEquals(sally_sub["uuid"], sub["uuid"])
peer_api.create_assessment( peer_api.create_assessment(
xander_sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT, xander_sub["uuid"], xander["student_id"],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
sub = peer_api.get_submission_to_assess(xander_sub['uuid'], REQUIRED_GRADED_BY) sub = peer_api.get_submission_to_assess(xander_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(jim_sub["uuid"], sub["uuid"]) self.assertEquals(jim_sub["uuid"], sub["uuid"])
peer_api.create_assessment( peer_api.create_assessment(
xander_sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT, xander_sub["uuid"], xander["student_id"],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
# Tim has met the critera, and should now have a score.
# We patch the call to `self_api.is_complete()` simulate having completed a self-assessment.
# TODO: currently, we need to import `self_api` within the `_is_self_complete` method
# to avoid circular imports. This means we can't patch self_api directly.
from openassessment.workflow.models import AssessmentWorkflow
with patch.object(AssessmentWorkflow, '_is_self_complete') as mock_complete:
mock_complete.return_value = True
score = workflow_api.get_workflow_for_submission(sub["uuid"], requirements)["score"]
# 13) Buffy is waiting in the wings. She pulls Xander's submission and # 13) Buffy is waiting in the wings. She pulls Xander's submission and
# grades it. # grades it.
sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY) sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(xander_sub["uuid"], sub["uuid"]) self.assertEquals(xander_sub["uuid"], sub["uuid"])
peer_api.create_assessment( peer_api.create_assessment(
buffy_sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT, buffy_sub["uuid"], buffy["student_id"],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
...@@ -526,32 +695,52 @@ class TestPeerApi(CacheResetTest): ...@@ -526,32 +695,52 @@ class TestPeerApi(CacheResetTest):
sub = peer_api.get_submission_to_assess(spike_sub['uuid'], REQUIRED_GRADED_BY) sub = peer_api.get_submission_to_assess(spike_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(bob_sub["uuid"], sub["uuid"]) self.assertEquals(bob_sub["uuid"], sub["uuid"])
peer_api.create_assessment( peer_api.create_assessment(
spike_sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT, spike_sub["uuid"], spike["student_id"],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
sub = peer_api.get_submission_to_assess(spike_sub['uuid'], REQUIRED_GRADED_BY) sub = peer_api.get_submission_to_assess(spike_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(sally_sub["uuid"], sub["uuid"]) self.assertEquals(sally_sub["uuid"], sub["uuid"])
peer_api.create_assessment( peer_api.create_assessment(
spike_sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT, spike_sub["uuid"], spike["student_id"],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
sub = peer_api.get_submission_to_assess(spike_sub['uuid'], REQUIRED_GRADED_BY) sub = peer_api.get_submission_to_assess(spike_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(jim_sub["uuid"], sub["uuid"]) self.assertEquals(jim_sub["uuid"], sub["uuid"])
peer_api.create_assessment( peer_api.create_assessment(
spike_sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT, spike_sub["uuid"], spike["student_id"],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
sub = peer_api.get_submission_to_assess(spike_sub['uuid'], REQUIRED_GRADED_BY) sub = peer_api.get_submission_to_assess(spike_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(buffy_sub["uuid"], sub["uuid"]) self.assertEquals(buffy_sub["uuid"], sub["uuid"])
peer_api.create_assessment( peer_api.create_assessment(
spike_sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT, spike_sub["uuid"], spike["student_id"],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
sub = peer_api.get_submission_to_assess(spike_sub['uuid'], REQUIRED_GRADED_BY) sub = peer_api.get_submission_to_assess(spike_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(xander_sub["uuid"], sub["uuid"]) self.assertEquals(xander_sub["uuid"], sub["uuid"])
peer_api.create_assessment( peer_api.create_assessment(
spike_sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT, spike_sub["uuid"], spike["student_id"],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
...@@ -559,7 +748,11 @@ class TestPeerApi(CacheResetTest): ...@@ -559,7 +748,11 @@ class TestPeerApi(CacheResetTest):
sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY) sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(spike_sub["uuid"], sub["uuid"]) self.assertEquals(spike_sub["uuid"], sub["uuid"])
peer_api.create_assessment( peer_api.create_assessment(
buffy_sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT, buffy_sub["uuid"], buffy["student_id"],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
...@@ -570,7 +763,11 @@ class TestPeerApi(CacheResetTest): ...@@ -570,7 +763,11 @@ class TestPeerApi(CacheResetTest):
sub = peer_api.get_submission_to_assess(willow_sub['uuid'], REQUIRED_GRADED_BY) sub = peer_api.get_submission_to_assess(willow_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(buffy_sub["uuid"], sub["uuid"]) self.assertEquals(buffy_sub["uuid"], sub["uuid"])
peer_api.create_assessment( peer_api.create_assessment(
willow_sub["uuid"], willow["student_id"], ASSESSMENT_DICT, RUBRIC_DICT, willow_sub["uuid"], willow["student_id"],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
...@@ -578,7 +775,11 @@ class TestPeerApi(CacheResetTest): ...@@ -578,7 +775,11 @@ class TestPeerApi(CacheResetTest):
sub = peer_api.get_submission_to_assess(xander_sub['uuid'], REQUIRED_GRADED_BY) sub = peer_api.get_submission_to_assess(xander_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(buffy_sub["uuid"], sub["uuid"]) self.assertEquals(buffy_sub["uuid"], sub["uuid"])
peer_api.create_assessment( peer_api.create_assessment(
xander_sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT, xander_sub["uuid"], xander["student_id"],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
...@@ -590,8 +791,8 @@ class TestPeerApi(CacheResetTest): ...@@ -590,8 +791,8 @@ class TestPeerApi(CacheResetTest):
self.assertTrue(peer_api.is_complete(buffy_sub["uuid"], requirements)) self.assertTrue(peer_api.is_complete(buffy_sub["uuid"], requirements))
def test_find_active_assessments(self): def test_find_active_assessments(self):
buffy_answer, buffy = self._create_student_and_submission("Buffy", "Buffy's answer") buffy_answer, _ = self._create_student_and_submission("Buffy", "Buffy's answer")
xander_answer, xander = self._create_student_and_submission("Xander", "Xander's answer") xander_answer, _ = self._create_student_and_submission("Xander", "Xander's answer")
# Check for a workflow for Buffy. # Check for a workflow for Buffy.
buffy_workflow = peer_api._get_workflow_by_submission_uuid(buffy_answer['uuid']) buffy_workflow = peer_api._get_workflow_by_submission_uuid(buffy_answer['uuid'])
...@@ -611,18 +812,18 @@ class TestPeerApi(CacheResetTest): ...@@ -611,18 +812,18 @@ class TestPeerApi(CacheResetTest):
self.assertEqual(xander_answer["uuid"], submission_uuid) self.assertEqual(xander_answer["uuid"], submission_uuid)
def test_get_workflow_by_uuid(self): def test_get_workflow_by_uuid(self):
buffy_answer, buffy = self._create_student_and_submission("Buffy", "Buffy's answer") buffy_answer, _ = self._create_student_and_submission("Buffy", "Buffy's answer")
self._create_student_and_submission("Xander", "Xander's answer") self._create_student_and_submission("Xander", "Xander's answer")
self._create_student_and_submission("Willow", "Willow's answer") self._create_student_and_submission("Willow", "Willow's answer")
buffy_answer_two, buffy = self._create_student_and_submission("Buffy", "Buffy's answer") buffy_answer_two, _ = self._create_student_and_submission("Buffy", "Buffy's answer")
workflow = peer_api._get_workflow_by_submission_uuid(buffy_answer_two['uuid']) workflow = peer_api._get_workflow_by_submission_uuid(buffy_answer_two['uuid'])
self.assertNotEqual(buffy_answer["uuid"], workflow.submission_uuid) self.assertNotEqual(buffy_answer["uuid"], workflow.submission_uuid)
self.assertEqual(buffy_answer_two["uuid"], workflow.submission_uuid) self.assertEqual(buffy_answer_two["uuid"], workflow.submission_uuid)
def test_get_submission_for_review(self): def test_get_submission_for_review(self):
buffy_answer, buffy = self._create_student_and_submission("Buffy", "Buffy's answer") buffy_answer, _ = self._create_student_and_submission("Buffy", "Buffy's answer")
xander_answer, xander = self._create_student_and_submission("Xander", "Xander's answer") xander_answer, _ = self._create_student_and_submission("Xander", "Xander's answer")
self._create_student_and_submission("Willow", "Willow's answer") self._create_student_and_submission("Willow", "Willow's answer")
buffy_workflow = peer_api._get_workflow_by_submission_uuid(buffy_answer['uuid']) buffy_workflow = peer_api._get_workflow_by_submission_uuid(buffy_answer['uuid'])
...@@ -632,9 +833,9 @@ class TestPeerApi(CacheResetTest): ...@@ -632,9 +833,9 @@ class TestPeerApi(CacheResetTest):
self.assertEqual(xander_answer["uuid"], submission_uuid) self.assertEqual(xander_answer["uuid"], submission_uuid)
def test_get_submission_for_over_grading(self): def test_get_submission_for_over_grading(self):
buffy_answer, buffy = self._create_student_and_submission("Buffy", "Buffy's answer") buffy_answer, _ = self._create_student_and_submission("Buffy", "Buffy's answer")
xander_answer, xander = self._create_student_and_submission("Xander", "Xander's answer") xander_answer, _ = self._create_student_and_submission("Xander", "Xander's answer")
willow_answer, willow = self._create_student_and_submission("Willow", "Willow's answer") willow_answer, _ = self._create_student_and_submission("Willow", "Willow's answer")
buffy_workflow = peer_api._get_workflow_by_submission_uuid(buffy_answer['uuid']) buffy_workflow = peer_api._get_workflow_by_submission_uuid(buffy_answer['uuid'])
xander_workflow = peer_api._get_workflow_by_submission_uuid(xander_answer['uuid']) xander_workflow = peer_api._get_workflow_by_submission_uuid(xander_answer['uuid'])
...@@ -647,28 +848,32 @@ class TestPeerApi(CacheResetTest): ...@@ -647,28 +848,32 @@ class TestPeerApi(CacheResetTest):
peer_api._create_peer_workflow_item(buffy_workflow, willow_answer["uuid"]) peer_api._create_peer_workflow_item(buffy_workflow, willow_answer["uuid"])
peer_api._create_peer_workflow_item(xander_workflow, willow_answer["uuid"]) peer_api._create_peer_workflow_item(xander_workflow, willow_answer["uuid"])
#Get the next submission for review # Get the next submission for review
submission_uuid = peer_api._get_submission_for_over_grading(xander_workflow) submission_uuid = peer_api._get_submission_for_over_grading(xander_workflow)
if not (buffy_answer["uuid"] == submission_uuid or willow_answer["uuid"] == submission_uuid): if not (buffy_answer["uuid"] == submission_uuid or willow_answer["uuid"] == submission_uuid):
self.fail("Submission was not Buffy or Willow's.") self.fail("Submission was not Buffy or Willow's.")
def test_create_assessment_feedback(self): def test_create_feedback_on_an_assessment(self):
tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer") tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer") bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 1) peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
assessment = peer_api.create_assessment( assessment = peer_api.create_assessment(
bob_sub["uuid"], bob_sub["uuid"],
bob["student_id"], bob["student_id"],
ASSESSMENT_DICT, ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT, RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
sub = peer_api.get_submission_to_assess(tim_sub['uuid'], 1) peer_api.get_submission_to_assess(tim_sub['uuid'], 1)
peer_api.create_assessment( peer_api.create_assessment(
tim_sub["uuid"], tim_sub["uuid"],
tim["student_id"], tim["student_id"],
ASSESSMENT_DICT, ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT, RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
...@@ -702,8 +907,8 @@ class TestPeerApi(CacheResetTest): ...@@ -702,8 +907,8 @@ class TestPeerApi(CacheResetTest):
self.assertEquals(saved_feedback["assessments"][0]["submission_uuid"], assessment["submission_uuid"]) self.assertEquals(saved_feedback["assessments"][0]["submission_uuid"], assessment["submission_uuid"])
def test_close_active_assessment(self): def test_close_active_assessment(self):
buffy_answer, buffy = self._create_student_and_submission("Buffy", "Buffy's answer") buffy_answer, _ = self._create_student_and_submission("Buffy", "Buffy's answer")
xander_answer, xander = self._create_student_and_submission("Xander", "Xander's answer") xander_answer, _ = self._create_student_and_submission("Xander", "Xander's answer")
# Create a workflow for Buffy. # Create a workflow for Buffy.
buffy_workflow = peer_api._get_workflow_by_submission_uuid(buffy_answer['uuid']) buffy_workflow = peer_api._get_workflow_by_submission_uuid(buffy_answer['uuid'])
...@@ -714,7 +919,11 @@ class TestPeerApi(CacheResetTest): ...@@ -714,7 +919,11 @@ class TestPeerApi(CacheResetTest):
self.assertEqual(xander_answer["uuid"], submission["uuid"]) self.assertEqual(xander_answer["uuid"], submission["uuid"])
assessment_dict = peer_api.create_assessment( assessment_dict = peer_api.create_assessment(
buffy_answer["uuid"], "Buffy", ASSESSMENT_DICT, RUBRIC_DICT, buffy_answer["uuid"], "Buffy",
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
) )
assessment = Assessment.objects.filter( assessment = Assessment.objects.filter(
...@@ -729,7 +938,7 @@ class TestPeerApi(CacheResetTest): ...@@ -729,7 +938,7 @@ class TestPeerApi(CacheResetTest):
@patch.object(PeerWorkflow.objects, 'raw') @patch.object(PeerWorkflow.objects, 'raw')
@raises(peer_api.PeerAssessmentInternalError) @raises(peer_api.PeerAssessmentInternalError)
def test_failure_to_get_review_submission(self, mock_filter): def test_failure_to_get_review_submission(self, mock_filter):
tim_answer, tim = self._create_student_and_submission("Tim", "Tim's answer", MONDAY) tim_answer, _ = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
tim_workflow = peer_api._get_workflow_by_submission_uuid(tim_answer['uuid']) tim_workflow = peer_api._get_workflow_by_submission_uuid(tim_answer['uuid'])
mock_filter.side_effect = DatabaseError("Oh no.") mock_filter.side_effect = DatabaseError("Oh no.")
peer_api._get_submission_for_review(tim_workflow, 3) peer_api._get_submission_for_review(tim_workflow, 3)
...@@ -745,14 +954,14 @@ class TestPeerApi(CacheResetTest): ...@@ -745,14 +954,14 @@ class TestPeerApi(CacheResetTest):
@raises(peer_api.PeerAssessmentInternalError) @raises(peer_api.PeerAssessmentInternalError)
def test_set_assessment_feedback_error(self, mock_filter): def test_set_assessment_feedback_error(self, mock_filter):
mock_filter.side_effect = DatabaseError("Oh no.") mock_filter.side_effect = DatabaseError("Oh no.")
tim_answer, tim = self._create_student_and_submission("Tim", "Tim's answer", MONDAY) tim_answer, _ = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
peer_api.set_assessment_feedback({'submission_uuid': tim_answer['uuid']}) peer_api.set_assessment_feedback({'submission_uuid': tim_answer['uuid']})
@patch.object(AssessmentFeedback, 'save') @patch.object(AssessmentFeedback, 'save')
@raises(peer_api.PeerAssessmentInternalError) @raises(peer_api.PeerAssessmentInternalError)
def test_set_assessment_feedback_error_on_save(self, mock_filter): def test_set_assessment_feedback_error_on_save(self, mock_filter):
mock_filter.side_effect = DatabaseError("Oh no.") mock_filter.side_effect = DatabaseError("Oh no.")
tim_answer, tim = self._create_student_and_submission("Tim", "Tim's answer", MONDAY) tim_answer, _ = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
peer_api.set_assessment_feedback( peer_api.set_assessment_feedback(
{ {
'submission_uuid': tim_answer['uuid'], 'submission_uuid': tim_answer['uuid'],
...@@ -763,7 +972,7 @@ class TestPeerApi(CacheResetTest): ...@@ -763,7 +972,7 @@ class TestPeerApi(CacheResetTest):
@patch.object(AssessmentFeedback, 'save') @patch.object(AssessmentFeedback, 'save')
@raises(peer_api.PeerAssessmentRequestError) @raises(peer_api.PeerAssessmentRequestError)
def test_set_assessment_feedback_error_on_huge_save(self, mock_filter): def test_set_assessment_feedback_error_on_huge_save(self, mock_filter):
tim_answer, tim = self._create_student_and_submission("Tim", "Tim's answer", MONDAY) tim_answer, _ = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
peer_api.set_assessment_feedback( peer_api.set_assessment_feedback(
{ {
'submission_uuid': tim_answer['uuid'], 'submission_uuid': tim_answer['uuid'],
...@@ -775,7 +984,7 @@ class TestPeerApi(CacheResetTest): ...@@ -775,7 +984,7 @@ class TestPeerApi(CacheResetTest):
@raises(peer_api.PeerAssessmentWorkflowError) @raises(peer_api.PeerAssessmentWorkflowError)
def test_failure_to_get_latest_workflow(self, mock_filter): def test_failure_to_get_latest_workflow(self, mock_filter):
mock_filter.side_effect = DatabaseError("Oh no.") mock_filter.side_effect = DatabaseError("Oh no.")
tim_answer, tim = self._create_student_and_submission("Tim", "Tim's answer", MONDAY) tim_answer, _ = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
peer_api._get_workflow_by_submission_uuid(tim_answer['uuid']) peer_api._get_workflow_by_submission_uuid(tim_answer['uuid'])
@patch.object(PeerWorkflow.objects, 'get_or_create') @patch.object(PeerWorkflow.objects, 'get_or_create')
...@@ -815,15 +1024,13 @@ class TestPeerApi(CacheResetTest): ...@@ -815,15 +1024,13 @@ class TestPeerApi(CacheResetTest):
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer") bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 1) sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
assessment = peer_api.create_assessment( assessment = peer_api.create_assessment(
bob_sub["uuid"], bob_sub["uuid"], bob["student_id"],
bob["student_id"], ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT, ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT, RUBRIC_DICT,
1 1
) )
self.assertEqual(assessment["points_earned"], 6)
self.assertEqual(assessment["points_possible"], 14)
self.assertEqual(assessment["feedback"], ASSESSMENT_DICT["feedback"])
max_scores = peer_api.get_rubric_max_scores(sub["uuid"]) max_scores = peer_api.get_rubric_max_scores(sub["uuid"])
self.assertEqual(max_scores['secret'], 1) self.assertEqual(max_scores['secret'], 1)
...@@ -834,9 +1041,10 @@ class TestPeerApi(CacheResetTest): ...@@ -834,9 +1041,10 @@ class TestPeerApi(CacheResetTest):
self._create_student_and_submission("Tim", "Tim's answer") self._create_student_and_submission("Tim", "Tim's answer")
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer") bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
peer_api.create_assessment( peer_api.create_assessment(
bob_sub['uuid'], bob_sub['uuid'], bob['student_id'],
bob['student_id'], ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT, ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT, RUBRIC_DICT,
1 1
) )
...@@ -869,9 +1077,10 @@ class TestPeerApi(CacheResetTest): ...@@ -869,9 +1077,10 @@ class TestPeerApi(CacheResetTest):
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE) submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
peer_api.create_peer_workflow(submission["uuid"]) peer_api.create_peer_workflow(submission["uuid"])
peer_api.create_assessment( peer_api.create_assessment(
submission["uuid"], submission["uuid"], STUDENT_ITEM["student_id"],
STUDENT_ITEM["student_id"], ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT, ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT, RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
MONDAY, MONDAY,
...@@ -884,9 +1093,10 @@ class TestPeerApi(CacheResetTest): ...@@ -884,9 +1093,10 @@ class TestPeerApi(CacheResetTest):
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer") bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 3) sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 3)
peer_api.create_assessment( peer_api.create_assessment(
bob_sub["uuid"], bob_sub["uuid"], bob["student_id"],
bob["student_id"], ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT, ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT, RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
MONDAY, MONDAY,
...@@ -913,7 +1123,9 @@ class TestPeerApi(CacheResetTest): ...@@ -913,7 +1123,9 @@ class TestPeerApi(CacheResetTest):
peer_api.create_assessment( peer_api.create_assessment(
submission["uuid"], submission["uuid"],
"another_student", "another_student",
ASSESSMENT_DICT, ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT, RUBRIC_DICT,
REQUIRED_GRADED_BY, REQUIRED_GRADED_BY,
MONDAY, MONDAY,
......
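For reference, the calls above reflect a peer_api.create_assessment signature that takes the selected options, per-criterion feedback, and overall feedback as separate positional arguments rather than a single assessment dict. A minimal sketch of such a call, reusing fixture names from the tests above (the values shown are illustrative, not the actual fixture contents):

# Sketch only: argument order inferred from the test calls above.
options_selected = {"secret": "yes", "clarity": "clear"}        # criterion name -> chosen option
criterion_feedback = {"clarity": "Well organized response."}    # criterion name -> comment text
overall_feedback = "Your submission was thrilling."

assessment = peer_api.create_assessment(
    bob_sub["uuid"],         # submission being assessed
    bob["student_id"],       # peer doing the scoring
    options_selected,
    criterion_feedback,
    overall_feedback,
    RUBRIC_DICT,             # {'criteria': [...]}
    REQUIRED_GRADED_BY,      # number of peers that must grade each submission
)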
{ {
"unicode_evaluation": { "unicode_evaluation": {
"feedback": "这是中国", "overall_feedback": "这是中国",
"criterion_feedback": {},
"options_selected": { "options_selected": {
"secret": "yes", "secret": "yes",
"ⓢⓐⓕⓔ": "no", "ⓢⓐⓕⓔ": "no",
...@@ -9,7 +10,8 @@ ...@@ -9,7 +10,8 @@
} }
}, },
"basic_evaluation": { "basic_evaluation": {
"feedback": "Your submission was thrilling.", "overall_feedback": "Your submission was thrilling.",
"criterion_feedback": {},
"options_selected": { "options_selected": {
"secret": "yes", "secret": "yes",
"ⓢⓐⓕⓔ": "no", "ⓢⓐⓕⓔ": "no",
...@@ -17,4 +19,4 @@ ...@@ -17,4 +19,4 @@
"singing": "no" "singing": "no"
} }
} }
} }
\ No newline at end of file
...@@ -93,14 +93,10 @@ class Command(BaseCommand): ...@@ -93,14 +93,10 @@ class Command(BaseCommand):
peer_api.create_peer_workflow_item(scorer_submission_uuid, submission_uuid) peer_api.create_peer_workflow_item(scorer_submission_uuid, submission_uuid)
# Create the peer assessment # Create the peer assessment
assessment = {
'options_selected': options_selected,
'feedback': " ".join(loremipsum.get_paragraphs(2))
}
peer_api.create_assessment( peer_api.create_assessment(
scorer_submission_uuid, scorer_submission_uuid,
scorer_id, scorer_id,
assessment, options_selected, {}, " ".join(loremipsum.get_paragraphs(2)),
rubric, rubric,
self.NUM_PEER_ASSESSMENTS self.NUM_PEER_ASSESSMENTS
) )
......
...@@ -58,7 +58,7 @@ ...@@ -58,7 +58,7 @@
{% with peer_num=forloop.counter %} {% with peer_num=forloop.counter %}
{% for part in assessment.parts %} {% for part in assessment.parts %}
{% if part.option.criterion.name == criterion.name %} {% if part.option.criterion.name == criterion.name %}
<li class="answer peer-assessment--{{ peer_num}}" <li class="answer peer-assessment--{{ peer_num }}"
id="question--{{ criterion_num }}__answer-{{ peer_num }}"> id="question--{{ criterion_num }}__answer-{{ peer_num }}">
<h5 class="answer__title"> <h5 class="answer__title">
<span class="answer__source"> <span class="answer__source">
...@@ -78,6 +78,7 @@ ...@@ -78,6 +78,7 @@
<i class="ico icon-info-sign" <i class="ico icon-info-sign"
title="{% blocktrans with name=part.option.name %}More information about {{ name }}{% endblocktrans %}"></i> title="{% blocktrans with name=part.option.name %}More information about {{ name }}{% endblocktrans %}"></i>
</span> </span>
</span> </span>
</span> </span>
</h5> </h5>
...@@ -114,6 +115,30 @@ ...@@ -114,6 +115,30 @@
</li> </li>
{% endif %} {% endif %}
{% endfor %} {% endfor %}
{% if criterion.feedback %}
<li class="answer--feedback ui-toggle-visibility is--collapsed">
<h5 class="answer--feedback__title ui-toggle-visibility__control">
<i class="ico icon-caret-right"></i>
<span class="answer--feedback__title__copy">{% trans "Additional Comments" %} ({{ criterion.feedback|length }})</span>
</h5>
<ul class="answer--feedback__content ui-toggle-visibility__content">
{% for feedback in criterion.feedback %}
<li class="feedback feedback--{{ forloop.counter }}">
<h6 class="feedback__source">
{% trans "Peer" %} {{ forloop.counter }}
</h6>
<div class="feedback__value">
{{ feedback }}
</div>
</li>
{% endfor %}
</ul>
</li>
{% endif %}
</ul> </ul>
</li> </li>
{% endwith %} {% endwith %}
...@@ -221,7 +246,13 @@ ...@@ -221,7 +246,13 @@
</li> </li>
<li class="field field--textarea feedback__remarks" id="feedback__remarks"> <li class="field field--textarea feedback__remarks" id="feedback__remarks">
<label for="feedback__remarks__value">{% trans "Please provide any feedback on the grade or comments that you received from your peers." %}</label> <label for="feedback__remarks__value">{% trans "Please provide any feedback on the grade or comments that you received from your peers." %}</label>
<textarea id="feedback__remarks__value" placeholder="{% trans "I feel the feedback I received was..." %}">{{ feedback_text }}</textarea> <textarea
id="feedback__remarks__value"
placeholder="{% trans "I feel the feedback I received was..." %}"
maxlength="100000"
>
{{ feedback_text }}
</textarea>
</li> </li>
</ol> </ol>
......
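The "Additional Comments" block above is driven by a criterion.feedback list in the template context. A sketch of the per-criterion shape the template appears to expect, matching the example in the _rubric_criteria_with_feedback helper further down (names and strings are illustrative):

criterion_context = {
    "name": "Ideas",
    "prompt": "Determine if there is a unifying theme or main idea.",
    "order_num": 0,
    "options": [],        # option dicts omitted for brevity
    "feedback": [         # one entry per peer comment; empty strings are filtered out upstream
        "Good job!",
        "Excellent work!",
    ],
}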
...@@ -94,10 +94,26 @@ ...@@ -94,10 +94,26 @@
</div> </div>
<div class="wrapper--metadata"> <div class="wrapper--metadata">
<span class="answer__tip">{{ option.explanation }}</span> <span class="answer__tip">{{ option.explanation }}</span>
<span class="answer__points">{{option.points}} <span class="answer__points__label">{% trans "points" %}</span></span> <span class="answer__points">{{ option.points }} <span class="answer__points__label">{% trans "points" %}</span></span>
</div> </div>
</li> </li>
{% endfor %} {% endfor %}
{% if criterion.feedback == 'optional' %}
<li class="answer--feedback">
<div class="wrapper--input">
<label for="assessment__rubric__question--{{ criterion.order_num }}__feedback" class="answer__label">{% trans "Comments" %}</label>
<textarea
id="assessment__rubric__question--{{ criterion.order_num }}__feedback"
class="answer__value"
value="{{ criterion.name }}"
name="{{ criterion.name }}"
maxlength="300"
>
</textarea>
</div>
</li>
{% endif %}
</ol> </ol>
</div> </div>
</li> </li>
...@@ -108,7 +124,12 @@ ...@@ -108,7 +124,12 @@
</label> </label>
<div class="wrapper--input"> <div class="wrapper--input">
<textarea id="assessment__rubric__question--feedback__value" placeholder="{% trans "I noticed that this response..." %}"></textarea> <textarea
id="assessment__rubric__question--feedback__value"
placeholder="{% trans "I noticed that this response..." %}"
maxlength="500"
>
</textarea>
</div> </div>
</li> </li>
</ol> </ol>
......
...@@ -80,7 +80,23 @@ ...@@ -80,7 +80,23 @@
</div> </div>
</li> </li>
{% endfor %} {% endfor %}
</ol>
{% if criterion.feedback == 'optional' %}
<li class="answer--feedback">
<div class="wrapper--input">
<label for="assessment__rubric__question--{{ criterion.order_num }}__feedback" class="answer__label">{% trans "Comments" %}</label>
<textarea
id="assessment__rubric__question--{{ criterion.order_num }}__feedback"
class="answer__value"
value="{{ criterion.name }}"
name="{{ criterion.name }}"
maxlength="300"
>
</textarea>
</div>
</li>
{% endif %}
</ol>
</div> </div>
</li> </li>
{% endfor %} {% endfor %}
...@@ -90,7 +106,12 @@ ...@@ -90,7 +106,12 @@
<span class="question__title__copy">{{ rubric_feedback_prompt }}</span> <span class="question__title__copy">{{ rubric_feedback_prompt }}</span>
</label> </label>
<div class="wrapper--input"> <div class="wrapper--input">
<textarea id="assessment__rubric__question--feedback__value" placeholder="{% trans "I noticed that this response..." %}"></textarea> <textarea
id="assessment__rubric__question--feedback__value"
placeholder="{% trans "I noticed that this response..." %}"
maxlength="500"
>
</textarea>
</div> </div>
</li> </li>
</ol> </ol>
......
...@@ -58,7 +58,13 @@ ...@@ -58,7 +58,13 @@
<ol class="list list--fields response__submission__content"> <ol class="list list--fields response__submission__content">
<li class="field field--textarea submission__answer" id="submission__answer"> <li class="field field--textarea submission__answer" id="submission__answer">
<label class="sr" for="submission__answer__value">{% trans "Enter your response to the question." %}</label> <label class="sr" for="submission__answer__value">{% trans "Enter your response to the question." %}</label>
<textarea id="submission__answer__value" placeholder="">{{ saved_response }}</textarea> <textarea
id="submission__answer__value"
placeholder=""
maxlength="100000"
>
{{ saved_response }}
</textarea>
<span class="tip">{% trans "You may continue to work on your response until you submit it." %}</span> <span class="tip">{% trans "You may continue to work on your response until you submit it." %}</span>
</li> </li>
</ol> </ol>
......
...@@ -17,6 +17,7 @@ DEFAULT_RUBRIC_CRITERIA = [ ...@@ -17,6 +17,7 @@ DEFAULT_RUBRIC_CRITERIA = [
'name': "Ideas", 'name': "Ideas",
'prompt': "Determine if there is a unifying theme or main idea.", 'prompt': "Determine if there is a unifying theme or main idea.",
'order_num': 0, 'order_num': 0,
'feedback': 'optional',
'options': [ 'options': [
{ {
'order_num': 0, 'points': 0, 'name': 'Poor', 'order_num': 0, 'points': 0, 'name': 'Poor',
......
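Per-criterion feedback is opt-in: a criterion enables the comments box by carrying a 'feedback' key set to 'optional', as in the updated default rubric above. An illustrative criterion entry under that assumption (field names follow DEFAULT_RUBRIC_CRITERIA and the template fixtures in this commit):

criterion = {
    'name': "Ideas",
    'prompt': "Determine if there is a unifying theme or main idea.",
    'order_num': 0,
    'feedback': 'optional',   # leaving the key out is treated as 'disabled' and hides the comments box
    'options': [
        {'order_num': 0, 'points': 0, 'name': 'Poor'},
        {'order_num': 1, 'points': 1, 'name': 'Fair'},
        {'order_num': 2, 'points': 2, 'name': 'Good'},
    ],
}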
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
Grade step in the OpenAssessment XBlock. Grade step in the OpenAssessment XBlock.
""" """
import copy import copy
from collections import defaultdict
from django.utils.translation import ugettext as _ from django.utils.translation import ugettext as _
from xblock.core import XBlock from xblock.core import XBlock
...@@ -88,7 +89,7 @@ class GradeMixin(object): ...@@ -88,7 +89,7 @@ class GradeMixin(object):
'student_submission': student_submission, 'student_submission': student_submission,
'peer_assessments': peer_assessments, 'peer_assessments': peer_assessments,
'self_assessment': self_assessment, 'self_assessment': self_assessment,
'rubric_criteria': copy.deepcopy(self.rubric_criteria), 'rubric_criteria': self._rubric_criteria_with_feedback(peer_assessments),
'has_submitted_feedback': has_submitted_feedback, 'has_submitted_feedback': has_submitted_feedback,
} }
...@@ -161,3 +162,44 @@ class GradeMixin(object): ...@@ -161,3 +162,44 @@ class GradeMixin(object):
} }
) )
return {'success': True, 'msg': _(u"Feedback saved.")} return {'success': True, 'msg': _(u"Feedback saved.")}
def _rubric_criteria_with_feedback(self, peer_assessments):
"""
Add per-criterion feedback from peer assessments to the rubric criteria.
Filters out empty feedback.
Args:
peer_assessments (list of dict): Serialized assessment models from the peer API.
Returns:
list of criterion dictionaries
Example:
[
{
'name': 'Test name',
'prompt': 'Test prompt',
'order_num': 2,
'options': [...],
'feedback': [
'Good job!',
'Excellent work!',
]
},
...
]
"""
criteria = copy.deepcopy(self.rubric_criteria)
criteria_feedback = defaultdict(list)
for assessment in peer_assessments:
for part in assessment['parts']:
if part['feedback']:
part_criterion_name = part['option']['criterion']['name']
criteria_feedback[part_criterion_name].append(part['feedback'])
for criterion in criteria:
criterion_name = criterion['name']
criterion['feedback'] = criteria_feedback[criterion_name]
return criteria
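The helper above attaches feedback to a deep copy of the rubric by grouping non-empty part feedback under each criterion's name. A self-contained sketch of that aggregation step (the assessment/part shapes mirror the docstring example, not the real serializers):

from collections import defaultdict

peer_assessments = [
    {"parts": [
        {"option": {"criterion": {"name": "Ideas"}}, "feedback": "Good job!"},
        {"option": {"criterion": {"name": "Content"}}, "feedback": ""},   # empty feedback is dropped
    ]},
    {"parts": [
        {"option": {"criterion": {"name": "Ideas"}}, "feedback": "Excellent work!"},
    ]},
]

criteria_feedback = defaultdict(list)
for assessment in peer_assessments:
    for part in assessment["parts"]:
        if part["feedback"]:
            criteria_feedback[part["option"]["criterion"]["name"]].append(part["feedback"])

print(dict(criteria_feedback))   # {'Ideas': ['Good job!', 'Excellent work!']}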
...@@ -45,54 +45,35 @@ class PeerAssessmentMixin(object): ...@@ -45,54 +45,35 @@ class PeerAssessmentMixin(object):
""" """
# Validate the request # Validate the request
if 'feedback' not in data:
return {'success': False, 'msg': _('Must provide feedback in the assessment')}
if 'options_selected' not in data: if 'options_selected' not in data:
return {'success': False, 'msg': _('Must provide options selected in the assessment')} return {'success': False, 'msg': _('Must provide options selected in the assessment')}
if 'overall_feedback' not in data:
return {'success': False, 'msg': _('Must provide overall feedback in the assessment')}
if 'criterion_feedback' not in data:
return {'success': False, 'msg': _('Must provide feedback for criteria in the assessment')}
assessment_ui_model = self.get_assessment_module('peer-assessment') assessment_ui_model = self.get_assessment_module('peer-assessment')
if assessment_ui_model: if assessment_ui_model:
rubric_dict = { rubric_dict = {
'criteria': self.rubric_criteria 'criteria': self.rubric_criteria
} }
assessment_dict = {
"feedback": data['feedback'],
"options_selected": data["options_selected"],
}
try: try:
# Create the assessment
assessment = peer_api.create_assessment( assessment = peer_api.create_assessment(
self.submission_uuid, self.submission_uuid,
self.get_student_item_dict()["student_id"], self.get_student_item_dict()["student_id"],
assessment_dict, data['options_selected'],
self._clean_criterion_feedback(data['criterion_feedback']),
data['overall_feedback'],
rubric_dict, rubric_dict,
assessment_ui_model['must_be_graded_by'] assessment_ui_model['must_be_graded_by']
) )
# Emit analytics event... # Emit analytics event...
self.runtime.publish( self._publish_peer_assessment_event(assessment)
self,
"openassessmentblock.peer_assess",
{
"feedback": assessment["feedback"],
"rubric": {
"content_hash": assessment["rubric"]["content_hash"],
},
"scorer_id": assessment["scorer_id"],
"score_type": assessment["score_type"],
"scored_at": assessment["scored_at"],
"submission_uuid": assessment["submission_uuid"],
"parts": [
{
"option": {
"name": part["option"]["name"],
"points": part["option"]["points"]
}
}
for part in assessment["parts"]
]
}
)
except PeerAssessmentRequestError as ex: except PeerAssessmentRequestError as ex:
return {'success': False, 'msg': ex.message} return {'success': False, 'msg': ex.message}
except PeerAssessmentInternalError as ex: except PeerAssessmentInternalError as ex:
...@@ -258,3 +239,58 @@ class PeerAssessmentMixin(object): ...@@ -258,3 +239,58 @@ class PeerAssessmentMixin(object):
logger.exception(err) logger.exception(err)
return peer_submission return peer_submission
def _publish_peer_assessment_event(self, assessment):
"""
Emit an analytics event for the peer assessment.
Args:
assessment (dict): The serialized assessment model.
Returns:
None
"""
self.runtime.publish(
self,
"openassessmentblock.peer_assess",
{
"feedback": assessment["feedback"],
"rubric": {
"content_hash": assessment["rubric"]["content_hash"],
},
"scorer_id": assessment["scorer_id"],
"score_type": assessment["score_type"],
"scored_at": assessment["scored_at"],
"submission_uuid": assessment["submission_uuid"],
"parts": [
{
"option": {
"name": part["option"]["name"],
"points": part["option"]["points"],
},
"feedback": part["feedback"],
}
for part in assessment["parts"]
]
}
)
def _clean_criterion_feedback(self, criterion_feedback):
"""
Remove per-criterion feedback for criteria with feedback disabled
in the rubric.
Args:
criterion_feedback (dict): Mapping of criterion names to feedback text.
Returns:
dict
"""
return {
criterion['name']: criterion_feedback[criterion['name']]
for criterion in self.rubric_criteria
if criterion['name'] in criterion_feedback
and criterion.get('feedback', 'disabled') == 'optional'
}
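_clean_criterion_feedback keeps a comment only when the named criterion exists in the rubric and explicitly opts in with 'feedback': 'optional'. A self-contained sketch of the same filtering (criterion names and comments are illustrative):

rubric_criteria = [
    {"name": "Ideas", "feedback": "optional"},
    {"name": "Content"},                          # no 'feedback' key -> treated as disabled
]
criterion_feedback = {
    "Ideas": "Clear thesis.",
    "Content": "Dropped: feedback not enabled.",
    "Unknown": "Dropped: not in the rubric.",
}

cleaned = {
    criterion["name"]: criterion_feedback[criterion["name"]]
    for criterion in rubric_criteria
    if criterion["name"] in criterion_feedback
    and criterion.get("feedback", "disabled") == "optional"
}
print(cleaned)   # {'Ideas': 'Clear thesis.'}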
...@@ -55,8 +55,76 @@ ...@@ -55,8 +55,76 @@
{ {
"template": "openassessmentblock/peer/oa_peer_assessment.html", "template": "openassessmentblock/peer/oa_peer_assessment.html",
"context": { "context": {
"rubric_criteria": [], "rubric_criteria": [
"peer_submission": {} {
"name": "Criterion 1",
"prompt": "Prompt 1",
"order_num": 0,
"feedback": "optional",
"options": [
{
"order_num": 0,
"points": 0,
"name": "Poor"
},
{
"order_num": 1,
"points": 1,
"name": "Fair"
},
{
"order_num": 2,
"points": 2,
"name": "Good"
}
]
},
{
"name": "Criterion 2",
"prompt": "Prompt 2",
"order_num": 1,
"options": [
{
"order_num": 0,
"points": 0,
"name": "Poor"
},
{
"order_num": 1,
"points": 1,
"name": "Fair"
},
{
"order_num": 2,
"points": 2,
"name": "Good"
}
]
},
{
"name": "Criterion 3",
"prompt": "Prompt 3",
"order_num": 2,
"feedback": "optional",
"options": [
{
"order_num": 0,
"points": 0,
"name": "Poor"
},
{
"order_num": 1,
"points": 1,
"name": "Fair"
},
{
"order_num": 2,
"points": 2,
"name": "Good"
}
]
}
]
}, },
"output": "oa_peer_assessment.html" "output": "oa_peer_assessment.html"
}, },
......
if(typeof OpenAssessment=="undefined"||!OpenAssessment){OpenAssessment={}}if(typeof window.gettext==="undefined"){window.gettext=function(text){return text}}OpenAssessment.BaseView=function(runtime,element,server){this.runtime=runtime;this.element=element;this.server=server;this.responseView=new OpenAssessment.ResponseView(this.element,this.server,this);this.gradeView=new OpenAssessment.GradeView(this.element,this.server,this)};OpenAssessment.BaseView.prototype={scrollToTop:function(){if($.scrollTo instanceof Function){$(window).scrollTo($("#openassessment__steps"),800,{offset:-50})}},setUpCollapseExpand:function(parentSel,onExpand){parentSel.find(".ui-toggle-visibility__control").click(function(eventData){var sel=$(eventData.target).closest(".ui-toggle-visibility");if(sel.hasClass("is--collapsed")&&onExpand!==undefined){onExpand()}sel.toggleClass("is--collapsed")})},load:function(){this.responseView.load();this.renderPeerAssessmentStep();this.renderSelfAssessmentStep();this.gradeView.load();courseStaffDebug=$(".wrapper--staff-info");if(courseStaffDebug.length>0){this.setUpCollapseExpand(courseStaffDebug,function(){})}},renderPeerAssessmentStep:function(){var view=this;this.server.render("peer_assessment").done(function(html){$("#openassessment__peer-assessment",view.element).replaceWith(html);var sel=$("#openassessment__peer-assessment",view.element);view.setUpCollapseExpand(sel,$.proxy(view.renderContinuedPeerAssessmentStep,view));sel.find("#peer-assessment--001__assessment").change(function(){var numChecked=$("input[type=radio]:checked",this).length;var numAvailable=$(".field--radio.assessment__rubric__question",this).length;$("#peer-assessment--001__assessment__submit",view.element).toggleClass("is--disabled",numChecked!=numAvailable)});sel.find("#peer-assessment--001__assessment__submit").click(function(eventObject){eventObject.preventDefault();view.peerAssess()})}).fail(function(errMsg){view.showLoadError("peer-assessment")})},renderContinuedPeerAssessmentStep:function(){var view=this;this.server.renderContinuedPeer().done(function(html){$("#openassessment__peer-assessment",view.element).replaceWith(html);var sel=$("#openassessment__peer-assessment",view.element);view.setUpCollapseExpand(sel);sel.find("#peer-assessment--001__assessment__submit").click(function(eventObject){eventObject.preventDefault();view.continuedPeerAssess()});sel.find("#peer-assessment--001__assessment").change(function(){var numChecked=$("input[type=radio]:checked",this).length;var numAvailable=$(".field--radio.assessment__rubric__question",this).length;$("#peer-assessment--001__assessment__submit",view.element).toggleClass("is--disabled",numChecked!=numAvailable)})}).fail(function(errMsg){view.showLoadError("peer-assessment")})},renderSelfAssessmentStep:function(){var view=this;this.server.render("self_assessment").done(function(html){$("#openassessment__self-assessment",view.element).replaceWith(html);var sel=$("#openassessment__self-assessment",view.element);view.setUpCollapseExpand(sel);$("#self-assessment--001__assessment",view.element).change(function(){var numChecked=$("input[type=radio]:checked",this).length;var 
numAvailable=$(".field--radio.assessment__rubric__question",this).length;$("#self-assessment--001__assessment__submit",view.element).toggleClass("is--disabled",numChecked!=numAvailable)});sel.find("#self-assessment--001__assessment__submit").click(function(eventObject){eventObject.preventDefault();view.selfAssess()})}).fail(function(errMsg){view.showLoadError("self-assessment")})},peerSubmitEnabled:function(enabled){var button=$("#peer-assessment--001__assessment__submit",this.element);if(typeof enabled==="undefined"){return!button.hasClass("is--disabled")}else{button.toggleClass("is--disabled",!enabled)}},selfSubmitEnabled:function(enabled){var button=$("#self-assessment--001__assessment__submit",this.element);if(typeof enabled==="undefined"){return!button.hasClass("is--disabled")}else{button.toggleClass("is--disabled",!enabled)}},peerAssess:function(){var view=this;this.peerAssessRequest(function(){view.renderPeerAssessmentStep();view.renderSelfAssessmentStep();view.gradeView.load();view.scrollToTop()})},continuedPeerAssess:function(){var view=this;view.peerAssessRequest(function(){view.renderContinuedPeerAssessmentStep();view.gradeView.load()})},peerAssessRequest:function(successFunction){var optionsSelected={};$("#peer-assessment--001__assessment input[type=radio]:checked",this.element).each(function(index,sel){optionsSelected[sel.name]=sel.value});var feedback=$("#assessment__rubric__question--feedback__value",this.element).val();var view=this;view.toggleActionError("peer",null);view.peerSubmitEnabled(false);this.server.peerAssess(optionsSelected,feedback).done(successFunction).fail(function(errMsg){view.toggleActionError("peer",errMsg);view.peerSubmitEnabled(true)})},selfAssess:function(){var optionsSelected={};$("#self-assessment--001__assessment input[type=radio]:checked",this.element).each(function(index,sel){optionsSelected[sel.name]=sel.value});var view=this;view.toggleActionError("self",null);view.selfSubmitEnabled(false);this.server.selfAssess(optionsSelected).done(function(){view.renderPeerAssessmentStep();view.renderSelfAssessmentStep();view.gradeView.load();view.scrollToTop()}).fail(function(errMsg){view.toggleActionError("self",errMsg);view.selfSubmitEnabled(true)})},toggleActionError:function(type,msg){var element=this.element;var container=null;if(type=="save"){container=".response__submission__actions"}else if(type=="submit"||type=="peer"||type=="self"){container=".step__actions"}else if(type=="feedback_assess"){container=".submission__feedback__actions"}if(container===null){if(msg!==null){console.log(msg)}}else{var msgHtml=msg===null?"":msg;$(container+" .message__content",element).html("<p>"+msgHtml+"</p>");$(container,element).toggleClass("has--error",msg!==null)}},showLoadError:function(step){var container="#openassessment__"+step;$(container).toggleClass("has--error",true);$(container+" .step__status__value i").removeClass().addClass("ico icon-warning-sign");$(container+" .step__status__value .copy").html(gettext("Unable to Load"))},getStepActionsErrorMessage:function(){return $(".step__actions .message__content").html()}};function OpenAssessmentBlock(runtime,element){$(function($){var server=new OpenAssessment.Server(runtime,element);var view=new OpenAssessment.BaseView(runtime,element,server);view.load()})}if(typeof OpenAssessment=="undefined"||!OpenAssessment){OpenAssessment={}}if(typeof window.gettext==="undefined"){window.gettext=function(text){return 
text}}OpenAssessment.StudioView=function(runtime,element,server){this.runtime=runtime;this.server=server;this.codeBox=CodeMirror.fromTextArea($(element).find(".openassessment-editor").first().get(0),{mode:"xml",lineNumbers:true,lineWrapping:true});var view=this;$(element).find(".openassessment-save-button").click(function(eventData){view.save()});$(element).find(".openassessment-cancel-button").click(function(eventData){view.cancel()})};OpenAssessment.StudioView.prototype={load:function(){var view=this;this.server.loadXml().done(function(xml){view.codeBox.setValue(xml)}).fail(function(msg){view.showError(msg)})},save:function(){var view=this;this.server.checkReleased().done(function(isReleased){if(isReleased){view.confirmPostReleaseUpdate($.proxy(view.updateXml,view))}else{view.updateXml()}}).fail(function(errMsg){view.showError(msg)})},confirmPostReleaseUpdate:function(onConfirm){var msg=gettext("This problem has already been released. Any changes will apply only to future assessments.");if(confirm(msg)){onConfirm()}},updateXml:function(){this.runtime.notify("save",{state:"start"});var xml=this.codeBox.getValue();var view=this;this.server.updateXml(xml).done(function(){view.runtime.notify("save",{state:"end"});view.load()}).fail(function(msg){view.showError(msg)})},cancel:function(){this.runtime.notify("cancel",{})},showError:function(errorMsg){this.runtime.notify("error",{msg:errorMsg})}};function OpenAssessmentEditor(runtime,element){$(function($){var server=new OpenAssessment.Server(runtime,element);var view=new OpenAssessment.StudioView(runtime,element,server);view.load()})}if(typeof OpenAssessment=="undefined"||!OpenAssessment){OpenAssessment={}}if(typeof window.gettext==="undefined"){window.gettext=function(text){return text}}OpenAssessment.GradeView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView};OpenAssessment.GradeView.prototype={load:function(){var view=this;var baseView=this.baseView;this.server.render("grade").done(function(html){$("#openassessment__grade",view.element).replaceWith(html);view.installHandlers()}).fail(function(errMsg){baseView.showLoadError("grade",errMsg)})},installHandlers:function(){var sel=$("#openassessment__grade",this.element);this.baseView.setUpCollapseExpand(sel);var view=this;sel.find("#feedback__submit").click(function(eventObject){eventObject.preventDefault();view.submitFeedbackOnAssessment()})},feedbackText:function(text){if(typeof text==="undefined"){return $("#feedback__remarks__value",this.element).val()}else{$("#feedback__remarks__value",this.element).val(text)}},feedbackOptions:function(options){var view=this;if(typeof options==="undefined"){return $.map($(".feedback__overall__value:checked",view.element),function(element,index){return $(element).val()})}else{$(".feedback__overall__value",this.element).prop("checked",false);$.each(options,function(index,opt){$("#feedback__overall__value--"+opt,view.element).prop("checked",true)})}},setHidden:function(sel,hidden){sel.toggleClass("is--hidden",hidden);sel.attr("aria-hidden",hidden?"true":"false")},isHidden:function(sel){return sel.hasClass("is--hidden")&&sel.attr("aria-hidden")=="true"},feedbackState:function(newState){var containerSel=$(".submission__feedback__content",this.element);var instructionsSel=containerSel.find(".submission__feedback__instructions");var fieldsSel=containerSel.find(".submission__feedback__fields");var actionsSel=containerSel.find(".submission__feedback__actions");var 
transitionSel=containerSel.find(".transition__status");var messageSel=containerSel.find(".message--complete");if(typeof newState==="undefined"){var isSubmitting=containerSel.hasClass("is--transitioning")&&containerSel.hasClass("is--submitting")&&!this.isHidden(transitionSel)&&this.isHidden(messageSel)&&this.isHidden(instructionsSel)&&this.isHidden(fieldsSel)&&this.isHidden(actionsSel);var hasSubmitted=containerSel.hasClass("is--submitted")&&this.isHidden(transitionSel)&&!this.isHidden(messageSel)&&this.isHidden(instructionsSel)&&this.isHidden(fieldsSel)&&this.isHidden(actionsSel);var isOpen=!containerSel.hasClass("is--submitted")&&!containerSel.hasClass("is--transitioning")&&!containerSel.hasClass("is--submitting")&&this.isHidden(transitionSel)&&this.isHidden(messageSel)&&!this.isHidden(instructionsSel)&&!this.isHidden(fieldsSel)&&!this.isHidden(actionsSel);if(isOpen){return"open"}else if(isSubmitting){return"submitting"}else if(hasSubmitted){return"submitted"}else{throw"Invalid feedback state"}}else{if(newState=="open"){containerSel.toggleClass("is--transitioning",false);containerSel.toggleClass("is--submitting",false);containerSel.toggleClass("is--submitted",false);this.setHidden(instructionsSel,false);this.setHidden(fieldsSel,false);this.setHidden(actionsSel,false);this.setHidden(transitionSel,true);this.setHidden(messageSel,true)}else if(newState=="submitting"){containerSel.toggleClass("is--transitioning",true);containerSel.toggleClass("is--submitting",true);containerSel.toggleClass("is--submitted",false);this.setHidden(instructionsSel,true);this.setHidden(fieldsSel,true);this.setHidden(actionsSel,true);this.setHidden(transitionSel,false);this.setHidden(messageSel,true)}else if(newState=="submitted"){containerSel.toggleClass("is--transitioning",false);containerSel.toggleClass("is--submitting",false);containerSel.toggleClass("is--submitted",true);this.setHidden(instructionsSel,true);this.setHidden(fieldsSel,true);this.setHidden(actionsSel,true);this.setHidden(transitionSel,true);this.setHidden(messageSel,false)}}},submitFeedbackOnAssessment:function(){var view=this;var baseView=this.baseView;$("#feedback__submit",this.element).toggleClass("is--disabled",true);view.feedbackState("submitting");this.server.submitFeedbackOnAssessment(this.feedbackText(),this.feedbackOptions()).done(function(){view.feedbackState("submitted")}).fail(function(errMsg){baseView.toggleActionError("feedback_assess",errMsg)})}};if(typeof OpenAssessment=="undefined"||!OpenAssessment){OpenAssessment={}}if(typeof window.gettext==="undefined"){window.gettext=function(text){return text}}OpenAssessment.ResponseView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView;this.savedResponse=""};OpenAssessment.ResponseView.prototype={load:function(){var view=this;this.server.render("submission").done(function(html){$("#openassessment__response",view.element).replaceWith(html);view.installHandlers()}).fail(function(errMsg){view.baseView.showLoadError("response")})},installHandlers:function(){var sel=$("#openassessment__response",this.element);var view=this;this.baseView.setUpCollapseExpand(sel);this.savedResponse=this.response();var handleChange=function(eventData){view.responseChanged()};sel.find("#submission__answer__value").on("change keyup drop 
paste",handleChange);sel.find("#step--response__submit").click(function(eventObject){eventObject.preventDefault();view.submit()});sel.find("#submission__save").click(function(eventObject){eventObject.preventDefault();view.save()})},submitEnabled:function(enabled){var sel=$("#step--response__submit",this.element);if(typeof enabled==="undefined"){return!sel.hasClass("is--disabled")}else{sel.toggleClass("is--disabled",!enabled)}},saveEnabled:function(enabled){var sel=$("#submission__save",this.element);if(typeof enabled==="undefined"){return!sel.hasClass("is--disabled")}else{sel.toggleClass("is--disabled",!enabled)}},saveStatus:function(msg){var sel=$("#response__save_status h3",this.element);if(typeof msg==="undefined"){return sel.text()}else{var label=gettext("Status of Your Response");sel.html('<span class="sr">'+label+":"+"</span>\n"+msg)}},unsavedWarningEnabled:function(enabled){if(typeof enabled==="undefined"){return window.onbeforeunload!==null}else{if(enabled){window.onbeforeunload=function(){return"If you leave this page without saving or submitting your response, "+"you'll lose any work you've done on the response."}}else{window.onbeforeunload=null}}},response:function(text){var sel=$("#submission__answer__value",this.element);if(typeof text==="undefined"){return sel.val()}else{sel.val(text)}},responseChanged:function(){var currentResponse=$.trim(this.response());var isBlank=currentResponse!=="";this.submitEnabled(isBlank);if($.trim(this.savedResponse)!==currentResponse){this.saveEnabled(isBlank);this.saveStatus(gettext("This response has not been saved."));this.unsavedWarningEnabled(true)}},save:function(){this.saveStatus(gettext("Saving..."));this.baseView.toggleActionError("save",null);this.unsavedWarningEnabled(false);var view=this;var savedResponse=this.response();this.server.save(savedResponse).done(function(){view.savedResponse=savedResponse;var currentResponse=view.response();view.submitEnabled(currentResponse!=="");if(currentResponse==savedResponse){view.saveEnabled(false);view.saveStatus(gettext("This response has been saved but not submitted."))}}).fail(function(errMsg){view.saveStatus(gettext("Error"));view.baseView.toggleActionError("save",errMsg)})},submit:function(){this.submitEnabled(false);var view=this;var baseView=this.baseView;this.confirmSubmission().pipe(function(){var submission=$("#submission__answer__value",view.element).val();baseView.toggleActionError("response",null);return view.server.submit(submission)}).done($.proxy(view.moveToNextStep,view)).fail(function(errCode,errMsg){if(errCode=="ENOMULTI"){view.moveToNextStep()}else{if(errMsg){baseView.toggleActionError("submit",errMsg)}view.submitEnabled(true)}})},moveToNextStep:function(){this.load();this.baseView.renderPeerAssessmentStep();this.baseView.gradeView.load();this.unsavedWarningEnabled(false)},confirmSubmission:function(){var msg="You're about to submit your response for this assignment. 
"+"After you submit this response, you can't change it or submit a new response.";return $.Deferred(function(defer){if(confirm(msg)){defer.resolve()}else{defer.reject()}})}};if(typeof OpenAssessment=="undefined"||!OpenAssessment){OpenAssessment={}}if(typeof window.gettext==="undefined"){window.gettext=function(text){return text}}OpenAssessment.Server=function(runtime,element){this.runtime=runtime;this.element=element};OpenAssessment.Server.prototype={url:function(handler){return this.runtime.handlerUrl(this.element,handler)},maxInputSize:1024*64,render:function(component){var url=this.url("render_"+component);return $.Deferred(function(defer){$.ajax({url:url,type:"POST",dataType:"html"}).done(function(data){defer.resolveWith(this,[data])}).fail(function(data){defer.rejectWith(this,[gettext("This section could not be loaded.")])})}).promise()},renderContinuedPeer:function(){var url=this.url("render_peer_assessment");return $.Deferred(function(defer){$.ajax({url:url,type:"POST",dataType:"html",data:{continue_grading:true}}).done(function(data){defer.resolveWith(this,[data])}).fail(function(data){defer.rejectWith(this,[gettext("This section could not be loaded.")])})}).promise()},submit:function(submission){var url=this.url("submit");if(submission.length>this.maxInputSize){return $.Deferred(function(defer){var errorMsg=gettext("This response is too long. Please shorten the response and try to submit it again.");defer.rejectWith(this,["submit",errorMsg])}).promise()}return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:JSON.stringify({submission:submission})}).done(function(data){var success=data[0];if(success){var studentId=data[1];var attemptNum=data[2];defer.resolveWith(this,[studentId,attemptNum])}else{var errorNum=data[1];var errorMsg=data[2];defer.rejectWith(this,[errorNum,errorMsg])}}).fail(function(data){defer.rejectWith(this,["AJAX",gettext("This response could not be submitted.")])})}).promise()},save:function(submission){var url=this.url("save_submission");if(submission.length>this.maxInputSize){return $.Deferred(function(defer){var errorMsg=gettext("This response is too long. Please shorten the response and try to save it again.");defer.rejectWith(this,[errorMsg])}).promise()}return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:JSON.stringify({submission:submission})}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This response could not be saved.")])})}).promise()},submitFeedbackOnAssessment:function(text,options){var url=this.url("submit_feedback");if(text.length>this.maxInputSize){return $.Deferred(function(defer){var errorMsg=gettext("This feedback is too long. Please shorten your feedback and try to submit it again.");defer.rejectWith(this,[errorMsg])}).promise()}var payload=JSON.stringify({feedback_text:text,feedback_options:options});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This feedback could not be submitted.")])})}).promise()},peerAssess:function(optionsSelected,feedback){var url=this.url("peer_assess");if(feedback.length>this.maxInputSize){return $.Deferred(function(defer){var errorMsg=gettext("The comments on this assessment are too long. 
Please shorten your comments and try to submit them again.");defer.rejectWith(this,[errorMsg])}).promise()}var payload=JSON.stringify({options_selected:optionsSelected,feedback:feedback});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This assessment could not be submitted.")])})}).promise()},selfAssess:function(optionsSelected){var url=this.url("self_assess");var payload=JSON.stringify({options_selected:optionsSelected});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This assessment could not be submitted.")])})})},loadXml:function(){var url=this.url("xml");return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:'""'}).done(function(data){if(data.success){defer.resolveWith(this,[data.xml])}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This problem could not be loaded.")])})}).promise()},updateXml:function(xml){var url=this.url("update_xml");var payload=JSON.stringify({xml:xml});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This problem could not be saved.")])})}).promise()},checkReleased:function(){var url=this.url("check_released");var payload='""';return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolveWith(this,[data.is_released])}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("The server could not be contacted.")])})}).promise()}}; if(typeof OpenAssessment=="undefined"||!OpenAssessment){OpenAssessment={}}if(typeof window.gettext==="undefined"){window.gettext=function(text){return text}}OpenAssessment.BaseView=function(runtime,element,server){this.runtime=runtime;this.element=element;this.server=server;this.responseView=new OpenAssessment.ResponseView(this.element,this.server,this);this.peerView=new OpenAssessment.PeerView(this.element,this.server,this);this.gradeView=new OpenAssessment.GradeView(this.element,this.server,this)};OpenAssessment.BaseView.prototype={scrollToTop:function(){if($.scrollTo instanceof Function){$(window).scrollTo($("#openassessment__steps"),800,{offset:-50})}},setUpCollapseExpand:function(parentSel,onExpand){parentSel.find(".ui-toggle-visibility__control").click(function(eventData){var sel=$(eventData.target).closest(".ui-toggle-visibility");if(sel.hasClass("is--collapsed")&&onExpand!==undefined){onExpand()}sel.toggleClass("is--collapsed")})},load:function(){this.responseView.load();this.peerView.load();this.renderSelfAssessmentStep();this.gradeView.load();courseStaffDebug=$(".wrapper--staff-info");if(courseStaffDebug.length>0){this.setUpCollapseExpand(courseStaffDebug,function(){})}},renderSelfAssessmentStep:function(){var view=this;this.server.render("self_assessment").done(function(html){$("#openassessment__self-assessment",view.element).replaceWith(html);var sel=$("#openassessment__self-assessment",view.element);view.setUpCollapseExpand(sel);$("#self-assessment--001__assessment",view.element).change(function(){var 
numChecked=$("input[type=radio]:checked",this).length;var numAvailable=$(".field--radio.assessment__rubric__question",this).length;$("#self-assessment--001__assessment__submit",view.element).toggleClass("is--disabled",numChecked!=numAvailable)});sel.find("#self-assessment--001__assessment__submit").click(function(eventObject){eventObject.preventDefault();view.selfAssess()})}).fail(function(errMsg){view.showLoadError("self-assessment")})},selfSubmitEnabled:function(enabled){var button=$("#self-assessment--001__assessment__submit",this.element);if(typeof enabled==="undefined"){return!button.hasClass("is--disabled")}else{button.toggleClass("is--disabled",!enabled)}},selfAssess:function(){var optionsSelected={};$("#self-assessment--001__assessment input[type=radio]:checked",this.element).each(function(index,sel){optionsSelected[sel.name]=sel.value});var view=this;view.toggleActionError("self",null);view.selfSubmitEnabled(false);this.server.selfAssess(optionsSelected).done(function(){view.peerView.load();view.renderSelfAssessmentStep();view.gradeView.load();view.scrollToTop()}).fail(function(errMsg){view.toggleActionError("self",errMsg);view.selfSubmitEnabled(true)})},toggleActionError:function(type,msg){var element=this.element;var container=null;if(type=="save"){container=".response__submission__actions"}else if(type=="submit"||type=="peer"||type=="self"){container=".step__actions"}else if(type=="feedback_assess"){container=".submission__feedback__actions"}if(container===null){if(msg!==null){console.log(msg)}}else{var msgHtml=msg===null?"":msg;$(container+" .message__content",element).html("<p>"+msgHtml+"</p>");$(container,element).toggleClass("has--error",msg!==null)}},showLoadError:function(step){var container="#openassessment__"+step;$(container).toggleClass("has--error",true);$(container+" .step__status__value i").removeClass().addClass("ico icon-warning-sign");$(container+" .step__status__value .copy").html(gettext("Unable to Load"))},getStepActionsErrorMessage:function(){return $(".step__actions .message__content").html()}};function OpenAssessmentBlock(runtime,element){$(function($){var server=new OpenAssessment.Server(runtime,element);var view=new OpenAssessment.BaseView(runtime,element,server);view.load()})}OpenAssessment.StudioView=function(runtime,element,server){this.runtime=runtime;this.server=server;this.codeBox=CodeMirror.fromTextArea($(element).find(".openassessment-editor").first().get(0),{mode:"xml",lineNumbers:true,lineWrapping:true});var view=this;$(element).find(".openassessment-save-button").click(function(eventData){view.save()});$(element).find(".openassessment-cancel-button").click(function(eventData){view.cancel()})};OpenAssessment.StudioView.prototype={load:function(){var view=this;this.server.loadXml().done(function(xml){view.codeBox.setValue(xml)}).fail(function(msg){view.showError(msg)})},save:function(){var view=this;this.server.checkReleased().done(function(isReleased){if(isReleased){view.confirmPostReleaseUpdate($.proxy(view.updateXml,view))}else{view.updateXml()}}).fail(function(errMsg){view.showError(msg)})},confirmPostReleaseUpdate:function(onConfirm){var msg=gettext("This problem has already been released. 
Any changes will apply only to future assessments.");if(confirm(msg)){onConfirm()}},updateXml:function(){this.runtime.notify("save",{state:"start"});var xml=this.codeBox.getValue();var view=this;this.server.updateXml(xml).done(function(){view.runtime.notify("save",{state:"end"});view.load()}).fail(function(msg){view.showError(msg)})},cancel:function(){this.runtime.notify("cancel",{})},showError:function(errorMsg){this.runtime.notify("error",{msg:errorMsg})}};function OpenAssessmentEditor(runtime,element){$(function($){var server=new OpenAssessment.Server(runtime,element);var view=new OpenAssessment.StudioView(runtime,element,server);view.load()})}OpenAssessment.GradeView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView};OpenAssessment.GradeView.prototype={load:function(){var view=this;var baseView=this.baseView;this.server.render("grade").done(function(html){$("#openassessment__grade",view.element).replaceWith(html);view.installHandlers()}).fail(function(errMsg){baseView.showLoadError("grade",errMsg)})},installHandlers:function(){var sel=$("#openassessment__grade",this.element);this.baseView.setUpCollapseExpand(sel);var view=this;sel.find("#feedback__submit").click(function(eventObject){eventObject.preventDefault();view.submitFeedbackOnAssessment()})},feedbackText:function(text){if(typeof text==="undefined"){return $("#feedback__remarks__value",this.element).val()}else{$("#feedback__remarks__value",this.element).val(text)}},feedbackOptions:function(options){var view=this;if(typeof options==="undefined"){return $.map($(".feedback__overall__value:checked",view.element),function(element,index){return $(element).val()})}else{$(".feedback__overall__value",this.element).prop("checked",false);$.each(options,function(index,opt){$("#feedback__overall__value--"+opt,view.element).prop("checked",true)})}},setHidden:function(sel,hidden){sel.toggleClass("is--hidden",hidden);sel.attr("aria-hidden",hidden?"true":"false")},isHidden:function(sel){return sel.hasClass("is--hidden")&&sel.attr("aria-hidden")=="true"},feedbackState:function(newState){var containerSel=$(".submission__feedback__content",this.element);var instructionsSel=containerSel.find(".submission__feedback__instructions");var fieldsSel=containerSel.find(".submission__feedback__fields");var actionsSel=containerSel.find(".submission__feedback__actions");var transitionSel=containerSel.find(".transition__status");var messageSel=containerSel.find(".message--complete");if(typeof newState==="undefined"){var isSubmitting=containerSel.hasClass("is--transitioning")&&containerSel.hasClass("is--submitting")&&!this.isHidden(transitionSel)&&this.isHidden(messageSel)&&this.isHidden(instructionsSel)&&this.isHidden(fieldsSel)&&this.isHidden(actionsSel);var hasSubmitted=containerSel.hasClass("is--submitted")&&this.isHidden(transitionSel)&&!this.isHidden(messageSel)&&this.isHidden(instructionsSel)&&this.isHidden(fieldsSel)&&this.isHidden(actionsSel);var isOpen=!containerSel.hasClass("is--submitted")&&!containerSel.hasClass("is--transitioning")&&!containerSel.hasClass("is--submitting")&&this.isHidden(transitionSel)&&this.isHidden(messageSel)&&!this.isHidden(instructionsSel)&&!this.isHidden(fieldsSel)&&!this.isHidden(actionsSel);if(isOpen){return"open"}else if(isSubmitting){return"submitting"}else if(hasSubmitted){return"submitted"}else{throw"Invalid feedback 
state"}}else{if(newState=="open"){containerSel.toggleClass("is--transitioning",false);containerSel.toggleClass("is--submitting",false);containerSel.toggleClass("is--submitted",false);this.setHidden(instructionsSel,false);this.setHidden(fieldsSel,false);this.setHidden(actionsSel,false);this.setHidden(transitionSel,true);this.setHidden(messageSel,true)}else if(newState=="submitting"){containerSel.toggleClass("is--transitioning",true);containerSel.toggleClass("is--submitting",true);containerSel.toggleClass("is--submitted",false);this.setHidden(instructionsSel,true);this.setHidden(fieldsSel,true);this.setHidden(actionsSel,true);this.setHidden(transitionSel,false);this.setHidden(messageSel,true)}else if(newState=="submitted"){containerSel.toggleClass("is--transitioning",false);containerSel.toggleClass("is--submitting",false);containerSel.toggleClass("is--submitted",true);this.setHidden(instructionsSel,true);this.setHidden(fieldsSel,true);this.setHidden(actionsSel,true);this.setHidden(transitionSel,true);this.setHidden(messageSel,false)}}},submitFeedbackOnAssessment:function(){var view=this;var baseView=this.baseView;$("#feedback__submit",this.element).toggleClass("is--disabled",true);view.feedbackState("submitting");this.server.submitFeedbackOnAssessment(this.feedbackText(),this.feedbackOptions()).done(function(){view.feedbackState("submitted")}).fail(function(errMsg){baseView.toggleActionError("feedback_assess",errMsg)})}};OpenAssessment.PeerView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView};OpenAssessment.PeerView.prototype={load:function(){var view=this;this.server.render("peer_assessment").done(function(html){$("#openassessment__peer-assessment",view.element).replaceWith(html);view.installHandlers()}).fail(function(errMsg){view.showLoadError("peer-assessment")})},loadContinuedAssessment:function(){var view=this;this.server.renderContinuedPeer().done(function(html){$("#openassessment__peer-assessment",view.element).replaceWith(html);view.installHandlersForContinuedAssessment()}).fail(function(errMsg){view.showLoadError("peer-assessment")})},installHandlers:function(){var sel=$("#openassessment__peer-assessment",this.element);var view=this;this.baseView.setUpCollapseExpand(sel,$.proxy(view.loadContinuedAssessment,view));sel.find("#peer-assessment--001__assessment").change(function(){var numChecked=$("input[type=radio]:checked",this).length;var numAvailable=$(".field--radio.assessment__rubric__question",this).length;view.peerSubmitEnabled(numChecked==numAvailable)});sel.find("#peer-assessment--001__assessment__submit").click(function(eventObject){eventObject.preventDefault();view.peerAssess()})},installHandlersForContinuedAssessment:function(){var sel=$("#openassessment__peer-assessment",this.element);var view=this;this.baseView.setUpCollapseExpand(sel);sel.find("#peer-assessment--001__assessment__submit").click(function(eventObject){eventObject.preventDefault();view.continuedPeerAssess()});sel.find("#peer-assessment--001__assessment").change(function(){var numChecked=$("input[type=radio]:checked",this).length;var numAvailable=$(".field--radio.assessment__rubric__question",this).length;view.peerSubmitEnabled(numChecked==numAvailable)})},peerSubmitEnabled:function(enabled){var button=$("#peer-assessment--001__assessment__submit",this.element);if(typeof enabled==="undefined"){return!button.hasClass("is--disabled")}else{button.toggleClass("is--disabled",!enabled)}},peerAssess:function(){var view=this;var 
baseView=view.baseView;this.peerAssessRequest(function(){view.load();baseView.renderSelfAssessmentStep();baseView.gradeView.load();baseView.scrollToTop()})},continuedPeerAssess:function(){var view=this;var gradeView=this.baseView.gradeView;var baseView=view.baseView;view.peerAssessRequest(function(){view.loadContinuedAssessment();gradeView.load();baseView.scrollToTop()})},overallFeedback:function(overallFeedback){var selector="#assessment__rubric__question--feedback__value";if(typeof overallFeedback==="undefined"){return $(selector,this.element).val()}else{$(selector,this.element).val(overallFeedback)}},criterionFeedback:function(criterionFeedback){var selector="#peer-assessment--001__assessment textarea.answer__value";var feedback={};$(selector,this.element).each(function(index,sel){if(typeof criterionFeedback!=="undefined"){$(sel).val(criterionFeedback[sel.name]);feedback[sel.name]=criterionFeedback[sel.name]}else{feedback[sel.name]=$(sel).val()}});return feedback},optionsSelected:function(optionsSelected){var selector="#peer-assessment--001__assessment input[type=radio]";if(typeof optionsSelected==="undefined"){var options={};$(selector+":checked",this.element).each(function(index,sel){options[sel.name]=sel.value});return options}else{$(selector,this.element).prop("checked",false);$(selector,this.element).each(function(index,sel){if(optionsSelected.hasOwnProperty(sel.name)){if(sel.value==optionsSelected[sel.name]){$(sel).prop("checked",true)}}})}},peerAssessRequest:function(successFunction){var view=this;view.baseView.toggleActionError("peer",null);view.peerSubmitEnabled(false);this.server.peerAssess(this.optionsSelected(),this.criterionFeedback(),this.overallFeedback()).done(successFunction).fail(function(errMsg){view.baseView.toggleActionError("peer",errMsg);view.peerSubmitEnabled(true)})}};OpenAssessment.ResponseView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView;this.savedResponse=""};OpenAssessment.ResponseView.prototype={load:function(){var view=this;this.server.render("submission").done(function(html){$("#openassessment__response",view.element).replaceWith(html);view.installHandlers()}).fail(function(errMsg){view.baseView.showLoadError("response")})},installHandlers:function(){var sel=$("#openassessment__response",this.element);var view=this;this.baseView.setUpCollapseExpand(sel);this.savedResponse=this.response();var handleChange=function(eventData){view.responseChanged()};sel.find("#submission__answer__value").on("change keyup drop paste",handleChange);sel.find("#step--response__submit").click(function(eventObject){eventObject.preventDefault();view.submit()});sel.find("#submission__save").click(function(eventObject){eventObject.preventDefault();view.save()})},submitEnabled:function(enabled){var sel=$("#step--response__submit",this.element);if(typeof enabled==="undefined"){return!sel.hasClass("is--disabled")}else{sel.toggleClass("is--disabled",!enabled)}},saveEnabled:function(enabled){var sel=$("#submission__save",this.element);if(typeof enabled==="undefined"){return!sel.hasClass("is--disabled")}else{sel.toggleClass("is--disabled",!enabled)}},saveStatus:function(msg){var sel=$("#response__save_status h3",this.element);if(typeof msg==="undefined"){return sel.text()}else{var label=gettext("Status of Your Response");sel.html('<span class="sr">'+label+":"+"</span>\n"+msg)}},unsavedWarningEnabled:function(enabled){if(typeof enabled==="undefined"){return 
window.onbeforeunload!==null}else{if(enabled){window.onbeforeunload=function(){return"If you leave this page without saving or submitting your response, "+"you'll lose any work you've done on the response."}}else{window.onbeforeunload=null}}},response:function(text){var sel=$("#submission__answer__value",this.element);if(typeof text==="undefined"){return sel.val()}else{sel.val(text)}},responseChanged:function(){var currentResponse=$.trim(this.response());var isBlank=currentResponse!=="";this.submitEnabled(isBlank);if($.trim(this.savedResponse)!==currentResponse){this.saveEnabled(isBlank);this.saveStatus(gettext("This response has not been saved."));this.unsavedWarningEnabled(true)}},save:function(){this.saveStatus(gettext("Saving..."));this.baseView.toggleActionError("save",null);this.unsavedWarningEnabled(false);var view=this;var savedResponse=this.response();this.server.save(savedResponse).done(function(){view.savedResponse=savedResponse;var currentResponse=view.response();view.submitEnabled(currentResponse!=="");if(currentResponse==savedResponse){view.saveEnabled(false);view.saveStatus(gettext("This response has been saved but not submitted."))}}).fail(function(errMsg){view.saveStatus(gettext("Error"));view.baseView.toggleActionError("save",errMsg)})},submit:function(){this.submitEnabled(false);var view=this;var baseView=this.baseView;this.confirmSubmission().pipe(function(){var submission=$("#submission__answer__value",view.element).val();baseView.toggleActionError("response",null);return view.server.submit(submission)}).done($.proxy(view.moveToNextStep,view)).fail(function(errCode,errMsg){if(errCode=="ENOMULTI"){view.moveToNextStep()}else{if(errMsg){baseView.toggleActionError("submit",errMsg)}view.submitEnabled(true)}})},moveToNextStep:function(){this.load();this.baseView.peerView.load();this.baseView.gradeView.load();this.unsavedWarningEnabled(false)},confirmSubmission:function(){var msg="You're about to submit your response for this assignment. 
"+"After you submit this response, you can't change it or submit a new response.";return $.Deferred(function(defer){if(confirm(msg)){defer.resolve()}else{defer.reject()}})}};OpenAssessment.Server=function(runtime,element){this.runtime=runtime;this.element=element};OpenAssessment.Server.prototype={url:function(handler){return this.runtime.handlerUrl(this.element,handler)},render:function(component){var url=this.url("render_"+component);return $.Deferred(function(defer){$.ajax({url:url,type:"POST",dataType:"html"}).done(function(data){defer.resolveWith(this,[data])}).fail(function(data){defer.rejectWith(this,[gettext("This section could not be loaded.")])})}).promise()},renderContinuedPeer:function(){var url=this.url("render_peer_assessment");return $.Deferred(function(defer){$.ajax({url:url,type:"POST",dataType:"html",data:{continue_grading:true}}).done(function(data){defer.resolveWith(this,[data])}).fail(function(data){defer.rejectWith(this,[gettext("This section could not be loaded.")])})}).promise()},submit:function(submission){var url=this.url("submit");return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:JSON.stringify({submission:submission})}).done(function(data){var success=data[0];if(success){var studentId=data[1];var attemptNum=data[2];defer.resolveWith(this,[studentId,attemptNum])}else{var errorNum=data[1];var errorMsg=data[2];defer.rejectWith(this,[errorNum,errorMsg])}}).fail(function(data){defer.rejectWith(this,["AJAX",gettext("This response could not be submitted.")])})}).promise()},save:function(submission){var url=this.url("save_submission");return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:JSON.stringify({submission:submission})}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This response could not be saved.")])})}).promise()},submitFeedbackOnAssessment:function(text,options){var url=this.url("submit_feedback");var payload=JSON.stringify({feedback_text:text,feedback_options:options});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This feedback could not be submitted.")])})}).promise()},peerAssess:function(optionsSelected,criterionFeedback,overallFeedback){var url=this.url("peer_assess");var payload=JSON.stringify({options_selected:optionsSelected,criterion_feedback:criterionFeedback,overall_feedback:overallFeedback});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This assessment could not be submitted.")])})}).promise()},selfAssess:function(optionsSelected){var url=this.url("self_assess");var payload=JSON.stringify({options_selected:optionsSelected});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This assessment could not be submitted.")])})})},loadXml:function(){var url=this.url("xml");return 
$.Deferred(function(defer){$.ajax({type:"POST",url:url,data:'""'}).done(function(data){if(data.success){defer.resolveWith(this,[data.xml])}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This problem could not be loaded.")])})}).promise()},updateXml:function(xml){var url=this.url("update_xml");var payload=JSON.stringify({xml:xml});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("This problem could not be saved.")])})}).promise()},checkReleased:function(){var url=this.url("check_released");var payload='""';return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolveWith(this,[data.is_released])}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,[gettext("The server could not be contacted.")])})}).promise()}};if(typeof OpenAssessment=="undefined"||!OpenAssessment){OpenAssessment={}}if(typeof window.gettext==="undefined"){window.gettext=function(text){return text}}
\ No newline at end of file \ No newline at end of file
...@@ -15,10 +15,6 @@ describe("OpenAssessment.BaseView", function() { ...@@ -15,10 +15,6 @@ describe("OpenAssessment.BaseView", function() {
grade: readFixtures("oa_grade_complete.html") grade: readFixtures("oa_grade_complete.html")
}; };
this.peerAssess = function(optionsSelected, feedback) {
return $.Deferred(function(defer) { defer.resolve(); }).promise();
};
this.selfAssess = function(optionsSelected) { this.selfAssess = function(optionsSelected) {
return $.Deferred(function(defer) { defer.resolve(); }).promise(); return $.Deferred(function(defer) { defer.resolve(); }).promise();
}; };
...@@ -70,14 +66,6 @@ describe("OpenAssessment.BaseView", function() { ...@@ -70,14 +66,6 @@ describe("OpenAssessment.BaseView", function() {
view = new OpenAssessment.BaseView(runtime, el, server); view = new OpenAssessment.BaseView(runtime, el, server);
}); });
it("Sends a peer assessment to the server", function() {
loadSubviews(function() {
spyOn(server, 'peerAssess').andCallThrough();
view.peerAssess();
expect(server.peerAssess).toHaveBeenCalled();
});
});
it("Sends a self assessment to the server", function() { it("Sends a self assessment to the server", function() {
loadSubviews(function() { loadSubviews(function() {
spyOn(server, 'selfAssess').andCallThrough(); spyOn(server, 'selfAssess').andCallThrough();
......
...@@ -20,10 +20,19 @@ describe("OpenAssessment.PeerView", function() { ...@@ -20,10 +20,19 @@ describe("OpenAssessment.PeerView", function() {
return successPromise; return successPromise;
}; };
}; };
// Stub runtime
var runtime = {}; // Stub base view
var StubBaseView = function() {
this.showLoadError = function(msg) {};
this.toggleActionError = function(msg, step) {};
this.setUpCollapseExpand = function(sel) {};
this.renderSelfAssessmentStep = function() {};
this.scrollToTop = function() {};
this.gradeView = { load: function() {} };
};
// Stubs // Stubs
var baseView = null;
var server = null; var server = null;
// View under test // View under test
...@@ -37,12 +46,46 @@ describe("OpenAssessment.PeerView", function() { ...@@ -37,12 +46,46 @@ describe("OpenAssessment.PeerView", function() {
// Create a new stub server // Create a new stub server
server = new StubServer(); server = new StubServer();
// Create the stub base view
baseView = new StubBaseView();
// Create the object under test // Create the object under test
var el = $("#openassessment").get(0); var el = $("#openassessment-base").get(0);
view = new OpenAssessment.BaseView(runtime, el, server); view = new OpenAssessment.PeerView(el, server, baseView);
view.installHandlers();
});
it("Sends a peer assessment to the server", function() {
spyOn(server, 'peerAssess').andCallThrough();
// Select options in the rubric
var optionsSelected = {};
optionsSelected['Criterion 1'] = 'Poor';
optionsSelected['Criterion 2'] = 'Fair';
optionsSelected['Criterion 3'] = 'Good';
view.optionsSelected(optionsSelected);
// Provide per-criterion feedback
var criterionFeedback = {};
criterionFeedback['Criterion 1'] = "You did a fair job";
criterionFeedback['Criterion 3'] = "You did a good job";
view.criterionFeedback(criterionFeedback);
// Provide overall feedback
var overallFeedback = "Good job!";
view.overallFeedback(overallFeedback);
// Submit the peer assessment
view.peerAssess();
// Expect that the peer assessment was sent to the server
// with the options and feedback we selected
expect(server.peerAssess).toHaveBeenCalledWith(
optionsSelected, criterionFeedback, overallFeedback
);
}); });
it("re-enables the peer assess button on error", function() { it("Re-enables the peer assess button on error", function() {
// Simulate a server error // Simulate a server error
spyOn(server, 'peerAssess').andCallFake(function() { spyOn(server, 'peerAssess').andCallFake(function() {
expect(view.peerSubmitEnabled()).toBe(false); expect(view.peerSubmitEnabled()).toBe(false);
...@@ -51,6 +94,7 @@ describe("OpenAssessment.PeerView", function() { ...@@ -51,6 +94,7 @@ describe("OpenAssessment.PeerView", function() {
}).promise(); }).promise();
}); });
view.peerAssess(); view.peerAssess();
// Expect the submit button to have been re-enabled // Expect the submit button to have been re-enabled
expect(view.peerSubmitEnabled()).toBe(true); expect(view.peerSubmitEnabled()).toBe(true);
}); });
......
...@@ -27,13 +27,11 @@ describe("OpenAssessment.ResponseView", function() { ...@@ -27,13 +27,11 @@ describe("OpenAssessment.ResponseView", function() {
// Stub base view // Stub base view
var StubBaseView = function() { var StubBaseView = function() {
this.gradeView = { this.peerView = { load: function() {} };
load: function(){} this.gradeView = { load: function() {} };
};
this.showLoadError = function(msg) {}; this.showLoadError = function(msg) {};
this.toggleActionError = function(msg, step) {}; this.toggleActionError = function(msg, step) {};
this.setUpCollapseExpand = function(sel) {}; this.setUpCollapseExpand = function(sel) {};
this.renderPeerAssessmentStep = function() {};
}; };
// Stubs // Stubs
...@@ -223,14 +221,14 @@ describe("OpenAssessment.ResponseView", function() { ...@@ -223,14 +221,14 @@ describe("OpenAssessment.ResponseView", function() {
}).promise(); }).promise();
}); });
spyOn(view, 'load'); spyOn(view, 'load');
spyOn(baseView, 'renderPeerAssessmentStep'); spyOn(baseView.peerView, 'load');
view.response('Test response'); view.response('Test response');
view.submit(); view.submit();
// Expect the current and next step to have been reloaded // Expect the current and next step to have been reloaded
expect(view.load).toHaveBeenCalled(); expect(view.load).toHaveBeenCalled();
expect(baseView.renderPeerAssessmentStep).toHaveBeenCalled(); expect(baseView.peerView.load).toHaveBeenCalled();
}); });
it("enables the unsaved work warning when the user changes the response text", function() { it("enables the unsaved work warning when the user changes the response text", function() {
......
...@@ -30,13 +30,6 @@ describe("OpenAssessment.Server", function() { ...@@ -30,13 +30,6 @@ describe("OpenAssessment.Server", function() {
); );
}; };
var getHugeTestString = function() {
var testStringSize = server.maxInputSize + 1;
var testString = '';
for (i = 0; i < (testStringSize); i++) { testString += 'x'; }
return testString;
};
beforeEach(function() { beforeEach(function() {
// Create the server // Create the server
// Since the runtime is a stub implementation that ignores the element passed to it, // Since the runtime is a stub implementation that ignores the element passed to it,
...@@ -97,9 +90,10 @@ describe("OpenAssessment.Server", function() { ...@@ -97,9 +90,10 @@ describe("OpenAssessment.Server", function() {
var success = false; var success = false;
var options = {clarity: "Very clear", precision: "Somewhat precise"}; var options = {clarity: "Very clear", precision: "Somewhat precise"};
server.peerAssess(options, "Excellent job!").done(function() { var criterionFeedback = {clarity: "This essay was very clear."};
success = true; server.peerAssess(options, criterionFeedback, "Excellent job!").done(
}); function() { success = true; }
);
expect(success).toBe(true); expect(success).toBe(true);
expect($.ajax).toHaveBeenCalledWith({ expect($.ajax).toHaveBeenCalledWith({
...@@ -107,7 +101,8 @@ describe("OpenAssessment.Server", function() { ...@@ -107,7 +101,8 @@ describe("OpenAssessment.Server", function() {
type: "POST", type: "POST",
data: JSON.stringify({ data: JSON.stringify({
options_selected: options, options_selected: options,
feedback: "Excellent job!" criterion_feedback: criterionFeedback,
overall_feedback: "Excellent job!"
}) })
}); });
}); });
...@@ -197,20 +192,6 @@ describe("OpenAssessment.Server", function() { ...@@ -197,20 +192,6 @@ describe("OpenAssessment.Server", function() {
expect(receivedErrorMsg).toContain("This response could not be submitted"); expect(receivedErrorMsg).toContain("This response could not be submitted");
}); });
it("confirms that very long submissions fail with an error without ajax", function() {
var receivedErrorCode = "";
var receivedErrorMsg = "";
var testString = getHugeTestString();
server.submit(testString).fail(
function(errorCode, errorMsg) {
receivedErrorCode = errorCode;
receivedErrorMsg = errorMsg;
}
);
expect(receivedErrorCode).toEqual("submit");
expect(receivedErrorMsg).toContain("This response is too long");
});
it("informs the caller of an server error when sending a submission", function() { it("informs the caller of an server error when sending a submission", function() {
stubAjax(true, [false, "ENODATA", "Error occurred!"]); stubAjax(true, [false, "ENODATA", "Error occurred!"]);
...@@ -227,15 +208,6 @@ describe("OpenAssessment.Server", function() { ...@@ -227,15 +208,6 @@ describe("OpenAssessment.Server", function() {
expect(receivedErrorMsg).toEqual("Error occurred!"); expect(receivedErrorMsg).toEqual("Error occurred!");
}); });
it("confirms that very long saves fail with an error without ajax", function() {
var receivedErrorMsg = "";
var testString = getHugeTestString();
server.save(testString).fail(
function(errorMsg) { receivedErrorMsg = errorMsg; }
);
expect(receivedErrorMsg).toContain("This response is too long");
});
it("informs the caller of an AJAX error when saving a submission", function() { it("informs the caller of an AJAX error when saving a submission", function() {
stubAjax(false, null); stubAjax(false, null);
var receivedMsg = null; var receivedMsg = null;
...@@ -301,24 +273,12 @@ describe("OpenAssessment.Server", function() { ...@@ -301,24 +273,12 @@ describe("OpenAssessment.Server", function() {
expect(receivedMsg).toEqual("Test error"); expect(receivedMsg).toEqual("Test error");
}); });
it("confirms that very long peer assessments fail with an error without ajax", function() {
var options = {clarity: "Very clear", precision: "Somewhat precise"};
var receivedErrorMsg = "";
var testString = getHugeTestString();
server.peerAssess(options, testString).fail(
function(errorMsg) {
receivedErrorMsg = errorMsg;
}
);
expect(receivedErrorMsg).toContain("The comments on this assessment are too long");
});
it("informs the caller of a server error when sending a peer assessment", function() { it("informs the caller of a server error when sending a peer assessment", function() {
stubAjax(true, {success:false, msg:'Test error!'}); stubAjax(true, {success:false, msg:'Test error!'});
var receivedMsg = null; var receivedMsg = null;
var options = {clarity: "Very clear", precision: "Somewhat precise"}; var options = {clarity: "Very clear", precision: "Somewhat precise"};
server.peerAssess(options, "Excellent job!").fail(function(msg) { server.peerAssess(options, {}, "Excellent job!").fail(function(msg) {
receivedMsg = msg; receivedMsg = msg;
}); });
...@@ -330,7 +290,7 @@ describe("OpenAssessment.Server", function() { ...@@ -330,7 +290,7 @@ describe("OpenAssessment.Server", function() {
var receivedMsg = null; var receivedMsg = null;
var options = {clarity: "Very clear", precision: "Somewhat precise"}; var options = {clarity: "Very clear", precision: "Somewhat precise"};
server.peerAssess(options, "Excellent job!").fail(function(msg) { server.peerAssess(options, {}, "Excellent job!").fail(function(msg) {
receivedMsg = msg; receivedMsg = msg;
}); });
...@@ -360,18 +320,6 @@ describe("OpenAssessment.Server", function() { ...@@ -360,18 +320,6 @@ describe("OpenAssessment.Server", function() {
expect(receivedMsg).toEqual("Test error"); expect(receivedMsg).toEqual("Test error");
}); });
it("confirms that very long assessment feedback fails with an error without ajax", function() {
var options = ["Option 1", "Option 2"];
var receivedErrorMsg = "";
var testString = getHugeTestString();
server.submitFeedbackOnAssessment(testString, options).fail(
function(errorMsg) {
receivedErrorMsg = errorMsg;
}
);
expect(receivedErrorMsg).toContain("This feedback is too long");
});
it("informs the caller of an AJAX error when sending feedback on submission", function() { it("informs the caller of an AJAX error when sending feedback on submission", function() {
stubAjax(false, null); stubAjax(false, null);
......
/* JavaScript for student-facing views of Open Assessment XBlock */
/* Namespace for open assessment */
if (typeof OpenAssessment == "undefined" || !OpenAssessment) {
OpenAssessment = {};
}
// Stub gettext if the runtime doesn't provide it
if (typeof window.gettext === 'undefined') {
window.gettext = function(text) { return text; };
}
/** /**
Interface for student-facing views. Interface for student-facing views.
...@@ -29,6 +15,7 @@ OpenAssessment.BaseView = function(runtime, element, server) { ...@@ -29,6 +15,7 @@ OpenAssessment.BaseView = function(runtime, element, server) {
this.server = server; this.server = server;
this.responseView = new OpenAssessment.ResponseView(this.element, this.server, this); this.responseView = new OpenAssessment.ResponseView(this.element, this.server, this);
this.peerView = new OpenAssessment.PeerView(this.element, this.server, this);
this.gradeView = new OpenAssessment.GradeView(this.element, this.server, this); this.gradeView = new OpenAssessment.GradeView(this.element, this.server, this);
}; };
...@@ -75,7 +62,7 @@ OpenAssessment.BaseView.prototype = { ...@@ -75,7 +62,7 @@ OpenAssessment.BaseView.prototype = {
*/ */
load: function() { load: function() {
this.responseView.load(); this.responseView.load();
this.renderPeerAssessmentStep(); this.peerView.load();
this.renderSelfAssessmentStep(); this.renderSelfAssessmentStep();
this.gradeView.load(); this.gradeView.load();
...@@ -87,93 +74,6 @@ OpenAssessment.BaseView.prototype = { ...@@ -87,93 +74,6 @@ OpenAssessment.BaseView.prototype = {
}, },
/** /**
Render the peer-assessment step.
**/
renderPeerAssessmentStep: function() {
var view = this;
this.server.render('peer_assessment').done(
function(html) {
// Load the HTML
$('#openassessment__peer-assessment', view.element).replaceWith(html);
var sel = $('#openassessment__peer-assessment', view.element);
// Install a click handler for collapse/expand
view.setUpCollapseExpand(sel, $.proxy(view.renderContinuedPeerAssessmentStep, view));
// Install a change handler for rubric options to enable/disable the submit button
sel.find("#peer-assessment--001__assessment").change(
function() {
var numChecked = $('input[type=radio]:checked', this).length;
var numAvailable = $('.field--radio.assessment__rubric__question', this).length;
$("#peer-assessment--001__assessment__submit", view.element).toggleClass(
'is--disabled', numChecked != numAvailable
);
}
);
// Install a click handler for assessment
sel.find('#peer-assessment--001__assessment__submit').click(
function(eventObject) {
// Override default form submission
eventObject.preventDefault();
// Handle the click
view.peerAssess();
}
);
}
).fail(function(errMsg) {
view.showLoadError('peer-assessment');
});
},
/**
* Render the peer-assessment step for continued grading. Always renders as
* expanded, since this should be called for an explicit continuation of the
* peer grading process.
*/
renderContinuedPeerAssessmentStep: function() {
var view = this;
this.server.renderContinuedPeer().done(
function(html) {
// Load the HTML
$('#openassessment__peer-assessment', view.element).replaceWith(html);
var sel = $('#openassessment__peer-assessment', view.element);
// Install a click handler for collapse/expand
view.setUpCollapseExpand(sel);
// Install a click handler for assessment
sel.find('#peer-assessment--001__assessment__submit').click(
function(eventObject) {
// Override default form submission
eventObject.preventDefault();
// Handle the click
view.continuedPeerAssess();
}
);
// Install a change handler for rubric options to enable/disable the submit button
sel.find("#peer-assessment--001__assessment").change(
function() {
var numChecked = $('input[type=radio]:checked', this).length;
var numAvailable = $('.field--radio.assessment__rubric__question', this).length;
$("#peer-assessment--001__assessment__submit", view.element).toggleClass(
'is--disabled', numChecked != numAvailable
);
}
);
}
).fail(function(errMsg) {
view.showLoadError('peer-assessment');
});
},
/**
Render the self-assessment step. Render the self-assessment step.
**/ **/
renderSelfAssessmentStep: function() { renderSelfAssessmentStep: function() {
...@@ -216,30 +116,6 @@ OpenAssessment.BaseView.prototype = { ...@@ -216,30 +116,6 @@ OpenAssessment.BaseView.prototype = {
}, },
/** /**
Enable/disable the peer assess button.
Check whether the peer assess button is enabled.
Args:
enabled (bool): If specified, set the state of the button.
Returns:
bool: Whether the button is enabled.
Examples:
>> view.peerSubmitEnabled(true); // enable the button
>> view.peerSubmitEnabled(); // check whether the button is enabled
>> true
**/
peerSubmitEnabled: function(enabled) {
var button = $('#peer-assessment--001__assessment__submit', this.element);
if (typeof enabled === 'undefined') {
return !button.hasClass('is--disabled');
} else {
button.toggleClass('is--disabled', !enabled)
}
},
/**
Enable/disable the self assess button. Enable/disable the self assess button.
Check whether the self assess button is enabled. Check whether the self assess button is enabled.
...@@ -259,68 +135,11 @@ OpenAssessment.BaseView.prototype = { ...@@ -259,68 +135,11 @@ OpenAssessment.BaseView.prototype = {
if (typeof enabled === 'undefined') { if (typeof enabled === 'undefined') {
return !button.hasClass('is--disabled'); return !button.hasClass('is--disabled');
} else { } else {
button.toggleClass('is--disabled', !enabled) button.toggleClass('is--disabled', !enabled);
} }
}, },
/** /**
Send an assessment to the server and update the view.
**/
peerAssess: function() {
var view = this;
this.peerAssessRequest(function() {
view.renderPeerAssessmentStep();
view.renderSelfAssessmentStep();
view.gradeView.load();
view.scrollToTop();
});
},
/**
* Send an assessment to the server and update the view, with the assumption
* that we are continuing peer assessments beyond the required amount.
*/
continuedPeerAssess: function() {
var view = this;
view.peerAssessRequest(function() {
view.renderContinuedPeerAssessmentStep();
view.gradeView.load();
});
},
/**
* Common peer assessment request building, used for all types of peer
* assessments.
*
* Args:
* successFunction (function): The function called if the request is
* successful. This varies based on the type of request to submit
* a peer assessment.
*/
peerAssessRequest: function(successFunction) {
// Retrieve assessment info from the DOM
var optionsSelected = {};
$("#peer-assessment--001__assessment input[type=radio]:checked", this.element).each(
function(index, sel) {
optionsSelected[sel.name] = sel.value;
}
);
var feedback = $('#assessment__rubric__question--feedback__value', this.element).val();
// Send the assessment to the server
var view = this;
view.toggleActionError('peer', null);
view.peerSubmitEnabled(false);
this.server.peerAssess(optionsSelected, feedback).done(
successFunction
).fail(function(errMsg) {
view.toggleActionError('peer', errMsg);
view.peerSubmitEnabled(true);
});
},
/**
Send a self-assessment to the server and update the view. Send a self-assessment to the server and update the view.
**/ **/
selfAssess: function() { selfAssess: function() {
...@@ -339,7 +158,7 @@ OpenAssessment.BaseView.prototype = { ...@@ -339,7 +158,7 @@ OpenAssessment.BaseView.prototype = {
this.server.selfAssess(optionsSelected).done( this.server.selfAssess(optionsSelected).done(
function() { function() {
view.renderPeerAssessmentStep(); view.peerView.load();
view.renderSelfAssessmentStep(); view.renderSelfAssessmentStep();
view.gradeView.load(); view.gradeView.load();
view.scrollToTop(); view.scrollToTop();
......
/* JavaScript for Studio editing view of Open Assessment XBlock */
/* Namespace for open assessment */
if (typeof OpenAssessment == "undefined" || !OpenAssessment) {
OpenAssessment = {};
}
// Stub gettext if the runtime doesn't provide it
if (typeof window.gettext === 'undefined') {
window.gettext = function(text) { return text; };
}
/** /**
Interface for editing view in Studio. Interface for editing view in Studio.
The constructor initializes the DOM for editing. The constructor initializes the DOM for editing.
......
/* JavaScript for grade view */
/* Namespace for open assessment */
if (typeof OpenAssessment == "undefined" || !OpenAssessment) {
OpenAssessment = {};
}
// Stub gettext if the runtime doesn't provide it
if (typeof window.gettext === 'undefined') {
window.gettext = function(text) { return text; };
}
/** /**
Interface for grade view. Interface for grade view.
......
/**
Interface for peer assessment view.
Args:
element (DOM element): The DOM element representing the XBlock.
server (OpenAssessment.Server): The interface to the XBlock server.
baseView (OpenAssessment.BaseView): Container view.
Returns:
OpenAssessment.PeerView
**/
OpenAssessment.PeerView = function(element, server, baseView) {
this.element = element;
this.server = server;
this.baseView = baseView;
};
OpenAssessment.PeerView.prototype = {
/**
Load the peer assessment view.
**/
load: function() {
var view = this;
this.server.render('peer_assessment').done(
function(html) {
// Load the HTML and install event handlers
$('#openassessment__peer-assessment', view.element).replaceWith(html);
view.installHandlers();
}
).fail(function(errMsg) {
view.baseView.showLoadError('peer-assessment');
});
},
/**
Load the continued grading version of the view.
This is a version of the peer grading step that a student
can use to continue assessing peers after they've completed
their peer assessment requirements.
**/
loadContinuedAssessment: function() {
var view = this;
this.server.renderContinuedPeer().done(
function(html) {
// Load the HTML and install event handlers
$('#openassessment__peer-assessment', view.element).replaceWith(html);
view.installHandlersForContinuedAssessment();
}
).fail(function(errMsg) {
view.baseView.showLoadError('peer-assessment');
});
},
/**
Install event handlers for the view.
**/
installHandlers: function() {
var sel = $('#openassessment__peer-assessment', this.element);
var view = this;
// Install a click handler for collapse/expand
this.baseView.setUpCollapseExpand(sel, $.proxy(view.loadContinuedAssessment, view));
// Install a change handler for rubric options to enable/disable the submit button
sel.find("#peer-assessment--001__assessment").change(
function() {
var numChecked = $('input[type=radio]:checked', this).length;
var numAvailable = $('.field--radio.assessment__rubric__question', this).length;
view.peerSubmitEnabled(numChecked == numAvailable);
}
);
// Install a click handler for assessment
sel.find('#peer-assessment--001__assessment__submit').click(
function(eventObject) {
// Override default form submission
eventObject.preventDefault();
// Handle the click
view.peerAssess();
}
);
},
/**
Install event handlers for the continued grading version of the view.
**/
installHandlersForContinuedAssessment: function() {
var sel = $('#openassessment__peer-assessment', this.element);
var view = this;
// Install a click handler for collapse/expand
this.baseView.setUpCollapseExpand(sel);
// Install a click handler for assessment
sel.find('#peer-assessment--001__assessment__submit').click(
function(eventObject) {
// Override default form submission
eventObject.preventDefault();
// Handle the click
view.continuedPeerAssess();
}
);
// Install a change handler for rubric options to enable/disable the submit button
sel.find("#peer-assessment--001__assessment").change(
function() {
var numChecked = $('input[type=radio]:checked', this).length;
var numAvailable = $('.field--radio.assessment__rubric__question', this).length;
view.peerSubmitEnabled(numChecked == numAvailable);
}
);
},
/**
Enable/disable the peer assess button.
Check whether the peer assess button is enabled.
Args:
enabled (bool): If specified, set the state of the button.
Returns:
bool: Whether the button is enabled.
Examples:
>> view.peerSubmitEnabled(true); // enable the button
>> view.peerSubmitEnabled(); // check whether the button is enabled
>> true
**/
peerSubmitEnabled: function(enabled) {
var button = $('#peer-assessment--001__assessment__submit', this.element);
if (typeof enabled === 'undefined') {
return !button.hasClass('is--disabled');
} else {
button.toggleClass('is--disabled', !enabled);
}
},
/**
Send an assessment to the server and update the view.
**/
peerAssess: function() {
var view = this;
var baseView = view.baseView;
this.peerAssessRequest(function() {
view.load();
baseView.renderSelfAssessmentStep();
baseView.gradeView.load();
baseView.scrollToTop();
});
},
/**
* Send an assessment to the server and update the view, with the assumption
* that we are continuing peer assessments beyond the required amount.
*/
continuedPeerAssess: function() {
var view = this;
var gradeView = this.baseView.gradeView;
var baseView = view.baseView;
view.peerAssessRequest(function() {
view.loadContinuedAssessment();
gradeView.load();
baseView.scrollToTop();
});
},
/**
Get or set overall feedback on the submission.
Args:
overallFeedback (string or undefined): The overall feedback text (optional).
Returns:
string or undefined
Example usage:
>>> view.overallFeedback('Good job!'); // Set the feedback text
>>> view.overallFeedback(); // Retrieve the feedback text
'Good job!'
**/
overallFeedback: function(overallFeedback) {
var selector = '#assessment__rubric__question--feedback__value';
if (typeof overallFeedback === 'undefined') {
return $(selector, this.element).val();
}
else {
$(selector, this.element).val(overallFeedback);
}
},
/**
Get or set per-criterion feedback.
Args:
criterionFeedback (object literal or undefined):
Map of criterion names to feedback strings.
Returns:
object literal or undefined
Example usage:
>>> view.criterionFeedback({'ideas': 'Good ideas'}); // Set per-criterion feedback
>>> view.criterionFeedback(); // Retrieve criterion feedback
{'ideas': 'Good ideas'}
**/
criterionFeedback: function(criterionFeedback) {
var selector = '#peer-assessment--001__assessment textarea.answer__value';
var feedback = {};
$(selector, this.element).each(
function(index, sel) {
if (typeof criterionFeedback !== 'undefined') {
$(sel).val(criterionFeedback[sel.name]);
feedback[sel.name] = criterionFeedback[sel.name];
}
else {
feedback[sel.name] = $(sel).val();
}
}
);
return feedback;
},
/**
Get or set the options selected in the rubric.
Args:
optionsSelected (object literal or undefined):
Map of criterion names to option values.
Returns:
object literal or undefined
Example usage:
>>> view.optionsSelected({'ideas': 'Good'}); // Set the criterion option
>>> view.optionsSelected(); // Retrieve the options selected
{'ideas': 'Good'}
**/
optionsSelected: function(optionsSelected) {
var selector = "#peer-assessment--001__assessment input[type=radio]";
if (typeof optionsSelected === 'undefined') {
var options = {};
$(selector + ":checked", this.element).each(
function(index, sel) {
options[sel.name] = sel.value;
}
);
return options;
}
else {
// Uncheck all the options
$(selector, this.element).prop('checked', false);
// Check the selected options
$(selector, this.element).each(function(index, sel) {
if (optionsSelected.hasOwnProperty(sel.name)) {
if (sel.value == optionsSelected[sel.name]) {
$(sel).prop('checked', true);
}
}
});
}
},
/**
Common peer assessment request building, used for all types of peer assessments.
Args:
successFunction (function): The function called if the request is
successful. This varies based on the type of request to submit
a peer assessment.
**/
peerAssessRequest: function(successFunction) {
var view = this;
view.baseView.toggleActionError('peer', null);
view.peerSubmitEnabled(false);
// Pull the assessment info from the DOM and send it to the server
this.server.peerAssess(
this.optionsSelected(),
this.criterionFeedback(),
this.overallFeedback()
).done(
successFunction
).fail(function(errMsg) {
view.baseView.toggleActionError('peer', errMsg);
view.peerSubmitEnabled(true);
});
},
};
/* JavaScript for response (submission) view */
/* Namespace for open assessment */
if (typeof OpenAssessment == "undefined" || !OpenAssessment) {
OpenAssessment = {};
}
// Stub gettext if the runtime doesn't provide it
if (typeof window.gettext === 'undefined') {
window.gettext = function(text) { return text; };
}
/** /**
Interface for response (submission) view. Interface for response (submission) view.
...@@ -305,7 +291,7 @@ OpenAssessment.ResponseView.prototype = { ...@@ -305,7 +291,7 @@ OpenAssessment.ResponseView.prototype = {
**/ **/
moveToNextStep: function() { moveToNextStep: function() {
this.load(); this.load();
this.baseView.renderPeerAssessmentStep(); this.baseView.peerView.load();
this.baseView.gradeView.load(); this.baseView.gradeView.load();
// Disable the "unsaved changes" warning if the user // Disable the "unsaved changes" warning if the user
......
/* JavaScript interface for interacting with server-side OpenAssessment XBlock */
/* Namespace for open assessment */
if (typeof OpenAssessment == "undefined" || !OpenAssessment) {
OpenAssessment = {};
}
// Stub gettext if the runtime doesn't provide it
if (typeof window.gettext === 'undefined') {
window.gettext = function(text) { return text; };
}
/** /**
Interface for server-side XBlock handlers. Interface for server-side XBlock handlers.
...@@ -43,11 +29,6 @@ OpenAssessment.Server.prototype = { ...@@ -43,11 +29,6 @@ OpenAssessment.Server.prototype = {
return this.runtime.handlerUrl(this.element, handler); return this.runtime.handlerUrl(this.element, handler);
}, },
/*
* Get maximum size of input
*/
maxInputSize: 1024 * 64, /* 64KB should be enough for anybody, right? ;^P */
/** /**
Render the XBlock. Render the XBlock.
...@@ -123,12 +104,6 @@ OpenAssessment.Server.prototype = { ...@@ -123,12 +104,6 @@ OpenAssessment.Server.prototype = {
**/ **/
submit: function(submission) { submit: function(submission) {
var url = this.url('submit'); var url = this.url('submit');
if (submission.length > this.maxInputSize) {
return $.Deferred(function(defer) {
var errorMsg = gettext("This response is too long. Please shorten the response and try to submit it again.");
defer.rejectWith(this, ["submit", errorMsg]);
}).promise();
}
return $.Deferred(function(defer) { return $.Deferred(function(defer) {
$.ajax({ $.ajax({
type: "POST", type: "POST",
...@@ -164,12 +139,6 @@ OpenAssessment.Server.prototype = { ...@@ -164,12 +139,6 @@ OpenAssessment.Server.prototype = {
**/ **/
save: function(submission) { save: function(submission) {
var url = this.url('save_submission'); var url = this.url('save_submission');
if (submission.length > this.maxInputSize) {
return $.Deferred(function(defer) {
var errorMsg = gettext("This response is too long. Please shorten the response and try to save it again.");
defer.rejectWith(this, [errorMsg]);
}).promise();
}
return $.Deferred(function(defer) { return $.Deferred(function(defer) {
$.ajax({ $.ajax({
type: "POST", type: "POST",
...@@ -205,12 +174,6 @@ OpenAssessment.Server.prototype = { ...@@ -205,12 +174,6 @@ OpenAssessment.Server.prototype = {
*/ */
submitFeedbackOnAssessment: function(text, options) { submitFeedbackOnAssessment: function(text, options) {
var url = this.url('submit_feedback'); var url = this.url('submit_feedback');
if (text.length > this.maxInputSize) {
return $.Deferred(function(defer) {
var errorMsg = gettext("This feedback is too long. Please shorten your feedback and try to submit it again.");
defer.rejectWith(this, [errorMsg]);
}).promise();
}
var payload = JSON.stringify({ var payload = JSON.stringify({
'feedback_text': text, 'feedback_text': text,
'feedback_options': options 'feedback_options': options
...@@ -232,7 +195,9 @@ OpenAssessment.Server.prototype = { ...@@ -232,7 +195,9 @@ OpenAssessment.Server.prototype = {
Args: Args:
optionsSelected (object literal): Keys are criteria names, optionsSelected (object literal): Keys are criteria names,
values are the option text the user selected for the criterion. values are the option text the user selected for the criterion.
feedback (string): Written feedback on the submission. criterionFeedback (object literal): Written feedback on a particular criterion,
where keys are criteria names and values are the feedback strings.
overallFeedback (string): Written feedback on the submission as a whole.
Returns: Returns:
A JQuery promise, which resolves with no args if successful A JQuery promise, which resolves with no args if successful
...@@ -240,24 +205,20 @@ OpenAssessment.Server.prototype = { ...@@ -240,24 +205,20 @@ OpenAssessment.Server.prototype = {
Example: Example:
var options = { clarity: "Very clear", precision: "Somewhat precise" }; var options = { clarity: "Very clear", precision: "Somewhat precise" };
var feedback = "Good job!"; var criterionFeedback = { clarity: "The essay was very clear." };
server.peerAssess(options, feedback).done( var overallFeedback = "Good job!";
server.peerAssess(options, criterionFeedback, overallFeedback).done(
function() { console.log("Success!"); } function() { console.log("Success!"); }
).fail( ).fail(
function(errorMsg) { console.log(errorMsg); } function(errorMsg) { console.log(errorMsg); }
); );
**/ **/
peerAssess: function(optionsSelected, feedback) { peerAssess: function(optionsSelected, criterionFeedback, overallFeedback) {
var url = this.url('peer_assess'); var url = this.url('peer_assess');
if (feedback.length > this.maxInputSize) {
return $.Deferred(function(defer) {
var errorMsg = gettext("The comments on this assessment are too long. Please shorten your comments and try to submit them again.");
defer.rejectWith(this, [errorMsg]);
}).promise();
}
var payload = JSON.stringify({ var payload = JSON.stringify({
options_selected: optionsSelected, options_selected: optionsSelected,
feedback: feedback criterion_feedback: criterionFeedback,
overall_feedback: overallFeedback
}); });
return $.Deferred(function(defer) { return $.Deferred(function(defer) {
$.ajax({ type: "POST", url: url, data: payload }).done( $.ajax({ type: "POST", url: url, data: payload }).done(
......
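For reference, the request body that peerAssess() now serializes for the peer_assess handler has the shape sketched below; the criterion names and feedback strings are taken from the docstring example above and are illustrative only.
// Sketch of the JSON payload POSTed to the "peer_assess" handler.
var payload = JSON.stringify({
    options_selected: {clarity: "Very clear", precision: "Somewhat precise"},
    criterion_feedback: {clarity: "The essay was very clear."},  // per-criterion comments; criteria may be omitted
    overall_feedback: "Good job!"                                 // free-text comment on the whole submission
});
// Serialized form (wrapped here for readability):
// {"options_selected": {"clarity": "Very clear", "precision": "Somewhat precise"},
//  "criterion_feedback": {"clarity": "The essay was very clear."},
//  "overall_feedback": "Good job!"}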
/**
JavaScript shared between all open assessment modules.
WARNING: Don't add anything to this file until you're
absolutely sure there isn't a way to encapsulate it in
an object!
**/
/* Namespace for open assessment */
if (typeof OpenAssessment == "undefined" || !OpenAssessment) {
OpenAssessment = {};
}
// Stub gettext if the runtime doesn't provide it
if (typeof window.gettext === 'undefined') {
window.gettext = function(text) { return text; };
}
...@@ -183,8 +183,9 @@ ...@@ -183,8 +183,9 @@
%ui-rubric-answers { %ui-rubric-answers {
margin-top: $baseline-v; margin-top: $baseline-v;
margin-bottom: $baseline-v; margin-bottom: $baseline-v;
margin-left: ($baseline-h/4);
.answer { .answer, .answer--feedback {
@include row(); @include row();
@extend %wipe-last-child; @extend %wipe-last-child;
margin-bottom: ($baseline-v/2); margin-bottom: ($baseline-v/2);
...@@ -195,25 +196,6 @@ ...@@ -195,25 +196,6 @@
.wrapper--input { .wrapper--input {
margin-bottom: ($baseline-v/4); margin-bottom: ($baseline-v/4);
@include media($bp-ds) {
@include span-columns(6 of 6);
}
@include media($bp-dm) {
@include span-columns(4 of 12);
margin-bottom: 0;
}
@include media($bp-dl) {
@include span-columns(4 of 12);
margin-bottom: 0;
}
@include media($bp-dx) {
@include span-columns(4 of 12);
margin-bottom: 0;
}
.answer__value, .answer__label { .answer__value, .answer__label {
display: inline-block; display: inline-block;
vertical-align: middle; vertical-align: middle;
...@@ -317,4 +299,44 @@ ...@@ -317,4 +299,44 @@
margin-left: ($baseline-v/4); margin-left: ($baseline-v/4);
color: $copy-secondary-color; color: $copy-secondary-color;
} }
// ELEM: criterion selects
.answer {
.wrapper--input {
@include media($bp-ds) {
@include span-columns(6 of 6);
}
@include media($bp-dm) {
@include span-columns(4 of 12);
margin-bottom: 0;
}
@include media($bp-dl) {
@include span-columns(4 of 12);
margin-bottom: 0;
}
@include media($bp-dx) {
@include span-columns(4 of 12);
margin-bottom: 0;
}
}
}
// ELEM: open text feedback for answer
.answer--feedback {
margin-top: ($baseline-v);
.answer__label {
margin-bottom: ($baseline-v/4);
}
.answer__value {
@extend %ui-content-longanswer;
min-height: ($baseline-v*5);
margin-right: 0;
}
}
} }
...@@ -839,7 +839,7 @@ ...@@ -839,7 +839,7 @@
// individual question // individual question
.question { .question {
margin-bottom: $baseline-v; margin-bottom: ($baseline-v*1.5);
@extend %wipe-last-child; @extend %wipe-last-child;
} }
...@@ -960,22 +960,61 @@ ...@@ -960,22 +960,61 @@
display: block; display: block;
color: $heading-primary-color; color: $heading-primary-color;
} }
}
// open feedback question // criterion-based feedback
.question--feedback { .answer--feedback {
// individual answers @include media($bp-ds) {
.answer { @include span-columns(6 of 6);
@include fill-parent(); }
}
.answer__value { @include media($bp-dm) {
@extend %copy-2; @include span-columns(12 of 12);
} }
@include media($bp-dl) {
@include span-columns(12 of 12);
}
@include media($bp-dx) {
@include span-columns(12 of 12);
}
.answer--feedback__title {
@extend %action-2;
}
.answer--feedback__title__copy {
margin-left: ($baseline-h/4);
}
.answer--feedback__content {
margin-top: ($baseline-v);
}
.feedback {
@extend %no-list;
@extend %wipe-last-child;
margin-bottom: $baseline-v;
}
.feedback__source {
@extend %hd-5;
@extend %t-strong;
@extend %t-titlecase;
display: block;
color: $heading-secondary-color;
}
.feedback__value {
@extend %copy-3;
display: block;
} }
} }
// feedback form
// overall feedback form
.submission__feedback { .submission__feedback {
@extend %ui-subsection; @extend %ui-subsection;
} }
......
...@@ -9,7 +9,7 @@ ...@@ -9,7 +9,7 @@
Read for conciseness, clarity of thought, and form. Read for conciseness, clarity of thought, and form.
</prompt> </prompt>
<criterion> <criterion feedback='optional'>
<name>concise</name> <name>concise</name>
<prompt>How concise is it?</prompt> <prompt>How concise is it?</prompt>
<option points="0"> <option points="0">
...@@ -75,7 +75,7 @@ ...@@ -75,7 +75,7 @@
</explanation> </explanation>
</option> </option>
</criterion> </criterion>
<criterion> <criterion feedback='optional'>
<name>form</name> <name>form</name>
<prompt>Lastly, how is its form? Punctuation, grammar, and spelling all count.</prompt> <prompt>Lastly, how is its form? Punctuation, grammar, and spelling all count.</prompt>
<option points="0"> <option points="0">
......
<openassessment>
<title>Open Assessment Test</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
</prompt>
<rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion feedback="optional">
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt>
<option points="3">
<name>ﻉซƈﻉɭɭﻉกՇ</name>
<explanation>Extremely concise</explanation>
</option>
<option points="2">
<name>Ġööḋ</name>
<explanation>Concise</explanation>
</option>
<option points="1">
<name>ק๏๏г</name>
<explanation>Wordy</explanation>
</option>
</criterion>
<criterion feedback="optional">
<name>Form</name>
<prompt>How well-formed is it?</prompt>
<option points="3">
<name>Good</name>
<explanation>Good</explanation>
</option>
<option points="2">
<name>Fair</name>
<explanation>Fair</explanation>
</option>
<option points="1">
<name>Poor</name>
<explanation>Poor</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment" must_grade="2" must_be_graded_by="2" />
<assessment name="self-assessment" />
</assessments>
</openassessment>
...@@ -639,5 +639,70 @@ ...@@ -639,5 +639,70 @@
"</rubric>", "</rubric>",
"</openassessment>" "</openassessment>"
] ]
},
"criterion_feedback_optional": {
"title": "Foo",
"prompt": "Test prompt",
"rubric_feedback_prompt": "Test Feedback Prompt",
"start": null,
"due": null,
"submission_start": null,
"submission_due": null,
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"feedback": "optional",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
}
],
"assessments": [
{
"name": "peer-assessment",
"start": null,
"due": null,
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment",
"start": null,
"due": null
}
],
"expected_xml": [
"<openassessment>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion feedback=\"optional\">",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"<feedbackprompt>Test Feedback Prompt</feedbackprompt>",
"</rubric>",
"</openassessment>"
]
} }
} }
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
"order_num": 0, "order_num": 0,
"name": "Test criterion", "name": "Test criterion",
"prompt": "Test criterion prompt", "prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [ "options": [
{ {
"order_num": 0, "order_num": 0,
...@@ -89,6 +90,7 @@ ...@@ -89,6 +90,7 @@
"order_num": 0, "order_num": 0,
"name": "Test criterion", "name": "Test criterion",
"prompt": "Test criterion prompt", "prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [ "options": [
{ {
"order_num": 0, "order_num": 0,
...@@ -143,6 +145,7 @@ ...@@ -143,6 +145,7 @@
"order_num": 0, "order_num": 0,
"name": "Test criterion", "name": "Test criterion",
"prompt": "Test criterion prompt", "prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [ "options": [
{ {
"order_num": 0, "order_num": 0,
...@@ -197,6 +200,7 @@ ...@@ -197,6 +200,7 @@
"order_num": 0, "order_num": 0,
"name": "𝓣𝓮𝓼𝓽 𝓬𝓻𝓲𝓽𝓮𝓻𝓲𝓸𝓷", "name": "𝓣𝓮𝓼𝓽 𝓬𝓻𝓲𝓽𝓮𝓻𝓲𝓸𝓷",
"prompt": "Ŧɇsŧ ȼɍɨŧɇɍɨøn ꝑɍømꝑŧ", "prompt": "Ŧɇsŧ ȼɍɨŧɇɍɨøn ꝑɍømꝑŧ",
"feedback": "disabled",
"options": [ "options": [
{ {
"order_num": 0, "order_num": 0,
...@@ -258,6 +262,7 @@ ...@@ -258,6 +262,7 @@
"order_num": 0, "order_num": 0,
"name": "Test criterion", "name": "Test criterion",
"prompt": "Test criterion prompt", "prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [ "options": [
{ {
"order_num": 0, "order_num": 0,
...@@ -277,6 +282,7 @@ ...@@ -277,6 +282,7 @@
"order_num": 1, "order_num": 1,
"name": "Second criterion", "name": "Second criterion",
"prompt": "Second criterion prompt", "prompt": "Second criterion prompt",
"feedback": "disabled",
"options": [ "options": [
{ {
"order_num": 0, "order_num": 0,
...@@ -327,6 +333,7 @@ ...@@ -327,6 +333,7 @@
"order_num": 0, "order_num": 0,
"name": "Test criterion", "name": "Test criterion",
"prompt": "Test criterion prompt", "prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [ "options": [
{ {
"order_num": 0, "order_num": 0,
...@@ -383,6 +390,7 @@ ...@@ -383,6 +390,7 @@
"order_num": 0, "order_num": 0,
"name": "Test criterion", "name": "Test criterion",
"prompt": "Test criterion prompt", "prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [ "options": [
{ {
"order_num": 0, "order_num": 0,
...@@ -439,6 +447,7 @@ ...@@ -439,6 +447,7 @@
"order_num": 0, "order_num": 0,
"name": "Test criterion", "name": "Test criterion",
"prompt": "Test criterion prompt", "prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [ "options": [
{ {
"order_num": 0, "order_num": 0,
...@@ -464,5 +473,82 @@ ...@@ -464,5 +473,82 @@
"must_be_graded_by": 3 "must_be_graded_by": 3
} }
] ]
},
"criterion_feedback_optional": {
"xml": [
"<openassessment>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"<criterion feedback=\"optional\">",
"<name>Second criterion</name>",
"<prompt>Second criterion prompt</prompt>",
"<option points=\"1\"><name>Maybe</name><explanation>Maybe explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
],
"title": "Foo",
"prompt": "Test prompt",
"start": "2000-01-01T00:00:00",
"due": "3000-01-01T00:00:00",
"submission_start": null,
"submission_due": null,
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
},
{
"order_num": 1,
"name": "Second criterion",
"prompt": "Second criterion prompt",
"feedback": "optional",
"options": [
{
"order_num": 0,
"points": 1,
"name": "Maybe",
"explanation": "Maybe explanation"
}
]
}
],
"assessments": [
{
"name": "peer-assessment",
"start": "2014-02-27T09:46:28",
"due": "2014-03-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
}
]
} }
} }
...@@ -297,5 +297,26 @@ ...@@ -297,5 +297,26 @@
"</rubric>", "</rubric>",
"</openassessment>" "</openassessment>"
] ]
},
"invalid_criterion_feedback_value": {
"xml": [
"<openassessment>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" start=\"2014-04-01T00:00:00\" due=\"2014-06-01T00:00:00\" must_grade=\"2\" must_be_graded_by=\"1\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion feedback=\"invalid\">",
"<name>Test criterion</name>",
"<prompt>Test prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
]
} }
} }
...@@ -20,11 +20,18 @@ class TestGrade(XBlockHandlerTestCase): ...@@ -20,11 +20,18 @@ class TestGrade(XBlockHandlerTestCase):
ASSESSMENTS = [ ASSESSMENTS = [
{ {
'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'}, 'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
'feedback': u'єאςєɭɭєภՇ ฬ๏гк!', 'criterion_feedback': {
u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'Peer 1: ฝﻉɭɭ ɗѻกﻉ!'
},
'overall_feedback': u'єאςєɭɭєภՇ ฬ๏гк!',
}, },
{ {
'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'}, 'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
'feedback': u'Good job!', 'criterion_feedback': {
u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'Peer 2: ฝﻉɭɭ ɗѻกﻉ!',
u'Form': u'Peer 2: ƒαιя נσв'
},
'overall_feedback': u'Good job!',
}, },
] ]
...@@ -58,6 +65,37 @@ class TestGrade(XBlockHandlerTestCase): ...@@ -58,6 +65,37 @@ class TestGrade(XBlockHandlerTestCase):
self.assertIn('self', resp.lower()) self.assertIn('self', resp.lower())
self.assertIn('complete', resp.lower()) self.assertIn('complete', resp.lower())
@scenario('data/feedback_per_criterion.xml', user_id='Bernard')
def test_render_grade_feedback_per_criterion(self, xblock):
# Submit, assess, and render the grade view
self._create_submission_and_assessments(
xblock, self.SUBMISSION, self.PEERS, self.ASSESSMENTS, self.ASSESSMENTS[0]
)
# Verify that the context for the grade complete page contains the feedback
_, context = xblock.render_grade_complete(xblock.get_workflow_info())
criteria = context['rubric_criteria']
self.assertEqual(criteria[0]['feedback'], [
u'Peer 2: ฝﻉɭɭ ɗѻกﻉ!',
u'Peer 1: ฝﻉɭɭ ɗѻกﻉ!',
])
self.assertEqual(criteria[1]['feedback'], [u'Peer 2: ƒαιя נσв'])
# The order of the peers in the per-criterion feedback needs
# to match the order of the peer assessments
# We verify this by checking that the first peer assessment
# has the criteria feedback matching the first feedback
# for each criterion.
assessments = context['peer_assessments']
first_peer_feedback = [part['feedback'] for part in assessments[0]['parts']]
self.assertItemsEqual(first_peer_feedback, [u'Peer 2: ฝﻉɭɭ ɗѻกﻉ!', u'Peer 2: ƒαιя נσв'])
# Integration test: verify that the context makes it to the rendered template
resp = self.request(xblock, 'render_grade', json.dumps(dict()))
self.assertIn(u'Peer 1: ฝﻉɭɭ ɗѻกﻉ!', resp.decode('utf-8'))
self.assertIn(u'Peer 2: ฝﻉɭɭ ɗѻกﻉ!', resp.decode('utf-8'))
self.assertIn(u'Peer 2: ƒαιя נσв', resp.decode('utf-8'))
@scenario('data/grade_scenario.xml', user_id='Omar') @scenario('data/grade_scenario.xml', user_id='Omar')
def test_grade_waiting(self, xblock): def test_grade_waiting(self, xblock):
# Waiting to be assessed by a peer # Waiting to be assessed by a peer
...@@ -197,15 +235,23 @@ class TestGrade(XBlockHandlerTestCase): ...@@ -197,15 +235,23 @@ class TestGrade(XBlockHandlerTestCase):
if not waiting_for_peer: if not waiting_for_peer:
peer_api.create_assessment( peer_api.create_assessment(
scorer_sub['uuid'], scorer_name, scorer_sub['uuid'], scorer_name,
assessment, {'criteria': xblock.rubric_criteria}, assessment['options_selected'],
assessment['criterion_feedback'],
assessment['overall_feedback'],
{'criteria': xblock.rubric_criteria},
xblock.get_assessment_module('peer-assessment')['must_be_graded_by'] xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
) )
# Have our user make assessments (so she can get a score) # Have our user make assessments (so she can get a score)
for asmnt in peer_assessments: for asmnt in peer_assessments:
new_submission = peer_api.get_submission_to_assess(submission['uuid'], len(peers)) peer_api.get_submission_to_assess(submission['uuid'], len(peers))
peer_api.create_assessment( peer_api.create_assessment(
submission['uuid'], student_id, asmnt, {'criteria': xblock.rubric_criteria}, submission['uuid'],
student_id,
asmnt['options_selected'],
asmnt['criterion_feedback'],
asmnt['overall_feedback'],
{'criteria': xblock.rubric_criteria},
xblock.get_assessment_module('peer-assessment')['must_be_graded_by'] xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
) )
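The calls above exercise the new per-criterion signature of peer_api.create_assessment, which now takes the selected options, the per-criterion feedback, and the overall feedback as separate arguments. A minimal sketch of one such call, with the argument order inferred from this diff, the import path assumed, and placeholder values throughout:

from openassessment.assessment import peer_api  # import path assumed

def assess_peer(scorer_submission_uuid, scorer_id, rubric_criteria, must_be_graded_by):
    # Sketch only: argument order taken from the calls in this diff; values are placeholders.
    return peer_api.create_assessment(
        scorer_submission_uuid,                # UUID of the scorer's own submission (as used in these tests)
        scorer_id,                             # ID of the student creating the assessment
        {u'Form': u'Fair'},                    # options_selected: criterion name -> option name
        {u'Form': u'Nice, tight structure.'},  # criterion_feedback: criterion name -> feedback text
        u'Good job overall!',                  # overall_feedback
        {'criteria': rubric_criteria},         # rubric definition
        must_be_graded_by,                     # required number of peer assessments
    )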
......
...@@ -15,10 +15,14 @@ from .base import XBlockHandlerTestCase, scenario ...@@ -15,10 +15,14 @@ from .base import XBlockHandlerTestCase, scenario
class TestPeerAssessment(XBlockHandlerTestCase): class TestPeerAssessment(XBlockHandlerTestCase):
"""
Test integration of the OpenAssessment XBlock with the peer assessment API.
"""
ASSESSMENT = { ASSESSMENT = {
'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'}, 'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
'feedback': u'єאςєɭɭєภՇ ฬ๏гк!', 'criterion_feedback': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ı ʇɥonƃɥʇ ʇɥıs ʍɐs ʌǝɹʎ ɔouɔısǝ.'},
'overall_feedback': u'єאςєɭɭєภՇ ฬ๏гк!',
} }
SUBMISSION = u'ՇﻉรՇ รપ๒๓ٱรรٱѻก' SUBMISSION = u'ՇﻉรՇ รપ๒๓ٱรรٱѻก'
...@@ -42,7 +46,9 @@ class TestPeerAssessment(XBlockHandlerTestCase): ...@@ -42,7 +46,9 @@ class TestPeerAssessment(XBlockHandlerTestCase):
peer_api.create_assessment( peer_api.create_assessment(
hal_submission['uuid'], hal_submission['uuid'],
hal_student_item['student_id'], hal_student_item['student_id'],
assessment, assessment['options_selected'],
assessment['criterion_feedback'],
assessment['overall_feedback'],
{'criteria': xblock.rubric_criteria}, {'criteria': xblock.rubric_criteria},
1 1
) )
...@@ -53,7 +59,9 @@ class TestPeerAssessment(XBlockHandlerTestCase): ...@@ -53,7 +59,9 @@ class TestPeerAssessment(XBlockHandlerTestCase):
peer_api.create_assessment( peer_api.create_assessment(
sally_submission['uuid'], sally_submission['uuid'],
sally_student_item['student_id'], sally_student_item['student_id'],
assessment, assessment['options_selected'],
assessment['criterion_feedback'],
assessment['overall_feedback'],
{'criteria': xblock.rubric_criteria}, {'criteria': xblock.rubric_criteria},
1 1
) )
...@@ -75,104 +83,8 @@ class TestPeerAssessment(XBlockHandlerTestCase): ...@@ -75,104 +83,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
"Hal".encode('utf-8') in peer_response.body) "Hal".encode('utf-8') in peer_response.body)
@scenario('data/peer_assessment_scenario.xml', user_id='Bob') @scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_peer_assess_handler(self, xblock):
# Create a submission for this problem from another user
student_item = xblock.get_student_item_dict()
student_item['student_id'] = 'Sally'
submission = xblock.create_submission(student_item, self.SUBMISSION)
# Create a submission for the scorer (required before assessing another student)
another_student = copy.deepcopy(student_item)
another_student['student_id'] = "Bob"
another_submission = xblock.create_submission(another_student, self.SUBMISSION)
peer_api.get_submission_to_assess(another_submission['uuid'], 3)
# Submit an assessment and expect a successful response
assessment = copy.deepcopy(self.ASSESSMENT)
resp = self.request(xblock, 'peer_assess', json.dumps(assessment), response_format='json')
self.assertTrue(resp['success'])
# Retrieve the assessment and check that it matches what we sent
actual = peer_api.get_assessments(submission['uuid'], scored_only=False)
self.assertEqual(len(actual), 1)
self.assertEqual(actual[0]['submission_uuid'], submission['uuid'])
self.assertEqual(actual[0]['points_earned'], 5)
self.assertEqual(actual[0]['points_possible'], 6)
self.assertEqual(actual[0]['scorer_id'], 'Bob')
self.assertEqual(actual[0]['score_type'], 'PE')
self.assertEqual(len(actual[0]['parts']), 2)
parts = sorted(actual[0]['parts'])
self.assertEqual(parts[0]['option']['criterion']['name'], u'Form')
self.assertEqual(parts[0]['option']['name'], 'Fair')
self.assertEqual(parts[1]['option']['criterion']['name'], u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮')
self.assertEqual(parts[1]['option']['name'], u'ﻉซƈﻉɭɭﻉกՇ')
self.assertEqual(actual[0]['feedback'], assessment['feedback'])
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_submission_uuid_input_regression(self, xblock):
# Create a submission for this problem from another user
student_item = xblock.get_student_item_dict()
student_item['student_id'] = 'Sally'
submission = xblock.create_submission(student_item, self.SUBMISSION)
# Create a submission for the scorer (required before assessing another student)
another_student = copy.deepcopy(student_item)
another_student['student_id'] = "Bob"
another_sub = xblock.create_submission(another_student, self.SUBMISSION)
peer_api.get_submission_to_assess(another_sub['uuid'], 3)
# Submit an assessment and expect a successful response
assessment = copy.deepcopy(self.ASSESSMENT)
# An assessment containing a submission_uuid should not be used in the
# request. This does not exercise any current code, but checks for
# regressions on use of an external submission_uuid.
assessment['submission_uuid'] = "Complete and Random Junk."
resp = self.request(xblock, 'peer_assess', json.dumps(assessment), response_format='json')
self.assertTrue(resp['success'])
# Retrieve the assessment and check that it matches what we sent
actual = peer_api.get_assessments(submission['uuid'], scored_only=False)
self.assertEqual(len(actual), 1)
self.assertNotEqual(actual[0]['submission_uuid'], assessment['submission_uuid'])
self.assertEqual(actual[0]['points_earned'], 5)
self.assertEqual(actual[0]['points_possible'], 6)
self.assertEqual(actual[0]['scorer_id'], 'Bob')
self.assertEqual(actual[0]['score_type'], 'PE')
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_peer_assess_rubric_option_mismatch(self, xblock):
# Create a submission for this problem from another user
student_item = xblock.get_student_item_dict()
student_item['student_id'] = 'Sally'
xblock.create_submission(student_item, self.SUBMISSION)
# Create a submission for the scorer (required before assessing another student)
another_student = copy.deepcopy(student_item)
another_student['student_id'] = "Bob"
xblock.create_submission(another_student, self.SUBMISSION)
# Submit an assessment, but mutate the options selected so they do NOT match the rubric
assessment = copy.deepcopy(self.ASSESSMENT)
assessment['options_selected']['invalid'] = 'not a part of the rubric!'
resp = self.request(xblock, 'peer_assess', json.dumps(assessment), response_format='json')
# Expect an error response
self.assertFalse(resp['success'])
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_missing_keys_in_request(self, xblock): def test_missing_keys_in_request(self, xblock):
for missing in ['feedback', 'options_selected']: for missing in ['criterion_feedback', 'overall_feedback', 'options_selected']:
assessment = copy.deepcopy(self.ASSESSMENT) assessment = copy.deepcopy(self.ASSESSMENT)
del assessment[missing] del assessment[missing]
resp = self.request(xblock, 'peer_assess', json.dumps(assessment), response_format='json') resp = self.request(xblock, 'peer_assess', json.dumps(assessment), response_format='json')
...@@ -216,7 +128,9 @@ class TestPeerAssessment(XBlockHandlerTestCase): ...@@ -216,7 +128,9 @@ class TestPeerAssessment(XBlockHandlerTestCase):
peer_api.create_assessment( peer_api.create_assessment(
hal_submission['uuid'], hal_submission['uuid'],
hal_student_item['student_id'], hal_student_item['student_id'],
assessment, assessment['options_selected'],
assessment['criterion_feedback'],
assessment['overall_feedback'],
{'criteria': xblock.rubric_criteria}, {'criteria': xblock.rubric_criteria},
1 1
) )
...@@ -228,7 +142,9 @@ class TestPeerAssessment(XBlockHandlerTestCase): ...@@ -228,7 +142,9 @@ class TestPeerAssessment(XBlockHandlerTestCase):
peer_api.create_assessment( peer_api.create_assessment(
sally_submission['uuid'], sally_submission['uuid'],
sally_student_item['student_id'], sally_student_item['student_id'],
assessment, assessment['options_selected'],
assessment['criterion_feedback'],
assessment['overall_feedback'],
{'criteria': xblock.rubric_criteria}, {'criteria': xblock.rubric_criteria},
1 1
) )
...@@ -248,7 +164,9 @@ class TestPeerAssessment(XBlockHandlerTestCase): ...@@ -248,7 +164,9 @@ class TestPeerAssessment(XBlockHandlerTestCase):
peer_api.create_assessment( peer_api.create_assessment(
submission['uuid'], submission['uuid'],
student_item['student_id'], student_item['student_id'],
assessment, assessment['options_selected'],
assessment['criterion_feedback'],
assessment['overall_feedback'],
{'criteria': xblock.rubric_criteria}, {'criteria': xblock.rubric_criteria},
1 1
) )
...@@ -263,7 +181,9 @@ class TestPeerAssessment(XBlockHandlerTestCase): ...@@ -263,7 +181,9 @@ class TestPeerAssessment(XBlockHandlerTestCase):
peer_api.create_assessment( peer_api.create_assessment(
submission['uuid'], submission['uuid'],
student_item['student_id'], student_item['student_id'],
assessment, assessment['options_selected'],
assessment['criterion_feedback'],
assessment['overall_feedback'],
{'criteria': xblock.rubric_criteria}, {'criteria': xblock.rubric_criteria},
1 1
) )
...@@ -571,3 +491,144 @@ class TestPeerAssessmentRender(XBlockHandlerTestCase): ...@@ -571,3 +491,144 @@ class TestPeerAssessmentRender(XBlockHandlerTestCase):
# Verify that we render without error # Verify that we render without error
resp = self.request(xblock, 'render_peer_assessment', json.dumps({})) resp = self.request(xblock, 'render_peer_assessment', json.dumps({}))
self.assertGreater(len(resp), 0) self.assertGreater(len(resp), 0)
class TestPeerAssessHandler(XBlockHandlerTestCase):
"""
Tests for submitting a peer assessment.
"""
ASSESSMENT = {
'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
'criterion_feedback': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ı ʇɥonƃɥʇ ʇɥıs ʍɐs ʌǝɹʎ ɔouɔısǝ.'},
'overall_feedback': u'єאςєɭɭєภՇ ฬ๏гк!',
}
ASSESSMENT_WITH_SUBMISSION_UUID = {
'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
'criterion_feedback': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ı ʇɥonƃɥʇ ʇɥıs ʍɐs ʌǝɹʎ ɔouɔısǝ.'},
'overall_feedback': u'єאςєɭɭєภՇ ฬ๏гк!',
'submission_uuid': "Complete and Random Junk."
}
ASSESSMENT_WITH_INVALID_OPTION = {
'options_selected': {
u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ',
u'Form': u'Fair',
u'invalid': 'not a part of the rubric!'
},
'criterion_feedback': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ı ʇɥonƃɥʇ ʇɥıs ʍɐs ʌǝɹʎ ɔouɔısǝ.'},
'overall_feedback': u'єאςєɭɭєภՇ ฬ๏гк!',
}
SUBMISSION = u'ՇﻉรՇ รપ๒๓ٱรรٱѻก'
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_peer_assess_handler(self, xblock):
# Submit a peer assessment
submission_uuid, assessment = self._submit_peer_assessment(xblock, 'Sally', 'Bob', self.ASSESSMENT)
# Check that the stored assessment matches what we expect
self.assertEqual(assessment['submission_uuid'], submission_uuid)
self.assertEqual(assessment['points_earned'], 5)
self.assertEqual(assessment['points_possible'], 6)
self.assertEqual(assessment['scorer_id'], 'Bob')
self.assertEqual(assessment['score_type'], 'PE')
self.assertEqual(len(assessment['parts']), 2)
parts = sorted(assessment['parts'])
self.assertEqual(parts[0]['option']['criterion']['name'], u'Form')
self.assertEqual(parts[0]['option']['name'], 'Fair')
self.assertEqual(parts[1]['option']['criterion']['name'], u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮')
self.assertEqual(parts[1]['option']['name'], u'ﻉซƈﻉɭɭﻉกՇ')
@scenario('data/feedback_per_criterion.xml', user_id='Bob')
def test_peer_assess_feedback(self, xblock):
# Submit a peer assessment
_, assessment = self._submit_peer_assessment(xblock, 'Sally', 'Bob', self.ASSESSMENT)
# Retrieve the assessment and check the feedback
self.assertEqual(assessment['feedback'], self.ASSESSMENT['overall_feedback'])
for part in assessment['parts']:
part_criterion_name = part['option']['criterion']['name']
expected_feedback = self.ASSESSMENT['criterion_feedback'].get(part_criterion_name, '')
self.assertEqual(part['feedback'], expected_feedback)
@scenario('data/grade_scenario.xml', user_id='Bob')
def test_peer_assess_send_unsolicited_criterion_feedback(self, xblock):
# Submit an assessment containing per-criterion feedback,
# even though the rubric in this scenario has per-criterion feedback disabled.
_, assessment = self._submit_peer_assessment(xblock, 'Sally', 'Bob', self.ASSESSMENT)
# Expect that the per-criterion feedback was ignored
for part in assessment['parts']:
self.assertEqual(part['feedback'], '')
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_submission_uuid_input_regression(self, xblock):
# Submit a peer assessment
submission_uuid, assessment = self._submit_peer_assessment(
xblock, 'Sally', 'Bob', self.ASSESSMENT_WITH_SUBMISSION_UUID
)
# Retrieve the assessment and check that it matches what we sent
self.assertEqual(assessment['submission_uuid'], submission_uuid)
self.assertEqual(assessment['points_earned'], 5)
self.assertEqual(assessment['points_possible'], 6)
self.assertEqual(assessment['scorer_id'], 'Bob')
self.assertEqual(assessment['score_type'], 'PE')
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_peer_assess_rubric_option_mismatch(self, xblock):
# Submit an assessment, but mutate the options selected so they do NOT match the rubric
# Expect a failure response
self._submit_peer_assessment(
xblock, 'Sally', 'Bob', self.ASSESSMENT_WITH_INVALID_OPTION,
expect_failure=True
)
def _submit_peer_assessment(self, xblock, student_id, scorer_id, assessment, expect_failure=False):
"""
Create submissions for a student and scorer, then create a peer assessment
from the scorer.
Args:
xblock (OpenAssessmentBlock)
student_id (unicode): The ID of the student being assessed.
scorer_id (unicode): The ID of the student creating the assessment.
assessment (dict): Serialized assessment model.
Kwargs:
expect_failure (bool): If True, expect a failure response and return None.
Returns:
tuple of (submission UUID, assessment dict) retrieved from the API,
or None if a failure response was expected.
"""
# Create a submission for this problem from another user
student_item = xblock.get_student_item_dict()
student_item['student_id'] = student_id
submission = xblock.create_submission(student_item, self.SUBMISSION)
# Create a submission for the scorer (required before assessing another student)
another_student = copy.deepcopy(student_item)
another_student['student_id'] = scorer_id
another_submission = xblock.create_submission(another_student, self.SUBMISSION)
# Pull the submission to assess
peer_api.get_submission_to_assess(another_submission['uuid'], 3)
# Submit an assessment and expect a successful response
assessment = copy.deepcopy(assessment)
resp = self.request(xblock, 'peer_assess', json.dumps(assessment), response_format='json')
if expect_failure:
self.assertFalse(resp['success'])
return None
else:
self.assertTrue(resp['success'])
# Retrieve the peer assessment
retrieved_assessment = peer_api.get_assessments(submission['uuid'], scored_only=False)[0]
return submission['uuid'], retrieved_assessment
...@@ -13,7 +13,6 @@ class SaveResponseTest(XBlockHandlerTestCase): ...@@ -13,7 +13,6 @@ class SaveResponseTest(XBlockHandlerTestCase):
@scenario('data/save_scenario.xml', user_id="Daniels") @scenario('data/save_scenario.xml', user_id="Daniels")
def test_default_saved_response_blank(self, xblock): def test_default_saved_response_blank(self, xblock):
resp = self.request(xblock, 'render_submission', json.dumps({})) resp = self.request(xblock, 'render_submission', json.dumps({}))
self.assertIn('<textarea id="submission__answer__value" placeholder=""></textarea>', resp)
self.assertIn('response has not been saved', resp) self.assertIn('response has not been saved', resp)
@ddt.file_data('data/save_responses.json') @ddt.file_data('data/save_responses.json')
...@@ -28,10 +27,7 @@ class SaveResponseTest(XBlockHandlerTestCase): ...@@ -28,10 +27,7 @@ class SaveResponseTest(XBlockHandlerTestCase):
# Reload the submission UI # Reload the submission UI
resp = self.request(xblock, 'render_submission', json.dumps({})) resp = self.request(xblock, 'render_submission', json.dumps({}))
expected_html = u'<textarea id="submission__answer__value" placeholder="">{submitted}</textarea>'.format( self.assertIn(submission_text, resp.decode('utf-8'))
submitted=submission_text
)
self.assertIn(expected_html, resp.decode('utf-8'))
self.assertIn('saved but not submitted', resp.lower()) self.assertIn('saved but not submitted', resp.lower())
@scenario('data/save_scenario.xml', user_id="Valchek") @scenario('data/save_scenario.xml', user_id="Valchek")
......
...@@ -41,6 +41,7 @@ class TestSerializeContent(TestCase): ...@@ -41,6 +41,7 @@ class TestSerializeContent(TestCase):
"order_num": 0, "order_num": 0,
"name": "Test criterion", "name": "Test criterion",
"prompt": "Test criterion prompt", "prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [ "options": [
{ {
"order_num": 0, "order_num": 0,
......
...@@ -113,6 +113,11 @@ def _serialize_criteria(criteria_root, criteria_list): ...@@ -113,6 +113,11 @@ def _serialize_criteria(criteria_root, criteria_list):
criterion_prompt = etree.SubElement(criterion_el, 'prompt') criterion_prompt = etree.SubElement(criterion_el, 'prompt')
criterion_prompt.text = unicode(criterion.get('prompt', u'')) criterion_prompt.text = unicode(criterion.get('prompt', u''))
# Criterion feedback disabled or optional
# If disabled, do not set the attribute.
if criterion.get('feedback') == "optional":
criterion_el.set('feedback', 'optional')
# Criterion options # Criterion options
options_list = criterion.get('options', None) options_list = criterion.get('options', None)
if isinstance(options_list, list): if isinstance(options_list, list):
...@@ -261,6 +266,13 @@ def _parse_criteria_xml(criteria_root): ...@@ -261,6 +266,13 @@ def _parse_criteria_xml(criteria_root):
else: else:
raise UpdateFromXmlError(_('Every "criterion" element must contain a "prompt" element.')) raise UpdateFromXmlError(_('Every "criterion" element must contain a "prompt" element.'))
# Criterion feedback (disabled or optional)
criterion_feedback = criterion.get('feedback', 'disabled')
if criterion_feedback in ['optional', 'disabled']:
criterion_dict['feedback'] = criterion_feedback
else:
raise UpdateFromXmlError(_('Invalid value for "feedback" attribute: if specified, it must be set to "optional"'))
# Criterion options # Criterion options
criterion_dict['options'] = _parse_options_xml(criterion) criterion_dict['options'] = _parse_options_xml(criterion)
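Taken together with the serializer change above, the "feedback" attribute lives on the <criterion> element, defaults to "disabled" when absent, and may be set to "optional". A small self-contained sketch of that behavior (the helper and the XML snippet are illustrative, not part of the codebase):

from lxml import etree

# Hypothetical example of a criterion carrying the new attribute.
CRITERION_XML = u"""
<criterion feedback="optional">
    <name>Form</name>
    <prompt>How well organized is the response?</prompt>
</criterion>
"""

def read_criterion_feedback(criterion_xml):
    criterion = etree.fromstring(criterion_xml)
    # A missing attribute means per-criterion feedback is disabled.
    return criterion.get('feedback', 'disabled')

print(read_criterion_feedback(CRITERION_XML))    # "optional"
print(read_criterion_feedback(u'<criterion/>'))  # "disabled"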
...@@ -308,14 +320,12 @@ def _parse_rubric_xml(rubric_root): ...@@ -308,14 +320,12 @@ def _parse_rubric_xml(rubric_root):
return rubric_dict return rubric_dict
def _parse_assessments_xml(assessments_root, start, due): def _parse_assessments_xml(assessments_root):
""" """
Parse the <assessments> element in the OpenAssessment XBlock's content XML. Parse the <assessments> element in the OpenAssessment XBlock's content XML.
Args: Args:
assessments_root (lxml.etree.Element): The root of the <assessments> node in the tree. assessments_root (lxml.etree.Element): The root of the <assessments> node in the tree.
start (unicode): ISO-formatted date string representing the start time of the problem.
due (unicode): ISO-formatted date string representing the due date of the problem.
Returns: Returns:
list of assessment dicts list of assessment dicts
...@@ -513,7 +523,7 @@ def update_from_xml(oa_block, root, validator=DEFAULT_VALIDATOR): ...@@ -513,7 +523,7 @@ def update_from_xml(oa_block, root, validator=DEFAULT_VALIDATOR):
if assessments_el is None: if assessments_el is None:
raise UpdateFromXmlError(_('Every assessment must contain an "assessments" element.')) raise UpdateFromXmlError(_('Every assessment must contain an "assessments" element.'))
else: else:
assessments = _parse_assessments_xml(assessments_el, oa_block.start, oa_block.due) assessments = _parse_assessments_xml(assessments_el)
# Validate # Validate
success, msg = validator(rubric, {'due': submission_due}, assessments) success, msg = validator(rubric, {'due': submission_due}, assessments)
......
...@@ -16,6 +16,7 @@ module.exports = function(config) { ...@@ -16,6 +16,7 @@ module.exports = function(config) {
files: [ files: [
'lib/jquery.min.js', 'lib/jquery.min.js',
'lib/*.js', 'lib/*.js',
'src/oa_shared.js',
'src/*.js', 'src/*.js',
'spec/*.js', 'spec/*.js',
......
...@@ -30,4 +30,4 @@ if [[ -n "$DEBUG_JS" ]]; then ...@@ -30,4 +30,4 @@ if [[ -n "$DEBUG_JS" ]]; then
UGLIFY_EXTRA_ARGS="--beautify" UGLIFY_EXTRA_ARGS="--beautify"
fi fi
node_modules/.bin/uglifyjs $STATIC_JS/src/*.js $UGLIFY_EXTRA_ARGS > "$STATIC_JS/openassessment.min.js" node_modules/.bin/uglifyjs $STATIC_JS/src/oa_shared.js $STATIC_JS/src/*.js $UGLIFY_EXTRA_ARGS > "$STATIC_JS/openassessment.min.js"