Commit ce6e0b93 authored by Joe Blaylock, committed by Will Daly

Allow multiple feedback options to be selected

parent 50583fbc
from django.contrib import admin
from openassessment.assessment.models import Assessment, AssessmentFeedback, AssessmentPart, Rubric, Criterion, CriterionOption, PeerWorkflow, PeerWorkflowItem
from openassessment.assessment.models import (
Assessment, AssessmentPart, Rubric,
AssessmentFeedback, AssessmentFeedbackOption,
Criterion, CriterionOption,
PeerWorkflow, PeerWorkflowItem,
)
admin.site.register(Assessment)
admin.site.register(AssessmentPart)
admin.site.register(AssessmentFeedback)
admin.site.register(AssessmentFeedbackOption)
admin.site.register(Rubric)
admin.site.register(Criterion)
admin.site.register(CriterionOption)
......
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'AssessmentFeedbackOption'
db.create_table('assessment_assessmentfeedbackoption', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('text', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
))
db.send_create_signal('assessment', ['AssessmentFeedbackOption'])
# Deleting field 'AssessmentFeedback.feedback'
db.delete_column('assessment_assessmentfeedback', 'feedback')
# Deleting field 'AssessmentFeedback.helpfulness'
db.delete_column('assessment_assessmentfeedback', 'helpfulness')
# Adding field 'AssessmentFeedback.feedback_text'
db.add_column('assessment_assessmentfeedback', 'feedback_text',
self.gf('django.db.models.fields.TextField')(default='', max_length=10000),
keep_default=False)
# Adding M2M table for field options on 'AssessmentFeedback'
db.create_table('assessment_assessmentfeedback_options', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('assessmentfeedback', models.ForeignKey(orm['assessment.assessmentfeedback'], null=False)),
('assessmentfeedbackoption', models.ForeignKey(orm['assessment.assessmentfeedbackoption'], null=False))
))
db.create_unique('assessment_assessmentfeedback_options', ['assessmentfeedback_id', 'assessmentfeedbackoption_id'])
def backwards(self, orm):
# Deleting model 'AssessmentFeedbackOption'
db.delete_table('assessment_assessmentfeedbackoption')
# Adding field 'AssessmentFeedback.feedback'
db.add_column('assessment_assessmentfeedback', 'feedback',
self.gf('django.db.models.fields.TextField')(default='', max_length=10000),
keep_default=False)
# Adding field 'AssessmentFeedback.helpfulness'
db.add_column('assessment_assessmentfeedback', 'helpfulness',
self.gf('django.db.models.fields.IntegerField')(default=2),
keep_default=False)
# Deleting field 'AssessmentFeedback.feedback_text'
db.delete_column('assessment_assessmentfeedback', 'feedback_text')
# Removing M2M table for field options on 'AssessmentFeedback'
db.delete_table('assessment_assessmentfeedback_options')
models = {
'assessment.assessment': {
'Meta': {'ordering': "['-scored_at', '-id']", 'object_name': 'Assessment'},
'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"}),
'score_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'scored_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'scorer_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'assessment.assessmentfeedback': {
'Meta': {'object_name': 'AssessmentFeedback'},
'assessments': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.Assessment']"}),
'feedback_text': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'to': "orm['assessment.AssessmentFeedbackOption']", 'symmetrical': 'False'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'assessment.assessmentfeedbackoption': {
'Meta': {'object_name': 'AssessmentFeedbackOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'assessment.assessmentpart': {
'Meta': {'object_name': 'AssessmentPart'},
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parts'", 'to': "orm['assessment.Assessment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.CriterionOption']"})
},
'assessment.criterion': {
'Meta': {'ordering': "['rubric', 'order_num']", 'object_name': 'Criterion'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'prompt': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'criteria'", 'to': "orm['assessment.Rubric']"})
},
'assessment.criterionoption': {
'Meta': {'ordering': "['criterion', 'order_num']", 'object_name': 'CriterionOption'},
'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['assessment.Criterion']"}),
'explanation': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'points': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'assessment.peerworkflow': {
'Meta': {'ordering': "['created_at', 'id']", 'object_name': 'PeerWorkflow'},
'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'assessment.peerworkflowitem': {
'Meta': {'ordering': "['started_at', 'id']", 'object_name': 'PeerWorkflowItem'},
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Assessment']", 'null': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded_by'", 'to': "orm['assessment.PeerWorkflow']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scored': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scorer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded'", 'to': "orm['assessment.PeerWorkflow']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'assessment.rubric': {
'Meta': {'object_name': 'Rubric'},
'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['assessment']
\ No newline at end of file
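For illustration, a minimal sketch (assumed usage, not part of this commit) of how the new `assessment_assessmentfeedback_options` join table is populated through the ORM once the migration above has run; the UUID below is a placeholder:

from openassessment.assessment.models import (
    AssessmentFeedback, AssessmentFeedbackOption
)

# Create (and thereby save) a feedback row; the M2M needs a primary key.
feedback = AssessmentFeedback.objects.create(
    submission_uuid='example-uuid',  # hypothetical value
    feedback_text='The assessments were fair.',
)

# Options are get-or-created by unique text, so rewording an option later
# produces a new row instead of mutating historical data.
option, _ = AssessmentFeedbackOption.objects.get_or_create(
    text='These assessments were useful.',
)

# Writes one row into assessment_assessmentfeedback_options.
feedback.options.add(option)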
......@@ -405,19 +405,60 @@ class AssessmentPart(models.Model):
])
class AssessmentFeedbackOption(models.Model):
"""
Option a student can select to provide feedback on the feedback they received.
`AssessmentFeedback` stands in a many-to-many relationship with `AssessmentFeedbackOption`s:
a student can select zero or more `AssessmentFeedbackOption`s, and each option can appear in many students' feedback.
Over time, we may decide to add, delete, or reword assessment feedback options.
To preserve data integrity, we will always get-or-create `AssessmentFeedbackOption`s
based on the option text.
"""
text = models.CharField(max_length=255, unique=True)
class AssessmentFeedback(models.Model):
"""A response to a submission's feedback, judging accuracy or helpfulness."""
"""
Feedback on feedback. When students receive their grades, they
can provide feedback on how they were assessed, to be reviewed by course staff.
This consists of free-form written feedback
("Please provide any thoughts or comments on the feedback you received from your peers")
as well as zero or more feedback options
("Please select the statements below that reflect what you think of this peer grading experience")
"""
submission_uuid = models.CharField(max_length=128, unique=True, db_index=True)
assessments = models.ManyToManyField(Assessment, related_name='assessment_feedback', default=None)
HELPFULNESS_CHOICES = (
(0, 'These results were not at all helpful'),
(1, 'These results were somewhat helpful'),
(2, 'These results were helpful'),
(3, 'These results were very helpful'),
(4, 'These results were extremely helpful'),
)
helpfulness = models.IntegerField(choices=HELPFULNESS_CHOICES, default=2)
feedback = models.TextField(max_length=10000, default="")
feedback_text = models.TextField(max_length=10000, default="")
options = models.ManyToManyField(AssessmentFeedbackOption, related_name='assessment_feedback', default=None)
def add_options(self, selected_options):
"""
Select feedback options for this assessment.
Students can select zero or more options.
Note: you *must* save the model before calling this method.
Args:
selected_options (list of unicode): List of option texts that the user selected.
Raises:
DatabaseError
"""
# First, retrieve options that already exist
options = list(AssessmentFeedbackOption.objects.filter(text__in=selected_options))
# If there are additional options that do not yet exist, create them
new_options = [text for text in selected_options if text not in [opt.text for opt in options]]
for new_option_text in new_options:
options.append(AssessmentFeedbackOption.objects.create(text=new_option_text))
# Add all options to the feedback model
# Note that we've already saved each of the AssessmentFeedbackOption models, so they have primary keys
# (required for adding to a many-to-many relationship)
self.options.add(*options)
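A hedged usage sketch of `add_options` (as the docstring notes, the model must be saved first; texts that already exist are reused rather than re-created):

feedback, _ = AssessmentFeedback.objects.get_or_create(
    submission_uuid='abc123'  # hypothetical submission
)
feedback.add_options([u'I liked my assessment'])
feedback.add_options([u'I liked my assessment', u'I disliked my assessment'])

# The duplicate text maps onto the existing option row, so only two
# options end up associated with this feedback (see the model tests below).
assert feedback.options.count() == 2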
class PeerWorkflow(models.Model):
......
......@@ -933,19 +933,17 @@ def _num_peers_graded(workflow):
def get_assessment_feedback(submission_uuid):
"""Retrieve a feedback object for an assessment whether it exists or not.
Gets or creates a new Assessment Feedback model for the given submission.
"""
Retrieve feedback on an assessment.
Args:
submission_uuid: The submission we want to create assessment feedback
for.
submission_uuid: The submission we want to retrieve assessment feedback for.
Returns:
The assessment feedback object that exists, or a newly created model.
Raises:
PeerAssessmentInternalError: Raised when the AssessmentFeedback cannot
be created or retrieved because of internal exceptions.
dict or None
Raises:
PeerAssessmentInternalError: Error occurred while retrieving the feedback.
"""
try:
feedback = AssessmentFeedback.objects.get(
......@@ -964,46 +962,52 @@ def get_assessment_feedback(submission_uuid):
def set_assessment_feedback(feedback_dict):
"""Set a feedback object for an assessment to have some new values.
"""
Set a feedback object for an assessment to have some new values.
Sets or updates the assessment feedback with the given values in the
dict.
Sets or updates the assessment feedback with the given values in the dict.
Args:
feedback_dict (dict): A dictionary of all the values to update or create
a new assessment feedback.
Returns:
The modified or created feedback.
None
Raises:
PeerAssessmentRequestError
PeerAssessmentInternalError
"""
submission_uuid = feedback_dict.get('submission_uuid')
if not submission_uuid:
feedback_text = feedback_dict.get('feedback_text')
selected_options = feedback_dict.get('options', list())
try:
# Get or create the assessment model for this submission
# If we receive an integrity error, assume that someone else is trying to create
# another feedback model for this submission, and raise an exception.
if submission_uuid:
feedback, created = AssessmentFeedback.objects.get_or_create(submission_uuid=submission_uuid)
else:
error_message = u"An error occurred creating assessment feedback: bad or missing submission_uuid."
logger.error(error_message)
raise PeerAssessmentRequestError(error_message)
try:
assessments = PeerWorkflowItem.get_scored_assessments(submission_uuid)
except DatabaseError:
error_message = (
u"An error occurred getting database state to set assessment feedback for {}."
.format(submission_uuid)
)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
feedback = AssessmentFeedbackSerializer(data=feedback_dict)
if not feedback.is_valid():
raise PeerAssessmentRequestError(feedback.errors)
try:
feedback_model = feedback.save()
# Assessments associated with feedback must be saved after the row is
committed to the database in order to associate the PKs across both
# tables.
feedback_model.assessments.add(*assessments)
# Update the feedback text
if feedback_text is not None:
feedback.feedback_text = feedback_text
# Save the feedback model. We need to do this before setting m2m relations.
if created or feedback_text is not None:
feedback.save()
# Associate the feedback with selected options
feedback.add_options(selected_options)
# Associate the feedback with scored assessments
assessments = PeerWorkflowItem.get_scored_assessments(submission_uuid)
feedback.assessments.add(*assessments)
except DatabaseError:
error_message = (
u"An error occurred saving assessment feedback for {}."
.format(submission_uuid)
)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
return feedback.data
msg = u"Error occurred while creating or updating feedback on assessment: {}".format(feedback_dict)
logger.exception(msg)
raise PeerAssessmentInternalError(msg)
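Taken together, a hedged sketch of the reworked API contract (assuming `peer_api` is imported as in the tests below): `set_assessment_feedback` now returns `None`, and callers re-read the serialized feedback afterwards:

from openassessment.assessment import peer_api  # import path assumed

peer_api.set_assessment_feedback({
    'submission_uuid': submission_uuid,  # placeholder variable
    'feedback_text': u'The feedback I received was helpful.',
    'options': [u'These assessments were useful.'],
})

# Returns the serialized feedback dict, or None if none exists yet.
feedback = peer_api.get_assessment_feedback(submission_uuid)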
......@@ -10,8 +10,10 @@ from django.core.cache import cache
from django.utils.translation import ugettext as _
from rest_framework import serializers
from openassessment.assessment.models import (
Assessment, AssessmentFeedback, AssessmentPart, Criterion, CriterionOption, Rubric,
PeerWorkflowItem, PeerWorkflow)
Assessment, AssessmentPart, Criterion, CriterionOption, Rubric,
AssessmentFeedback, AssessmentFeedbackOption,
PeerWorkflowItem, PeerWorkflow
)
logger = logging.getLogger(__name__)
......@@ -299,15 +301,26 @@ def rubric_from_dict(rubric_dict):
return rubric
class AssessmentFeedbackOptionSerializer(serializers.ModelSerializer):
"""
Serialize an `AssessmentFeedbackOption` model.
"""
class Meta:
model = AssessmentFeedbackOption
fields = ('text',)
class AssessmentFeedbackSerializer(serializers.ModelSerializer):
submission_uuid = serializers.CharField(source='submission_uuid')
helpfulness = serializers.IntegerField(source='helpfulness')
feedback = serializers.CharField(source='feedback')
"""
Serialize feedback in response to an assessment.
"""
assessments = AssessmentSerializer(many=True, default=None, required=False)
options = AssessmentFeedbackOptionSerializer(many=True, default=None, required=False)
class Meta:
model = AssessmentFeedback
fields = ('submission_uuid', 'helpfulness', 'feedback', 'assessments',)
fields = ('submission_uuid', 'feedback_text', 'assessments', 'options')
class PeerWorkflowSerializer(serializers.ModelSerializer):
......
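For reference, a hedged sketch of what the reworked serializer emits (mirrored by the serializer tests further down):

feedback = AssessmentFeedback.objects.create(
    submission_uuid='abc123', feedback_text='Test feedback'
)
feedback.add_options([u'I liked my assessment'])

serialized = AssessmentFeedbackSerializer(feedback).data
# => {'submission_uuid': 'abc123',
#     'feedback_text': 'Test feedback',
#     'options': [{'text': u'I liked my assessment'}],
#     'assessments': []}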
# -*- coding: utf-8 -*-
"""
Tests for assessment models.
"""
from django.test import TestCase
from openassessment.assessment.models import (
Rubric, Criterion, CriterionOption, InvalidOptionSelection
Rubric, Criterion, CriterionOption, InvalidOptionSelection,
AssessmentFeedback, AssessmentFeedbackOption,
)
......@@ -103,3 +105,96 @@ class TestRubricOptionIds(TestCase):
"test criterion 2": "test option 2",
"test criterion 3": "test option 1",
})
class AssessmentFeedbackTest(TestCase):
"""
Tests for assessment feedback.
This is feedback that students give in response to the peer assessments they receive.
"""
def setUp(self):
self.feedback = AssessmentFeedback.objects.create(
submission_uuid='test_submission',
feedback_text='test feedback',
)
def test_default_options(self):
self.assertEqual(self.feedback.options.count(), 0)
def test_add_options_all_new(self):
# We haven't created any feedback options yet, so these should be created.
self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
# Check the feedback options
options = self.feedback.options.all()
self.assertEqual(len(options), 2)
self.assertEqual(options[0].text, 'I liked my assessment')
self.assertEqual(options[1].text, 'I thought my assessment was unfair')
def test_add_options_some_new(self):
# Create one feedback option in the database
AssessmentFeedbackOption.objects.create(text='I liked my assessment')
# Add feedback options. The one that's new should be created.
self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
# Check the feedback options
options = self.feedback.options.all()
self.assertEqual(len(options), 2)
self.assertEqual(options[0].text, 'I liked my assessment')
self.assertEqual(options[1].text, 'I thought my assessment was unfair')
def test_add_options_empty(self):
# No options
self.feedback.add_options([])
self.assertEqual(len(self.feedback.options.all()), 0)
# Add an option
self.feedback.add_options(['test'])
self.assertEqual(len(self.feedback.options.all()), 1)
# Add an empty list of options
self.feedback.add_options([])
self.assertEqual(len(self.feedback.options.all()), 1)
def test_add_options_duplicates(self):
# Add some options, which will be created
self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
# Add some more options, one of which is a duplicate
self.feedback.add_options(['I liked my assessment', 'I disliked my assessment'])
# There should be three options
options = self.feedback.options.all()
self.assertEqual(len(options), 3)
self.assertEqual(options[0].text, 'I liked my assessment')
self.assertEqual(options[1].text, 'I thought my assessment was unfair')
self.assertEqual(options[2].text, 'I disliked my assessment')
# There should be only three options in the database
self.assertEqual(AssessmentFeedbackOption.objects.count(), 3)
def test_add_options_all_old(self):
# Add some options, which will be created
self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
# Add some more options, all of which are duplicates
self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
# There should be two options
options = self.feedback.options.all()
self.assertEqual(len(options), 2)
self.assertEqual(options[0].text, 'I liked my assessment')
self.assertEqual(options[1].text, 'I thought my assessment was unfair')
# There should be two options in the database
self.assertEqual(AssessmentFeedbackOption.objects.count(), 2)
def test_unicode(self):
# Create options with unicode
self.feedback.add_options([u'𝓘 𝓵𝓲𝓴𝓮𝓭 𝓶𝔂 𝓪𝓼𝓼𝓮𝓼𝓼𝓶𝓮𝓷𝓽', u'ノ イんougんイ ᄊリ ム丂丂乇丂丂ᄊ乇刀イ wム丂 u刀キムノ尺'])
# There should be two options in the database
self.assertEqual(AssessmentFeedbackOption.objects.count(), 2)
......@@ -509,18 +509,25 @@ class TestPeerApi(TestCase):
)
feedback = peer_api.get_assessment_feedback(tim_sub['uuid'])
self.assertIsNone(feedback)
feedback = peer_api.set_assessment_feedback(
peer_api.set_assessment_feedback(
{
'submission_uuid': tim_sub['uuid'],
'helpfulness': 0,
'feedback': 'Bob is a jerk!'
'feedback_text': 'Bob is a jerk!',
'options': [
'I disliked this assessment',
'I felt this assessment was unfair',
]
}
)
self.assertIsNotNone(feedback)
self.assertEquals(feedback["assessments"][0]["submission_uuid"], assessment["submission_uuid"])
saved_feedback = peer_api.get_assessment_feedback(tim_sub['uuid'])
self.assertEquals(feedback, saved_feedback)
self.assertIsNotNone(saved_feedback)
self.assertEquals(saved_feedback['submission_uuid'], assessment['submission_uuid'])
self.assertEquals(saved_feedback['feedback_text'], 'Bob is a jerk!')
self.assertItemsEqual(saved_feedback['options'], [
{'text': 'I disliked this assessment'},
{'text': 'I felt this assessment was unfair'},
])
self.assertEquals(saved_feedback["assessments"][0]["submission_uuid"], assessment["submission_uuid"])
def test_close_active_assessment(self):
buffy_answer, buffy = self._create_student_and_submission("Buffy", "Buffy's answer")
......@@ -576,8 +583,7 @@ class TestPeerApi(TestCase):
peer_api.set_assessment_feedback(
{
'submission_uuid': tim_answer['uuid'],
'helpfulness': 0,
'feedback': 'Boo',
'feedback_text': 'Boo',
}
)
......
......@@ -4,9 +4,10 @@ import os.path
from ddt import ddt, file_data
from django.test import TestCase
from openassessment.assessment.models import Criterion, CriterionOption, Rubric
from openassessment.assessment.models import Criterion, CriterionOption, Rubric, AssessmentFeedback
from openassessment.assessment.serializers import (
InvalidRubric, RubricSerializer, rubric_from_dict
InvalidRubric, RubricSerializer, rubric_from_dict,
AssessmentFeedbackSerializer
)
def json_data(filename):
......@@ -82,3 +83,36 @@ class TestCriterionOptionDeserialization(TestCase):
]
}
)
class TestAssessmentFeedbackSerializer(TestCase):
def test_serialize(self):
feedback = AssessmentFeedback.objects.create(
submission_uuid='abc123', feedback_text='Test feedback'
)
feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
serialized = AssessmentFeedbackSerializer(feedback).data
self.assertItemsEqual(serialized, {
'submission_uuid': 'abc123',
'feedback_text': 'Test feedback',
'options': [
{'text': 'I liked my assessment'},
{'text': 'I thought my assessment was unfair'},
],
'assessments': [],
})
def test_empty_options(self):
feedback = AssessmentFeedback.objects.create(
submission_uuid='abc123', feedback_text='Test feedback'
)
serialized = AssessmentFeedbackSerializer(feedback).data
self.assertItemsEqual(serialized, {
'submission_uuid': 'abc123',
'feedback_text': 'Test feedback',
'options': [],
'assessments': [],
})
......@@ -134,7 +134,6 @@
{% endwith %}
{% endfor %}
</ul>
</li>
</ol>
</article>
......@@ -147,23 +146,50 @@
<p>Course staff will be able to see any feedback that you provide here when they review course records.</p>
</div>
<div class="submission__feedback__elements">
<ol class="list list--fields submission__feeedback__fields">
<li class="field field--select feedback__overall" id="feedback__overall">
<label for="feedback__overall__value">Overall how do you consider your peers’ assessments of your response?</label>
<select id="feedback__overall__value">
<option value="This assessment was useful">This assessment was useful</option>
<option value="This assessment was not useful">This assessment was not useful</option>
<option value="I disagree with this assessment">I disagree with this assessment</option>
<option value="This assessment was inappropriate">This assessment was inappropriate</option>
</select>
<ol class="list list--fields submission__feedback__fields">
<li class="field field--radio feedback__overall" id="feedback__overall">
<h4>Please select the statements below that reflect what you think of this peer grading experience:</h4>
<ol class="list--options">
<li class="option option--useful">
<input type="checkbox"
name="feedback__overall__value"
id="feedback__overall__value--useful"
class="feedback__overall__value"
value="These assessments were useful." />
<label for="feedback__overall__value--useful">These assessments were useful.</label>
</li>
<li class="option option--notuseful">
<input type="checkbox"
name="feedback__overall__value"
id="feedback__overall__value--notuseful"
class="feedback__overall__value"
value="These assessments were not useful." />
<label for="feedback__overall__value--notuseful">These assessments were not useful.</label>
</li>
<li class="option option--disagree">
<input type="checkbox"
name="feedback__overall__value"
id="feedback__overall__value--disagree"
class="feedback__overall__value"
value="I disagree with the ways that my peers assessed me." />
<label for="feedback__overall__value--notuseful">I disagree with the ways that my peers assessed me.</label>
</li>
<li class="option option--inappropriate">
<input type="checkbox"
name="feedback__overall__value"
id="feedback__overall__value--inappropriate"
class="feedback__overall__value"
value="I received some inappropriate comments." />
<label for="feedback__overall__value--notuseful">I received some inappropriate comments.</label>
</li>
</ol>
</li>
<li class="field field--textarea feedback__remarks" id="feedback__remarks">
<label for="feedback__remarks__value">Provide any thoughts or comments on the feedback you received from your peers here.</label>
<label for="feedback__remarks__value">Please provide any thoughts or comments on the feedback you received from your peers here.</label>
<textarea id="feedback__remarks__value" placeholder="I feel the feedback I received was...">{{ feedback_text }}</textarea>
</li>
</ol>
</div>
<div class="submission__feeedback__actions">
<ul class="list list--actions submission__feeedback__actions">
<li class="list--actions__item">
......@@ -171,8 +197,6 @@
</li>
</ul>
</div>
</div>
</div>
</form>
</div>
</div>
......
......@@ -71,35 +71,30 @@ class GradeMixin(object):
return self.render_assessment(path, context)
@XBlock.json_handler
def feedback_submit(self, data, suffix=''):
"""Attach the Assessment Feedback text to some submission."""
assessment_feedback = data.get('feedback', '')
if not assessment_feedback:
return {
'success': False,
'msg': _(u"No feedback given, so none recorded")
}
def submit_feedback(self, data, suffix=''):
"""
Submit feedback on an assessment.
Args:
data (dict): Can provide keys 'feedback_text' (unicode) and 'feedback_options' (list of unicode).
Kwargs:
suffix (str): Unused
Returns:
Dict with keys 'success' (bool) and 'msg' (unicode)
"""
feedback_text = data.get('feedback_text', u'')
feedback_options = data.get('feedback_options', list())
try:
peer_api.set_assessment_feedback(
{
peer_api.set_assessment_feedback({
'submission_uuid': self.submission_uuid,
'feedback': assessment_feedback,
'helpfulness': 0
}
)
except (
peer_api.PeerAssessmentInternalError,
peer_api.PeerAssessmentRequestError
):
return {
'success': False,
'msg': _(
u"Assessment Feedback could not be saved due to an internal "
u"server error."
),
}
return {
'success': True,
'msg': _(u"Feedback saved!")
}
'feedback_text': feedback_text,
'options': feedback_options,
})
except (peer_api.PeerAssessmentInternalError, peer_api.PeerAssessmentRequestError):
return {'success': False, 'msg': _(u"Assessment feedback could not be saved.")}
else:
return {'success': True, 'msg': _(u"Feedback saved!")}
<div id='openassessment-base'>
<form id="submission__feeedback" class="submission__feeedback" method="post">
<h3 class="submission__feeedback__title">Give Feedback On Peer Evaluations</h3>
<div class="submission__feeedback__content">
<div class="submission__feeedback__instructions">
<p>Course staff will be able to see any feedback that you provide here when they review course records.</p>
</div>
<ol class="list list--fields submission__feedback__fields">
<li class="field field--radio feedback__overall" id="feedback__overall">
<h4>Please select the statements below that reflect what you think of this peer grading experience:</h4>
<ol class="list--options">
<li class="option option--useful">
<input type="checkbox"
name="feedback__overall__value"
id="feedback__overall__value--useful"
class="feedback__overall__value"
value="These assessments were useful." />
<label for="feedback__overall__value--useful">These assessments were useful.</label>
</li>
<li class="option option--notuseful">
<input type="checkbox"
name="feedback__overall__value"
id="feedback__overall__value--notuseful"
class="feedback__overall__value"
value="These assessments were not useful." />
<label for="feedback__overall__value--notuseful">These assessments were not useful.</label>
</li>
<li class="option option--disagree">
<input type="checkbox"
name="feedback__overall__value"
id="feedback__overall__value--disagree"
class="feedback__overall__value"
value="I disagree with the ways that my peers assessed me." />
<label for="feedback__overall__value--notuseful">I disagree with the ways that my peers assessed me.</label>
</li>
<li class="option option--inappropriate">
<input type="checkbox"
name="feedback__overall__value"
id="feedback__overall__value--inappropriate"
class="feedback__overall__value"
value="I received some inappropriate comments." />
<label for="feedback__overall__value--notuseful">I received some inappropriate comments.</label>
</li>
</ol>
</li>
<li class="field field--textarea feedback__remarks" id="feedback__remarks">
<label for="feedback__remarks__value">Please provide any thoughts or comments on the feedback you received from your peers here.</label>
<textarea id="feedback__remarks__value" placeholder="I feel the feedback I received was...">{{ feedback_text }}</textarea>
</li>
</ol>
</div>
</form>
</div>
[Minified static bundle (openassessment.min.js), old and new versions omitted: the regenerated bundle renames the grade-step handler `feedback_assess` to `submitFeedbackOnAssessment`, which collects the checked `.feedback__overall__value` checkboxes alongside the remarks text and posts them as `feedback_text`/`feedback_options` to the new `submit_feedback` handler via `OpenAssessment.Server.submitFeedbackOnAssessment`. The readable source diffs for these changes appear below.]
......@@ -35,6 +35,15 @@ describe("OpenAssessment.BaseUI", function() {
defer.resolveWith(this, [server.fragments[component]]);
}).promise();
};
this.submitFeedbackOnAssessment = function(text, options) {
// Store the args we receive so we can check them later
this.feedbackText = text;
this.feedbackOptions = options;
// Return a promise that always resolves successfully
return $.Deferred(function(defer) { defer.resolve(); }).promise();
};
};
// Stub runtime
......@@ -102,4 +111,31 @@ describe("OpenAssessment.BaseUI", function() {
expect(server.selfAssess).toHaveBeenCalled();
});
});
it("Sends feedback on a submission to the server", function() {
jasmine.getFixtures().fixturesPath = 'base/fixtures';
loadFixtures('grade_complete.html');
// Simulate user feedback
$('#feedback__remarks__value').val('I disliked the feedback I received.');
$('#feedback__overall__value--notuseful').attr('checked','checked');
$('#feedback__overall__value--disagree').attr('checked','checked');
// Create a new stub server
server = new StubServer();
// Create the object under test
var el = $("#openassessment-base").get(0);
ui = new OpenAssessment.BaseUI(runtime, el, server);
// Submit feedback on an assessment
ui.submitFeedbackOnAssessment();
// Expect that the feedback was retrieved from the DOM and sent to the server
expect(server.feedbackText).toEqual('I disliked the feedback I received.');
expect(server.feedbackOptions).toEqual([
'These assessments were not useful.',
'I disagree with the ways that my peers assessed me.'
]);
});
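A companion spec for the failure path is not part of this change; a hypothetical sketch of what one might look like, overriding the stub so the server call rejects and spying on the error handler that the UI code wires up:

    it("reports an error when feedback submission fails", function() {
        jasmine.getFixtures().fixturesPath = 'base/fixtures';
        loadFixtures('grade_complete.html');

        // Hypothetical: a stub server whose feedback call always rejects
        server = new StubServer();
        server.submitFeedbackOnAssessment = function(text, options) {
            return $.Deferred(function(defer) {
                defer.rejectWith(this, ["Test error"]);
            }).promise();
        };

        var el = $("#openassessment-base").get(0);
        ui = new OpenAssessment.BaseUI(runtime, el, server);
        spyOn(ui, 'toggleActionError');

        ui.submitFeedbackOnAssessment();

        // The rejection settles synchronously, so we can assert immediately
        expect(ui.toggleActionError).toHaveBeenCalledWith('feedback_assess', 'Test error');
    });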
});
......@@ -106,6 +106,26 @@ describe("OpenAssessment.Server", function() {
});
});
it("Sends feedback on an assessment to the XBlock", function() {
stubAjax(true, {success: true, msg: ''});
var success = false;
var options = ["Option 1", "Option 2"];
server.submitFeedbackOnAssessment("test feedback", options).done(function() {
success = true;
});
expect(success).toBe(true);
expect($.ajax).toHaveBeenCalledWith({
url: '/submit_feedback',
type: "POST",
data: JSON.stringify({
feedback_text: "test feedback",
feedback_options: options
})
});
});
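These Server specs rely on a stubAjax helper defined earlier in the suite and not shown in this diff. A minimal sketch of what it presumably does, assuming Jasmine 1.3-style spies:

    var stubAjax = function(success, responseData) {
        // Swap $.ajax for a spy that returns an already-settled promise,
        // so .done()/.fail() callbacks run synchronously in the specs
        spyOn($, 'ajax').andReturn(
            $.Deferred(function(defer) {
                if (success) { defer.resolveWith(this, [responseData]); }
                else { defer.reject(); }
            }).promise()
        );
    };

Because the returned promise is already settled, the specs can read success and receivedMsg immediately after attaching .done/.fail, with no async waiting.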
it("loads the XBlock's XML definition", function() {
stubAjax(true, { success: true, xml: "<openassessment />" });
......@@ -291,4 +311,26 @@ describe("OpenAssessment.Server", function() {
expect(receivedMsg).toEqual("Test error");
});
it("informs the caller of an AJAX error when sending feedback on submission", function() {
stubAjax(false, null);
var receivedMsg = null;
var options = ["Option 1", "Option 2"];
server.submitFeedbackOnAssessment("test feedback", options).fail(
function(errMsg) { receivedMsg = errMsg; }
);
expect(receivedMsg).toEqual("Could not contact server.");
});
it("informs the caller of a server error when sending feedback on submission", function() {
stubAjax(true, { success: false, msg: "Test error" });
var receivedMsg = null;
var options = ["Option 1", "Option 2"];
server.submitFeedbackOnAssessment("test feedback", options).fail(
function(errMsg) { receivedMsg = errMsg; }
);
expect(receivedMsg).toEqual("Test error");
});
});
......@@ -261,7 +261,7 @@ OpenAssessment.BaseUI.prototype = {
// Install a click handler for assessment feedback
sel.find('#feedback__submit').click(function(eventObject) {
eventObject.preventDefault();
ui.feedback_assess();
ui.submitFeedbackOnAssessment();
});
}
).fail(function(errMsg) {
......@@ -275,11 +275,11 @@ OpenAssessment.BaseUI.prototype = {
**/
save: function() {
// Retrieve the student's response from the DOM
var submission = $('#submission__answer__value', this.element).val();
var ui = this;
this.setSaveStatus('Saving...');
this.toggleActionError('save', null);
this.server.save(submission).done(function() {
var submission = $('#submission__answer__value', ui.element).val();
ui.setSaveStatus('Saving...');
ui.toggleActionError('save', null);
ui.server.save(submission).done(function() {
ui.setSaveStatus("Saved but not submitted");
}).fail(function(errMsg) {
ui.setSaveStatus('Error');
......@@ -302,10 +302,10 @@ OpenAssessment.BaseUI.prototype = {
**/
submit: function() {
// Send the submission to the server
var submission = $('#submission__answer__value', this.element).val();
var ui = this;
this.toggleActionError('response', null);
this.server.submit(submission).done(
var submission = $('#submission__answer__value', ui.element).val();
ui.toggleActionError('response', null);
ui.server.submit(submission).done(
// When we have successfully sent the submission, expand the next step
function(studentId, attemptNum) {
ui.renderSubmissionStep();
......@@ -319,14 +319,19 @@ OpenAssessment.BaseUI.prototype = {
/**
Send assessment feedback to the server and update the UI.
**/
feedback_assess: function() {
submitFeedbackOnAssessment: function() {
// Send the feedback to the server
var feedback = $('#feedback__remarks__value', this.element).val();
var ui = this;
this.server.feedback_submit(feedback).done(
var text = $('#feedback__remarks__value', ui.element).val();
var options = $.map(
$('.feedback__overall__value:checked', ui.element),
function(element, index) { return $(element).val(); }
);
ui.server.submitFeedbackOnAssessment(text, options).done(function() {
// When the feedback has been sent successfully, the textarea should no longer be editable
console.log("Feedback to the assessments submitted, thanks!")
).fail(function(errMsg) {
// TODO
console.log("Feedback to the assessments submitted, thanks!");
}).fail(function(errMsg) {
// TODO: display to the user
ui.toggleActionError('feedback_assess', errMsg);
});
......
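The TODO above is worth dwelling on: in the bundled toggleActionError (see the minified source earlier in this diff), only the "peer" and "self" types map to a message container, so a 'feedback_assess' error currently ends up in console.log. The bundled version also computes a msgHtml variable it never uses. A hypothetical extension that would surface the error in the UI; the .feedback__actions selector is an assumption, not an element that exists in the current templates:

    toggleActionError: function(type, msg) {
        var container = null;
        if (type === "peer") { container = ".peer-assessment__actions"; }
        else if (type === "self") { container = ".self-assessment__actions"; }
        else if (type === "feedback_assess") { container = ".feedback__actions"; }  // hypothetical selector

        if (container === null) {
            // No container for this error type; fall back to logging
            if (msg !== null) { console.log(msg); }
        } else {
            // Use the computed value (the bundled code builds msgHtml but interpolates msg)
            var msgHtml = (msg === null) ? "" : msg;
            $(container + " .message__content").html("<p>" + msgHtml + "</p>");
            $(container).toggleClass("has--error", msg !== null);
        }
    }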
......@@ -164,39 +164,38 @@ OpenAssessment.Server.prototype = {
/**
* Send feedback on assessments to the XBlock.
* Args:
* feedback: The feedback given on a series of assessments associated
* with this current submission.
* text (string): Written feedback from the student.
*     options (list of strings): Zero or more options the student selected.
*
* Returns:
*     A jQuery promise, which resolves with no args if successful and
* fails with an error message otherwise.
*
* Example:
* server.feedback_submit("I dislike my reviews.").done(
*     server.submitFeedbackOnAssessment(
* "Good feedback!", ["I liked the feedback I received"]
* ).done(function() {
* console.log("Success!");
* ).fail(function(errMsg) {
* }).fail(function(errMsg) {
* console.log("Error: " + errMsg);
* });
*/
feedback_submit: function(feedback) {
var url = this.url('feedback_submit');
submitFeedbackOnAssessment: function(text, options) {
var url = this.url('submit_feedback');
var payload = JSON.stringify({
feedback: feedback
'feedback_text': text,
'feedback_options': options
});
return $.Deferred(function(defer) {
$.ajax({ type: "POST", url: url, data: payload }).done(
function(data) {
if (data.success) {
defer.resolve();
}
else {
defer.rejectWith(this, [data.msg]);
}
if (data.success) { defer.resolve(); }
else { defer.rejectWith(this, [data.msg]); }
}
).fail(function(data) {
defer.rejectWith(this, ['Could not contact server.']);
});
}).promise()
}).promise();
},
/**
......
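The Deferred-wrapping boilerplate in submitFeedbackOnAssessment recurs almost verbatim in save, submit, peerAssess, selfAssess, updateXml, and checkReleased. A hypothetical refactor, not part of this change, that would centralize the pattern:

    // Hypothetical shared helper: POST a payload to a handler and adapt the
    // XBlock's {success, msg, ...} envelope into a jQuery promise. Callers
    // that need fields from the response (e.g. data.xml, data.is_released)
    // would read them off the resolved data object.
    post: function(handler, payload) {
        var url = this.url(handler);
        return $.Deferred(function(defer) {
            $.ajax({ type: "POST", url: url, data: payload }).done(
                function(data) {
                    if (data.success) { defer.resolveWith(this, [data]); }
                    else { defer.rejectWith(this, [data.msg]); }
                }
            ).fail(function(data) {
                defer.rejectWith(this, ["Could not contact server."]);
            });
        }).promise();
    },

    // submitFeedbackOnAssessment would then shrink to roughly:
    submitFeedbackOnAssessment: function(text, options) {
        return this.post('submit_feedback', JSON.stringify({
            feedback_text: text, feedback_options: options
        }));
    }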
......@@ -107,8 +107,8 @@
<assessment name="peer-assessment"
start="2014-03-11T10:00-18:10"
due="2014-12-21T22:22-7:00"
must_grade="3"
must_be_graded_by="3" />
must_grade="1"
must_be_graded_by="1" />
<assessment name="self-assessment" />
</assessments>
</openassessment>
......@@ -120,7 +120,7 @@ class SubmissionMixin(object):
student_sub_dict = {'text': student_sub}
submission = api.create_submission(student_item_dict, student_sub_dict)
workflow = workflow_api.create_workflow(submission["uuid"])
workflow_api.create_workflow(submission["uuid"])
self.submission_uuid = submission["uuid"]
return submission
......
......@@ -4,11 +4,18 @@ Tests for grade handlers in Open Assessment XBlock.
"""
import copy
import json
from submissions import api as sub_api
from openassessment.workflow import api as workflow_api
from openassessment.assessment import peer_api, self_api
from .base import XBlockHandlerTestCase, scenario
class TestGrade(XBlockHandlerTestCase):
"""
View-level tests for the XBlock grade handlers.
"""
PEERS = ['McNulty', 'Moreland']
ASSESSMENTS = [
{
......@@ -25,19 +32,107 @@ class TestGrade(XBlockHandlerTestCase):
@scenario('data/grade_scenario.xml', user_id='Greggs')
def test_render_grade(self, xblock):
# Submit, assess, and render the grade view
self._create_submission_and_assessments(
xblock, self.SUBMISSION, self.PEERS, self.ASSESSMENTS, self.ASSESSMENTS[0]
)
resp = self.request(xblock, 'render_grade', json.dumps(dict()))
# Verify that feedback from each scorer appears in the view
self.assertIn(u'єאςєɭɭєภՇ ฬ๏гк!', resp.decode('utf-8'))
self.assertIn(u'Good job!', resp.decode('utf-8'))
@scenario('data/grade_scenario.xml', user_id='Greggs')
def test_submit_feedback(self, xblock):
# Create submissions and assessments
self._create_submission_and_assessments(
xblock, self.SUBMISSION, self.PEERS, self.ASSESSMENTS, self.ASSESSMENTS[0]
)
# Submit feedback on the assessments
payload = json.dumps({
'feedback_text': u'I disliked my assessment',
'feedback_options': [u'Option 1', u'Option 2'],
})
resp = self.request(xblock, 'submit_feedback', payload, response_format='json')
self.assertTrue(resp['success'])
# Verify that the feedback was created in the database
feedback = peer_api.get_assessment_feedback(xblock.submission_uuid)
self.assertIsNot(feedback, None)
self.assertEqual(feedback['feedback_text'], u'I disliked my assessment')
self.assertItemsEqual(
feedback['options'], [{'text': u'Option 1'}, {'text': u'Option 2'}]
)
@scenario('data/grade_scenario.xml', user_id='Bob')
def test_submit_feedback_no_options(self, xblock):
# Create submissions and assessments
self._create_submission_and_assessments(
xblock, self.SUBMISSION, self.PEERS, self.ASSESSMENTS, self.ASSESSMENTS[0]
)
# Submit feedback on the assessments with no options specified
payload = json.dumps({
'feedback_text': u'I disliked my assessment',
'feedback_options': [],
})
resp = self.request(xblock, 'submit_feedback', payload, response_format='json')
self.assertTrue(resp['success'])
# Verify that the feedback was created in the database
feedback = peer_api.get_assessment_feedback(xblock.submission_uuid)
self.assertIsNot(feedback, None)
self.assertItemsEqual(feedback['options'], [])
@scenario('data/grade_scenario.xml', user_id='Bob')
def test_submit_feedback_invalid_options(self, xblock):
# Create submissions and assessments
self._create_submission_and_assessments(
xblock, self.SUBMISSION, self.PEERS, self.ASSESSMENTS, self.ASSESSMENTS[0]
)
# Options should be a list, not a string
payload = json.dumps({
'feedback_text': u'I disliked my assessment',
'feedback_options': u'should be a list!',
})
resp = self.request(xblock, 'submit_feedback', payload, response_format='json')
self.assertFalse(resp['success'])
self.assertGreater(len(resp['msg']), 0)
def _create_submission_and_assessments(self, xblock, submission_text, peers, peer_assessments, self_assessment):
"""
Create a submission and peer/self assessments, so that the user can receive a grade.
Args:
xblock (OpenAssessmentBlock): The XBlock, loaded for the user who needs a grade.
submission_text (unicode): Text of the submission from the user.
peers (list of unicode): List of user IDs of peers who will assess the user.
peer_assessments (list of dict): List of assessment dictionaries for peer assessments.
self_assessment (dict): Dict of assessment for self-assessment.
Returns:
None
"""
# Create a submission from the user
student_item = xblock.get_student_item_dict()
submission = xblock.create_submission(student_item, self.SUBMISSION)
xblock.get_workflow_info()
student_id = student_item['student_id']
submission = xblock.create_submission(student_item, submission_text)
# Create submissions and assessments from other users
scorer_submissions = []
for scorer_name, assessment in zip(['McNulty', 'Freamon'], self.ASSESSMENTS):
# Create a submission for each scorer
for scorer_name, assessment in zip(peers, peer_assessments):
# Create a submission for each scorer for the same problem
scorer = copy.deepcopy(student_item)
scorer['student_id'] = scorer_name
scorer_sub = xblock.create_submission(scorer, self.SUBMISSION)
xblock.get_workflow_info()
submission = peer_api.get_submission_to_assess(scorer, 2)
scorer_sub = sub_api.create_submission(scorer, {'text': submission_text})
workflow_api.create_workflow(scorer_sub['uuid'])
submission = peer_api.get_submission_to_assess(scorer, len(peers))
# Store the scorer's submission so our user can assess it later
scorer_submissions.append(scorer_sub)
......@@ -47,28 +142,15 @@ class TestGrade(XBlockHandlerTestCase):
assessment, {'criteria': xblock.rubric_criteria}
)
# Since xblock.create_submission sets the xblock's submission_uuid,
# we need to set it back to the proper user for this test.
xblock.submission_uuid = submission["uuid"]
# Have our user make assessments (so she can get a score)
for _ in range(2):
new_submission = peer_api.get_submission_to_assess(student_item, 2)
for asmnt in peer_assessments:
new_submission = peer_api.get_submission_to_assess(student_item, len(peers))
peer_api.create_assessment(
new_submission['uuid'], 'Greggs',
self.ASSESSMENTS[0], {'criteria': xblock.rubric_criteria}
new_submission['uuid'], student_id, asmnt, {'criteria': xblock.rubric_criteria}
)
# Have the user submit a self-assessment (so she can get a score)
self_api.create_assessment(
submission['uuid'], 'Greggs',
self.ASSESSMENTS[0]['options_selected'],
submission['uuid'], student_id, self_assessment['options_selected'],
{'criteria': xblock.rubric_criteria}
)
# Render the view
resp = self.request(xblock, 'render_grade', json.dumps(dict()))
# Verify that feedback from each scorer appears in the view
self.assertIn(u'єאςєɭɭєภՇ ฬ๏гк!', resp.decode('utf-8'))
self.assertIn(u'Good job!', resp.decode('utf-8'))