Commit 7d44ba8c by Will Daly

Implement feedback per criterion (XML, backend, JavaScript, and templates)

Remove unused start/due date args in XML parsing.
Use the textarea maxlength attribute to limit submission/feedback text length instead of JavaScript checks.
Server truncates submissions/feedback that are too long instead of raising an exception.
Refactor peer step JS into its own source file.
Move JS namespace and gettext stub into a shared file.
Add scrollTo for turbo grade submission.
parent 570d587b
......@@ -86,11 +86,12 @@ class AssessmentAdmin(admin.ModelAdmin):
def parts_summary(self, assessment_obj):
return "<br/>".join(
html.escape(
u"{}/{} - {}: {}".format(
u"{}/{} - {}: {} - {}".format(
part.points_earned,
part.points_possible,
part.option.criterion.name,
part.option.name,
part.feedback,
)
)
for part in assessment_obj.parts.all()
......
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'AssessmentPart.feedback'
db.add_column('assessment_assessmentpart', 'feedback',
self.gf('django.db.models.fields.TextField')(default='', max_length=10000, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'AssessmentPart.feedback'
db.delete_column('assessment_assessmentpart', 'feedback')
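# With South installed (assumed for this codebase), the migration is applied
# with the usual management command, e.g.:
#
#     python manage.py migrate assessment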
models = {
'assessment.assessment': {
'Meta': {'ordering': "['-scored_at', '-id']", 'object_name': 'Assessment'},
'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"}),
'score_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'scored_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'scorer_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'assessment.assessmentfeedback': {
'Meta': {'object_name': 'AssessmentFeedback'},
'assessments': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.Assessment']"}),
'feedback_text': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.AssessmentFeedbackOption']"}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'assessment.assessmentfeedbackoption': {
'Meta': {'object_name': 'AssessmentFeedbackOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'assessment.assessmentpart': {
'Meta': {'object_name': 'AssessmentPart'},
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parts'", 'to': "orm['assessment.Assessment']"}),
'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['assessment.CriterionOption']"})
},
'assessment.criterion': {
'Meta': {'ordering': "['rubric', 'order_num']", 'object_name': 'Criterion'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'prompt': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'criteria'", 'to': "orm['assessment.Rubric']"})
},
'assessment.criterionoption': {
'Meta': {'ordering': "['criterion', 'order_num']", 'object_name': 'CriterionOption'},
'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['assessment.Criterion']"}),
'explanation': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'points': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'assessment.peerworkflow': {
'Meta': {'ordering': "['created_at', 'id']", 'object_name': 'PeerWorkflow'},
'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'grading_completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'assessment.peerworkflowitem': {
'Meta': {'ordering': "['started_at', 'id']", 'object_name': 'PeerWorkflowItem'},
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Assessment']", 'null': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded_by'", 'to': "orm['assessment.PeerWorkflow']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scored': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scorer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded'", 'to': "orm['assessment.PeerWorkflow']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'assessment.rubric': {
'Meta': {'object_name': 'Rubric'},
'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['assessment']
\ No newline at end of file
......@@ -398,10 +398,15 @@ class AssessmentPart(models.Model):
by this assessor for this `Criterion`. So basically, think of this class
as :class:`CriterionOption` + student state.
"""
MAX_FEEDBACK_SIZE = 1024 * 100
assessment = models.ForeignKey(Assessment, related_name='parts')
option = models.ForeignKey(CriterionOption, related_name="+")
# criterion = models.ForeignKey(Criterion) ?
option = models.ForeignKey(CriterionOption) # TODO: no reverse
# Free-form text feedback for the specific criterion
# Note that the `Assessment` model also has a feedback field,
# which is feedback on the submission as a whole.
feedback = models.TextField(default="", blank=True)
@property
def points_earned(self):
......@@ -412,13 +417,36 @@ class AssessmentPart(models.Model):
return self.option.criterion.points_possible
@classmethod
def add_to_assessment(cls, assessment, option_ids):
"""Creates AssessmentParts and adds them to `assessment`."""
def add_to_assessment(cls, assessment, option_ids, criterion_feedback=None):
"""
Creates AssessmentParts and adds them to `assessment`.
Args:
assessment (Assessment): The assessment model we're adding parts to.
option_ids (list of int): List of primary keys for options the user selected.
Kwargs:
criterion_feedback (dict): Dictionary mapping criterion names
to free-form text feedback on the criterion.
You don't need to include all the rubric criteria,
and keys that don't match any criterion will be ignored.
Returns:
None
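Example:
    >>> # `assessment` and `option_ids` are assumed to come from the peer API,
    >>> # as in `create_assessment` below.
    >>> AssessmentPart.add_to_assessment(
    >>>     assessment, option_ids,
    >>>     criterion_feedback={"clarity": "Very clear throughout."}
    >>> )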
"""
cls.objects.bulk_create([
cls(assessment=assessment, option_id=option_id)
for option_id in option_ids
])
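# Attach the (optional) per-criterion feedback to the parts we just created.
# Criterion names that don't match any part update nothing, so they are
# effectively ignored.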
if criterion_feedback is not None:
for criterion_name, feedback in criterion_feedback.iteritems():
feedback = feedback[0:cls.MAX_FEEDBACK_SIZE]
assessment.parts.filter(
option__criterion__name=criterion_name
).update(feedback=feedback)
class AssessmentFeedbackOption(models.Model):
"""
......@@ -447,7 +475,7 @@ class AssessmentFeedback(models.Model):
as well as zero or more feedback options
("Please select the statements below that reflect what you think of this peer grading experience")
"""
MAXSIZE = 1024*100 # 100KB
MAXSIZE = 1024 * 100 # 100KB
submission_uuid = models.CharField(max_length=128, unique=True, db_index=True)
assessments = models.ManyToManyField(Assessment, related_name='assessment_feedback', default=None)
......
......@@ -11,7 +11,6 @@ from django.utils import timezone
from django.utils.translation import ugettext as _
from django.db import DatabaseError
from dogapi import dog_stats_api
from django.db.models import Q
import random
from openassessment.assessment.models import (
......@@ -139,7 +138,9 @@ def get_score(submission_uuid, requirements):
def create_assessment(
scorer_submission_uuid,
scorer_id,
assessment_dict,
options_selected,
criterion_feedback,
overall_feedback,
rubric_dict,
num_required_grades,
scored_at=None):
......@@ -154,8 +155,13 @@ def create_assessment(
peer workflow of the grading student.
scorer_id (str): The user ID for the user giving this assessment. This
is required to create an assessment on a submission.
assessment_dict (dict): All related information for the assessment. An
assessment contains points_earned, points_possible, and feedback.
options_selected (dict): Dictionary mapping criterion names to the
option names the user selected for that criterion.
criterion_feedback (dict): Dictionary mapping criterion names to the
free-form text feedback the user gave for the criterion.
Since criterion feedback is optional, some criteria may not appear
in the dictionary.
overall_feedback (unicode): Free-form text feedback on the submission overall.
num_required_grades (int): The required number of assessments a
submission requires before it is completed. If this number of
assessments is reached, the grading_completed_at timestamp is set
......@@ -177,11 +183,10 @@ def create_assessment(
while creating a new assessment.
Examples:
>>> assessment_dict = dict(
>>> options_selected={"clarity": "Very clear", "precision": "Somewhat precise"},
>>> feedback="Your submission was thrilling.",
>>> )
>>> create_assessment("1", "Tim", assessment_dict, rubric_dict)
>>> options_selected = {"clarity": "Very clear", "precision": "Somewhat precise"}
>>> criterion_feedback = {"clarity": "I thought this essay was very clear."}
>>> feedback = "Your submission was thrilling."
>>> create_assessment("1", "Tim", options_selected, criterion_feedback, feedback, rubric_dict)
"""
try:
rubric = rubric_from_dict(rubric_dict)
......@@ -189,13 +194,12 @@ def create_assessment(
# Validate that the selected options matched the rubric
# and raise an error if this is not the case
try:
option_ids = rubric.options_ids(assessment_dict["options_selected"])
option_ids = rubric.options_ids(options_selected)
except InvalidOptionSelection as ex:
msg = _("Selected options do not match the rubric: {error}").format(error=ex.message)
raise PeerAssessmentRequestError(msg)
scorer_workflow = PeerWorkflow.objects.get(submission_uuid=scorer_submission_uuid)
feedback = assessment_dict.get('feedback', u'')
peer_workflow_item = _get_latest_open_workflow_item(scorer_workflow)
if peer_workflow_item is None:
......@@ -212,7 +216,7 @@ def create_assessment(
"scorer_id": scorer_id,
"submission_uuid": peer_submission_uuid,
"score_type": PEER_TYPE,
"feedback": feedback,
"feedback": overall_feedback[0:Assessment.MAXSIZE],
}
if scored_at is not None:
......@@ -228,7 +232,7 @@ def create_assessment(
# We do this to do a run around django-rest-framework serializer
# validation, which would otherwise require two DB queries per
# option to do validation. We already validated these options above.
AssessmentPart.add_to_assessment(assessment, option_ids)
AssessmentPart.add_to_assessment(assessment, option_ids, criterion_feedback=criterion_feedback)
# Close the active assessment
_close_active_assessment(scorer_workflow, peer_submission_uuid, assessment, num_required_grades)
......
......@@ -154,19 +154,12 @@ class AssessmentPartSerializer(serializers.ModelSerializer):
class Meta:
model = AssessmentPart
fields = ('option',) # TODO: Direct link to Criterion?
fields = ('option', 'feedback')
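# Serialize the new per-criterion feedback alongside the selected option
# so it is available to the grade view.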
class AssessmentSerializer(serializers.ModelSerializer):
"""Simplified serializer for :class:`Assessment` that's lighter on the DB."""
def validate_feedback(self, attrs, source):
"""Check that the feedback is within an acceptable size range."""
value = attrs[source]
if len(value) > Assessment.MAXSIZE:
raise serializers.ValidationError("Maximum feedback size exceeded.")
return attrs
class Meta:
model = Assessment
fields = (
......@@ -235,7 +228,8 @@ def full_assessment_dict(assessment, rubric_dict=None):
options_dict = criterion_dict["options"][part.option.order_num]
options_dict["criterion"] = criterion_dict
parts.append({
"option": options_dict
"option": options_dict,
"feedback": part.feedback
})
# Now manually build up the dynamically calculated values on the
......
{
"unicode_evaluation": {
"feedback": "这是中国",
"overall_feedback": "这是中国",
"criterion_feedback": {},
"options_selected": {
"secret": "yes",
"ⓢⓐⓕⓔ": "no",
......@@ -9,7 +10,8 @@
}
},
"basic_evaluation": {
"feedback": "Your submission was thrilling.",
"overall_feedback": "Your submission was thrilling.",
"criterion_feedback": {},
"options_selected": {
"secret": "yes",
"ⓢⓐⓕⓔ": "no",
......@@ -17,4 +19,4 @@
"singing": "no"
}
}
}
\ No newline at end of file
}
......@@ -93,14 +93,10 @@ class Command(BaseCommand):
peer_api.create_peer_workflow_item(scorer_submission_uuid, submission_uuid)
# Create the peer assessment
assessment = {
'options_selected': options_selected,
'feedback': " ".join(loremipsum.get_paragraphs(2))
}
peer_api.create_assessment(
scorer_submission_uuid,
scorer_id,
assessment,
options_selected, {}, " ".join(loremipsum.get_paragraphs(2)),
rubric,
self.NUM_PEER_ASSESSMENTS
)
......
......@@ -58,7 +58,7 @@
{% with peer_num=forloop.counter %}
{% for part in assessment.parts %}
{% if part.option.criterion.name == criterion.name %}
<li class="answer peer-assessment--{{ peer_num}}"
<li class="answer peer-assessment--{{ peer_num }}"
id="question--{{ criterion_num }}__answer-{{ peer_num }}">
<h5 class="answer__title">
<span class="answer__source">
......@@ -78,6 +78,7 @@
<i class="ico icon-info-sign"
title="{% blocktrans with name=part.option.name %}More information about {{ name }}{% endblocktrans %}"></i>
</span>
</span>
</span>
</h5>
......@@ -114,6 +115,30 @@
</li>
{% endif %}
{% endfor %}
{% if criterion.feedback %}
<li class="answer--feedback ui-toggle-visibility is--collapsed">
<h5 class="answer--feedback__title ui-toggle-visibility__control">
<i class="ico icon-caret-right"></i>
<span class="answer--feedback__title__copy">{% trans "Additional Comments" %} ({{ criterion.feedback|length }})</span>
</h5>
<ul class="answer--feedback__content ui-toggle-visibility__content">
{% for feedback in criterion.feedback %}
<li class="feedback feedback--{{ forloop.counter }}">
<h6 class="feedback__source">
{% trans "Peer" %} {{ forloop.counter }}
</h6>
<div class="feedback__value">
{{ feedback }}
</div>
</li>
{% endfor %}
</ul>
</li>
{% endif %}
</ul>
</li>
{% endwith %}
......@@ -221,7 +246,13 @@
</li>
<li class="field field--textarea feedback__remarks" id="feedback__remarks">
<label for="feedback__remarks__value">{% trans "Please provide any feedback on the grade or comments that you received from your peers." %}</label>
<textarea id="feedback__remarks__value" placeholder="{% trans "I feel the feedback I received was..." %}">{{ feedback_text }}</textarea>
<textarea
id="feedback__remarks__value"
placeholder="{% trans "I feel the feedback I received was..." %}"
maxlength="100000"
>
{{ feedback_text }}
</textarea>
</li>
</ol>
......
......@@ -94,10 +94,26 @@
</div>
<div class="wrapper--metadata">
<span class="answer__tip">{{ option.explanation }}</span>
<span class="answer__points">{{option.points}} <span class="answer__points__label">{% trans "points" %}</span></span>
<span class="answer__points">{{ option.points }} <span class="answer__points__label">{% trans "points" %}</span></span>
</div>
</li>
{% endfor %}
{% if criterion.feedback == 'optional' %}
<li class="answer--feedback">
<div class="wrapper--input">
<label for="assessment__rubric__question--{{ criterion.order_num }}__feedback" class="answer__label">{% trans "Comments" %}</label>
<textarea
id="assessment__rubric__question--{{ criterion.order_num }}__feedback"
class="answer__value"
value="{{ criterion.name }}"
name="{{ criterion.name }}"
maxlength="300"
>
</textarea>
</div>
</li>
{% endif %}
</ol>
</div>
</li>
......@@ -108,7 +124,12 @@
</label>
<div class="wrapper--input">
<textarea id="assessment__rubric__question--feedback__value" placeholder="{% trans "I noticed that this response..." %}"></textarea>
<textarea
id="assessment__rubric__question--feedback__value"
placeholder="{% trans "I noticed that this response..." %}"
maxlength="500"
>
</textarea>
</div>
</li>
</ol>
......
......@@ -80,7 +80,23 @@
</div>
</li>
{% endfor %}
</ol>
{% if criterion.feedback == 'optional' %}
<li class="answer--feedback">
<div class="wrapper--input">
<label for="assessment__rubric__question--{{ criterion.order_num }}__feedback" class="answer__label">{% trans "Comments" %}</label>
<textarea
id="assessment__rubric__question--{{ criterion.order_num }}__feedback"
class="answer__value"
value="{{ criterion.name }}"
name="{{ criterion.name }}"
maxlength="300"
>
</textarea>
</div>
</li>
{% endif %}
</ol>
</div>
</li>
{% endfor %}
......@@ -90,7 +106,12 @@
<span class="question__title__copy">{{ rubric_feedback_prompt }}</span>
</label>
<div class="wrapper--input">
<textarea id="assessment__rubric__question--feedback__value" placeholder="{% trans "I noticed that this response..." %}"></textarea>
<textarea
id="assessment__rubric__question--feedback__value"
placeholder="{% trans "I noticed that this response..." %}"
maxlength="500"
>
</textarea>
</div>
</li>
</ol>
......
......@@ -58,7 +58,13 @@
<ol class="list list--fields response__submission__content">
<li class="field field--textarea submission__answer" id="submission__answer">
<label class="sr" for="submission__answer__value">{% trans "Enter your response to the question." %}</label>
<textarea id="submission__answer__value" placeholder="">{{ saved_response }}</textarea>
<textarea
id="submission__answer__value"
placeholder=""
maxlength="100000"
>
{{ saved_response }}
</textarea>
<span class="tip">{% trans "You may continue to work on your response until you submit it." %}</span>
</li>
</ol>
......
......@@ -17,6 +17,7 @@ DEFAULT_RUBRIC_CRITERIA = [
'name': "Ideas",
'prompt': "Determine if there is a unifying theme or main idea.",
'order_num': 0,
'feedback': 'optional',
'options': [
{
'order_num': 0, 'points': 0, 'name': 'Poor',
......
......@@ -2,6 +2,7 @@
Grade step in the OpenAssessment XBlock.
"""
import copy
from collections import defaultdict
from django.utils.translation import ugettext as _
from xblock.core import XBlock
......@@ -88,7 +89,7 @@ class GradeMixin(object):
'student_submission': student_submission,
'peer_assessments': peer_assessments,
'self_assessment': self_assessment,
'rubric_criteria': copy.deepcopy(self.rubric_criteria),
'rubric_criteria': self._rubric_criteria_with_feedback(peer_assessments),
'has_submitted_feedback': has_submitted_feedback,
}
......@@ -161,3 +162,44 @@ class GradeMixin(object):
}
)
return {'success': True, 'msg': _(u"Feedback saved.")}
def _rubric_criteria_with_feedback(self, peer_assessments):
"""
Add per-criterion feedback from peer assessments to the rubric criteria.
Filters out empty feedback.
Args:
peer_assessments (list of dict): Serialized assessment models from the peer API.
Returns:
list of criterion dictionaries
Example:
[
{
'name': 'Test name',
'prompt': 'Test prompt',
'order_num': 2,
'options': [...]
'feedback': [
'Good job!',
'Excellent work!',
]
},
...
]
"""
criteria = copy.deepcopy(self.rubric_criteria)
criteria_feedback = defaultdict(list)
for assessment in peer_assessments:
for part in assessment['parts']:
if part['feedback']:
part_criterion_name = part['option']['criterion']['name']
criteria_feedback[part_criterion_name].append(part['feedback'])
for criterion in criteria:
criterion_name = criterion['name']
criterion['feedback'] = criteria_feedback[criterion_name]
return criteria
......@@ -45,54 +45,35 @@ class PeerAssessmentMixin(object):
"""
# Validate the request
if 'feedback' not in data:
return {'success': False, 'msg': _('Must provide feedback in the assessment')}
if 'options_selected' not in data:
return {'success': False, 'msg': _('Must provide options selected in the assessment')}
if 'overall_feedback' not in data:
return {'success': False, 'msg': _('Must provide overall feedback in the assessment')}
if 'criterion_feedback' not in data:
return {'success': False, 'msg': _('Must provide feedback for criteria in the assessment')}
assessment_ui_model = self.get_assessment_module('peer-assessment')
if assessment_ui_model:
rubric_dict = {
'criteria': self.rubric_criteria
}
assessment_dict = {
"feedback": data['feedback'],
"options_selected": data["options_selected"],
}
try:
# Create the assessment
assessment = peer_api.create_assessment(
self.submission_uuid,
self.get_student_item_dict()["student_id"],
assessment_dict,
data['options_selected'],
self._clean_criterion_feedback(data['criterion_feedback']),
data['overall_feedback'],
rubric_dict,
assessment_ui_model['must_be_graded_by']
)
# Emit analytics event...
self.runtime.publish(
self,
"openassessmentblock.peer_assess",
{
"feedback": assessment["feedback"],
"rubric": {
"content_hash": assessment["rubric"]["content_hash"],
},
"scorer_id": assessment["scorer_id"],
"score_type": assessment["score_type"],
"scored_at": assessment["scored_at"],
"submission_uuid": assessment["submission_uuid"],
"parts": [
{
"option": {
"name": part["option"]["name"],
"points": part["option"]["points"]
}
}
for part in assessment["parts"]
]
}
)
self._publish_peer_assessment_event(assessment)
except PeerAssessmentRequestError as ex:
return {'success': False, 'msg': ex.message}
except PeerAssessmentInternalError as ex:
......@@ -258,3 +239,58 @@ class PeerAssessmentMixin(object):
logger.exception(err)
return peer_submission
def _publish_peer_assessment_event(self, assessment):
"""
Emit an analytics event for the peer assessment.
Args:
assessment (dict): The serialized assessment model.
Returns:
None
"""
self.runtime.publish(
self,
"openassessmentblock.peer_assess",
{
"feedback": assessment["feedback"],
"rubric": {
"content_hash": assessment["rubric"]["content_hash"],
},
"scorer_id": assessment["scorer_id"],
"score_type": assessment["score_type"],
"scored_at": assessment["scored_at"],
"submission_uuid": assessment["submission_uuid"],
"parts": [
{
"option": {
"name": part["option"]["name"],
"points": part["option"]["points"],
},
"feedback": part["feedback"],
}
for part in assessment["parts"]
]
}
)
def _clean_criterion_feedback(self, criterion_feedback):
"""
Remove per-criterion feedback for criteria with feedback disabled
in the rubric.
Args:
criterion_feedback (dict): Mapping of criterion names to feedback text.
Returns:
dict
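Example:
    >>> # Assuming only "clarity" has feedback="optional" in the rubric
    >>> self._clean_criterion_feedback({
    >>>     "clarity": "Very clear",
    >>>     "form": "Nicely formatted"
    >>> })
    {"clarity": "Very clear"}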
"""
return {
criterion['name']: criterion_feedback[criterion['name']]
for criterion in self.rubric_criteria
if criterion['name'] in criterion_feedback
and criterion.get('feedback', 'disabled') == 'optional'
}
(Source diff for this file omitted: too large to display.)
......@@ -55,8 +55,76 @@
{
"template": "openassessmentblock/peer/oa_peer_assessment.html",
"context": {
"rubric_criteria": [],
"peer_submission": {}
"rubric_criteria": [
{
"name": "Criterion 1",
"prompt": "Prompt 1",
"order_num": 0,
"feedback": "optional",
"options": [
{
"order_num": 0,
"points": 0,
"name": "Poor"
},
{
"order_num": 1,
"points": 1,
"name": "Fair"
},
{
"order_num": 2,
"points": 2,
"name": "Good"
}
]
},
{
"name": "Criterion 2",
"prompt": "Prompt 2",
"order_num": 1,
"options": [
{
"order_num": 0,
"points": 0,
"name": "Poor"
},
{
"order_num": 1,
"points": 1,
"name": "Fair"
},
{
"order_num": 2,
"points": 2,
"name": "Good"
}
]
},
{
"name": "Criterion 3",
"prompt": "Prompt 3",
"order_num": 2,
"feedback": "optional",
"options": [
{
"order_num": 0,
"points": 0,
"name": "Poor"
},
{
"order_num": 1,
"points": 1,
"name": "Fair"
},
{
"order_num": 2,
"points": 2,
"name": "Good"
}
]
}
]
},
"output": "oa_peer_assessment.html"
},
......
......@@ -15,10 +15,6 @@ describe("OpenAssessment.BaseView", function() {
grade: readFixtures("oa_grade_complete.html")
};
this.peerAssess = function(optionsSelected, feedback) {
return $.Deferred(function(defer) { defer.resolve(); }).promise();
};
this.selfAssess = function(optionsSelected) {
return $.Deferred(function(defer) { defer.resolve(); }).promise();
};
......@@ -70,14 +66,6 @@ describe("OpenAssessment.BaseView", function() {
view = new OpenAssessment.BaseView(runtime, el, server);
});
it("Sends a peer assessment to the server", function() {
loadSubviews(function() {
spyOn(server, 'peerAssess').andCallThrough();
view.peerAssess();
expect(server.peerAssess).toHaveBeenCalled();
});
});
it("Sends a self assessment to the server", function() {
loadSubviews(function() {
spyOn(server, 'selfAssess').andCallThrough();
......
......@@ -20,10 +20,19 @@ describe("OpenAssessment.PeerView", function() {
return successPromise;
};
};
// Stub runtime
var runtime = {};
// Stub base view
var StubBaseView = function() {
this.showLoadError = function(msg) {};
this.toggleActionError = function(msg, step) {};
this.setUpCollapseExpand = function(sel) {};
this.renderSelfAssessmentStep = function() {};
this.scrollToTop = function() {};
this.gradeView = { load: function() {} };
};
// Stubs
var baseView = null;
var server = null;
// View under test
......@@ -37,12 +46,46 @@ describe("OpenAssessment.PeerView", function() {
// Create a new stub server
server = new StubServer();
// Create the stub base view
baseView = new StubBaseView();
// Create the object under test
var el = $("#openassessment").get(0);
view = new OpenAssessment.BaseView(runtime, el, server);
var el = $("#openassessment-base").get(0);
view = new OpenAssessment.PeerView(el, server, baseView);
view.installHandlers();
});
it("Sends a peer assessment to the server", function() {
spyOn(server, 'peerAssess').andCallThrough();
// Select options in the rubric
var optionsSelected = {};
optionsSelected['Criterion 1'] = 'Poor';
optionsSelected['Criterion 2'] = 'Fair';
optionsSelected['Criterion 3'] = 'Good';
view.optionsSelected(optionsSelected);
// Provide per-criterion feedback
var criterionFeedback = {};
criterionFeedback['Criterion 1'] = "You did a fair job";
criterionFeedback['Criterion 3'] = "You did a good job";
view.criterionFeedback(criterionFeedback);
// Provide overall feedback
var overallFeedback = "Good job!";
view.overallFeedback(overallFeedback);
// Submit the peer assessment
view.peerAssess();
// Expect that the peer assessment was sent to the server
// with the options and feedback we selected
expect(server.peerAssess).toHaveBeenCalledWith(
optionsSelected, criterionFeedback, overallFeedback
);
});
it("re-enables the peer assess button on error", function() {
it("Re-enables the peer assess button on error", function() {
// Simulate a server error
spyOn(server, 'peerAssess').andCallFake(function() {
expect(view.peerSubmitEnabled()).toBe(false);
......@@ -51,6 +94,7 @@ describe("OpenAssessment.PeerView", function() {
}).promise();
});
view.peerAssess();
// Expect the submit button to have been re-enabled
expect(view.peerSubmitEnabled()).toBe(true);
});
......
......@@ -27,13 +27,11 @@ describe("OpenAssessment.ResponseView", function() {
// Stub base view
var StubBaseView = function() {
this.gradeView = {
load: function(){}
};
this.peerView = { load: function() {} };
this.gradeView = { load: function() {} };
this.showLoadError = function(msg) {};
this.toggleActionError = function(msg, step) {};
this.setUpCollapseExpand = function(sel) {};
this.renderPeerAssessmentStep = function() {};
};
// Stubs
......@@ -223,14 +221,14 @@ describe("OpenAssessment.ResponseView", function() {
}).promise();
});
spyOn(view, 'load');
spyOn(baseView, 'renderPeerAssessmentStep');
spyOn(baseView.peerView, 'load');
view.response('Test response');
view.submit();
// Expect the current and next step to have been reloaded
expect(view.load).toHaveBeenCalled();
expect(baseView.renderPeerAssessmentStep).toHaveBeenCalled();
expect(baseView.peerView.load).toHaveBeenCalled();
});
it("enables the unsaved work warning when the user changes the response text", function() {
......
......@@ -30,13 +30,6 @@ describe("OpenAssessment.Server", function() {
);
};
var getHugeTestString = function() {
var testStringSize = server.maxInputSize + 1;
var testString = '';
for (i = 0; i < (testStringSize); i++) { testString += 'x'; }
return testString;
};
beforeEach(function() {
// Create the server
// Since the runtime is a stub implementation that ignores the element passed to it,
......@@ -97,9 +90,10 @@ describe("OpenAssessment.Server", function() {
var success = false;
var options = {clarity: "Very clear", precision: "Somewhat precise"};
server.peerAssess(options, "Excellent job!").done(function() {
success = true;
});
var criterionFeedback = {clarity: "This essay was very clear."};
server.peerAssess(options, criterionFeedback, "Excellent job!").done(
function() { success = true; }
);
expect(success).toBe(true);
expect($.ajax).toHaveBeenCalledWith({
......@@ -107,7 +101,8 @@ describe("OpenAssessment.Server", function() {
type: "POST",
data: JSON.stringify({
options_selected: options,
feedback: "Excellent job!"
criterion_feedback: criterionFeedback,
overall_feedback: "Excellent job!"
})
});
});
......@@ -197,20 +192,6 @@ describe("OpenAssessment.Server", function() {
expect(receivedErrorMsg).toContain("This response could not be submitted");
});
it("confirms that very long submissions fail with an error without ajax", function() {
var receivedErrorCode = "";
var receivedErrorMsg = "";
var testString = getHugeTestString();
server.submit(testString).fail(
function(errorCode, errorMsg) {
receivedErrorCode = errorCode;
receivedErrorMsg = errorMsg;
}
);
expect(receivedErrorCode).toEqual("submit");
expect(receivedErrorMsg).toContain("This response is too long");
});
it("informs the caller of an server error when sending a submission", function() {
stubAjax(true, [false, "ENODATA", "Error occurred!"]);
......@@ -227,15 +208,6 @@ describe("OpenAssessment.Server", function() {
expect(receivedErrorMsg).toEqual("Error occurred!");
});
it("confirms that very long saves fail with an error without ajax", function() {
var receivedErrorMsg = "";
var testString = getHugeTestString();
server.save(testString).fail(
function(errorMsg) { receivedErrorMsg = errorMsg; }
);
expect(receivedErrorMsg).toContain("This response is too long");
});
it("informs the caller of an AJAX error when saving a submission", function() {
stubAjax(false, null);
var receivedMsg = null;
......@@ -301,24 +273,12 @@ describe("OpenAssessment.Server", function() {
expect(receivedMsg).toEqual("Test error");
});
it("confirms that very long peer assessments fail with an error without ajax", function() {
var options = {clarity: "Very clear", precision: "Somewhat precise"};
var receivedErrorMsg = "";
var testString = getHugeTestString();
server.peerAssess(options, testString).fail(
function(errorMsg) {
receivedErrorMsg = errorMsg;
}
);
expect(receivedErrorMsg).toContain("The comments on this assessment are too long");
});
it("informs the caller of a server error when sending a peer assessment", function() {
stubAjax(true, {success:false, msg:'Test error!'});
var receivedMsg = null;
var options = {clarity: "Very clear", precision: "Somewhat precise"};
server.peerAssess(options, "Excellent job!").fail(function(msg) {
server.peerAssess(options, {}, "Excellent job!").fail(function(msg) {
receivedMsg = msg;
});
......@@ -330,7 +290,7 @@ describe("OpenAssessment.Server", function() {
var receivedMsg = null;
var options = {clarity: "Very clear", precision: "Somewhat precise"};
server.peerAssess(options, "Excellent job!").fail(function(msg) {
server.peerAssess(options, {}, "Excellent job!").fail(function(msg) {
receivedMsg = msg;
});
......@@ -360,18 +320,6 @@ describe("OpenAssessment.Server", function() {
expect(receivedMsg).toEqual("Test error");
});
it("confirms that very long assessment feedback fails with an error without ajax", function() {
var options = ["Option 1", "Option 2"];
var receivedErrorMsg = "";
var testString = getHugeTestString();
server.submitFeedbackOnAssessment(testString, options).fail(
function(errorMsg) {
receivedErrorMsg = errorMsg;
}
);
expect(receivedErrorMsg).toContain("This feedback is too long");
});
it("informs the caller of an AJAX error when sending feedback on submission", function() {
stubAjax(false, null);
......
/* JavaScript for student-facing views of Open Assessment XBlock */
/* Namespace for open assessment */
if (typeof OpenAssessment == "undefined" || !OpenAssessment) {
OpenAssessment = {};
}
// Stub gettext if the runtime doesn't provide it
if (typeof window.gettext === 'undefined') {
window.gettext = function(text) { return text; };
}
/**
Interface for student-facing views.
......@@ -29,6 +15,7 @@ OpenAssessment.BaseView = function(runtime, element, server) {
this.server = server;
this.responseView = new OpenAssessment.ResponseView(this.element, this.server, this);
this.peerView = new OpenAssessment.PeerView(this.element, this.server, this);
this.gradeView = new OpenAssessment.GradeView(this.element, this.server, this);
};
......@@ -75,7 +62,7 @@ OpenAssessment.BaseView.prototype = {
*/
load: function() {
this.responseView.load();
this.renderPeerAssessmentStep();
this.peerView.load();
this.renderSelfAssessmentStep();
this.gradeView.load();
......@@ -87,93 +74,6 @@ OpenAssessment.BaseView.prototype = {
},
/**
Render the peer-assessment step.
**/
renderPeerAssessmentStep: function() {
var view = this;
this.server.render('peer_assessment').done(
function(html) {
// Load the HTML
$('#openassessment__peer-assessment', view.element).replaceWith(html);
var sel = $('#openassessment__peer-assessment', view.element);
// Install a click handler for collapse/expand
view.setUpCollapseExpand(sel, $.proxy(view.renderContinuedPeerAssessmentStep, view));
// Install a change handler for rubric options to enable/disable the submit button
sel.find("#peer-assessment--001__assessment").change(
function() {
var numChecked = $('input[type=radio]:checked', this).length;
var numAvailable = $('.field--radio.assessment__rubric__question', this).length;
$("#peer-assessment--001__assessment__submit", view.element).toggleClass(
'is--disabled', numChecked != numAvailable
);
}
);
// Install a click handler for assessment
sel.find('#peer-assessment--001__assessment__submit').click(
function(eventObject) {
// Override default form submission
eventObject.preventDefault();
// Handle the click
view.peerAssess();
}
);
}
).fail(function(errMsg) {
view.showLoadError('peer-assessment');
});
},
/**
* Render the peer-assessment step for continued grading. Always renders as
* expanded, since this should be called for an explicit continuation of the
* peer grading process.
*/
renderContinuedPeerAssessmentStep: function() {
var view = this;
this.server.renderContinuedPeer().done(
function(html) {
// Load the HTML
$('#openassessment__peer-assessment', view.element).replaceWith(html);
var sel = $('#openassessment__peer-assessment', view.element);
// Install a click handler for collapse/expand
view.setUpCollapseExpand(sel);
// Install a click handler for assessment
sel.find('#peer-assessment--001__assessment__submit').click(
function(eventObject) {
// Override default form submission
eventObject.preventDefault();
// Handle the click
view.continuedPeerAssess();
}
);
// Install a change handler for rubric options to enable/disable the submit button
sel.find("#peer-assessment--001__assessment").change(
function() {
var numChecked = $('input[type=radio]:checked', this).length;
var numAvailable = $('.field--radio.assessment__rubric__question', this).length;
$("#peer-assessment--001__assessment__submit", view.element).toggleClass(
'is--disabled', numChecked != numAvailable
);
}
);
}
).fail(function(errMsg) {
view.showLoadError('peer-assessment');
});
},
/**
Render the self-assessment step.
**/
renderSelfAssessmentStep: function() {
......@@ -216,30 +116,6 @@ OpenAssessment.BaseView.prototype = {
},
/**
Enable/disable the peer assess button.
Check whether the peer assess button is enabled.
Args:
enabled (bool): If specified, set the state of the button.
Returns:
bool: Whether the button is enabled.
Examples:
>> view.peerSubmitEnabled(true); // enable the button
>> view.peerSubmitEnabled(); // check whether the button is enabled
>> true
**/
peerSubmitEnabled: function(enabled) {
var button = $('#peer-assessment--001__assessment__submit', this.element);
if (typeof enabled === 'undefined') {
return !button.hasClass('is--disabled');
} else {
button.toggleClass('is--disabled', !enabled)
}
},
/**
Enable/disable the self assess button.
Check whether the self assess button is enabled.
......@@ -259,68 +135,11 @@ OpenAssessment.BaseView.prototype = {
if (typeof enabled === 'undefined') {
return !button.hasClass('is--disabled');
} else {
button.toggleClass('is--disabled', !enabled)
button.toggleClass('is--disabled', !enabled);
}
},
/**
Send an assessment to the server and update the view.
**/
peerAssess: function() {
var view = this;
this.peerAssessRequest(function() {
view.renderPeerAssessmentStep();
view.renderSelfAssessmentStep();
view.gradeView.load();
view.scrollToTop();
});
},
/**
* Send an assessment to the server and update the view, with the assumption
* that we are continuing peer assessments beyond the required amount.
*/
continuedPeerAssess: function() {
var view = this;
view.peerAssessRequest(function() {
view.renderContinuedPeerAssessmentStep();
view.gradeView.load();
});
},
/**
* Common peer assessment request building, used for all types of peer
* assessments.
*
* Args:
* successFunction (function): The function called if the request is
* successful. This varies based on the type of request to submit
* a peer assessment.
*/
peerAssessRequest: function(successFunction) {
// Retrieve assessment info from the DOM
var optionsSelected = {};
$("#peer-assessment--001__assessment input[type=radio]:checked", this.element).each(
function(index, sel) {
optionsSelected[sel.name] = sel.value;
}
);
var feedback = $('#assessment__rubric__question--feedback__value', this.element).val();
// Send the assessment to the server
var view = this;
view.toggleActionError('peer', null);
view.peerSubmitEnabled(false);
this.server.peerAssess(optionsSelected, feedback).done(
successFunction
).fail(function(errMsg) {
view.toggleActionError('peer', errMsg);
view.peerSubmitEnabled(true);
});
},
/**
Send a self-assessment to the server and update the view.
**/
selfAssess: function() {
......@@ -339,7 +158,7 @@ OpenAssessment.BaseView.prototype = {
this.server.selfAssess(optionsSelected).done(
function() {
view.renderPeerAssessmentStep();
view.peerView.load();
view.renderSelfAssessmentStep();
view.gradeView.load();
view.scrollToTop();
......
/* JavaScript for Studio editing view of Open Assessment XBlock */
/* Namespace for open assessment */
if (typeof OpenAssessment == "undefined" || !OpenAssessment) {
OpenAssessment = {};
}
// Stub gettext if the runtime doesn't provide it
if (typeof window.gettext === 'undefined') {
window.gettext = function(text) { return text; };
}
/**
Interface for editing view in Studio.
The constructor initializes the DOM for editing.
......
/* JavaScript for grade view */
/* Namespace for open assessment */
if (typeof OpenAssessment == "undefined" || !OpenAssessment) {
OpenAssessment = {};
}
// Stub gettext if the runtime doesn't provide it
if (typeof window.gettext === 'undefined') {
window.gettext = function(text) { return text; };
}
/**
Interface for grade view.
......
/**
Interface for peer assessment view.
Args:
element (DOM element): The DOM element representing the XBlock.
server (OpenAssessment.Server): The interface to the XBlock server.
baseView (OpenAssessment.BaseView): Container view.
Returns:
OpenAssessment.PeerView
**/
OpenAssessment.PeerView = function(element, server, baseView) {
this.element = element;
this.server = server;
this.baseView = baseView;
};
OpenAssessment.PeerView.prototype = {
/**
Load the peer assessment view.
**/
load: function() {
var view = this;
this.server.render('peer_assessment').done(
function(html) {
// Load the HTML and install event handlers
$('#openassessment__peer-assessment', view.element).replaceWith(html);
view.installHandlers();
}
).fail(function(errMsg) {
view.baseView.showLoadError('peer-assessment');
});
},
/**
Load the continued grading version of the view.
This is a version of the peer grading step that a student
can use to continue assessing peers after they've completed
their peer assessment requirements.
**/
loadContinuedAssessment: function() {
var view = this;
this.server.renderContinuedPeer().done(
function(html) {
// Load the HTML and install event handlers
$('#openassessment__peer-assessment', view.element).replaceWith(html);
view.installHandlersForContinuedAssessment();
}
).fail(function(errMsg) {
view.baseView.showLoadError('peer-assessment');
});
},
/**
Install event handlers for the view.
**/
installHandlers: function() {
var sel = $('#openassessment__peer-assessment', this.element);
var view = this;
// Install a click handler for collapse/expand
this.baseView.setUpCollapseExpand(sel, $.proxy(view.loadContinuedAssessment, view));
// Install a change handler for rubric options to enable/disable the submit button
sel.find("#peer-assessment--001__assessment").change(
function() {
var numChecked = $('input[type=radio]:checked', this).length;
var numAvailable = $('.field--radio.assessment__rubric__question', this).length;
view.peerSubmitEnabled(numChecked == numAvailable);
}
);
// Install a click handler for assessment
sel.find('#peer-assessment--001__assessment__submit').click(
function(eventObject) {
// Override default form submission
eventObject.preventDefault();
// Handle the click
view.peerAssess();
}
);
},
/**
Install event handlers for the continued grading version of the view.
**/
installHandlersForContinuedAssessment: function() {
var sel = $('#openassessment__peer-assessment', this.element);
var view = this;
// Install a click handler for collapse/expand
this.baseView.setUpCollapseExpand(sel);
// Install a click handler for assessment
sel.find('#peer-assessment--001__assessment__submit').click(
function(eventObject) {
// Override default form submission
eventObject.preventDefault();
// Handle the click
view.continuedPeerAssess();
}
);
// Install a change handler for rubric options to enable/disable the submit button
sel.find("#peer-assessment--001__assessment").change(
function() {
var numChecked = $('input[type=radio]:checked', this).length;
var numAvailable = $('.field--radio.assessment__rubric__question', this).length;
view.peerSubmitEnabled(numChecked == numAvailable);
}
);
},
/**
Enable/disable the peer assess button.
Check whether the peer assess button is enabled.
Args:
enabled (bool): If specified, set the state of the button.
Returns:
bool: Whether the button is enabled.
Examples:
>> view.peerSubmitEnabled(true); // enable the button
>> view.peerSubmitEnabled(); // check whether the button is enabled
>> true
**/
peerSubmitEnabled: function(enabled) {
var button = $('#peer-assessment--001__assessment__submit', this.element);
if (typeof enabled === 'undefined') {
return !button.hasClass('is--disabled');
} else {
button.toggleClass('is--disabled', !enabled);
}
},
/**
Send an assessment to the server and update the view.
**/
peerAssess: function() {
var view = this;
var baseView = view.baseView;
this.peerAssessRequest(function() {
view.load();
baseView.renderSelfAssessmentStep();
baseView.gradeView.load();
baseView.scrollToTop();
});
},
/**
* Send an assessment to the server and update the view, with the assumption
* that we are continuing peer assessments beyond the required amount.
*/
continuedPeerAssess: function() {
var view = this;
var gradeView = this.baseView.gradeView;
var baseView = view.baseView;
view.peerAssessRequest(function() {
view.loadContinuedAssessment();
gradeView.load();
baseView.scrollToTop();
});
},
/**
Get or set overall feedback on the submission.
Args:
overallFeedback (string or undefined): The overall feedback text (optional).
Returns:
string or undefined
Example usage:
>>> view.overallFeedback('Good job!'); // Set the feedback text
>>> view.overallFeedback(); // Retrieve the feedback text
'Good job!'
**/
overallFeedback: function(overallFeedback) {
var selector = '#assessment__rubric__question--feedback__value';
if (typeof overallFeedback === 'undefined') {
return $(selector, this.element).val();
}
else {
$(selector, this.element).val(overallFeedback);
}
},
/**
Get or set per-criterion feedback.
Args:
criterionFeedback (object literal or undefined):
Map of criterion names to feedback strings.
Returns:
object literal or undefined
Example usage:
>>> view.criterionFeedback({'ideas': 'Good ideas'}); // Set per-criterion feedback
>>> view.criterionFeedback(); // Retrieve criterion feedback
{'ideas': 'Good ideas'}
**/
criterionFeedback: function(criterionFeedback) {
var selector = '#peer-assessment--001__assessment textarea.answer__value';
var feedback = {};
$(selector, this.element).each(
function(index, sel) {
if (typeof criterionFeedback !== 'undefined') {
$(sel).val(criterionFeedback[sel.name]);
feedback[sel.name] = criterionFeedback[sel.name];
}
else {
feedback[sel.name] = $(sel).val();
}
}
);
return feedback;
},
/**
Get or set the options selected in the rubric.
Args:
optionsSelected (object literal or undefined):
Map of criterion names to option values.
Returns:
object literal or undefined
Example usage:
>>> view.optionsSelected({'ideas': 'Good'}); // Set the criterion option
>>> view.optionsSelected(); // Retrieve the options selected
{'ideas': 'Good'}
**/
optionsSelected: function(optionsSelected) {
var selector = "#peer-assessment--001__assessment input[type=radio]";
if (typeof optionsSelected === 'undefined') {
var options = {};
$(selector + ":checked", this.element).each(
function(index, sel) {
options[sel.name] = sel.value;
}
);
return options;
}
else {
// Uncheck all the options
$(selector, this.element).prop('checked', false);
// Check the selected options
$(selector, this.element).each(function(index, sel) {
if (optionsSelected.hasOwnProperty(sel.name)) {
if (sel.value == optionsSelected[sel.name]) {
$(sel).prop('checked', true);
}
}
});
}
},
/**
Common peer assessment request building, used for all types of peer assessments.
Args:
successFunction (function): The function called if the request is
successful. This varies based on the type of request to submit
a peer assessment.
**/
peerAssessRequest: function(successFunction) {
var view = this;
view.baseView.toggleActionError('peer', null);
view.peerSubmitEnabled(false);
// Pull the assessment info from the DOM and send it to the server
this.server.peerAssess(
this.optionsSelected(),
this.criterionFeedback(),
this.overallFeedback()
).done(
successFunction
).fail(function(errMsg) {
view.baseView.toggleActionError('peer', errMsg);
view.peerSubmitEnabled(true);
});
},
};
/* JavaScript for response (submission) view */
/* Namespace for open assessment */
if (typeof OpenAssessment == "undefined" || !OpenAssessment) {
OpenAssessment = {};
}
// Stub gettext if the runtime doesn't provide it
if (typeof window.gettext === 'undefined') {
window.gettext = function(text) { return text; };
}
/**
Interface for response (submission) view.
......@@ -305,7 +291,7 @@ OpenAssessment.ResponseView.prototype = {
**/
moveToNextStep: function() {
this.load();
this.baseView.renderPeerAssessmentStep();
this.baseView.peerView.load();
this.baseView.gradeView.load();
// Disable the "unsaved changes" warning if the user
......
/* JavaScript interface for interacting with server-side OpenAssessment XBlock */
/* Namespace for open assessment */
if (typeof OpenAssessment == "undefined" || !OpenAssessment) {
OpenAssessment = {};
}
// Stub gettext if the runtime doesn't provide it
if (typeof window.gettext === 'undefined') {
window.gettext = function(text) { return text; };
}
/**
Interface for server-side XBlock handlers.
......@@ -43,11 +29,6 @@ OpenAssessment.Server.prototype = {
return this.runtime.handlerUrl(this.element, handler);
},
/*
* Get maximum size of input
*/
maxInputSize: 1024 * 64, /* 64KB should be enough for anybody, right? ;^P */
/**
Render the XBlock.
......@@ -123,12 +104,6 @@ OpenAssessment.Server.prototype = {
**/
submit: function(submission) {
var url = this.url('submit');
if (submission.length > this.maxInputSize) {
return $.Deferred(function(defer) {
var errorMsg = gettext("This response is too long. Please shorten the response and try to submit it again.");
defer.rejectWith(this, ["submit", errorMsg]);
}).promise();
}
return $.Deferred(function(defer) {
$.ajax({
type: "POST",
......@@ -164,12 +139,6 @@ OpenAssessment.Server.prototype = {
**/
save: function(submission) {
var url = this.url('save_submission');
if (submission.length > this.maxInputSize) {
return $.Deferred(function(defer) {
var errorMsg = gettext("This response is too long. Please shorten the response and try to save it again.");
defer.rejectWith(this, [errorMsg]);
}).promise();
}
return $.Deferred(function(defer) {
$.ajax({
type: "POST",
......@@ -205,12 +174,6 @@ OpenAssessment.Server.prototype = {
*/
submitFeedbackOnAssessment: function(text, options) {
var url = this.url('submit_feedback');
if (text.length > this.maxInputSize) {
return $.Deferred(function(defer) {
var errorMsg = gettext("This feedback is too long. Please shorten your feedback and try to submit it again.");
defer.rejectWith(this, [errorMsg]);
}).promise();
}
var payload = JSON.stringify({
'feedback_text': text,
'feedback_options': options
......@@ -232,7 +195,9 @@ OpenAssessment.Server.prototype = {
Args:
optionsSelected (object literal): Keys are criteria names,
values are the option text the user selected for the criterion.
feedback (string): Written feedback on the submission.
criterionFeedback (object literal): Written feedback on a particular criterion,
where keys are criteria names and values are the feedback strings.
overallFeedback (string): Written feedback on the submission as a whole.
Returns:
A JQuery promise, which resolves with no args if successful
......@@ -240,24 +205,20 @@ OpenAssessment.Server.prototype = {
Example:
var options = { clarity: "Very clear", precision: "Somewhat precise" };
var feedback = "Good job!";
server.peerAssess(options, feedback).done(
var criterionFeedback = { clarity: "The essay was very clear." };
var overallFeedback = "Good job!";
server.peerAssess(options, criterionFeedback, overallFeedback).done(
function() { console.log("Success!"); }
).fail(
function(errorMsg) { console.log(errorMsg); }
);
**/
peerAssess: function(optionsSelected, feedback) {
peerAssess: function(optionsSelected, criterionFeedback, overallFeedback) {
var url = this.url('peer_assess');
if (feedback.length > this.maxInputSize) {
return $.Deferred(function(defer) {
var errorMsg = gettext("The comments on this assessment are too long. Please shorten your comments and try to submit them again.");
defer.rejectWith(this, [errorMsg]);
}).promise();
}
var payload = JSON.stringify({
options_selected: optionsSelected,
feedback: feedback
criterion_feedback: criterionFeedback,
overall_feedback: overallFeedback
});
return $.Deferred(function(defer) {
$.ajax({ type: "POST", url: url, data: payload }).done(
......
/**
JavaScript shared between all open assessment modules.
WARNING: Don't add anything to this file until you're
absolutely sure there isn't a way to encapsulate it in
an object!
**/
/* Namespace for open assessment */
if (typeof OpenAssessment == "undefined" || !OpenAssessment) {
OpenAssessment = {};
}
// Stub gettext if the runtime doesn't provide it
if (typeof window.gettext === 'undefined') {
window.gettext = function(text) { return text; };
}
......@@ -183,8 +183,9 @@
%ui-rubric-answers {
margin-top: $baseline-v;
margin-bottom: $baseline-v;
margin-left: ($baseline-h/4);
.answer {
.answer, .answer--feedback {
@include row();
@extend %wipe-last-child;
margin-bottom: ($baseline-v/2);
......@@ -195,25 +196,6 @@
.wrapper--input {
margin-bottom: ($baseline-v/4);
@include media($bp-ds) {
@include span-columns(6 of 6);
}
@include media($bp-dm) {
@include span-columns(4 of 12);
margin-bottom: 0;
}
@include media($bp-dl) {
@include span-columns(4 of 12);
margin-bottom: 0;
}
@include media($bp-dx) {
@include span-columns(4 of 12);
margin-bottom: 0;
}
.answer__value, .answer__label {
display: inline-block;
vertical-align: middle;
......@@ -317,4 +299,44 @@
margin-left: ($baseline-v/4);
color: $copy-secondary-color;
}
// ELEM: criterion selects
.answer {
.wrapper--input {
@include media($bp-ds) {
@include span-columns(6 of 6);
}
@include media($bp-dm) {
@include span-columns(4 of 12);
margin-bottom: 0;
}
@include media($bp-dl) {
@include span-columns(4 of 12);
margin-bottom: 0;
}
@include media($bp-dx) {
@include span-columns(4 of 12);
margin-bottom: 0;
}
}
}
// ELEM: open text feedback for answer
.answer--feedback {
margin-top: ($baseline-v);
.answer__label {
margin-bottom: ($baseline-v/4);
}
.answer__value {
@extend %ui-content-longanswer;
min-height: ($baseline-v*5);
margin-right: 0;
}
}
}
......@@ -839,7 +839,7 @@
// individual question
.question {
margin-bottom: $baseline-v;
margin-bottom: ($baseline-v*1.5);
@extend %wipe-last-child;
}
......@@ -960,22 +960,61 @@
display: block;
color: $heading-primary-color;
}
}
// open feedback question
.question--feedback {
// criterion-based feedback
.answer--feedback {
// individual answers
.answer {
@include fill-parent();
}
@include media($bp-ds) {
@include span-columns(6 of 6);
}
.answer__value {
@extend %copy-2;
}
@include media($bp-dm) {
@include span-columns(12 of 12);
}
@include media($bp-dl) {
@include span-columns(12 of 12);
}
@include media($bp-dx) {
@include span-columns(12 of 12);
}
.answer--feedback__title {
@extend %action-2;
}
.answer--feedback__title__copy {
margin-left: ($baseline-h/4);
}
.answer--feedback__content {
margin-top: ($baseline-v);
}
.feedback {
@extend %no-list;
@extend %wipe-last-child;
margin-bottom: $baseline-v;
}
.feedback__source {
@extend %hd-5;
@extend %t-strong;
@extend %t-titlecase;
display: block;
color: $heading-secondary-color;
}
.feedback__value {
@extend %copy-3;
display: block;
}
}
// feedback form
// overall feedback form
.submission__feedback {
@extend %ui-subsection;
}
......
......@@ -9,7 +9,7 @@
Read for conciseness, clarity of thought, and form.
</prompt>
<criterion>
<criterion feedback='optional'>
<name>concise</name>
<prompt>How concise is it?</prompt>
<option points="0">
......@@ -75,7 +75,7 @@
</explanation>
</option>
</criterion>
<criterion>
<criterion feedback='optional'>
<name>form</name>
<prompt>Lastly, how is its form? Punctuation, grammar, and spelling all count.</prompt>
<option points="0">
......
<openassessment>
<title>Open Assessment Test</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
</prompt>
<rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion feedback="optional">
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt>
<option points="3">
<name>ﻉซƈﻉɭɭﻉกՇ</name>
<explanation>Extremely concise</explanation>
</option>
<option points="2">
<name>Ġööḋ</name>
<explanation>Concise</explanation>
</option>
<option points="1">
<name>ק๏๏г</name>
<explanation>Wordy</explanation>
</option>
</criterion>
<criterion feedback="optional">
<name>Form</name>
<prompt>How well-formed is it?</prompt>
<option points="3">
<name>Good</name>
<explanation>Good</explanation>
</option>
<option points="2">
<name>Fair</name>
<explanation>Fair</explanation>
</option>
<option points="1">
<name>Poor</name>
<explanation>Poor</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment" must_grade="2" must_be_graded_by="2" />
<assessment name="self-assessment" />
</assessments>
</openassessment>
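For orientation, a criterion that carries feedback="optional" in a scenario like the one above is expected to come back from the parser as a plain dict with a "feedback" key, defaulting to "disabled" when the attribute is absent. A minimal sketch of that shape, with field names taken from the test data below and values purely illustrative:
# Hypothetical example of one parsed criterion (values are illustrative only):
expected_criterion = {
    "order_num": 0,
    "name": "Test criterion",
    "prompt": "Test criterion prompt",
    "feedback": "optional",  # "disabled" when the XML attribute is omitted
    "options": [
        {"order_num": 0, "points": 0, "name": "No", "explanation": "No explanation"},
        {"order_num": 1, "points": 2, "name": "Yes", "explanation": "Yes explanation"},
    ],
}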
......@@ -639,5 +639,70 @@
"</rubric>",
"</openassessment>"
]
},
"criterion_feedback_optional": {
"title": "Foo",
"prompt": "Test prompt",
"rubric_feedback_prompt": "Test Feedback Prompt",
"start": null,
"due": null,
"submission_start": null,
"submission_due": null,
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"feedback": "optional",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
}
],
"assessments": [
{
"name": "peer-assessment",
"start": null,
"due": null,
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment",
"start": null,
"due": null
}
],
"expected_xml": [
"<openassessment>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion feedback=\"optional\">",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"<feedbackprompt>Test Feedback Prompt</feedbackprompt>",
"</rubric>",
"</openassessment>"
]
}
}
......@@ -29,6 +29,7 @@
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
......@@ -89,6 +90,7 @@
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
......@@ -143,6 +145,7 @@
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
......@@ -197,6 +200,7 @@
"order_num": 0,
"name": "𝓣𝓮𝓼𝓽 𝓬𝓻𝓲𝓽𝓮𝓻𝓲𝓸𝓷",
"prompt": "Ŧɇsŧ ȼɍɨŧɇɍɨøn ꝑɍømꝑŧ",
"feedback": "disabled",
"options": [
{
"order_num": 0,
......@@ -258,6 +262,7 @@
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
......@@ -277,6 +282,7 @@
"order_num": 1,
"name": "Second criterion",
"prompt": "Second criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
......@@ -327,6 +333,7 @@
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
......@@ -383,6 +390,7 @@
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
......@@ -439,6 +447,7 @@
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
......@@ -464,5 +473,82 @@
"must_be_graded_by": 3
}
]
},
"criterion_feedback_optional": {
"xml": [
"<openassessment>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"<criterion feedback=\"optional\">",
"<name>Second criterion</name>",
"<prompt>Second criterion prompt</prompt>",
"<option points=\"1\"><name>Maybe</name><explanation>Maybe explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
],
"title": "Foo",
"prompt": "Test prompt",
"start": "2000-01-01T00:00:00",
"due": "3000-01-01T00:00:00",
"submission_start": null,
"submission_due": null,
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
},
{
"order_num": 1,
"name": "Second criterion",
"prompt": "Second criterion prompt",
"feedback": "optional",
"options": [
{
"order_num": 0,
"points": 1,
"name": "Maybe",
"explanation": "Maybe explanation"
}
]
}
],
"assessments": [
{
"name": "peer-assessment",
"start": "2014-02-27T09:46:28",
"due": "2014-03-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
}
]
}
}
......@@ -297,5 +297,26 @@
"</rubric>",
"</openassessment>"
]
},
"invalid_criterion_feedback_value": {
"xml": [
"<openassessment>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" start=\"2014-04-01T00:00:00\" due=\"2014-06-01T00:00:00\" must_grade=\"2\" must_be_graded_by=\"1\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion feedback=\"invalid\">",
"<name>Test criterion</name>",
"<prompt>Test prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
]
}
}
......@@ -20,11 +20,18 @@ class TestGrade(XBlockHandlerTestCase):
ASSESSMENTS = [
{
'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
'feedback': u'єאςєɭɭєภՇ ฬ๏гк!',
'criterion_feedback': {
u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'Peer 1: ฝﻉɭɭ ɗѻกﻉ!'
},
'overall_feedback': u'єאςєɭɭєภՇ ฬ๏гк!',
},
{
'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
'feedback': u'Good job!',
'criterion_feedback': {
u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'Peer 2: ฝﻉɭɭ ɗѻกﻉ!',
u'Form': u'Peer 2: ƒαιя נσв'
},
'overall_feedback': u'Good job!',
},
]
......@@ -58,6 +65,37 @@ class TestGrade(XBlockHandlerTestCase):
self.assertIn('self', resp.lower())
self.assertIn('complete', resp.lower())
@scenario('data/feedback_per_criterion.xml', user_id='Bernard')
def test_render_grade_feedback_per_criterion(self, xblock):
# Submit, assess, and render the grade view
self._create_submission_and_assessments(
xblock, self.SUBMISSION, self.PEERS, self.ASSESSMENTS, self.ASSESSMENTS[0]
)
# Verify that the context for the grade complete page contains the feedback
_, context = xblock.render_grade_complete(xblock.get_workflow_info())
criteria = context['rubric_criteria']
self.assertEqual(criteria[0]['feedback'], [
u'Peer 2: ฝﻉɭɭ ɗѻกﻉ!',
u'Peer 1: ฝﻉɭɭ ɗѻกﻉ!',
])
self.assertEqual(criteria[1]['feedback'], [u'Peer 2: ƒαιя נσв'])
# The order of the peers in the per-criterion feedback needs
# to match the order of the peer assessments.
# We verify this by checking that the first peer assessment's
# criterion feedback matches the first feedback entry
# for each criterion.
assessments = context['peer_assessments']
first_peer_feedback = [part['feedback'] for part in assessments[0]['parts']]
self.assertItemsEqual(first_peer_feedback, [u'Peer 2: ฝﻉɭɭ ɗѻกﻉ!', u'Peer 2: ƒαιя נσв'])
# Integration test: verify that the context makes it to the rendered template
resp = self.request(xblock, 'render_grade', json.dumps(dict()))
self.assertIn(u'Peer 1: ฝﻉɭɭ ɗѻกﻉ!', resp.decode('utf-8'))
self.assertIn(u'Peer 2: ฝﻉɭɭ ɗѻกﻉ!', resp.decode('utf-8'))
self.assertIn(u'Peer 2: ƒαιя נσв', resp.decode('utf-8'))
@scenario('data/grade_scenario.xml', user_id='Omar')
def test_grade_waiting(self, xblock):
# Waiting to be assessed by a peer
......@@ -197,15 +235,23 @@ class TestGrade(XBlockHandlerTestCase):
if not waiting_for_peer:
peer_api.create_assessment(
scorer_sub['uuid'], scorer_name,
assessment, {'criteria': xblock.rubric_criteria},
assessment['options_selected'],
assessment['criterion_feedback'],
assessment['overall_feedback'],
{'criteria': xblock.rubric_criteria},
xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
)
# Have our user make assessments (so she can get a score)
for asmnt in peer_assessments:
new_submission = peer_api.get_submission_to_assess(submission['uuid'], len(peers))
peer_api.get_submission_to_assess(submission['uuid'], len(peers))
peer_api.create_assessment(
submission['uuid'], student_id, asmnt, {'criteria': xblock.rubric_criteria},
submission['uuid'],
student_id,
asmnt['options_selected'],
asmnt['criterion_feedback'],
asmnt['overall_feedback'],
{'criteria': xblock.rubric_criteria},
xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
)
......
......@@ -13,7 +13,6 @@ class SaveResponseTest(XBlockHandlerTestCase):
@scenario('data/save_scenario.xml', user_id="Daniels")
def test_default_saved_response_blank(self, xblock):
resp = self.request(xblock, 'render_submission', json.dumps({}))
self.assertIn('<textarea id="submission__answer__value" placeholder=""></textarea>', resp)
self.assertIn('response has not been saved', resp)
@ddt.file_data('data/save_responses.json')
......@@ -28,10 +27,7 @@ class SaveResponseTest(XBlockHandlerTestCase):
# Reload the submission UI
resp = self.request(xblock, 'render_submission', json.dumps({}))
expected_html = u'<textarea id="submission__answer__value" placeholder="">{submitted}</textarea>'.format(
submitted=submission_text
)
self.assertIn(expected_html, resp.decode('utf-8'))
self.assertIn(submission_text, resp.decode('utf-8'))
self.assertIn('saved but not submitted', resp.lower())
@scenario('data/save_scenario.xml', user_id="Valchek")
......
......@@ -41,6 +41,7 @@ class TestSerializeContent(TestCase):
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
......
......@@ -113,6 +113,11 @@ def _serialize_criteria(criteria_root, criteria_list):
criterion_prompt = etree.SubElement(criterion_el, 'prompt')
criterion_prompt.text = unicode(criterion.get('prompt', u''))
# Criterion feedback disabled or optional
# If disabled, do not set the attribute.
if criterion.get('feedback') == "optional":
criterion_el.set('feedback', 'optional')
# Criterion options
options_list = criterion.get('options', None)
if isinstance(options_list, list):
......@@ -261,6 +266,13 @@ def _parse_criteria_xml(criteria_root):
else:
raise UpdateFromXmlError(_('Every "criterion" element must contain a "prompt" element.'))
# Criterion feedback (disabled or optional)
criterion_feedback = criterion.get('feedback', 'disabled')
if criterion_feedback in ['optional', 'disabled']:
criterion_dict['feedback'] = criterion_feedback
else:
raise UpdateFromXmlError(_('Invalid value for "feedback" attribute: if specified, it must be set to "optional"'))
# Criterion options
criterion_dict['options'] = _parse_options_xml(criterion)
......@@ -308,14 +320,12 @@ def _parse_rubric_xml(rubric_root):
return rubric_dict
def _parse_assessments_xml(assessments_root, start, due):
def _parse_assessments_xml(assessments_root):
"""
Parse the <assessments> element in the OpenAssessment XBlock's content XML.
Args:
assessments_root (lxml.etree.Element): The root of the <assessments> node in the tree.
start (unicode): ISO-formatted date string representing the start time of the problem.
due (unicode): ISO-formatted date string representing the due date of the problem.
Returns:
list of assessment dicts
......@@ -513,7 +523,7 @@ def update_from_xml(oa_block, root, validator=DEFAULT_VALIDATOR):
if assessments_el is None:
raise UpdateFromXmlError(_('Every assessment must contain an "assessments" element.'))
else:
assessments = _parse_assessments_xml(assessments_el, oa_block.start, oa_block.due)
assessments = _parse_assessments_xml(assessments_el)
# Validate
success, msg = validator(rubric, {'due': submission_due}, assessments)
......
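Taken together, the serializer and parser changes above round-trip the new attribute: a missing feedback attribute is read as "disabled", only "optional" and "disabled" are accepted (anything else raises UpdateFromXmlError), and the attribute is written back out only when the value is "optional". A standalone sketch of that round trip, assuming lxml and a stand-in for the UpdateFromXmlError exception used above:
# Sketch only: mirrors the parse/serialize logic shown in the diff above.
from lxml import etree

class UpdateFromXmlError(Exception):
    """Stand-in for the error type raised by the real parser."""
    pass

def parse_criterion_feedback(criterion_el):
    # A missing attribute means per-criterion feedback is disabled.
    feedback = criterion_el.get('feedback', 'disabled')
    if feedback not in ('optional', 'disabled'):
        raise UpdateFromXmlError(
            'Invalid value for "feedback" attribute: if specified, it must be set to "optional"'
        )
    return feedback

def serialize_criterion_feedback(criterion_el, feedback):
    # Only write the attribute when per-criterion feedback is enabled.
    if feedback == 'optional':
        criterion_el.set('feedback', 'optional')

# Example round trip:
el = etree.fromstring('<criterion feedback="optional"><name>concise</name></criterion>')
assert parse_criterion_feedback(el) == 'optional'
out = etree.Element('criterion')
serialize_criterion_feedback(out, 'disabled')
assert out.get('feedback') is None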
......@@ -16,6 +16,7 @@ module.exports = function(config) {
files: [
'lib/jquery.min.js',
'lib/*.js',
'src/oa_shared.js',
'src/*.js',
'spec/*.js',
......
......@@ -30,4 +30,4 @@ if [[ -n "$DEBUG_JS" ]]; then
UGLIFY_EXTRA_ARGS="--beautify"
fi
node_modules/.bin/uglifyjs $STATIC_JS/src/*.js $UGLIFY_EXTRA_ARGS > "$STATIC_JS/openassessment.min.js"
node_modules/.bin/uglifyjs $STATIC_JS/src/oa_shared.js $STATIC_JS/src/*.js $UGLIFY_EXTRA_ARGS > "$STATIC_JS/openassessment.min.js"