Commit 45c53fba by gradyward

Merge pull request #532 from edx/will/grady/self-assessment-feedback

Will/grady/self assessment feedback
parents f2f55958 d6012733
......@@ -89,7 +89,15 @@ def get_score(submission_uuid, requirements):
}
def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, scored_at=None):
def create_assessment(
submission_uuid,
user_id,
options_selected,
criterion_feedback,
overall_feedback,
rubric_dict,
scored_at=None
):
"""
Create a self-assessment for a submission.
......@@ -97,6 +105,11 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s
submission_uuid (str): The unique identifier for the submission being assessed.
user_id (str): The ID of the user creating the assessment. This must match the ID of the user who made the submission.
options_selected (dict): Mapping of rubric criterion names to option values selected.
criterion_feedback (dict): Dictionary mapping criterion names to the
free-form text feedback the user gave for the criterion.
Since criterion feedback is optional, some criteria may not appear
in the dictionary.
overall_feedback (unicode): Free-form text feedback on the submission overall.
rubric_dict (dict): Serialized Rubric model.
Kwargs:
......@@ -143,15 +156,24 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s
rubric = rubric_from_dict(rubric_dict)
# Create the self assessment
assessment = Assessment.create(rubric, user_id, submission_uuid, SELF_TYPE, scored_at=scored_at)
AssessmentPart.create_from_option_names(assessment, options_selected)
assessment = Assessment.create(
rubric,
user_id,
submission_uuid,
SELF_TYPE,
scored_at=scored_at,
feedback=overall_feedback
)
# This will raise an `InvalidRubricSelection` if the selected options do not match the rubric.
AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)
_log_assessment(assessment, submission)
except InvalidRubric:
msg = "Invalid rubric definition"
except InvalidRubric as ex:
msg = "Invalid rubric definition: " + str(ex)
logger.warning(msg, exc_info=True)
raise SelfAssessmentRequestError(msg)
except InvalidRubricSelection:
msg = "Selected options do not match the rubric"
except InvalidRubricSelection as ex:
msg = "Selected options do not match the rubric: " + str(ex)
logger.warning(msg, exc_info=True)
raise SelfAssessmentRequestError(msg)
......
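For orientation, a minimal sketch of calling the expanded self-assessment API follows. It assumes a configured Django environment and an existing submission; the criterion names ("clarity", "accuracy"), option values, and feedback strings are illustrative, not part of the commit.

from openassessment.assessment.api.self import create_assessment
from openassessment.assessment.errors import SelfAssessmentRequestError

def self_assess_example(submission_uuid, user_id, rubric_dict):
    # The keyword arguments mirror the expanded signature shown above.
    try:
        return create_assessment(
            submission_uuid,
            user_id,
            options_selected={"clarity": "somewhat clear", "accuracy": "very accurate"},
            criterion_feedback={"clarity": "A few paragraphs wander."},  # optional per criterion
            overall_feedback=u"Solid response overall.",
            rubric_dict=rubric_dict,
        )
    except SelfAssessmentRequestError:
        # Raised when the selections or feedback do not satisfy the rubric.
        return None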
......@@ -243,10 +243,19 @@ class RubricIndex(object):
criterion.name: criterion
for criterion in criteria
}
self._option_index = {
(option.criterion.name, option.name): option
for option in options
}
# Find the set of all criteria that have options by iterating over the options
# and adding each option's associated criterion to the set.
criteria_with_options = set()
option_index = {}
for option in options:
option_index[(option.criterion.name, option.name)] = option
criteria_with_options.add(option.criterion)
# Any criterion not in that set is a zero-option criterion; save these for future reference.
self._criteria_without_options = set(self._criteria_index.values()) - criteria_with_options
self._option_index = option_index
# By convention, if multiple options in the same criterion have the
# same point value, we return the *first* option.
......@@ -379,10 +388,7 @@ class RubricIndex(object):
set of `Criterion`
"""
return set(
criterion for criterion in self._criteria_index.values()
if criterion.options.count() == 0
)
return self._criteria_without_options
class Assessment(models.Model):
......@@ -655,8 +661,8 @@ class AssessmentPart(models.Model):
}
# Validate that we have selections for all criteria
# This will raise an exception if we're missing any criteria
cls._check_has_all_criteria(rubric_index, set(selected.keys() + feedback.keys()))
# This will raise an exception if we're missing any selections/feedback required for criteria
cls._check_all_criteria_assessed(rubric_index, selected.keys(), feedback.keys())
# Retrieve the criteria/option/feedback for criteria that have options.
# Since we're using the rubric's index, we'll get an `InvalidRubricSelection` error
......@@ -773,3 +779,35 @@ class AssessmentPart(models.Model):
if len(missing_criteria) > 0:
msg = u"Missing selections for criteria: {missing}".format(missing=missing_criteria)
raise InvalidRubricSelection(msg)
@classmethod
def _check_all_criteria_assessed(cls, rubric_index, selected_criteria, criteria_feedback):
"""
Verify that every criterion in the rubric either has an option selected or,
if it has no options, has free-form feedback.
That is, for every criterion (X) in the rubric:
has-an-option-selected(X) OR (has-zero-options(X) AND has-criterion-feedback(X))
Args:
rubric_index (RubricIndex): The index of the rubric's data.
selected_criteria (list): list of criterion names that have an option selected
criteria_feedback (list): list of criterion names that have feedback on them
Returns:
None
Raises:
InvalidRubricSelection
"""
missing_option_selections = rubric_index.find_missing_criteria(selected_criteria)
zero_option_criteria = set([c.name for c in rubric_index.find_criteria_without_options()])
zero_option_criteria_missing_feedback = zero_option_criteria - set(criteria_feedback)
optioned_criteria_missing_selection = missing_option_selections - zero_option_criteria
missing_criteria = zero_option_criteria_missing_feedback | optioned_criteria_missing_selection
if len(missing_criteria) > 0:
msg = u"Missing selections for criteria: {missing}".format(missing=', '.join(missing_criteria))
raise InvalidRubricSelection(msg)
\ No newline at end of file
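To make the new validation predicate concrete, here is a self-contained sketch of the same set arithmetic used by _check_all_criteria_assessed; the criterion names and the find_unassessed helper are hypothetical and not part of the codebase.

def find_unassessed(all_criteria, zero_option_criteria, selected, feedback):
    # Criteria with options need a selection; zero-option criteria need feedback.
    missing_selections = set(all_criteria) - set(selected)
    zero_option_missing_feedback = set(zero_option_criteria) - set(feedback)
    optioned_missing_selection = missing_selections - set(zero_option_criteria)
    return zero_option_missing_feedback | optioned_missing_selection

# "quality" has options but no selection, so it is flagged;
# "feedback only" has no options but does have feedback, so it passes.
assert find_unassessed(
    all_criteria={"quality", "feedback only"},
    zero_option_criteria={"feedback only"},
    selected=set(),
    feedback={"feedback only"},
) == {"quality"}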
{
"No Option Selected, Has Options, No Feedback": {
"has_option_selected": false,
"has_zero_options": false,
"has_feedback": false,
"expected_error": true
},
"No Option Selected, Has Options, Has Feedback": {
"has_option_selected": false,
"has_zero_options": false,
"has_feedback": true,
"expected_error": true
},
"No Option Selected, No Options, No Feedback": {
"has_option_selected": false,
"has_zero_options": true,
"has_feedback": false,
"expected_error": true
},
"No Option Selected, No Options, Has Feedback": {
"has_option_selected": false,
"has_zero_options": true,
"has_feedback": true,
"expected_error": false
},
"Has Option Selected, Has Options, No Feedback": {
"has_option_selected": true,
"has_zero_options": false,
"has_feedback": false,
"expected_error": false
},
"Has Option Selected, No Options, Has Feedback": {
"has_option_selected": true,
"has_zero_options": true,
"has_feedback": true,
"expected_error": true
},
"Has Option Selected, No Options, No Feedback": {
"has_option_selected": true,
"has_zero_options": true,
"has_feedback": false,
"expected_error": true
},
"Has Option Selected, Has Options, Has Feedback": {
"has_option_selected": true,
"has_zero_options": false,
"has_feedback": true,
"expected_error": false
}
}
\ No newline at end of file
......@@ -2,13 +2,16 @@
"""
Tests for the assessment Django models.
"""
import copy
import copy
import ddt
from openassessment.test_utils import CacheResetTest
from openassessment.assessment.serializers import rubric_from_dict
from openassessment.assessment.models import Assessment, AssessmentPart, InvalidRubricSelection
from .constants import RUBRIC
from openassessment.assessment.api.self import create_assessment
from submissions.api import create_submission
from openassessment.assessment.errors import SelfAssessmentRequestError
@ddt.ddt
class AssessmentTest(CacheResetTest):
"""
Tests for the `Assessment` and `AssessmentPart` models.
......@@ -148,3 +151,65 @@ class AssessmentTest(CacheResetTest):
criterion['options'] = []
return rubric_from_dict(rubric_dict)
@ddt.file_data('data/models_check_criteria_assessed.json')
def test_check_all_criteria_assessed(self, data):
student_item = {
'student_id': u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
'item_id': 'test_item',
'course_id': 'test_course',
'item_type': 'test_type'
}
submission = create_submission(student_item, "Test answer")
rubric, options_selected, criterion_feedback = self._create_data_structures_with_criterion_properties(
has_option_selected=data['has_option_selected'],
has_zero_options=data['has_zero_options'],
has_feedback=data['has_feedback']
)
error = False
try:
create_assessment(
submission['uuid'], student_item['student_id'], options_selected,
criterion_feedback, "overall feedback", rubric
)
except SelfAssessmentRequestError:
error = True
self.assertEqual(data['expected_error'], error)
def _create_data_structures_with_criterion_properties(
self,
has_option_selected=True,
has_zero_options=True,
has_feedback=True
):
"""
Generates a dummy set of criterion definition structures that lets us specify
a particular combination of criterion attributes for a test case.
"""
options = []
if not has_zero_options:
options = [{
"name": "Okay",
"points": 1,
"description": "It was okay I guess."
}]
rubric = {
'criteria': [
{
"name": "Quality",
"prompt": "How 'good' was it?",
"options": options
}
]
}
options_selected = {}
if has_option_selected:
options_selected['Quality'] = 'Okay'
criterion_feedback = {}
if has_feedback:
criterion_feedback['Quality'] = "This was an assignment of average quality."
return rubric, options_selected, criterion_feedback
\ No newline at end of file
......@@ -51,6 +51,16 @@ class TestSelfApi(CacheResetTest):
"accuracy": "very accurate",
}
CRITERION_FEEDBACK = {
"clarity": "Like a morning in the restful city of San Fransisco, the piece was indescribable, beautiful, and too foggy to properly comprehend.",
"accuracy": "Like my sister's cutting comments about my weight, I may not have enjoyed the piece, but I cannot fault it for its factual nature."
}
OVERALL_FEEDBACK = (
u"Unfortunately, the nature of being is too complex to comment, judge, or discern any one"
u"arbitrary set of things over another."
)
def test_create_assessment(self):
# Initially, there should be no submission or self assessment
self.assertEqual(get_assessment("5"), None)
......@@ -66,7 +76,7 @@ class TestSelfApi(CacheResetTest):
# Create a self-assessment for the submission
assessment = create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
self.OPTIONS_SELECTED, self.RUBRIC,
self.OPTIONS_SELECTED, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
......@@ -82,7 +92,7 @@ class TestSelfApi(CacheResetTest):
self.assertEqual(assessment['submission_uuid'], submission['uuid'])
self.assertEqual(assessment['points_earned'], 8)
self.assertEqual(assessment['points_possible'], 10)
self.assertEqual(assessment['feedback'], u'')
self.assertEqual(assessment['feedback'], self.OVERALL_FEEDBACK)
self.assertEqual(assessment['score_type'], u'SE')
def test_create_assessment_no_submission(self):
......@@ -90,7 +100,7 @@ class TestSelfApi(CacheResetTest):
with self.assertRaises(SelfAssessmentRequestError):
create_assessment(
'invalid_submission_uuid', u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
self.OPTIONS_SELECTED, self.RUBRIC,
self.OPTIONS_SELECTED, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
......@@ -102,7 +112,22 @@ class TestSelfApi(CacheResetTest):
with self.assertRaises(SelfAssessmentRequestError):
create_assessment(
'invalid_submission_uuid', u'another user',
self.OPTIONS_SELECTED, self.RUBRIC,
self.OPTIONS_SELECTED, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
def test_create_assessment_invalid_criterion_feedback(self):
# Create a submission
submission = create_submission(self.STUDENT_ITEM, "Test answer")
# Provide criterion feedback whose keys do not match the rubric's criterion names.
criterion_feedback = {"clarify": "not", "accurate": "sure"}
# Attempt to create a self-assessment with criterion feedback that does not match the rubric
with self.assertRaises(SelfAssessmentRequestError):
create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
self.OPTIONS_SELECTED, criterion_feedback, self.OVERALL_FEEDBACK, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
......@@ -118,7 +143,7 @@ class TestSelfApi(CacheResetTest):
with self.assertRaises(SelfAssessmentRequestError):
create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
options, self.RUBRIC,
options, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
......@@ -134,7 +159,7 @@ class TestSelfApi(CacheResetTest):
with self.assertRaises(SelfAssessmentRequestError):
create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
options, self.RUBRIC,
options, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
......@@ -150,7 +175,7 @@ class TestSelfApi(CacheResetTest):
with self.assertRaises(SelfAssessmentRequestError):
create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
options, self.RUBRIC,
options, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
......@@ -165,7 +190,7 @@ class TestSelfApi(CacheResetTest):
# Do not override the scored_at timestamp, so it should be set to the current time
assessment = create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
self.OPTIONS_SELECTED, self.RUBRIC,
self.OPTIONS_SELECTED, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC,
)
# Retrieve the self-assessment
......@@ -183,14 +208,14 @@ class TestSelfApi(CacheResetTest):
# Self assess once
assessment = create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
self.OPTIONS_SELECTED, self.RUBRIC,
self.OPTIONS_SELECTED, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC,
)
# Attempt to self-assess again, which should raise an exception
with self.assertRaises(SelfAssessmentRequestError):
create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
self.OPTIONS_SELECTED, self.RUBRIC,
self.OPTIONS_SELECTED, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC,
)
# Expect that we still have the original assessment
......@@ -213,17 +238,20 @@ class TestSelfApi(CacheResetTest):
"options": []
})
criterion_feedback = copy.deepcopy(self.CRITERION_FEEDBACK)
criterion_feedback['feedback only'] = "This is the feedback for the Zero Option Criterion."
# Create a self-assessment for the submission
assessment = create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
self.OPTIONS_SELECTED, rubric,
self.OPTIONS_SELECTED, criterion_feedback, self.OVERALL_FEEDBACK, rubric,
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
# The self-assessment should have set the feedback for
# the criterion with no options to the provided criterion feedback
self.assertEqual(assessment["parts"][2]["option"], None)
self.assertEqual(assessment["parts"][2]["feedback"], u"")
self.assertEqual(assessment["parts"][2]["feedback"], u"This is the feedback for the Zero Option Criterion.")
def test_create_assessment_all_criteria_have_zero_options(self):
# Create a submission to self-assess
......@@ -237,14 +265,25 @@ class TestSelfApi(CacheResetTest):
# Create a self-assessment for the submission
# We don't select any options, since none of the criteria have options
options_selected = {}
# However, because they don't have options, they need to have criterion feedback.
criterion_feedback = {
'clarity': 'I thought it was about as accurate as Scrubs is to the medical profession.',
'accuracy': 'I thought it was about as accurate as Scrubs is to the medical profession.'
}
overall_feedback = ""
assessment = create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
options_selected, rubric,
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
options_selected, criterion_feedback, overall_feedback,
rubric, scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
# The self-assessment should have set the feedback for
# all criteria to the provided criterion feedback.
for part in assessment["parts"]:
self.assertEqual(part["option"], None)
self.assertEqual(part["feedback"], u"")
self.assertEqual(
part["feedback"], u'I thought it was about as accurate as Scrubs is to the medical profession.'
)
......@@ -107,7 +107,7 @@ class Command(BaseCommand):
print "-- Creating self assessment"
self_api.create_assessment(
submission_uuid, student_item['student_id'],
options_selected, rubric
options_selected, {}, " ".join(loremipsum.get_paragraphs(2)), rubric
)
@property
......
......@@ -146,27 +146,42 @@
{% endif %}
{% endfor %}
{% if criterion.feedback %}
{% if criterion.peer_feedback or criterion.self_feedback %}
<li class="answer--feedback ui-toggle-visibility {% if criterion.options %}is--collapsed{% endif %}">
{% if criterion.options %}
<h5 class="answer--feedback__title ui-toggle-visibility__control">
<i class="ico icon-caret-right"></i>
<span class="answer--feedback__title__copy">{% trans "Additional Comments" %} ({{ criterion.feedback|length }})</span>
{% if criterion.self_feedback %}
<span class="answer--feedback__title__copy">{% trans "Additional Comments" %} ({{ criterion.peer_feedback|length|add:'1' }})</span>
{% else %}
<span class="answer--feedback__title__copy">{% trans "Additional Comments" %} ({{ criterion.peer_feedback|length }})</span>
{% endif %}
</h5>
{% endif %}
<ul class="answer--feedback__content {% if criterion.options %}ui-toggle-visibility__content{% endif %}">
{% for feedback in criterion.feedback %}
{% for feedback in criterion.peer_feedback %}
<li class="feedback feedback--{{ forloop.counter }}">
<h6 class="feedback__source">
{% trans "Peer" %} {{ forloop.counter }}
</h6>
<div class="feedback__value">
{{ feedback }}
{{ feedback }}
</div>
</li>
{% endfor %}
{% if criterion.self_feedback %}
<li class="feedback feedback--{{ forloop.counter }}">
<h6 class="feedback__source">
{% trans "Your Assessment" %}
</h6>
<div class="feedback__value">
{{ criterion.self_feedback }}
</div>
</li>
{% endif %}
</ul>
</li>
{% endif %}
......@@ -175,7 +190,7 @@
</li>
{% endwith %}
{% endfor %}
{% if peer_assessments %}
{% if peer_assessments or self_assessment.feedback %}
<li class="question question--feedback ui-toggle-visibility">
<h4 class="question__title ui-toggle-visibility__control">
<i class="ico icon-caret-right"></i>
......@@ -204,6 +219,23 @@
{% endif %}
{% endwith %}
{% endfor %}
{% if self_assessment.feedback %}
<li class="answer self-evaluation--0" id="question--feedback__answer-0">
<h5 class="answer__title">
<span class="answer__source">
<span class="label sr">{% trans "Self assessment" %}: </span>
<span class="value">{% trans "Self assessment" %}</span>
</span>
</h5>
<div class="answer__value">
<h6 class="label sr">{% trans "Your assessment" %}: </h6>
<div class="value">
<p>{{ self_assessment.feedback }}</p>
</div>
</div>
</li>
{% endif %}
</ul>
</li>
{% endif %}
......
{% spaceless %}
{% load i18n %}
<fieldset class="assessment__fields">
<ol class="list list--fields assessment__rubric">
{% for criterion in rubric_criteria %}
<li
class="field field--radio is--required assessment__rubric__question ui-toggle-visibility {% if criterion.options %}has--options{% endif %}"
id="assessment__rubric__question--{{ criterion.order_num }}"
>
<h4 class="question__title ui-toggle-visibility__control">
<i class="ico icon-caret-right"></i>
<span class="ui-toggle-visibility__control__copy question__title__copy">{{ criterion.prompt }}</span>
<span class="label--required sr">* ({% trans "Required" %})</span>
</h4>
<div class="ui-toggle-visibility__content">
<ol class="question__answers">
{% for option in criterion.options %}
<li class="answer">
<div class="wrapper--input">
<input type="radio"
name="{{ criterion.name }}"
id="assessment__rubric__question--{{ criterion.order_num }}__{{ option.order_num }}"
class="answer__value"
value="{{ option.name }}" />
<label for="assessment__rubric__question--{{ criterion.order_num }}__{{ option.order_num }}"
class="answer__label"
>{{ option.name }}</label>
</div>
<div class="wrapper--metadata">
<span class="answer__tip">{{ option.explanation }}</span>
<span class="answer__points">{{ option.points }} <span class="answer__points__label">{% trans "points" %}</span></span>
</div>
</li>
{% endfor %}
{% if criterion.feedback == 'optional' or criterion.feedback == 'required' %}
<li class="answer--feedback">
<div class="wrapper--input">
<label for="assessment__rubric__question--{{ criterion.order_num }}__feedback" class="answer__label">{% trans "Comments" %}</label>
<textarea
id="assessment__rubric__question--{{ criterion.order_num }}__feedback"
class="answer__value"
value="{{ criterion.name }}"
name="{{ criterion.name }}"
maxlength="300"
{% if criterion.feedback == 'required' %}required{% endif %}
>
</textarea>
</div>
</li>
{% endif %}
</ol>
</div>
</li>
{% endfor %}
<li class="wrapper--input field field--textarea assessment__rubric__question assessment__rubric__question--feedback" id="assessment__rubric__question--feedback">
<label class="question__title" for="assessment__rubric__question--feedback__value">
<span class="question__title__copy">{{ rubric_feedback_prompt }}</span>
</label>
<div class="wrapper--input">
<textarea
id="assessment__rubric__question--feedback__value"
placeholder="{% trans "I noticed that this response..." %}"
maxlength="500"
>
</textarea>
</div>
</li>
</ol>
</fieldset>
{% endspaceless %}
\ No newline at end of file
......@@ -72,77 +72,7 @@
</div>
<form id="peer-assessment--001__assessment" class="peer-assessment__assessment" method="post">
<fieldset class="assessment__fields">
<ol class="list list--fields assessment__rubric">
{% for criterion in rubric_criteria %}
<li
class="field field--radio is--required assessment__rubric__question ui-toggle-visibility {% if criterion.options %}has--options{% endif %}"
id="assessment__rubric__question--{{ criterion.order_num }}"
>
<h4 class="question__title ui-toggle-visibility__control">
<i class="ico icon-caret-right"></i>
<span class="ui-toggle-visibility__control__copy question__title__copy">{{ criterion.prompt }}</span>
<span class="label--required sr">* ({% trans "Required" %})</span>
</h4>
<div class="ui-toggle-visibility__content">
<ol class="question__answers">
{% for option in criterion.options %}
<li class="answer">
<div class="wrapper--input">
<input type="radio"
name="{{ criterion.name }}"
id="assessment__rubric__question--{{ criterion.order_num }}__{{ option.order_num }}"
class="answer__value"
value="{{ option.name }}" />
<label for="assessment__rubric__question--{{ criterion.order_num }}__{{ option.order_num }}"
class="answer__label"
>{{ option.name }}</label>
</div>
<div class="wrapper--metadata">
<span class="answer__tip">{{ option.explanation }}</span>
<span class="answer__points">{{ option.points }} <span class="answer__points__label">{% trans "points" %}</span></span>
</div>
</li>
{% endfor %}
{% if criterion.feedback == 'optional' or criterion.feedback == 'required' %}
<li class="answer--feedback">
<div class="wrapper--input">
<label for="assessment__rubric__question--{{ criterion.order_num }}__feedback" class="answer__label">{% trans "Comments" %}</label>
<textarea
id="assessment__rubric__question--{{ criterion.order_num }}__feedback"
class="answer__value"
value="{{ criterion.name }}"
name="{{ criterion.name }}"
maxlength="300"
{% if criterion.feedback == 'required' %}required{% endif %}
>
</textarea>
</div>
</li>
{% endif %}
</ol>
</div>
</li>
{% endfor %}
<li class="wrapper--input field field--textarea assessment__rubric__question assessment__rubric__question--feedback" id="assessment__rubric__question--feedback">
<label class="question__title" for="assessment__rubric__question--feedback__value">
<span class="question__title__copy">{{ rubric_feedback_prompt }}</span>
</label>
<div class="wrapper--input">
<textarea
id="assessment__rubric__question--feedback__value"
placeholder="{% trans "I noticed that this response..." %}"
maxlength="500"
>
</textarea>
</div>
</li>
</ol>
</fieldset>
{% include "openassessmentblock/oa_rubric.html" %}
</form>
</article>
</li>
......
......@@ -59,46 +59,7 @@
</article>
<form id="self-assessment--001__assessment" class="self-assessment__assessment" method="post">
<fieldset class="assessment__fields">
<ol class="list list--fields assessment__rubric">
{% for criterion in rubric_criteria %}
{% if criterion.options %}
<li
class="field field--radio is--required assessment__rubric__question ui-toggle-visibility has--options"
id="assessment__rubric__question--{{ criterion.order_num }}"
>
<h4 class="question__title ui-toggle-visibility__control">
<i class="ico icon-caret-right"></i>
<span class="question__title__copy">{{ criterion.prompt }}</span>
<span class="label--required sr">* ({% trans "Required" %})</span>
</h4>
<div class="ui-toggle-visibility__content">
<ol class="question__answers">
{% for option in criterion.options %}
<li class="answer">
<div class="wrapper--input">
<input type="radio"
name="{{ criterion.name }}"
id="assessment__rubric__question--{{ criterion.order_num }}__{{ option.order_num }}"
class="answer__value"
value="{{ option.name }}" />
<label for="assessment__rubric__question--{{ criterion.order_num }}__{{ option.order_num }}"
class="answer__label">{{ option.name }}</label>
</div>
<div class="wrapper--metadata">
<span class="answer__tip">{{ option.explanation }}</span>
<span class="answer__points">{{option.points}} <span class="answer__points__label">{% trans "points" %}</span></span>
</div>
</li>
{% endfor %}
</ol>
</div>
</li>
{% endif %}
{% endfor %}
</ol>
</fieldset>
{% include "openassessmentblock/oa_rubric.html" %}
</form>
</div>
......
......@@ -73,3 +73,23 @@ def create_rubric_dict(prompt, criteria):
"prompt": prompt,
"criteria": criteria
}
def clean_criterion_feedback(rubric_criteria, criterion_feedback):
"""
Remove per-criterion feedback for criteria with feedback disabled
in the rubric.
Args:
rubric_criteria (list): The rubric criteria from the problem definition.
criterion_feedback (dict): Mapping of criterion names to feedback text.
Returns:
dict
"""
return {
criterion['name']: criterion_feedback[criterion['name']]
for criterion in rubric_criteria
if criterion['name'] in criterion_feedback
and criterion.get('feedback', 'disabled') in ['optional', 'required']
}
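Since clean_criterion_feedback is a pure function, its behavior is easy to illustrate; the criterion names below are made up, and the import path matches the module the mixins pull it from.

from openassessment.xblock.data_conversion import clean_criterion_feedback

rubric_criteria = [
    {"name": "clarity", "feedback": "optional"},
    {"name": "accuracy"},  # no 'feedback' key, so it defaults to 'disabled'
]
submitted = {"clarity": "Clear enough.", "accuracy": "Dropped because feedback is disabled."}

# Only feedback for criteria marked 'optional' or 'required' survives.
assert clean_criterion_feedback(rubric_criteria, submitted) == {"clarity": "Clear enough."}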
......@@ -127,7 +127,7 @@ class GradeMixin(object):
'peer_assessments': peer_assessments,
'self_assessment': self_assessment,
'example_based_assessment': example_based_assessment,
'rubric_criteria': self._rubric_criteria_with_feedback(peer_assessments),
'rubric_criteria': self._rubric_criteria_with_feedback(peer_assessments, self_assessment),
'has_submitted_feedback': has_submitted_feedback,
'allow_file_upload': self.allow_file_upload,
'file_url': self.get_download_url_from_submission(student_submission)
......@@ -218,13 +218,14 @@ class GradeMixin(object):
)
return {'success': True, 'msg': _(u"Feedback saved.")}
def _rubric_criteria_with_feedback(self, peer_assessments):
def _rubric_criteria_with_feedback(self, peer_assessments, self_assessment):
"""
Add per-criterion feedback from peer and self assessments to the rubric criteria.
Filters out empty feedback.
Args:
peer_assessments (list of dict): Serialized assessment models from the peer API.
self_assessment (dict): Serialized assessment model from the self API.
Returns:
list of criterion dictionaries
......@@ -245,16 +246,24 @@ class GradeMixin(object):
]
"""
criteria = copy.deepcopy(self.rubric_criteria)
criteria_feedback = defaultdict(list)
peer_criteria_feedback = defaultdict(list)
self_criteria_feedback = {}
for assessment in peer_assessments:
for part in assessment['parts']:
if part['feedback']:
part_criterion_name = part['criterion']['name']
criteria_feedback[part_criterion_name].append(part['feedback'])
peer_criteria_feedback[part_criterion_name].append(part['feedback'])
if self_assessment:
for part in self_assessment['parts']:
if part['feedback']:
part_criterion_name = part['criterion']['name']
self_criteria_feedback[part_criterion_name] = part['feedback']
for criterion in criteria:
criterion_name = criterion['name']
criterion['feedback'] = criteria_feedback[criterion_name]
criterion['peer_feedback'] = peer_criteria_feedback[criterion_name]
criterion['self_feedback'] = self_criteria_feedback.get(criterion_name)
return criteria
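For reference, a sketch of the per-criterion shape the grade template now consumes: peer feedback arrives as a list and self feedback as a single string (or None). The prompt and feedback strings here are illustrative, not part of the commit.

criterion = {
    "name": "clarity",
    "prompt": "How clear was the response?",
    "peer_feedback": ["Well organized.", "A bit long."],  # one entry per peer that left feedback
    "self_feedback": "I struggled to stay on topic.",     # None when no self feedback was given
}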
......@@ -9,10 +9,8 @@ from openassessment.assessment.errors import (
PeerAssessmentRequestError, PeerAssessmentInternalError, PeerAssessmentWorkflowError
)
from openassessment.workflow.errors import AssessmentWorkflowError
from openassessment.fileupload import api as file_upload_api
from openassessment.fileupload.api import FileUploadError
from .resolve_dates import DISTANT_FUTURE
from .data_conversion import create_rubric_dict, clean_criterion_feedback
logger = logging.getLogger(__name__)
......@@ -64,19 +62,15 @@ class PeerAssessmentMixin(object):
assessment_ui_model = self.get_assessment_module('peer-assessment')
if assessment_ui_model:
rubric_dict = {
'criteria': self.rubric_criteria
}
try:
# Create the assessment
assessment = peer_api.create_assessment(
self.submission_uuid,
self.get_student_item_dict()["student_id"],
data['options_selected'],
self._clean_criterion_feedback(data['criterion_feedback']),
clean_criterion_feedback(self.rubric_criteria, data['criterion_feedback']),
data['overall_feedback'],
rubric_dict,
create_rubric_dict(self.prompt, self.rubric_criteria),
assessment_ui_model['must_be_graded_by']
)
......@@ -268,22 +262,3 @@ class PeerAssessmentMixin(object):
logger.exception(err)
return peer_submission
def _clean_criterion_feedback(self, criterion_feedback):
"""
Remove per-criterion feedback for criteria with feedback disabled
in the rubric.
Args:
criterion_feedback (dict): Mapping of criterion names to feedback text.
Returns:
dict
"""
return {
criterion['name']: criterion_feedback[criterion['name']]
for criterion in self.rubric_criteria
if criterion['name'] in criterion_feedback
and criterion.get('feedback', 'disabled') in ['optional', 'required']
}
......@@ -8,6 +8,7 @@ from openassessment.assessment.api import self as self_api
from openassessment.workflow import api as workflow_api
from submissions import api as submission_api
from .resolve_dates import DISTANT_FUTURE
from .data_conversion import create_rubric_dict, clean_criterion_feedback
logger = logging.getLogger(__name__)
......@@ -112,6 +113,12 @@ class SelfAssessmentMixin(object):
if 'options_selected' not in data:
return {'success': False, 'msg': _(u"Missing options_selected key in request")}
if 'overall_feedback' not in data:
return {'success': False, 'msg': _('Must provide overall feedback in the assessment')}
if 'criterion_feedback' not in data:
return {'success': False, 'msg': _('Must provide feedback for criteria in the assessment')}
if self.submission_uuid is None:
return {'success': False, 'msg': _(u"You must submit a response before you can perform a self-assessment.")}
......@@ -120,7 +127,9 @@ class SelfAssessmentMixin(object):
self.submission_uuid,
self.get_student_item_dict()['student_id'],
data['options_selected'],
{"criteria": self.rubric_criteria}
clean_criterion_feedback(self.rubric_criteria, data['criterion_feedback']),
data['overall_feedback'],
create_rubric_dict(self.prompt, self.rubric_criteria)
)
self.publish_assessment_event("openassessmentblock.self_assess", assessment)
......
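For completeness, a sketch of the JSON body the self_assess handler now expects, mirroring what OpenAssessment.Server.selfAssess posts; the criterion names and feedback text are illustrative.

import json

payload = json.dumps({
    "options_selected": {"clarity": "somewhat clear", "accuracy": "very accurate"},
    "criterion_feedback": {"clarity": "A few paragraphs wander."},
    "overall_feedback": "Solid response overall.",
})
# The handler rejects requests missing 'options_selected', 'criterion_feedback',
# or 'overall_feedback'.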
......@@ -77,7 +77,7 @@ describe("OpenAssessment.PeerView", function() {
// Provide overall feedback
var overallFeedback = "Good job!";
view.overallFeedback(overallFeedback);
view.rubric.overallFeedback(overallFeedback);
// Submit the peer assessment
view.peerAssess();
......
......@@ -56,8 +56,28 @@ describe("OpenAssessment.SelfView", function() {
it("Sends a self assessment to the server", function() {
spyOn(server, 'selfAssess').andCallThrough();
// Select options in the rubric
var optionsSelected = {};
optionsSelected['Criterion 1'] = 'Poor';
optionsSelected['Criterion 2'] = 'Fair';
optionsSelected['Criterion 3'] = 'Good';
view.rubric.optionsSelected(optionsSelected);
// Provide per-criterion feedback
var criterionFeedback = {};
criterionFeedback['Criterion 1'] = "You did a fair job";
criterionFeedback['Criterion 3'] = "You did a good job";
view.rubric.criterionFeedback(criterionFeedback);
// Provide overall feedback
var overallFeedback = "Good job!";
view.rubric.overallFeedback(overallFeedback);
view.selfAssess();
expect(server.selfAssess).toHaveBeenCalled();
expect(server.selfAssess).toHaveBeenCalledWith(
optionsSelected, criterionFeedback, overallFeedback
);
});
it("Re-enables the self assess button on error", function() {
......
......@@ -107,6 +107,29 @@ describe("OpenAssessment.Server", function() {
});
});
it("sends a self-assessment to the XBlock", function() {
stubAjax(true, {success: true, msg: ''});
var success = false;
var options = {clarity: "Very clear", precision: "Somewhat precise"};
var criterionFeedback = {clarity: "This essay was very clear."};
server.selfAssess(options, criterionFeedback, "Excellent job!").done(
function() { success = true; }
);
expect(success).toBe(true);
expect($.ajax).toHaveBeenCalledWith({
url: '/self_assess',
type: "POST",
data: JSON.stringify({
options_selected: options,
criterion_feedback: criterionFeedback,
overall_feedback: "Excellent job!"
})
});
});
it("sends a training assessment to the XBlock", function() {
stubAjax(true, {success: true, msg: '', correct: true});
var success = false;
......@@ -241,7 +264,7 @@ describe("OpenAssessment.Server", function() {
it("informs the caller of an AJAX error when sending a self assessment", function() {
stubAjax(false, null);
var receivedMsg = null;
server.selfAssess("Test").fail(function(errorMsg) { receivedMsg = errorMsg; });
server.selfAssess("Test", {}, "Excellent job!").fail(function(errorMsg) { receivedMsg = errorMsg; });
expect(receivedMsg).toContain('This assessment could not be submitted');
});
......
......@@ -197,7 +197,7 @@ OpenAssessment.PeerView.prototype = {
this.server.peerAssess(
this.rubric.optionsSelected(),
this.rubric.criterionFeedback(),
this.overallFeedback()
this.rubric.overallFeedback()
).done(
successFunction
).fail(function(errMsg) {
......@@ -206,28 +206,5 @@ OpenAssessment.PeerView.prototype = {
});
},
/**
Get or set overall feedback on the submission.
Args:
overallFeedback (string or undefined): The overall feedback text (optional).
Returns:
string or undefined
Example usage:
>>> view.overallFeedback('Good job!'); // Set the feedback text
>>> view.overallFeedback(); // Retrieve the feedback text
'Good job!'
**/
overallFeedback: function(overallFeedback) {
var selector = '#assessment__rubric__question--feedback__value';
if (typeof overallFeedback === 'undefined') {
return $(selector, this.element).val();
}
else {
$(selector, this.element).val(overallFeedback);
}
}
};
......@@ -47,6 +47,31 @@ OpenAssessment.Rubric.prototype = {
},
/**
Get or set overall feedback on the submission.
Args:
overallFeedback (string or undefined): The overall feedback text (optional).
Returns:
string or undefined
Example usage:
>>> view.overallFeedback('Good job!'); // Set the feedback text
>>> view.overallFeedback(); // Retrieve the feedback text
'Good job!'
**/
overallFeedback: function(overallFeedback) {
var selector = '#assessment__rubric__question--feedback__value';
if (typeof overallFeedback === 'undefined') {
return $(selector, this.element).val();
}
else {
$(selector, this.element).val(overallFeedback);
}
},
/**
Get or set the options selected in the rubric.
Args:
......
......@@ -103,8 +103,11 @@ OpenAssessment.SelfView.prototype = {
baseView.toggleActionError('self', null);
view.selfSubmitEnabled(false);
var options = this.rubric.optionsSelected();
this.server.selfAssess(options).done(
this.server.selfAssess(
this.rubric.optionsSelected(),
this.rubric.criterionFeedback(),
this.rubric.overallFeedback()
).done(
function() {
baseView.loadAssessmentModules();
baseView.scrollToTop();
......
/**
Interface for server-side XBlock handlers.
Args:
......@@ -261,6 +262,8 @@ OpenAssessment.Server.prototype = {
Args:
optionsSelected (object literal): Keys are criteria names,
values are the option text the user selected for the criterion.
criterionFeedback (object literal): Keys are criteria names,
values are the free-form feedback the user gave for the criterion.
overallFeedback (string): Written feedback on the submission as a whole.
Returns:
A JQuery promise, which resolves with no args if successful
......@@ -274,10 +277,12 @@ OpenAssessment.Server.prototype = {
function(errorMsg) { console.log(errorMsg); }
);
**/
selfAssess: function(optionsSelected) {
selfAssess: function(optionsSelected, criterionFeedback, overallFeedback) {
var url = this.url('self_assess');
var payload = JSON.stringify({
options_selected: optionsSelected
options_selected: optionsSelected,
criterion_feedback: criterionFeedback,
overall_feedback: overallFeedback
});
return $.Deferred(function(defer) {
$.ajax({ type: "POST", url: url, data: payload }).done(
......
......@@ -115,9 +115,16 @@ class TestGrade(XBlockHandlerTestCase):
u'𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞': u"Ṫḧïṡ ïṡ ṡöṁë ḟëëḋḅäċḳ."
}
self_assessment = copy.deepcopy(self.ASSESSMENTS[0])
self_assessment['criterion_feedback'] = {
u'𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞': "Feedback here",
u'Form': 'lots of feedback yes"',
u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': "such feedback"
}
# Submit, assess, and render the grade view
self._create_submission_and_assessments(
xblock, self.SUBMISSION, self.PEERS, peer_assessments, self.ASSESSMENTS[0]
xblock, self.SUBMISSION, self.PEERS, peer_assessments, self_assessment
)
# Render the grade section
......@@ -172,11 +179,13 @@ class TestGrade(XBlockHandlerTestCase):
# Verify that the context for the grade complete page contains the feedback
_, context = xblock.render_grade_complete(xblock.get_workflow_info())
criteria = context['rubric_criteria']
self.assertEqual(criteria[0]['feedback'], [
self.assertEqual(criteria[0]['peer_feedback'], [
u'Peer 2: ฝﻉɭɭ ɗѻกﻉ!',
u'Peer 1: ฝﻉɭɭ ɗѻกﻉ!',
])
self.assertEqual(criteria[1]['feedback'], [u'Peer 2: ƒαιя נσв'])
self.assertEqual(criteria[0]['self_feedback'], u'Peer 1: ฝﻉɭɭ ɗѻกﻉ!')
self.assertEqual(criteria[1]['peer_feedback'], [u'Peer 2: ƒαιя נσв'])
# The order of the peers in the per-criterion feedback needs
# to match the order of the peer assessments
......@@ -365,5 +374,6 @@ class TestGrade(XBlockHandlerTestCase):
if self_assessment is not None:
self_api.create_assessment(
submission['uuid'], student_id, self_assessment['options_selected'],
self_assessment['criterion_feedback'], self_assessment['overall_feedback'],
{'criteria': xblock.rubric_criteria}
)
......@@ -9,6 +9,7 @@ import mock
import pytz
from openassessment.assessment.api import self as self_api
from openassessment.workflow import api as workflow_api
from openassessment.xblock.data_conversion import create_rubric_dict
from .base import XBlockHandlerTestCase, scenario
......@@ -23,6 +24,8 @@ class TestSelfAssessment(XBlockHandlerTestCase):
ASSESSMENT = {
'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
'criterion_feedback': {},
'overall_feedback': ""
}
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
......@@ -87,6 +90,10 @@ class TestSelfAssessment(XBlockHandlerTestCase):
# Submit a self assessment for a rubric with a feedback-only criterion
assessment_dict = {
'options_selected': {u'vocabulary': u'good'},
'criterion_feedback': {
u'vocabulary': 'Awesome job!',
u'𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞': 'fairly illegible.'
},
'overall_feedback': u''
}
resp = self.request(xblock, 'self_assess', json.dumps(assessment_dict), response_format='json')
......@@ -99,10 +106,9 @@ class TestSelfAssessment(XBlockHandlerTestCase):
self.assertEqual(assessment['parts'][0]['option']['points'], 1)
# Check the feedback-only criterion score/feedback
# The written feedback should default to an empty string
self.assertEqual(assessment['parts'][1]['criterion']['name'], u'𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞')
self.assertIs(assessment['parts'][1]['option'], None)
self.assertEqual(assessment['parts'][1]['feedback'], u'')
self.assertEqual(assessment['parts'][1]['feedback'], u'fairly illegible.')
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_self_assess_workflow_error(self, xblock):
......@@ -267,7 +273,8 @@ class TestSelfAssessmentRender(XBlockHandlerTestCase):
submission['uuid'],
xblock.get_student_item_dict()['student_id'],
{u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
{'criteria': xblock.rubric_criteria}
{}, "Good job!",
create_rubric_dict(xblock.prompt, xblock.rubric_criteria)
)
self._assert_path_and_context(
xblock, 'openassessmentblock/self/oa_self_complete.html', {},
......@@ -302,7 +309,8 @@ class TestSelfAssessmentRender(XBlockHandlerTestCase):
submission['uuid'],
xblock.get_student_item_dict()['student_id'],
{u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
{'criteria': xblock.rubric_criteria}
{}, "Good job!",
create_rubric_dict(xblock.prompt, xblock.rubric_criteria)
)
# This case probably isn't possible, because presumably when we create
......
......@@ -32,6 +32,12 @@ ASSESSMENT_DICT = {
"Clear-headed": "Yogi Berra",
"Form": "Reddit",
},
'criterion_feedback': {
"Concise": "Not very.",
"Clear-headed": "Indubitably",
"Form": "s ka tter ed"
}
}
......@@ -209,6 +215,8 @@ class TestCourseStaff(XBlockHandlerTestCase):
submission['uuid'],
STUDENT_ITEM["student_id"],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
{'criteria': xblock.rubric_criteria},
)
......@@ -235,6 +243,13 @@ class TestCourseStaff(XBlockHandlerTestCase):
"Content": "Poor",
}
criterion_feedback = {
"Ideas": "Dear diary: Lots of creativity from my dream journal last night at 2 AM,",
"Content": "Not as insightful as I had thought in the wee hours of the morning!"
}
overall_feedback = "I think I should tell more people about how important worms are for the ecosystem."
bob_item = STUDENT_ITEM.copy()
bob_item["item_id"] = xblock.scope_ids.usage_id
......@@ -265,6 +280,8 @@ class TestCourseStaff(XBlockHandlerTestCase):
submission['uuid'],
STUDENT_ITEM["student_id"],
options_selected,
criterion_feedback,
overall_feedback,
{'criteria': xblock.rubric_criteria},
)
......