Commit 1e49572f by Will Daly

Update the rubric editing templates to include both a name and a label for criteria and options.

Update the container JS to retrieve both the name and the label for criteria and options.

Update the schema to make 'name' optional and 'label' required; the XBlock editor handler assigns UUIDs to criteria and options that don't have names.

Assign labels to criteria/options if they do not have any.

Update the grade template to display criterion/option labels when they're provided, falling back to the 'name' field otherwise.

Update the student-facing rubric templates to display the label if it's available.

Add the label field to the XML problem definition.

Add the label field to assessments.

Add the label field to the data dump script.

Update the turbo mode template to use option labels.

Exclude the 'name' key for new criteria and options in the JSON dict sent to the server.

Default new option points to 1 instead of NaN.

Add a data migration to fill in labels with default values.
parent 209d2fa7
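
A note on the naming convention used throughout the diff below: 'name' becomes a stable, unique identifier (a UUID for newly created items), while 'label' carries the user-facing text. The editor handler that assigns the UUIDs is not part of this excerpt; a minimal Python sketch of the idea (helper name hypothetical):

    import uuid

    def assign_unique_names(criteria):
        """Give every criterion and option a stable, unique 'name'.

        New items created in the editor omit 'name'; the server fills it
        in with a UUID so the user-facing 'label' can be edited freely
        without breaking references to the item.
        """
        for criterion in criteria:
            if 'name' not in criterion:
                criterion['name'] = uuid.uuid4().hex
            for option in criterion.get('options', []):
                if 'name' not in option:
                    option['name'] = uuid.uuid4().hex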
@@ -26,7 +26,7 @@ class RubricAdmin(admin.ModelAdmin):
         """Short description of criteria for presenting in a list."""
         rubric_data = RubricSerializer.serialized_from_cache(rubric_obj)
         return u", ".join(
-            u"{}: {}".format(criterion["name"], criterion["points_possible"])
+            u"{} - {}: {}".format(criterion["name"], criterion['label'], criterion["points_possible"])
             for criterion in rubric_data["criteria"]
         )
@@ -88,11 +88,13 @@ class AssessmentAdmin(admin.ModelAdmin):
     def parts_summary(self, assessment_obj):
         return "<br/>".join(
             html.escape(
-                u"{}/{} - {}: {} - {}".format(
+                u"{}/{} - {} - {}: {} - {} - {}".format(
                     part.points_earned,
                     part.points_possible,
                     part.criterion.name,
+                    part.criterion.label,
                     part.option.name if part.option else "None",
+                    part.option.label if part.option else "None",
                     part.feedback,
                 )
             )
...
@@ -149,7 +149,13 @@ class Criterion(models.Model):
     """
     rubric = models.ForeignKey(Rubric, related_name="criteria")

+    # Backwards compatibility: The "name" field was formerly
+    # used both as a display name and as a unique identifier.
+    # Now we're using it only as a unique identifier.
+    # We include the "label" (which is displayed to the user)
+    # in the data model so we can include it in analytics data packages.
     name = models.CharField(max_length=100, blank=False)
+    label = models.CharField(max_length=100, blank=True)

     # 0-based order in the Rubric
     order_num = models.PositiveIntegerField()
@@ -189,9 +195,13 @@ class CriterionOption(models.Model):
     # How many points this option is worth. 0 is allowed.
     points = models.PositiveIntegerField()

-    # Short name of the option. This is visible to the user.
-    # Examples: "Excellent", "Good", "Fair", "Poor"
+    # Backwards compatibility: The "name" field was formerly
+    # used both as a display name and as a unique identifier.
+    # Now we're using it only as a unique identifier.
+    # We include the "label" (which is displayed to the user)
+    # in the data model so we can include it in analytics data packages.
     name = models.CharField(max_length=100)
+    label = models.CharField(max_length=100, blank=True)

     # Longer text describing this option and why you should choose it.
     # Example: "The response makes 3-5 Monty Python references and at least one
...
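
The data migration mentioned in the commit message is not shown in this excerpt. Its forward step presumably copies 'name' into any blank 'label' so existing rubrics keep displaying the same text; a standalone sketch of that logic (function and arguments hypothetical, not the repository's actual migration code):

    def fill_default_labels(criterion_model, option_model):
        """Backfill blank 'label' fields with the legacy 'name' values."""
        for criterion in criterion_model.objects.filter(label=""):
            criterion.label = criterion.name
            criterion.save()
        for option in option_model.objects.filter(label=""):
            option.label = option.name
            option.save()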
@@ -63,7 +63,7 @@ class CriterionOptionSerializer(NestedModelSerializer):
     """Serializer for :class:`CriterionOption`"""

     class Meta:
         model = CriterionOption
-        fields = ('order_num', 'points', 'name', 'explanation')
+        fields = ('order_num', 'points', 'name', 'label', 'explanation')


 class CriterionSerializer(NestedModelSerializer):
@@ -73,7 +73,7 @@ class CriterionSerializer(NestedModelSerializer):
     class Meta:
         model = Criterion
-        fields = ('order_num', 'name', 'prompt', 'options', 'points_possible')
+        fields = ('order_num', 'name', 'label', 'prompt', 'options', 'points_possible')


 class RubricSerializer(NestedModelSerializer):
...
@@ -28,7 +28,8 @@ class CsvWriter(object):
         ],
         'assessment_part': [
             'assessment_id', 'points_earned',
-            'criterion_name', 'option_name', 'feedback'
+            'criterion_name', 'criterion_label',
+            'option_name', 'option_label', 'feedback'
         ],
         'assessment_feedback': [
             'submission_uuid', 'feedback_text', 'options'
@@ -230,7 +231,9 @@ class CsvWriter(object):
                 part.assessment.id,
                 part.points_earned,
                 part.criterion.name,
+                part.criterion.label,
                 part.option.name if part.option is not None else u"",
+                part.option.label if part.option is not None else u"",
                 part.feedback
             ])
...
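
With the two extra columns, an 'assessment_part' row in the data dump now carries both the stable identifier and the display label for the criterion and the option. A quick illustration in modern Python (made-up values):

    import csv
    import io

    buf = io.StringIO()
    writer = csv.writer(buf)
    writer.writerow([
        'assessment_id', 'points_earned',
        'criterion_name', 'criterion_label',
        'option_name', 'option_label', 'feedback',
    ])
    # 'name' may be a UUID for newly created items; 'label' is what users see.
    writer.writerow(['1', '4', '52bfbd0eb3044212b809564866e77079', 'Conciseness',
                     '85bbbecbb6a343f8a2146cde0e609ad0', 'Good', ''])
    print(buf.getvalue())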
@@ -7,15 +7,16 @@
         <div class="openassessment_criterion_remove_button"><h2>{% trans "Remove" %}</h2></div>
     </div>
     <div class="openassessment_criterion_body wrapper-comp-settings">
+        <input type="hidden" class="openassessment_criterion_name" value="{{ criterion_name }}" />
         <ul class="list-input settings-list openassessment_criterion_basic_editor">
             <li class="field comp-setting-entry">
                 <div class="wrapper-comp-settings">
                     <label class="openassessment_criterion_name_label setting-label">
                         {% trans "Criterion Name" %}
                         <input
-                            class="openassessment_criterion_name input setting-input"
+                            class="openassessment_criterion_label input setting-input"
                             type="text"
-                            value="{{ criterion_name }}"
+                            value="{{ criterion_label }}"
                         >
                     </label>
                 </div>
@@ -31,7 +32,7 @@
     </ul>
     <ul class="openassessment_criterion_option_list">
         {% for option in criterion_options %}
-            {% include "openassessmentblock/edit/oa_edit_option.html" with option_name=option.name option_points=option.points option_explanation=option.explanation %}
+            {% include "openassessmentblock/edit/oa_edit_option.html" with option_name=option.name option_label=option.label option_points=option.points option_explanation=option.explanation %}
        {% endfor %}
    </ul>
...
@@ -8,15 +8,17 @@
         </div>
     </div>
     <div class="wrapper-comp-settings">
+        <input type="hidden" class="openassessment_criterion_option_name" value="{{ option_name }}" />
         <ul class="list-input settings-list">
             <li class="field comp-setting-entry openassessment_criterion_option_name_wrapper">
                 <div class="wrapper-comp-setting">
                     <label class="openassessment_criterion_option_name_label setting-label">
                         {% trans "Option Name"%}
                         <input
-                            class="openassessment_criterion_option_name input input-label"
+                            class="openassessment_criterion_option_label input input-label"
                             type="text"
-                            value="{{ option_name }}"
+                            name="{{ option_name }}"
+                            value="{{ option_label }}"
                         >
                     </label>
                 </div>
...
@@ -2,11 +2,11 @@
 {% spaceless %}
 <div id="oa_rubric_editor_wrapper" class="oa_editor_content_wrapper">
     <div id="openassessment_criterion_template" class="is--hidden">
-        {% include "openassessmentblock/edit/oa_edit_criterion.html" with criterion_name="" criterion_prompt="" criterion_options=False criterion_feedback="disabled" %}
+        {% include "openassessmentblock/edit/oa_edit_criterion.html" with criterion_name="" criterion_label="" criterion_prompt="" criterion_options=False criterion_feedback="disabled" %}
     </div>
     <div id="openassessment_option_template" class="is--hidden">
-        {% include "openassessmentblock/edit/oa_edit_option.html" with option_name="" option_points="" option_explanation="" %}
+        {% include "openassessmentblock/edit/oa_edit_option.html" with option_name="" option_label="" option_points=1 option_explanation="" %}
     </div>
     <div id="openassessment_rubric_instructions">
@@ -17,7 +17,7 @@
     <ul id="openassessment_criterion_list" >
         {% for criterion in criteria %}
-            {% include "openassessmentblock/edit/oa_edit_criterion.html" with criterion_name=criterion.name criterion_prompt=criterion.prompt criterion_options=criterion.options criterion_feedback=criterion.feedback %}
+            {% include "openassessmentblock/edit/oa_edit_criterion.html" with criterion_name=criterion.name criterion_label=criterion.label criterion_prompt=criterion.prompt criterion_options=criterion.options criterion_feedback=criterion.feedback %}
        {% endfor %}
    </ul>
...
@@ -48,7 +48,7 @@
     <li class="question question--{{ criterion_num }} ui-toggle-visibility">
         <h4 class="question__title ui-toggle-visibility__control">
             <i class="ico icon-caret-right"></i>
-            <span class="question__title__copy">{{ criterion.name }}</span>
+            <span class="question__title__copy">{{ criterion.label }}</span>
             <span class="question__score">
                 <span class="label sr">{% trans "Overall Grade" %}</span>
@@ -78,11 +78,11 @@
     <span class="answer__value">
         <span class="answer__value__label sr">{% trans "Peer's Assessment" %}: </span>
         <span class="answer__value__value">
-            {{ part.option.name }}
+            {{ part.option.label }}
             <span class="ui-hint hint--top" data-hint="{{ part.option.explanation }}">
                 <i class="ico icon-info-sign"
-                   title="{% blocktrans with name=part.option.name %}More information about {{ name }}{% endblocktrans %}"></i>
+                   title="{% blocktrans with name=part.option.label %}More information about {{ name }}{% endblocktrans %}"></i>
             </span>
         </span>
@@ -109,11 +109,11 @@
     <span class="answer__value">
         <span class="answer__value__label sr">{% trans "Your Assessment" %}: </span>
         <span class="answer__value__value">
-            {{ part.option.name }}
+            {{ part.option.label }}
             <span class="ui-hint hint--top" data-hint="{{ part.option.explanation }}">
                 <i class="ico icon-info-sign"
-                   title="{% blocktrans with name=part.option.name %}More information about {{ name }}{% endblocktrans %}"></i>
+                   title="{% blocktrans with name=part.option.label %}More information about {{ name }}{% endblocktrans %}"></i>
             </span>
         </span>
     </span>
@@ -133,11 +133,11 @@
     <span class="answer__value">
         <span class="answer__value__label sr">{% trans "Example-Based Assessment" %}: </span>
         <span class="answer__value__value">
-            {{ part.option.name }}
+            {{ part.option.label }}
             <span class="ui-hint hint--top" data-hint="{{ part.option.explanation }}">
                 <i class="ico icon-info-sign"
-                   title="{% blocktrans with name=part.option.name %}More information about {{ name }}{% endblocktrans %}"></i>
+                   title="{% blocktrans with name=part.option.label %}More information about {{ name }}{% endblocktrans %}"></i>
             </span>
         </span>
     </span>
...
@@ -98,7 +98,7 @@
                value="{{ option.name }}" />
         <label for="assessment__rubric__question--{{ criterion.order_num }}__{{ option.order_num }}"
                class="answer__label"
-        >{{ option.name }}</label>
+        >{{ option.label }}</label>
     </div>
     <div class="wrapper--metadata">
         <span class="answer__tip">{{ option.explanation }}</span>
...
@@ -86,7 +86,7 @@
                value="{{ option.name }}" />
         <label for="assessment__rubric__question--{{ criterion.order_num }}__{{ option.order_num }}"
                class="answer__label"
-        >{{ option.name }}</label>
+        >{{ option.label }}</label>
     </div>
     <div class="wrapper--metadata">
         <span class="answer__tip">{{ option.explanation }}</span>
...
@@ -84,7 +84,7 @@
            class="answer__value"
            value="{{ option.name }}" />
     <label for="assessment__rubric__question--{{ criterion.order_num }}__{{ option.order_num }}"
-           class="answer__label">{{ option.name }}</label>
+           class="answer__label">{{ option.label }}</label>
     </div>
     <div class="wrapper--metadata">
         <span class="answer__tip">{{ option.explanation }}</span>
...
@@ -108,7 +108,7 @@
            class="answer__value"
            value="{{ option.name }}" />
     <label for="assessment__rubric__question--{{ criterion.order_num }}__{{ option.order_num }}"
-           class="answer__label">{{ option.name }}</label>
+           class="answer__label">{{ option.label }}</label>
     </div>
     <div class="wrapper--metadata">
         <span class="answer__tip">{{ option.explanation }}</span>
...
@@ -13,7 +13,8 @@
         "order_num": 0,
         "prompt": "How concise is it?",
         "rubric": 1,
-        "name": "concise"
+        "name": "concise",
+        "label": "concise label"
     }
 },
 {
@@ -23,7 +24,8 @@
         "order_num": 1,
         "prompt": "How clear is the thinking?",
         "rubric": 1,
-        "name": "clear-headed"
+        "name": "clear-headed",
+        "label": "clear-headed label"
     }
 },
 {
@@ -33,183 +35,200 @@
         "order_num": 2,
         "prompt": "Lastly, how is its form? Punctuation, grammar, and spelling all count.",
         "rubric": 1,
-        "name": "form"
+        "name": "form",
+        "label": "form label"
     }
 },
 {
     "pk": 1,
     "model": "assessment.criterionoption",
     "fields": {
-        "order_num": 0,
+        "name": "Neal Stephenson (late)",
         "explanation": "\n In \"Cryptonomicon\", Stephenson spent multiple pages talking about breakfast cereal.\n While hilarious, in recent years his work has been anything but 'concise'.\n ",
+        "label": "Neal Stephenson (late) label",
         "points": 0,
         "criterion": 1,
-        "name": "Neal Stephenson (late)"
+        "order_num": 0
     }
 },
 {
     "pk": 2,
     "model": "assessment.criterionoption",
     "fields": {
-        "order_num": 1,
+        "name": "HP Lovecraft",
         "explanation": "\n If the author wrote something cyclopean that staggers the mind, score it thus.\n ",
+        "label": "HP Lovecraft label",
         "points": 1,
         "criterion": 1,
-        "name": "HP Lovecraft"
+        "order_num": 1
     }
 },
 {
     "pk": 3,
     "model": "assessment.criterionoption",
     "fields": {
-        "order_num": 2,
+        "name": "Robert Heinlein",
         "explanation": "\n Tight prose that conveys a wealth of information about the world in relatively\n few words. Example, \"The door irised open and he stepped inside.\"\n ",
+        "label": "Robert Heinlein label",
         "points": 3,
         "criterion": 1,
-        "name": "Robert Heinlein"
+        "order_num": 2
     }
 },
 {
     "pk": 4,
     "model": "assessment.criterionoption",
     "fields": {
-        "order_num": 3,
+        "name": "Neal Stephenson (early)",
         "explanation": "\n When Stephenson still had an editor, his prose was dense, with anecdotes about\n nitrox abuse implying main characters' whole life stories.\n ",
+        "label": "Neal Stephenson (early) label",
         "points": 4,
         "criterion": 1,
-        "name": "Neal Stephenson (early)"
+        "order_num": 3
     }
 },
 {
     "pk": 5,
     "model": "assessment.criterionoption",
     "fields": {
-        "order_num": 4,
+        "name": "Earnest Hemingway",
        "explanation": "\n Score the work this way if it makes you weep, and the removal of a single\n word would make you sneer.\n ",
+        "label": "Earnest Hemingway label",
         "points": 5,
         "criterion": 1,
-        "name": "Earnest Hemingway"
+        "order_num": 4
     }
 },
 {
     "pk": 6,
     "model": "assessment.criterionoption",
     "fields": {
-        "order_num": 0,
+        "name": "Yogi Berra",
         "explanation": "",
+        "label": "Yogi Berra label",
         "points": 0,
         "criterion": 2,
-        "name": "Yogi Berra"
+        "order_num": 0
     }
 },
 {
     "pk": 7,
     "model": "assessment.criterionoption",
     "fields": {
-        "order_num": 1,
+        "name": "Hunter S. Thompson",
         "explanation": "",
+        "label": "Hunter S. Thompson label",
         "points": 1,
         "criterion": 2,
-        "name": "Hunter S. Thompson"
+        "order_num": 1
     }
 },
 {
     "pk": 8,
     "model": "assessment.criterionoption",
     "fields": {
-        "order_num": 2,
+        "name": "Robert Heinlein",
         "explanation": "",
+        "label": "Robert Heinlein label",
         "points": 2,
         "criterion": 2,
-        "name": "Robert Heinlein"
+        "order_num": 2
     }
 },
 {
     "pk": 9,
     "model": "assessment.criterionoption",
     "fields": {
-        "order_num": 3,
+        "name": "Isaac Asimov",
         "explanation": "",
+        "label": "Isaac Asimov label",
         "points": 3,
         "criterion": 2,
-        "name": "Isaac Asimov"
+        "order_num": 3
     }
 },
 {
     "pk": 10,
     "model": "assessment.criterionoption",
     "fields": {
-        "order_num": 4,
+        "name": "Spock",
         "explanation": "\n Coolly rational, with a firm grasp of the main topics, a crystal-clear train of thought,\n and unemotional examination of the facts. This is the only item explained in this category,\n to show that explained and unexplained items can be mixed.\n ",
+        "label": "Spock label",
         "points": 10,
         "criterion": 2,
-        "name": "Spock"
+        "order_num": 4
     }
 },
 {
     "pk": 11,
     "model": "assessment.criterionoption",
     "fields": {
-        "order_num": 0,
+        "name": "lolcats",
         "explanation": "",
+        "label": "lolcats label",
         "points": 0,
         "criterion": 3,
-        "name": "lolcats"
+        "order_num": 0
     }
 },
 {
     "pk": 12,
     "model": "assessment.criterionoption",
     "fields": {
-        "order_num": 1,
+        "name": "Facebook",
         "explanation": "",
+        "label": "Facebook label",
         "points": 1,
         "criterion": 3,
-        "name": "Facebook"
+        "order_num": 1
     }
 },
 {
     "pk": 13,
     "model": "assessment.criterionoption",
     "fields": {
-        "order_num": 2,
+        "name": "Reddit",
         "explanation": "",
+        "label": "Reddit label",
         "points": 2,
         "criterion": 3,
-        "name": "Reddit"
+        "order_num": 2
     }
 },
 {
     "pk": 14,
     "model": "assessment.criterionoption",
     "fields": {
-        "order_num": 3,
+        "name": "metafilter",
         "explanation": "",
+        "label": "metafilter label",
         "points": 3,
         "criterion": 3,
-        "name": "metafilter"
+        "order_num": 3
     }
 },
 {
     "pk": 15,
     "model": "assessment.criterionoption",
     "fields": {
-        "order_num": 4,
+        "name": "Usenet, 1996",
         "explanation": "",
+        "label": "Usenet, 1996 label",
         "points": 4,
         "criterion": 3,
-        "name": "Usenet, 1996"
+        "order_num": 4
     }
 },
 {
     "pk": 16,
     "model": "assessment.criterionoption",
     "fields": {
-        "order_num": 5,
+        "name": "The Elements of Style",
         "explanation": "",
+        "label": "The Elements of Style label",
         "points": 5,
         "criterion": 3,
-        "name": "The Elements of Style"
+        "order_num": 5
     }
 }
 ]
\ No newline at end of file
@@ -13,7 +13,15 @@
         ["submission_uuid", "feedback_text", "options"]
     ],
     "assessment_part": [
-        ["assessment_id", "points_earned", "criterion_name", "option_name", "feedback"]
+        [
+            "assessment_id",
+            "points_earned",
+            "criterion_name",
+            "criterion_label",
+            "option_name",
+            "option_label",
+            "feedback"
+        ]
     ],
     "assessment_feedback_option": [
         ["id", "text"]
@@ -62,10 +70,10 @@
         ]
     ],
     "assessment_part": [
-        ["assessment_id", "points_earned", "criterion_name", "option_name", "feedback"],
-        ["1", "4", "concise", "Neal Stephenson (early)", "Praesent ac lorem ac nunc tincidunt ultricies sit amet ut magna."],
-        ["1", "5", "form", "The Elements of Style", "Fusce varius, elit ut blandit consequat, odio ante mollis lectus"],
-        ["1", "3", "clear-headed", "Isaac Asimov", ""]
+        ["assessment_id", "points_earned", "criterion_name", "criterion_label", "option_name", "option_label", "feedback"],
+        ["1", "4", "concise", "concise label", "Neal Stephenson (early)", "Neal Stephenson (early) label", "Praesent ac lorem ac nunc tincidunt ultricies sit amet ut magna."],
+        ["1", "5", "form", "form label", "The Elements of Style", "The Elements of Style label", "Fusce varius, elit ut blandit consequat, odio ante mollis lectus"],
+        ["1", "3", "clear-headed", "clear-headed label", "Isaac Asimov", "Isaac Asimov label", ""]
     ]
 }
 },
@@ -97,13 +105,13 @@
         ]
     ],
     "assessment_part": [
-        ["assessment_id", "points_earned", "criterion_name", "option_name", "feedback"],
-        ["1", "4", "concise", "Neal Stephenson (early)", "Praesent ac lorem ac nunc tincidunt ultricies sit amet ut magna."],
-        ["1", "5", "form", "The Elements of Style", "Fusce varius, elit ut blandit consequat, odio ante mollis lectus"],
-        ["1", "3", "clear-headed", "Isaac Asimov", ""],
-        ["2", "5", "concise", "Earnest Hemingway", ""],
-        ["2", "5", "form", "The Elements of Style", ""],
-        ["2", "10", "clear-headed", "Spock", ""]
+        ["assessment_id", "points_earned", "criterion_name", "criterion_label", "option_name", "option_label", "feedback"],
+        ["1", "4", "concise", "concise label", "Neal Stephenson (early)", "Neal Stephenson (early) label", "Praesent ac lorem ac nunc tincidunt ultricies sit amet ut magna."],
+        ["1", "5", "form", "form label", "The Elements of Style", "The Elements of Style label", "Fusce varius, elit ut blandit consequat, odio ante mollis lectus"],
+        ["1", "3", "clear-headed", "clear-headed label", "Isaac Asimov", "Isaac Asimov label", ""],
+        ["2", "5", "concise", "concise label", "Earnest Hemingway", "Earnest Hemingway label", ""],
+        ["2", "5", "form", "form label", "The Elements of Style", "The Elements of Style label", ""],
+        ["2", "10", "clear-headed", "clear-headed label", "Spock", "Spock label", ""]
     ]
 }
 },
@@ -172,11 +180,11 @@
         ]
     ],
     "assessment_part": [
-        ["assessment_id", "points_earned", "criterion_name", "option_name", "feedback"],
-        ["1", "4", "concise", "Neal Stephenson (early)", "Praesent ac lorem ac nunc tincidunt ultricies sit amet ut magna."],
-        ["1", "5", "form", "The Elements of Style", "Fusce varius, elit ut blandit consequat, odio ante mollis lectus"],
-        ["1", "3", "clear-headed", "Isaac Asimov", ""],
-        ["1", "0", "feedback only", "", "Feedback!"]
+        ["assessment_id", "points_earned", "criterion_name", "criterion_label", "option_name", "option_label", "feedback"],
+        ["1", "4", "concise", "concise label", "Neal Stephenson (early)", "Neal Stephenson (early) label", "Praesent ac lorem ac nunc tincidunt ultricies sit amet ut magna."],
+        ["1", "5", "form", "form label", "The Elements of Style", "The Elements of Style label", "Fusce varius, elit ut blandit consequat, odio ante mollis lectus"],
+        ["1", "3", "clear-headed", "clear-headed label", "Isaac Asimov", "Isaac Asimov label", ""],
+        ["1", "0", "feedback only", "feedback only label", "", "", "Feedback!"]
     ]
 }
 }
...
@@ -3,6 +3,7 @@ Grade step in the OpenAssessment XBlock.
 """
 import copy
 from collections import defaultdict
+from lazy import lazy
 from django.utils.translation import ugettext as _
 from xblock.core import XBlock
@@ -100,14 +101,21 @@ class GradeMixin(object):
         if "peer-assessment" in assessment_steps:
             feedback = peer_api.get_assessment_feedback(submission_uuid)
-            peer_assessments = peer_api.get_assessments(submission_uuid)
+            peer_assessments = [
+                self._assessment_grade_context(asmnt)
+                for asmnt in peer_api.get_assessments(submission_uuid)
+            ]
             has_submitted_feedback = feedback is not None

         if "self-assessment" in assessment_steps:
-            self_assessment = self_api.get_assessment(submission_uuid)
+            self_assessment = self._assessment_grade_context(
+                self_api.get_assessment(submission_uuid)
+            )

         if "example-based-assessment" in assessment_steps:
-            example_based_assessment = ai_api.get_latest_assessment(submission_uuid)
+            example_based_assessment = self._assessment_grade_context(
+                ai_api.get_latest_assessment(submission_uuid)
+            )

         feedback_text = feedback.get('feedback', '') if feedback else ''
         student_submission = sub_api.get_submission(submission_uuid)
@@ -127,7 +135,7 @@ class GradeMixin(object):
             'peer_assessments': peer_assessments,
             'self_assessment': self_assessment,
             'example_based_assessment': example_based_assessment,
-            'rubric_criteria': self._rubric_criteria_with_feedback(peer_assessments),
+            'rubric_criteria': self._rubric_criteria_grade_context(peer_assessments),
             'has_submitted_feedback': has_submitted_feedback,
             'allow_file_upload': self.allow_file_upload,
             'file_url': self.get_download_url_from_submission(student_submission)
@@ -218,10 +226,14 @@ class GradeMixin(object):
         )
         return {'success': True, 'msg': _(u"Feedback saved.")}

-    def _rubric_criteria_with_feedback(self, peer_assessments):
+    def _rubric_criteria_grade_context(self, peer_assessments):
         """
-        Add per-criterion feedback from peer assessments to the rubric criteria.
-        Filters out empty feedback.
+        Sanitize the rubric criteria into a format that can be passed
+        into the grade complete Django template.
+
+        * Add per-criterion feedback from peer assessments to the rubric criteria.
+        * Filters out empty feedback.
+        * Assign a "label" for criteria/options if none is defined (backwards compatibility).

         Args:
             peer_assessments (list of dict): Serialized assessment models from the peer API.
@@ -232,7 +244,8 @@ class GradeMixin(object):
         Example:
             [
                 {
-                    'name': 'Test name',
+                    'label': 'Test name',
+                    'name': 'f78ac7d4ca1e4134b0ba4b40ca212e72',
                     'prompt': 'Test prompt',
                     'order_num': 2,
                     'options': [...]
@@ -244,7 +257,7 @@ class GradeMixin(object):
                 ...
             ]
         """
-        criteria = copy.deepcopy(self.rubric_criteria)
+        criteria = copy.deepcopy(self.rubric_criteria_with_labels)
         criteria_feedback = defaultdict(list)

         for assessment in peer_assessments:
@@ -258,3 +271,67 @@ class GradeMixin(object):
                 criterion['feedback'] = criteria_feedback[criterion_name]

         return criteria
+
+    @lazy
+    def _criterion_and_option_labels(self):
+        """
+        Retrieve criteria and option labels from the rubric in the XBlock problem definition,
+        defaulting to the name value if no label is available (backwards compatibility).
+
+        Evaluated lazily, so it will return a cached value if called repeatedly.
+        For the grade mixin, this should be okay, since we can't change the problem
+        definition in the LMS (the settings fields are read-only).
+
+        Returns:
+            Tuple of dictionaries:
+                `criterion_labels` maps criterion names to criterion labels.
+                `option_labels` maps (criterion name, option name) tuples to option labels.
+        """
+        criterion_labels = {}
+        option_labels = {}
+        for criterion in self.rubric_criteria_with_labels:
+            criterion_labels[criterion['name']] = criterion['label']
+            for option in criterion['options']:
+                option_label_key = (criterion['name'], option['name'])
+                option_labels[option_label_key] = option['label']
+        return criterion_labels, option_labels
+
+    def _assessment_grade_context(self, assessment):
+        """
+        Sanitize an assessment dictionary into a format that can be
+        passed into the grade complete Django template.
+
+        Args:
+            assessment (dict): The serialized assessment model.
+
+        Returns:
+            dict
+        """
+        assessment = copy.deepcopy(assessment)
+
+        # Retrieve dictionaries mapping criteria/option names to the associated labels.
+        # This is a lazy property, so we can call it repeatedly for each assessment.
+        criterion_labels, option_labels = self._criterion_and_option_labels
+
+        # Backwards compatibility: We used to treat "name" as both a user-facing label
+        # and a unique identifier for criteria and options.
+        # Now we treat "name" as a unique identifier, and we've added an additional "label"
+        # field that we display to the user.
+        # If criteria/options in the problem definition do NOT have a "label" field
+        # (because they were created before this change),
+        # we create a new label that has the same value as "name".
+        for part in assessment['parts']:
+            criterion_label_key = part['criterion']['name']
+            part['criterion']['label'] = criterion_labels.get(criterion_label_key, part['criterion']['name'])
+
+            # We need to be a little bit careful here: some assessment parts
+            # have only written feedback, so they're not associated with any options.
+            # If that's the case, we don't need to add the label field.
+            if part.get('option') is not None:
+                option_label_key = (part['criterion']['name'], part['option']['name'])
+                part['option']['label'] = option_labels.get(option_label_key, part['option']['name'])
+
+        return assessment
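
To make the label lookup above concrete: the two tables built by `_criterion_and_option_labels` are keyed so that option labels are scoped to their criterion. A self-contained sketch of the lookup and fallback with simplified, hypothetical data:

    criterion_labels = {'ideas': 'Ideas'}
    option_labels = {('ideas', 'good'): 'Good'}

    part = {'criterion': {'name': 'ideas'}, 'option': {'name': 'good'}}
    name = part['criterion']['name']
    part['criterion']['label'] = criterion_labels.get(name, name)
    # Feedback-only parts have no option, so guard before labeling.
    if part.get('option') is not None:
        key = (name, part['option']['name'])
        part['option']['label'] = option_labels.get(key, part['option']['name'])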
@@ -3,12 +3,14 @@
 import datetime as dt
 import logging

 import pkg_resources
+import copy

 import pytz

 from django.template.context import Context
 from django.template.loader import get_template
 from webob import Response
+from lazy import lazy

 from xblock.core import XBlock
 from xblock.fields import List, Scope, String, Boolean
@@ -233,7 +235,6 @@ class OpenAssessmentBlock(
         context_dict = {
             "title": self.title,
             "question": self.prompt,
-            "rubric_criteria": self.rubric_criteria,
             "rubric_assessments": ui_models,
             "show_staff_debug_info": self.is_course_staff and not self.in_studio_preview,
         }
@@ -394,6 +395,33 @@ class OpenAssessmentBlock(
     def assessment_steps(self):
         return [asmnt['name'] for asmnt in self.valid_assessments]

+    @lazy
+    def rubric_criteria_with_labels(self):
+        """
+        Backwards compatibility: We used to treat "name" as both a user-facing label
+        and a unique identifier for criteria and options.
+        Now we treat "name" as a unique identifier, and we've added an additional "label"
+        field that we display to the user.
+
+        If criteria/options in the problem definition do NOT have a "label" field
+        (because they were created before this change),
+        we create a new label that has the same value as "name".
+
+        The result of this call is cached, so it should NOT be used in a runtime
+        that can modify the XBlock settings (in the LMS, settings are read-only).
+
+        Returns:
+            list of criteria dictionaries
+        """
+        criteria = copy.deepcopy(self.rubric_criteria)
+        for criterion in criteria:
+            if 'label' not in criterion:
+                criterion['label'] = criterion['name']
+            for option in criterion['options']:
+                if 'label' not in option:
+                    option['label'] = option['name']
+        return criteria
+
     def render_assessment(self, path, context_dict=None):
         """Render an Assessment Module's HTML
...
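
For a legacy problem definition with no labels at all, `rubric_criteria_with_labels` leaves the data equivalent but display-ready. A tiny standalone demonstration of the same fallback logic (hypothetical data):

    import copy

    def with_labels(criteria):
        """Copy criteria, filling any missing 'label' from 'name'."""
        criteria = copy.deepcopy(criteria)
        for criterion in criteria:
            criterion.setdefault('label', criterion['name'])
            for option in criterion['options']:
                option.setdefault('label', option['name'])
        return criteria

    legacy = [{'name': 'Ideas', 'options': [{'name': 'Good', 'points': 2}]}]
    print(with_labels(legacy))
    # [{'name': 'Ideas', 'options': [{'name': 'Good', 'points': 2,
    #   'label': 'Good'}], 'label': 'Ideas'}]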
@@ -12,6 +12,7 @@ from openassessment.workflow.errors import AssessmentWorkflowError
 from openassessment.fileupload import api as file_upload_api
 from openassessment.fileupload.api import FileUploadError

+from .data_conversion import create_rubric_dict
 from .resolve_dates import DISTANT_FUTURE

 logger = logging.getLogger(__name__)
@@ -64,10 +65,6 @@ class PeerAssessmentMixin(object):
         assessment_ui_model = self.get_assessment_module('peer-assessment')
         if assessment_ui_model:
-            rubric_dict = {
-                'criteria': self.rubric_criteria
-            }
-
             try:
                 # Create the assessment
                 assessment = peer_api.create_assessment(
@@ -76,7 +73,7 @@ class PeerAssessmentMixin(object):
                     data['options_selected'],
                     self._clean_criterion_feedback(data['criterion_feedback']),
                     data['overall_feedback'],
-                    rubric_dict,
+                    create_rubric_dict(self.prompt, self.rubric_criteria_with_labels),
                     assessment_ui_model['must_be_graded_by']
                 )
@@ -154,7 +151,7 @@ class PeerAssessmentMixin(object):
         problem_closed, reason, start_date, due_date = self.is_closed(step="peer-assessment")

         context_dict = {
-            "rubric_criteria": self.rubric_criteria,
+            "rubric_criteria": self.rubric_criteria_with_labels,
             "estimated_time": "20 minutes"  # TODO: Need to configure this.
         }
@@ -283,7 +280,7 @@ class PeerAssessmentMixin(object):
         """
         return {
             criterion['name']: criterion_feedback[criterion['name']]
-            for criterion in self.rubric_criteria
+            for criterion in self.rubric_criteria_with_labels
             if criterion['name'] in criterion_feedback
             and criterion.get('feedback', 'disabled') in ['optional', 'required']
         }
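
Both the peer and self mixins now build the rubric payload through create_rubric_dict rather than an inline dict. Based on its call sites, that helper presumably bundles the prompt with the criteria; a sketch with the signature inferred from usage (not copied from .data_conversion):

    def create_rubric_dict(prompt, criteria):
        """Combine the rubric prompt and serialized criteria into the
        dict shape the assessment APIs expect."""
        return {'prompt': prompt, 'criteria': criteria}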
@@ -96,7 +96,8 @@ EDITOR_UPDATE_SCHEMA = Schema({
     Required('criteria'): [
         Schema({
             Required('order_num'): All(int, Range(min=0)),
-            Required('name'): utf8_validator,
+            'name': utf8_validator,
+            Required('label'): utf8_validator,
             Required('prompt'): utf8_validator,
             Required('feedback'): All(
                 utf8_validator,
@@ -109,7 +110,8 @@ EDITOR_UPDATE_SCHEMA = Schema({
             Required('options'): [
                 Schema({
                     Required('order_num'): All(int, Range(min=0)),
-                    Required('name'): utf8_validator,
+                    'name': utf8_validator,
+                    Required('label'): utf8_validator,
                     Required('explanation'): utf8_validator,
                     Required('points'): All(int, Range(min=0)),
                 })
...
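
In this schema style (voluptuous-like), a bare string key is optional while a Required(...) key must be present, which is exactly what lets new items omit 'name'. A minimal standalone sketch of the pattern, assuming the voluptuous package:

    from voluptuous import Schema, Required

    option_schema = Schema({
        'name': str,              # optional: new options leave it out
        Required('label'): str,
        Required('points'): int,
    })

    option_schema({'label': 'Good', 'points': 2})                    # valid without 'name'
    option_schema({'name': 'abc123def', 'label': 'Good', 'points': 2})  # also valid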
@@ -7,6 +7,7 @@ from webob import Response
 from openassessment.assessment.api import self as self_api
 from openassessment.workflow import api as workflow_api
 from submissions import api as submission_api

+from .data_conversion import create_rubric_dict
 from .resolve_dates import DISTANT_FUTURE

 logger = logging.getLogger(__name__)
@@ -81,7 +82,7 @@ class SelfAssessmentMixin(object):
             path = 'openassessmentblock/self/oa_self_closed.html'
         else:
             submission = submission_api.get_submission(self.submission_uuid)
-            context["rubric_criteria"] = self.rubric_criteria
+            context["rubric_criteria"] = self.rubric_criteria_with_labels
             context["estimated_time"] = "20 minutes"  # TODO: Need to configure this.
             context["self_submission"] = submission
@@ -120,7 +121,7 @@ class SelfAssessmentMixin(object):
             self.submission_uuid,
             self.get_student_item_dict()['student_id'],
             data['options_selected'],
-            {"criteria": self.rubric_criteria}
+            create_rubric_dict(self.prompt, self.rubric_criteria_with_labels)
         )
         self.publish_assessment_event("openassessmentblock.self_assess", assessment)
...
@@ -116,7 +116,7 @@ class StaffInfoMixin(object):
         context['display_reschedule_unfinished_tasks'] = display_ai_staff_info
         if display_ai_staff_info:
             context['classifierset'] = ai_api.get_classifier_set_info(
-                create_rubric_dict(self.prompt, self.rubric_criteria),
+                create_rubric_dict(self.prompt, self.rubric_criteria_with_labels),
                 example_based_assessment['algorithm_id'],
                 student_item['course_id'],
                 student_item['item_id']
@@ -154,7 +154,7 @@ class StaffInfoMixin(object):
         examples = assessment["examples"]
         try:
             workflow_uuid = ai_api.train_classifiers(
-                create_rubric_dict(self.prompt, self.rubric_criteria),
+                create_rubric_dict(self.prompt, self.rubric_criteria_with_labels),
                 convert_training_examples_list_to_dict(examples),
                 student_item_dict.get('course_id'),
                 student_item_dict.get('item_id'),
@@ -236,7 +236,7 @@ class StaffInfoMixin(object):
             'submitted_assessments': submitted_assessments,
             'self_assessment': self_assessment,
             'example_based_assessment': example_based_assessment,
-            'rubric_criteria': copy.deepcopy(self.rubric_criteria),
+            'rubric_criteria': copy.deepcopy(self.rubric_criteria_with_labels),
         }

         if peer_assessments or self_assessment or example_based_assessment:
...
@@ -391,7 +391,8 @@
     "submission_due": "2014-10-1T10:00:00",
     "criteria": [
         {
-            "name": "Criterion with two options",
+            "name": "52bfbd0eb3044212b809564866e77079",
+            "label": "Criterion with two options",
             "prompt": "Prompt for criterion with two options",
             "order_num": 0,
             "feedback": "disabled",
@@ -399,20 +400,23 @@
                 {
                     "order_num": 0,
                     "points": 1,
-                    "name": "Fair",
+                    "name": "85bbbecbb6a343f8a2146cde0e609ad0",
+                    "label": "Fair",
                     "explanation": "Fair explanation"
                 },
                 {
                     "order_num": 1,
                     "points": 2,
-                    "name": "Good",
+                    "name": "5936d5b9e281403ca123964055d4719a",
+                    "label": "Good",
                     "explanation": "Good explanation"
                 }
             ],
             "points_possible": 2
         },
         {
-            "name": "Criterion with no options",
+            "name": "d96bb68a69ee4ccb8f86c753b6924f75",
+            "label": "Criterion with no options",
             "prompt": "Prompt for criterion with no options",
             "order_num": 0,
             "options": [],
@@ -420,7 +424,8 @@
             "points_possible": 0
         },
         {
-            "name": "Criterion with optional feedback",
+            "name": "2ca052403b06424da714f7a80dfb954d",
+            "label": "Criterion with optional feedback",
             "prompt": "Prompt for criterion with optional feedback",
             "order_num": 2,
             "feedback": "optional",
@@ -428,7 +433,8 @@
                 {
                     "order_num": 0,
                     "points": 2,
-                    "name": "Good",
+                    "name": "d7445661a89b4b339b9788cb7225a603",
+                    "label": "Good",
                     "explanation": "Good explanation"
                 }
             ],
...
@@ -6,6 +6,8 @@ describe("OpenAssessment.Container", function () {
     var counter = 0;

     var StubContainerItem = function(element) {
+        this.element = element;
+
         // Assign an ID to the item if it doesn't already have one.
         if ($(element).attr("test_id") === "") {
             $(element).attr("test_id", counter);
@@ -79,20 +81,20 @@ describe("OpenAssessment.Container", function () {
         ]);

         // Remove the second item
-        container.remove(container.getItemElement(1));
+        container.remove(container.getItem(1));
         expect(container.getItemValues()).toEqual([
             { id: 0 },
             { id: 2 },
         ]);

         // Remove the first item
-        container.remove(container.getItemElement(0));
+        container.remove(container.getItem(0));
         expect(container.getItemValues()).toEqual([
             { id: 2 },
         ]);

         // Remove the last item
-        container.remove(container.getItemElement(0));
+        container.remove(container.getItem(0));
         expect(container.getItemValues()).toEqual([]);
     });
@@ -113,7 +115,7 @@ describe("OpenAssessment.Container", function () {
         expect(container.getItemValues().length).toEqual(3);

         // Remove the first element
-        container.remove(container.getItemElement(0));
+        container.remove(container.getItem(0));
         expect(container.getItemValues().length).toEqual(2);
     });
@@ -132,7 +134,7 @@ describe("OpenAssessment.Container", function () {
         expect(container.getItemValues().length).toEqual(3);

         // Press the button to delete the second item
-        $(".remove_button", container.getItemElement(1)).click();
+        $(".remove_button", container.getItem(1).element).click();
         expect(container.getItemValues().length).toEqual(2);
         expect(container.getItemValues()).toEqual([
             { id: 0 },
@@ -157,7 +159,7 @@ describe("OpenAssessment.Container", function () {
         // Expect that we can click the "remove" button
         // to remove the item.
-        $(".remove_button", container.getItemElement(0)).click();
+        $(".remove_button", container.getItem(0).element).click();
         expect(container.getItemValues().length).toEqual(0);
     });
 });
\ No newline at end of file
@@ -20,7 +20,8 @@ describe("OpenAssessment.EditRubricView", function() {
         // Criterion with two options, feedback disabled
         expect(criteria[0]).toEqual({
-            name: "Criterion with two options",
+            name: "52bfbd0eb3044212b809564866e77079",
+            label: "Criterion with two options",
             prompt: "Prompt for criterion with two options",
             order_num: 0,
             feedback: "disabled",
@@ -28,13 +29,15 @@ describe("OpenAssessment.EditRubricView", function() {
                 {
                     order_num: 0,
                     points: 1,
-                    name: "Fair",
+                    name: "85bbbecbb6a343f8a2146cde0e609ad0",
+                    label: "Fair",
                     explanation: "Fair explanation"
                 },
                 {
                     order_num: 1,
                     points: 2,
-                    name: "Good",
+                    name: "5936d5b9e281403ca123964055d4719a",
+                    label: "Good",
                     explanation: "Good explanation"
                 }
             ],
@@ -42,7 +45,8 @@ describe("OpenAssessment.EditRubricView", function() {
         // Criterion with no options, feedback required
         expect(criteria[1]).toEqual({
-            name: "Criterion with no options",
+            name: "d96bb68a69ee4ccb8f86c753b6924f75",
+            label: "Criterion with no options",
             prompt: "Prompt for criterion with no options",
             order_num: 1,
             feedback: "required",
@@ -51,7 +55,8 @@ describe("OpenAssessment.EditRubricView", function() {
         // Criterion with one option, feeback optional
         expect(criteria[2]).toEqual({
-            name: "Criterion with optional feedback",
+            name: "2ca052403b06424da714f7a80dfb954d",
+            label: "Criterion with optional feedback",
             prompt: "Prompt for criterion with optional feedback",
             order_num: 2,
             feedback: "optional",
@@ -59,13 +64,54 @@ describe("OpenAssessment.EditRubricView", function() {
                 {
                     order_num: 0,
                     points: 2,
-                    name: "Good",
+                    name: "d7445661a89b4b339b9788cb7225a603",
+                    label: "Good",
                     explanation: "Good explanation"
                 }
             ]
         });
     });

+    it("creates new criteria and options", function() {
+        // Delete all existing criteria from the rubric
+        // Then add new criteria (created from a client-side template)
+        view.removeAllCriteria();
+        view.addCriterion();
+        view.addCriterion();
+
+        // Add an option to the second criterion
+        view.getCriterionItem(1).addOption();
+
+        // Check the definition
+        // Since no criteria/option names are set, leave them out of the description.
+        // This will cause the server to assign them unique names.
+        var criteria = view.criteriaDefinition();
+        expect(criteria.length).toEqual(2);
+        expect(criteria[0]).toEqual({
+            order_num: 0,
+            label: "",
+            prompt: "",
+            feedback: "disabled",
+            options: [],
+        });
+        expect(criteria[1]).toEqual({
+            order_num: 1,
+            label: "",
+            prompt: "",
+            feedback: "disabled",
+            options: [
+                {
+                    order_num: 0,
+                    label: "",
+                    points: 1,
+                    explanation: ""
+                }
+            ]
+        });
+    });
+
     it("reads the feedback prompt from the editor", function() {
         view.feedbackPrompt("");
         expect(view.feedbackPrompt()).toEqual("");
...
...@@ -72,7 +72,10 @@ OpenAssessment.Container = function(containerItem, kwargs) { ...@@ -72,7 +72,10 @@ OpenAssessment.Container = function(containerItem, kwargs) {
// handlers for the delete buttons. // handlers for the delete buttons.
var container = this; var container = this;
$("." + this.removeButtonClass, this.containerElement).click( $("." + this.removeButtonClass, this.containerElement).click(
function(eventData) { container.remove(eventData.target); } function(eventData) {
var item = new container.containerItem(eventData.target);
container.remove(item);
}
); );
// Initialize existing items, in case they need to install their // Initialize existing items, in case they need to install their
...@@ -106,7 +109,10 @@ OpenAssessment.Container.prototype = { ...@@ -106,7 +109,10 @@ OpenAssessment.Container.prototype = {
var container = this; var container = this;
var containerItem = $("." + this.containerItemClass, this.containerElement).last(); var containerItem = $("." + this.containerItemClass, this.containerElement).last();
containerItem.find('.' + this.removeButtonClass) containerItem.find('.' + this.removeButtonClass)
.click(function(eventData) { container.remove(eventData.target); } ); .click(function(eventData) {
var containerItem = new container.containerItem(eventData.target);
container.remove(containerItem);
} );
// Initialize the item, allowing it to install event handlers. // Initialize the item, allowing it to install event handlers.
new this.containerItem(containerItem.get(0)); new this.containerItem(containerItem.get(0));
...@@ -118,12 +124,11 @@ OpenAssessment.Container.prototype = { ...@@ -118,12 +124,11 @@ OpenAssessment.Container.prototype = {
DOM tree until an item is found. DOM tree until an item is found.
Args: Args:
element (DOM element): An element representing the container item item: The container item object to remove.
or an element within the container item.
**/ **/
remove: function(element) { remove: function(item) {
$(element).closest("." + this.containerItemClass).remove(); $(item.element).closest("." + this.containerItemClass).remove();
}, },
/** /**
...@@ -156,11 +161,24 @@ OpenAssessment.Container.prototype = { ...@@ -156,11 +161,24 @@ OpenAssessment.Container.prototype = {
index (int): The index of the item, starting from 0. index (int): The index of the item, starting from 0.
Returns: Returns:
DOM element if the item is found, otherwise null. Container item object or null.
**/ **/
getItemElement: function(index) { getItem: function(index) {
var element = $("." + this.containerItemClass, this.containerElement).get(index); var element = $("." + this.containerItemClass, this.containerElement).get(index);
return (element !== undefined) ? element : null; return (element !== undefined) ? new this.containerItem(element) : null;
},
/**
Retrieve all of the container item objects in this container.
Returns:
array of container item objects
**/
getAllItems: function() {
var container = this;
return $("." + this.containerItemClass, this.containerElement)
.map(function() { return new container.containerItem(this); });
}, },
}; };
...@@ -26,9 +26,9 @@ OpenAssessment.RubricOption.prototype = { ...@@ -26,9 +26,9 @@ OpenAssessment.RubricOption.prototype = {
} }
**/ **/
getFieldValues: function () { getFieldValues: function () {
return { var fields = {
name: OpenAssessment.Fields.stringField( label: OpenAssessment.Fields.stringField(
$('.openassessment_criterion_option_name', this.element) $('.openassessment_criterion_option_label', this.element)
), ),
points: OpenAssessment.Fields.intField( points: OpenAssessment.Fields.intField(
$('.openassessment_criterion_option_points', this.element) $('.openassessment_criterion_option_points', this.element)
...@@ -37,6 +37,16 @@ OpenAssessment.RubricOption.prototype = { ...@@ -37,6 +37,16 @@ OpenAssessment.RubricOption.prototype = {
$('.openassessment_criterion_option_explanation', this.element) $('.openassessment_criterion_option_explanation', this.element)
) )
}; };
// New options won't have unique names assigned.
// By convention, we exclude the "name" key from the JSON dict
// sent to the server, and the server will assign a unique name.
var nameString = OpenAssessment.Fields.stringField(
$('.openassessment_criterion_option_name', this.element)
);
if (nameString !== "") { fields.name = nameString; }
return fields;
} }
}; };
...@@ -85,9 +95,9 @@ OpenAssessment.RubricCriterion.prototype = { ...@@ -85,9 +95,9 @@ OpenAssessment.RubricCriterion.prototype = {
} }
**/ **/
getFieldValues: function () { getFieldValues: function () {
return { var fields = {
name: OpenAssessment.Fields.stringField( label: OpenAssessment.Fields.stringField(
$('.openassessment_criterion_name', this.element) $('.openassessment_criterion_label', this.element)
), ),
prompt: OpenAssessment.Fields.stringField( prompt: OpenAssessment.Fields.stringField(
$('.openassessment_criterion_prompt', this.element) $('.openassessment_criterion_prompt', this.element)
...@@ -97,5 +107,23 @@ OpenAssessment.RubricCriterion.prototype = { ...@@ -97,5 +107,23 @@ OpenAssessment.RubricCriterion.prototype = {
), ),
options: this.optionContainer.getItemValues() options: this.optionContainer.getItemValues()
}; };
// New criteria won't have unique names assigned.
// By convention, we exclude the "name" key from the JSON dict
// sent to the server, and the server will assign a unique name.
var nameString = OpenAssessment.Fields.stringField(
$('.openassessment_criterion_name', this.element)
);
if (nameString !== "") { fields.name = nameString; }
return fields;
},
/**
Add an option to the criterion.
Uses the client-side template to create the new option.
**/
addOption: function() {
this.optionContainer.add();
} }
}; };
\ No newline at end of file
...@@ -74,5 +74,38 @@ OpenAssessment.EditRubricView.prototype = { ...@@ -74,5 +74,38 @@ OpenAssessment.EditRubricView.prototype = {
feedbackPrompt: function(text) { feedbackPrompt: function(text) {
var sel = $("#openassessment_rubric_feedback", this.element); var sel = $("#openassessment_rubric_feedback", this.element);
return OpenAssessment.Fields.stringField(sel, text); return OpenAssessment.Fields.stringField(sel, text);
},
/**
Remove all criteria in this rubric.
Mainly useful for testing.
**/
removeAllCriteria: function() {
var items = this.criteriaContainer.getAllItems();
var view = this;
$.each(items, function() { view.criteriaContainer.remove(this); });
},
/**
Add a new criterion to the rubric.
Uses a client-side template to create the new criterion.
**/
addCriterion: function() {
this.criteriaContainer.add();
},
/**
Retrieve a criterion item (a container item) from the rubric
at a particular index.
Args:
index (int): The index of the criterion, starting from 0.
Returns:
OpenAssessment.RubricCriterion
**/
getCriterionItem: function(index) {
return this.criteriaContainer.getItem(index);
} }
}; };
\ No newline at end of file
...@@ -114,7 +114,7 @@ class StudentTrainingMixin(object): ...@@ -114,7 +114,7 @@ class StudentTrainingMixin(object):
self.submission_uuid, self.submission_uuid,
{ {
'prompt': self.prompt, 'prompt': self.prompt,
'criteria': self.rubric_criteria 'criteria': self.rubric_criteria_with_labels
}, },
examples examples
) )
......
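`rubric_criteria_with_labels` is used here (and again in the Studio and workflow mixins below) in place of `rubric_criteria`. Its implementation is outside this diff, but the tests further down (test_editor_context_assigns_labels, test_grade_display_assigns_labels) pin down its contract: every criterion and option carries a 'label', falling back to the 'name' when no label was stored. A minimal sketch under that assumption (the real property lives on the XBlock):

import copy

def rubric_criteria_with_labels(rubric_criteria):
    """Return a copy of the criteria in which every criterion and option
    has a 'label', defaulting to its 'name' (assumed shape)."""
    criteria = copy.deepcopy(rubric_criteria)
    for criterion in criteria:
        criterion.setdefault('label', criterion.get('name', u''))
        for option in criterion.get('options', []):
            option.setdefault('label', option.get('name', u''))
    return criteria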
""" """
Studio editing view for OpenAssessment XBlock. Studio editing view for OpenAssessment XBlock.
""" """
from django.template import Context
import pkg_resources import pkg_resources
import copy import copy
import logging import logging
from uuid import uuid4
from django.template import Context
from django.template.loader import get_template from django.template.loader import get_template
from django.utils.translation import ugettext as _ from django.utils.translation import ugettext as _
from dateutil.parser import parse as parse_date from dateutil.parser import parse as parse_date
...@@ -29,8 +30,10 @@ class StudioMixin(object): ...@@ -29,8 +30,10 @@ class StudioMixin(object):
DEFAULT_CRITERIA = [ DEFAULT_CRITERIA = [
{ {
'label': '',
'options': [ 'options': [
{ {
'label': ''
}, },
] ]
} }
...@@ -84,7 +87,7 @@ class StudioMixin(object): ...@@ -84,7 +87,7 @@ class StudioMixin(object):
# Every rubric requires one criterion. If there is no criteria # Every rubric requires one criterion. If there is no criteria
# configured for the XBlock, return one empty default criterion, with # configured for the XBlock, return one empty default criterion, with
# an empty default option. # an empty default option.
criteria = copy.deepcopy(self.rubric_criteria) criteria = copy.deepcopy(self.rubric_criteria_with_labels)
if not criteria: if not criteria:
criteria = self.DEFAULT_CRITERIA criteria = self.DEFAULT_CRITERIA
...@@ -132,6 +135,19 @@ class StudioMixin(object): ...@@ -132,6 +135,19 @@ class StudioMixin(object):
logger.exception('Editor context is invalid') logger.exception('Editor context is invalid')
return {'success': False, 'msg': _('Error updating XBlock configuration')} return {'success': False, 'msg': _('Error updating XBlock configuration')}
# Backwards compatibility: We used to treat "name" as both a user-facing label
# and a unique identifier for criteria and options.
# Now we treat "name" as a unique identifier, and we've added an additional "label"
# field that we display to the user.
# If the JavaScript editor sends us a criterion or option without a "name"
# field, we should assign it a unique identifier.
for criterion in data['criteria']:
if 'name' not in criterion:
criterion['name'] = uuid4().hex
for option in criterion['options']:
if 'name' not in option:
option['name'] = uuid4().hex
xblock_validator = validator(self) xblock_validator = validator(self)
success, msg = xblock_validator( success, msg = xblock_validator(
create_rubric_dict(data['prompt'], data['criteria']), create_rubric_dict(data['prompt'], data['criteria']),
......
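As a concrete illustration of the handler logic above (hypothetical input; uuid4 comes from the import added at the top of the file):

from uuid import uuid4

criteria = [{'label': 'Ideas', 'options': [{'label': 'Good', 'points': 1}]}]
for criterion in criteria:
    if 'name' not in criterion:
        criterion['name'] = uuid4().hex
    for option in criterion['options']:
        if 'name' not in option:
            option['name'] = uuid4().hex

# Afterwards both the criterion and its option carry 32-character hex
# identifiers, e.g. 'd7445661a89b4b339b9788cb7225a603', while criteria
# and options that already had a name keep it unchanged.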
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
{ {
"order_num": 0, "order_num": 0,
"name": "Test criterion", "name": "Test criterion",
"label": "Test criterion",
"prompt": "Test criterion prompt", "prompt": "Test criterion prompt",
"feedback": "disabled", "feedback": "disabled",
"options": [ "options": [
...@@ -24,12 +25,14 @@ ...@@ -24,12 +25,14 @@
"order_num": 0, "order_num": 0,
"points": 0, "points": 0,
"name": "No", "name": "No",
"label": "No",
"explanation": "No explanation" "explanation": "No explanation"
}, },
{ {
"order_num": 1, "order_num": 1,
"points": 2, "points": 2,
"name": "Yes", "name": "Yes",
"label": "Yes",
"explanation": "Yes explanation" "explanation": "Yes explanation"
} }
] ]
...@@ -56,6 +59,7 @@ ...@@ -56,6 +59,7 @@
{ {
"order_num": 0, "order_num": 0,
"name": "Test criterion", "name": "Test criterion",
"label": "Test criterion",
"prompt": "Test criterion prompt", "prompt": "Test criterion prompt",
"feedback": "disabled", "feedback": "disabled",
"options": [ "options": [
...@@ -63,12 +67,14 @@ ...@@ -63,12 +67,14 @@
"order_num": 0, "order_num": 0,
"points": 0, "points": 0,
"name": "No", "name": "No",
"label": "No",
"explanation": "No explanation" "explanation": "No explanation"
}, },
{ {
"order_num": 1, "order_num": 1,
"points": 2, "points": 2,
"name": "Yes", "name": "Yes",
"label": "Yes",
"explanation": "Yes explanation" "explanation": "Yes explanation"
} }
] ]
...@@ -93,6 +99,7 @@ ...@@ -93,6 +99,7 @@
{ {
"order_num": 0, "order_num": 0,
"name": "Test criterion", "name": "Test criterion",
"label": "Test criterion",
"prompt": "Test criterion prompt", "prompt": "Test criterion prompt",
"feedback": "disabled", "feedback": "disabled",
"options": [ "options": [
...@@ -100,12 +107,14 @@ ...@@ -100,12 +107,14 @@
"order_num": 0, "order_num": 0,
"points": 0, "points": 0,
"name": "No", "name": "No",
"label": "No",
"explanation": "No explanation" "explanation": "No explanation"
}, },
{ {
"order_num": 1, "order_num": 1,
"points": 2, "points": 2,
"name": "Yes", "name": "Yes",
"label": "Yes",
"explanation": "Yes explanation" "explanation": "Yes explanation"
} }
] ]
...@@ -131,6 +140,7 @@ ...@@ -131,6 +140,7 @@
{ {
"order_num": 0, "order_num": 0,
"name": "Test criterion", "name": "Test criterion",
"label": "Test criterion",
"prompt": "Test criterion prompt", "prompt": "Test criterion prompt",
"feedback": "disabled", "feedback": "disabled",
"options": [ "options": [
...@@ -138,12 +148,14 @@ ...@@ -138,12 +148,14 @@
"order_num": 0, "order_num": 0,
"points": 0, "points": 0,
"name": "No", "name": "No",
"label": "No",
"explanation": "No explanation" "explanation": "No explanation"
}, },
{ {
"order_num": 1, "order_num": 1,
"points": 2, "points": 2,
"name": "Yes", "name": "Yes",
"label": "Yes",
"explanation": "Yes explanation" "explanation": "Yes explanation"
} }
] ]
...@@ -159,7 +171,7 @@ ...@@ -159,7 +171,7 @@
"<name>𝓣𝓮𝓼𝓽 𝓬𝓻𝓲𝓽𝓮𝓻𝓲𝓸𝓷</name>", "<name>𝓣𝓮𝓼𝓽 𝓬𝓻𝓲𝓽𝓮𝓻𝓲𝓸𝓷</name>",
"<prompt>Ŧɇsŧ ȼɍɨŧɇɍɨøn ꝑɍømꝑŧ</prompt>", "<prompt>Ŧɇsŧ ȼɍɨŧɇɍɨøn ꝑɍømꝑŧ</prompt>",
"<option points=\"0\"><name>𝕹𝖔</name><explanation>𝕹𝖔 𝖊𝖝𝖕𝖑𝖆𝖓𝖆𝖙𝖎𝖔𝖓</explanation></option>", "<option points=\"0\"><name>𝕹𝖔</name><explanation>𝕹𝖔 𝖊𝖝𝖕𝖑𝖆𝖓𝖆𝖙𝖎𝖔𝖓</explanation></option>",
"<option points=\"2\"><name>ﻉร</name><explanation>ﻉร ﻉซρɭคกคՇٱѻก</explanation></option>", "<option points=\"2\"><name>Ÿëṡ</name><explanation>ﻉร ﻉซρɭคกคՇٱѻก</explanation></option>",
"</criterion>", "</criterion>",
"</rubric>" "</rubric>"
], ],
...@@ -169,6 +181,7 @@ ...@@ -169,6 +181,7 @@
{ {
"order_num": 0, "order_num": 0,
"name": "𝓣𝓮𝓼𝓽 𝓬𝓻𝓲𝓽𝓮𝓻𝓲𝓸𝓷", "name": "𝓣𝓮𝓼𝓽 𝓬𝓻𝓲𝓽𝓮𝓻𝓲𝓸𝓷",
"label": "𝓣𝓮𝓼𝓽 𝓬𝓻𝓲𝓽𝓮𝓻𝓲𝓸𝓷",
"prompt": "Ŧɇsŧ ȼɍɨŧɇɍɨøn ꝑɍømꝑŧ", "prompt": "Ŧɇsŧ ȼɍɨŧɇɍɨøn ꝑɍømꝑŧ",
"feedback": "disabled", "feedback": "disabled",
"options": [ "options": [
...@@ -176,12 +189,14 @@ ...@@ -176,12 +189,14 @@
"order_num": 0, "order_num": 0,
"points": 0, "points": 0,
"name": "𝕹𝖔", "name": "𝕹𝖔",
"label": "𝕹𝖔",
"explanation": "𝕹𝖔 𝖊𝖝𝖕𝖑𝖆𝖓𝖆𝖙𝖎𝖔𝖓" "explanation": "𝕹𝖔 𝖊𝖝𝖕𝖑𝖆𝖓𝖆𝖙𝖎𝖔𝖓"
}, },
{ {
"order_num": 1, "order_num": 1,
"points": 2, "points": 2,
"name": "ﻉร", "name": "Ÿëṡ",
"label": "Ÿëṡ",
"explanation": "ﻉร ﻉซρɭคกคՇٱѻก" "explanation": "ﻉร ﻉซρɭคกคՇٱѻก"
} }
] ]
...@@ -212,6 +227,7 @@ ...@@ -212,6 +227,7 @@
{ {
"order_num": 0, "order_num": 0,
"name": "Test criterion", "name": "Test criterion",
"label": "Test criterion",
"prompt": "Test criterion prompt", "prompt": "Test criterion prompt",
"feedback": "disabled", "feedback": "disabled",
"options": [ "options": [
...@@ -219,12 +235,14 @@ ...@@ -219,12 +235,14 @@
"order_num": 0, "order_num": 0,
"points": 0, "points": 0,
"name": "No", "name": "No",
"label": "No",
"explanation": "No explanation" "explanation": "No explanation"
}, },
{ {
"order_num": 1, "order_num": 1,
"points": 2, "points": 2,
"name": "Yes", "name": "Yes",
"label": "Yes",
"explanation": "Yes explanation" "explanation": "Yes explanation"
} }
] ]
...@@ -232,6 +250,7 @@ ...@@ -232,6 +250,7 @@
{ {
"order_num": 1, "order_num": 1,
"name": "Second criterion", "name": "Second criterion",
"label": "Second criterion",
"prompt": "Second criterion prompt", "prompt": "Second criterion prompt",
"feedback": "disabled", "feedback": "disabled",
"options": [ "options": [
...@@ -239,6 +258,7 @@ ...@@ -239,6 +258,7 @@
"order_num": 0, "order_num": 0,
"points": 1, "points": 1,
"name": "Maybe", "name": "Maybe",
"label": "Maybe",
"explanation": "Maybe explanation" "explanation": "Maybe explanation"
} }
] ]
...@@ -269,6 +289,7 @@ ...@@ -269,6 +289,7 @@
{ {
"order_num": 0, "order_num": 0,
"name": "Test criterion", "name": "Test criterion",
"label": "Test criterion",
"prompt": "Test criterion prompt", "prompt": "Test criterion prompt",
"feedback": "disabled", "feedback": "disabled",
"options": [ "options": [
...@@ -276,12 +297,14 @@ ...@@ -276,12 +297,14 @@
"order_num": 0, "order_num": 0,
"points": 0, "points": 0,
"name": "No", "name": "No",
"label": "No",
"explanation": "No explanation" "explanation": "No explanation"
}, },
{ {
"order_num": 1, "order_num": 1,
"points": 2, "points": 2,
"name": "Yes", "name": "Yes",
"label": "Yes",
"explanation": "Yes explanation" "explanation": "Yes explanation"
} }
] ]
...@@ -289,6 +312,7 @@ ...@@ -289,6 +312,7 @@
{ {
"order_num": 1, "order_num": 1,
"name": "Second criterion", "name": "Second criterion",
"label": "Second criterion",
"prompt": "Second criterion prompt", "prompt": "Second criterion prompt",
"feedback": "optional", "feedback": "optional",
"options": [ "options": [
...@@ -296,6 +320,7 @@ ...@@ -296,6 +320,7 @@
"order_num": 0, "order_num": 0,
"points": 1, "points": 1,
"name": "Maybe", "name": "Maybe",
"label": "Maybe",
"explanation": "Maybe explanation" "explanation": "Maybe explanation"
} }
] ]
......
...@@ -11,24 +11,28 @@ ...@@ -11,24 +11,28 @@
{ {
"order_num": 0, "order_num": 0,
"name": "Vocabulary", "name": "Vocabulary",
"label": "Vocabulary",
"prompt": "How varied is the vocabulary?", "prompt": "How varied is the vocabulary?",
"options": [ "options": [
{ {
"order_num": 0, "order_num": 0,
"points": 0, "points": 0,
"name": "Poor", "name": "Poor",
"label": "Poor",
"explanation": "Poor job" "explanation": "Poor job"
}, },
{ {
"order_num": 1, "order_num": 1,
"points": 1, "points": 1,
"name": "Good", "name": "Good",
"label": "Good",
"explanation": "Good job" "explanation": "Good job"
}, },
{ {
"order_num": 2, "order_num": 2,
"points": 3, "points": 3,
"name": "Excellent", "name": "Excellent",
"label": "Excellent",
"explanation": "Excellent job" "explanation": "Excellent job"
} }
], ],
...@@ -37,24 +41,28 @@ ...@@ -37,24 +41,28 @@
{ {
"order_num": 1, "order_num": 1,
"name": "Grammar", "name": "Grammar",
"label": "Grammar",
"prompt": "How correct is the grammar?", "prompt": "How correct is the grammar?",
"options": [ "options": [
{ {
"order_num": 0, "order_num": 0,
"points": 0, "points": 0,
"name": "Poor", "name": "Poor",
"label": "Poor",
"explanation": "Poor job" "explanation": "Poor job"
}, },
{ {
"order_num": 1, "order_num": 1,
"points": 1, "points": 1,
"name": "Good", "name": "Good",
"label": "Good",
"explanation": "Good job" "explanation": "Good job"
}, },
{ {
"order_num": 2, "order_num": 2,
"points": 3, "points": 3,
"name": "Excellent", "name": "Excellent",
"label": "Excellent",
"explanation": "Excellent job" "explanation": "Excellent job"
} }
], ],
......
...@@ -3,19 +3,19 @@ ...@@ -3,19 +3,19 @@
"criteria": [ "criteria": [
{ {
"order_num": 0, "order_num": 0,
"name": "Test criterion", "label": "Test criterion",
"prompt": "Test criterion prompt", "prompt": "Test criterion prompt",
"options": [ "options": [
{ {
"order_num": 0, "order_num": 0,
"points": 0, "points": 0,
"name": "No", "label": "No",
"explanation": "No explanation" "explanation": "No explanation"
}, },
{ {
"order_num": 1, "order_num": 1,
"points": 2, "points": 2,
"name": "Yes", "label": "Yes",
"explanation": "Yes explanation" "explanation": "Yes explanation"
} }
], ],
...@@ -48,19 +48,19 @@ ...@@ -48,19 +48,19 @@
"criteria": [ "criteria": [
{ {
"order_num": 0, "order_num": 0,
"name": "Ṫëṡẗ ċṛïẗëïṛöṅ", "label": "Ṫëṡẗ ċṛïẗëïṛöṅ",
"prompt": "Téśt ćŕítéíŕőń ṕŕőḿṕt", "prompt": "Téśt ćŕítéíŕőń ṕŕőḿṕt",
"options": [ "options": [
{ {
"order_num": 0, "order_num": 0,
"points": 0, "points": 0,
"name": "Ṅö", "label": "Ṅö",
"explanation": "Ńő éxṕĺáńátíőń" "explanation": "Ńő éxṕĺáńátíőń"
}, },
{ {
"order_num": 1, "order_num": 1,
"points": 2, "points": 2,
"name": "sǝʎ", "label": "sǝʎ",
"explanation": "Чэѕ эхрlаиатіои" "explanation": "Чэѕ эхрlаиатіои"
} }
], ],
...@@ -94,18 +94,21 @@ ...@@ -94,18 +94,21 @@
{ {
"order_num": 0, "order_num": 0,
"name": "тєѕт ¢яιтєяιση", "name": "тєѕт ¢яιтєяιση",
"label": "тєѕт ¢яιтєяιση",
"prompt": "Test criterion prompt", "prompt": "Test criterion prompt",
"options": [ "options": [
{ {
"order_num": 0, "order_num": 0,
"points": 0, "points": 0,
"name": "Ṅö", "name": "Ṅö",
"label": "Ṅö",
"explanation": "Ṅö explanation" "explanation": "Ṅö explanation"
}, },
{ {
"order_num": 1, "order_num": 1,
"points": 2, "points": 2,
"name": "sǝʎ", "name": "sǝʎ",
"label": "sǝʎ",
"explanation": "sǝʎ explanation" "explanation": "sǝʎ explanation"
} }
], ],
...@@ -144,5 +147,53 @@ ...@@ -144,5 +147,53 @@
"due": "4014-03-10T00:00" "due": "4014-03-10T00:00"
} }
] ]
},
"already_has_criteria_and_options_names_assigned": {
"criteria": [
{
"order_num": 0,
"name": "cd316c145cb14e06b377db65719ed41c",
"label": "Test criterion",
"prompt": "Test criterion prompt",
"options": [
{
"order_num": 0,
"points": 0,
"name": "7c080ee29c38414291c92eb42b2ab310",
"label": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "8bcdb0769b15482d9b2c3791d22e8ad2",
"label": "Yes",
"explanation": "Yes explanation"
}
],
"feedback": "required"
}
],
"prompt": "My new prompt.",
"feedback_prompt": "Feedback prompt",
"submission_due": "4014-02-27T09:46",
"submission_start": "4014-02-10T09:46",
"allow_file_upload": false,
"title": "My new title.",
"assessments": [
{
"name": "peer-assessment",
"must_grade": 5,
"must_be_graded_by": 3,
"start": null,
"due": "4014-03-10T00:00"
},
{
"name": "self-assessment",
"start": null,
"due": null
}
]
} }
} }
...@@ -295,6 +295,43 @@ class TestGrade(XBlockHandlerTestCase): ...@@ -295,6 +295,43 @@ class TestGrade(XBlockHandlerTestCase):
self.assertFalse(resp['success']) self.assertFalse(resp['success'])
self.assertGreater(len(resp['msg']), 0) self.assertGreater(len(resp['msg']), 0)
@scenario('data/grade_scenario.xml', user_id='Greggs')
def test_grade_display_assigns_labels(self, xblock):
# Strip out labels defined for criteria and options in the problem definition
for criterion in xblock.rubric_criteria:
if 'label' in criterion:
del criterion['label']
for option in criterion['options']:
if 'label' in option:
del option['label']
# Create a submission and assessments so we can get a grade
self._create_submission_and_assessments(
xblock, self.SUBMISSION, self.PEERS, self.ASSESSMENTS, self.ASSESSMENTS[0]
)
# Verify that criteria and options are assigned labels before
# being passed to the Django template.
# Remember the criteria and option labels so we can verify
# that the same labels are applied to the assessment parts.
__, context = xblock.render_grade_complete(xblock.get_workflow_info())
criterion_labels = {}
option_labels = {}
for criterion in context['rubric_criteria']:
self.assertEqual(criterion['label'], criterion['name'])
criterion_labels[criterion['name']] = criterion['label']
for option in criterion['options']:
self.assertEqual(option['label'], option['name'])
option_labels[(criterion['name'], option['name'])] = option['label']
# Verify that assessment part options are also assigned labels
for asmnt in context['peer_assessments'] + [context['self_assessment']]:
for part in asmnt['parts']:
expected_criterion_label = criterion_labels[part['criterion']['name']]
self.assertEqual(part['criterion']['label'], expected_criterion_label)
expected_option_label = option_labels[(part['criterion']['name'], part['option']['name'])]
self.assertEqual(part['option']['label'], expected_option_label)
def _create_submission_and_assessments( def _create_submission_and_assessments(
self, xblock, submission_text, peers, peer_assessments, self_assessment, self, xblock, submission_text, peers, peer_assessments, self_assessment,
waiting_for_peer=False, waiting_for_peer=False,
......
...@@ -19,10 +19,12 @@ class StudentTrainingTest(XBlockHandlerTestCase): ...@@ -19,10 +19,12 @@ class StudentTrainingTest(XBlockHandlerTestCase):
""" """
Base class for student training tests. Base class for student training tests.
""" """
SUBMISSION = { SUBMISSION = {
'submission': u'Thé őbjéćt őf édúćátíőń íś tő téáćh úś tő ĺővé ẃhát íś béáútífúĺ.' 'submission': u'Thé őbjéćt őf édúćátíőń íś tő téáćh úś tő ĺővé ẃhát íś béáútífúĺ.'
} }
def assert_path_and_context(self, xblock, expected_path, expected_context): def assert_path_and_context(self, xblock, expected_path, expected_context):
""" """
Render the student training step and verify that the expected template Render the student training step and verify that the expected template
......
...@@ -15,6 +15,45 @@ class StudioViewTest(XBlockHandlerTestCase): ...@@ -15,6 +15,45 @@ class StudioViewTest(XBlockHandlerTestCase):
Test the view and handlers for editing the OpenAssessment XBlock in Studio. Test the view and handlers for editing the OpenAssessment XBlock in Studio.
""" """
RUBRIC_CRITERIA_WITH_AND_WITHOUT_NAMES = [
{
"order_num": 0,
"label": "Test criterion with no name",
"prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
"points": 0,
"label": "Test option with no name",
"explanation": "Test explanation"
}
]
},
{
"order_num": 1,
"label": "Test criterion that already has a name",
"name": "cd316c145cb14e06b377db65719ed41c",
"prompt": "Test criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
"points": 0,
"label": "Test option with no name",
"explanation": "Test explanation"
},
{
"order_num": 1,
"points": 0,
"label": "Test option that already has a name",
"name": "8bcdb0769b15482d9b2c3791d22e8ad2",
"explanation": "Test explanation"
},
]
}
]
@scenario('data/basic_scenario.xml') @scenario('data/basic_scenario.xml')
def test_render_studio_view(self, xblock): def test_render_studio_view(self, xblock):
frag = self.runtime.render(xblock, 'studio_view') frag = self.runtime.render(xblock, 'studio_view')
...@@ -27,6 +66,37 @@ class StudioViewTest(XBlockHandlerTestCase): ...@@ -27,6 +66,37 @@ class StudioViewTest(XBlockHandlerTestCase):
resp = self.request(xblock, 'update_editor_context', json.dumps(data), response_format='json') resp = self.request(xblock, 'update_editor_context', json.dumps(data), response_format='json')
self.assertTrue(resp['success'], msg=resp.get('msg')) self.assertTrue(resp['success'], msg=resp.get('msg'))
@scenario('data/basic_scenario.xml')
def test_update_editor_context_assign_unique_names(self, xblock):
# Update the XBlock with a rubric that is missing
# some of the (unique) names for rubric criteria/options.
data = {
"title": "Test title",
"prompt": "Test prompt",
"feedback_prompt": "Test feedback prompt",
"submission_start": "4014-02-10T09:46",
"submission_due": "4014-02-27T09:46",
"allow_file_upload": False,
"assessments": [{"name": "self-assessment"}],
"criteria": self.RUBRIC_CRITERIA_WITH_AND_WITHOUT_NAMES
}
xblock.published_date = None
resp = self.request(xblock, 'update_editor_context', json.dumps(data), response_format='json')
self.assertTrue(resp['success'], msg=resp.get('msg'))
# Check that the XBlock has assigned unique names for all criteria
criteria_names = set([criterion.get('name') for criterion in xblock.rubric_criteria])
self.assertEqual(len(criteria_names), 2)
self.assertNotIn(None, criteria_names)
# Check that the XBlock has assigned unique names for all options
option_names = set()
for criterion in xblock.rubric_criteria:
for option in criterion['options']:
option_names.add(option.get('name'))
self.assertEqual(len(option_names), 3)
self.assertNotIn(None, option_names)
@file_data('data/invalid_update_xblock.json') @file_data('data/invalid_update_xblock.json')
@scenario('data/basic_scenario.xml') @scenario('data/basic_scenario.xml')
def test_update_context_invalid_request_data(self, xblock, data): def test_update_context_invalid_request_data(self, xblock, data):
...@@ -81,3 +151,23 @@ class StudioViewTest(XBlockHandlerTestCase): ...@@ -81,3 +151,23 @@ class StudioViewTest(XBlockHandlerTestCase):
self.assertTrue(resp['success']) self.assertTrue(resp['success'])
self.assertFalse(resp['is_released']) self.assertFalse(resp['is_released'])
self.assertIn('msg', resp) self.assertIn('msg', resp)
@scenario('data/basic_scenario.xml')
def test_editor_context_assigns_labels(self, xblock):
# Strip out any labels from criteria/options that may have been imported.
for criterion in xblock.rubric_criteria:
if 'label' in criterion:
del criterion['label']
for option in criterion['options']:
if 'label' in option:
del option['label']
# Retrieve the context used to render the Studio view
context = xblock.editor_context()
# Verify that labels were assigned for all criteria and options
for criterion in context['criteria']:
self.assertEqual(criterion['label'], criterion['name'])
for option in criterion['options']:
self.assertEqual(option['label'], option['name'])
...@@ -43,6 +43,7 @@ class TestSerializeContent(TestCase): ...@@ -43,6 +43,7 @@ class TestSerializeContent(TestCase):
BASIC_CRITERIA = [ BASIC_CRITERIA = [
{ {
"order_num": 0, "order_num": 0,
"label": "Test criterion",
"name": "Test criterion", "name": "Test criterion",
"prompt": "Test criterion prompt", "prompt": "Test criterion prompt",
"feedback": "disabled", "feedback": "disabled",
...@@ -50,6 +51,7 @@ class TestSerializeContent(TestCase): ...@@ -50,6 +51,7 @@ class TestSerializeContent(TestCase):
{ {
"order_num": 0, "order_num": 0,
"points": 0, "points": 0,
"label": "Maybe",
"name": "Maybe", "name": "Maybe",
"explanation": "Maybe explanation" "explanation": "Maybe explanation"
} }
...@@ -239,6 +241,47 @@ class TestSerializeContent(TestCase): ...@@ -239,6 +241,47 @@ class TestSerializeContent(TestCase):
) )
self.fail(msg) self.fail(msg)
def test_serialize_missing_names_and_labels(self):
# Configure rubric criteria and options with no names or labels
# This *should* never happen, but if it does, recover gracefully
# by assigning unique names and empty labels
self.oa_block.rubric_criteria = copy.deepcopy(self.BASIC_CRITERIA)
self.oa_block.rubric_assessments = self.BASIC_ASSESSMENTS
self.oa_block.start = None
self.oa_block.due = None
self.oa_block.submission_start = None
self.oa_block.submission_due = None
self.oa_block.allow_file_upload = None
for criterion in self.oa_block.rubric_criteria:
del criterion['name']
del criterion['label']
for option in criterion['options']:
del option['name']
del option['label']
xml = serialize_content(self.oa_block)
content_dict = parse_from_xml_str(xml)
# Verify that all names are unique
# and that all labels are empty
criterion_names = set()
option_names = set()
criteria_count = 0
options_count = 0
for criterion in content_dict['rubric_criteria']:
criterion_names.add(criterion['name'])
self.assertEqual(criterion['label'], u'')
criteria_count += 1
for option in criterion['options']:
option_names.add(option['name'])
self.assertEqual(option['label'], u'')
options_count += 1
self.assertEqual(len(criterion_names), criteria_count)
self.assertEqual(len(option_names), options_count)
def _dict_mutations(self, input_dict): def _dict_mutations(self, input_dict):
""" """
Iterator over mutations of a dictionary: Iterator over mutations of a dictionary:
......
...@@ -54,7 +54,7 @@ class WorkflowMixin(object): ...@@ -54,7 +54,7 @@ class WorkflowMixin(object):
ai_module = self.get_assessment_module('example-based-assessment') ai_module = self.get_assessment_module('example-based-assessment')
on_init_params = { on_init_params = {
'ai': { 'ai': {
'rubric': create_rubric_dict(self.prompt, self.rubric_criteria), 'rubric': create_rubric_dict(self.prompt, self.rubric_criteria_with_labels),
'algorithm_id': ai_module["algorithm_id"] if ai_module else None 'algorithm_id': ai_module["algorithm_id"] if ai_module else None
} }
} }
......
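create_rubric_dict is called here and in the Studio handler above with a prompt and the label-bearing criteria. Its body isn't part of this diff; presumably it just packages the two into the rubric schema, along the lines of this assumption-level sketch:

def create_rubric_dict(prompt, criteria):
    """Assumed shape: bundle the prompt and the criteria list into the
    dict structure the rubric serializer and validator expect."""
    return {
        'prompt': prompt,
        'criteria': criteria,
    }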
""" """
Serialize and deserialize OpenAssessment XBlock content to/from XML. Serialize and deserialize OpenAssessment XBlock content to/from XML.
""" """
from uuid import uuid4 as uuid
import lxml.etree as etree import lxml.etree as etree
import pytz import pytz
import dateutil.parser import dateutil.parser
...@@ -76,9 +77,16 @@ def _serialize_options(options_root, options_list): ...@@ -76,9 +77,16 @@ def _serialize_options(options_root, options_list):
# Points (default to 0) # Points (default to 0)
option_el.set('points', unicode(option.get('points', 0))) option_el.set('points', unicode(option.get('points', 0)))
# Name (default to empty str) # Name (default to a UUID)
option_name = etree.SubElement(option_el, 'name') option_name = etree.SubElement(option_el, 'name')
option_name.text = unicode(option.get('name', u'')) if 'name' in option:
option_name.text = unicode(option['name'])
else:
option_name.text = unicode(uuid().hex)
# Label (default to the option name, then an empty string)
option_label = etree.SubElement(option_el, 'label')
option_label.text = unicode(option.get('label', option.get('name', u'')))
# Explanation (default to empty str) # Explanation (default to empty str)
option_explanation = etree.SubElement(option_el, 'explanation') option_explanation = etree.SubElement(option_el, 'explanation')
...@@ -105,9 +113,16 @@ def _serialize_criteria(criteria_root, criteria_list): ...@@ -105,9 +113,16 @@ def _serialize_criteria(criteria_root, criteria_list):
for criterion in _sort_by_order_num(criteria_list): for criterion in _sort_by_order_num(criteria_list):
criterion_el = etree.SubElement(criteria_root, 'criterion') criterion_el = etree.SubElement(criteria_root, 'criterion')
# Criterion name (default to empty string) # Criterion name (default to a UUID)
criterion_name = etree.SubElement(criterion_el, u'name') criterion_name = etree.SubElement(criterion_el, u'name')
criterion_name.text = unicode(criterion.get('name', '')) if 'name' in criterion:
criterion_name.text = unicode(criterion['name'])
else:
criterion_name.text = unicode(uuid().hex)
# Criterion label (default to the name, then an empty string)
criterion_label = etree.SubElement(criterion_el, 'label')
criterion_label.text = unicode(criterion.get('label', criterion.get('name', u'')))
# Criterion prompt (default to empty string) # Criterion prompt (default to empty string)
criterion_prompt = etree.SubElement(criterion_el, 'prompt') criterion_prompt = etree.SubElement(criterion_el, 'prompt')
...@@ -247,6 +262,16 @@ def _parse_options_xml(options_root): ...@@ -247,6 +262,16 @@ def _parse_options_xml(options_root):
else: else:
raise UpdateFromXmlError(_('Every "option" element must contain a "name" element.')) raise UpdateFromXmlError(_('Every "option" element must contain a "name" element.'))
# Option label
# Backwards compatibility: Older problem definitions won't have this.
# If no label is defined, default to the option name.
option_label = option.find('label')
option_dict['label'] = (
_safe_get_text(option_label)
if option_label is not None
else option_dict['name']
)
# Option explanation # Option explanation
option_explanation = option.find('explanation') option_explanation = option.find('explanation')
if option_explanation is not None: if option_explanation is not None:
...@@ -290,6 +315,16 @@ def _parse_criteria_xml(criteria_root): ...@@ -290,6 +315,16 @@ def _parse_criteria_xml(criteria_root):
else: else:
raise UpdateFromXmlError(_('Every "criterion" element must contain a "name" element.')) raise UpdateFromXmlError(_('Every "criterion" element must contain a "name" element.'))
# Criterion label
# Backwards compatibility: Older problem definitions won't have this,
# so if it isn't set, default to the criterion name.
criterion_label = criterion.find('label')
criterion_dict['label'] = (
_safe_get_text(criterion_label)
if criterion_label is not None
else criterion_dict['name']
)
# Criterion prompt # Criterion prompt
criterion_prompt = criterion.find('prompt') criterion_prompt = criterion.find('prompt')
if criterion_prompt is not None: if criterion_prompt is not None:
......
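The backwards-compatibility fallback in the two parsers above means a pre-label problem definition still round-trips cleanly: a missing <label> element simply inherits the name. A self-contained sketch of that behavior (element names follow the diff; lxml only):

import lxml.etree as etree

def _option_label(option_el):
    # Mirror of the parser fallback above: prefer <label>, else <name>
    label = option_el.find('label')
    return label.text if label is not None else option_el.find('name').text

# An option from an older problem definition, with no <label> element:
legacy = etree.fromstring(
    '<option points="2"><name>Yes</name>'
    '<explanation>Yes explanation</explanation></option>'
)
assert _option_label(legacy) == 'Yes'  # the label falls back to the name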