Commit fd00c5f3 by Albert (AJ) St. Aubin, committed by GitHub

Merge pull request #953 from edx/aj/TNL-6016_missing_peer_scores

Show feedback when no comment is provided.

Parents: a64fd58c, 998edca0
openassessmentblock/grade/oa_assessment_feedback.html:

 {% load i18n %}
 {% spaceless %}
 {% if assessment %}
-    <li class="answer feedback">
-        {% if assessment.individual_assessments %}
-            {% for individual_assessment in assessment.individual_assessments %}
-                {% if individual_assessment.feedback %}
-                    <div class="answer_title">
-                        <span class="answer__source">
-                            {% if individual_assessment.option %}
-                                {% blocktrans with title=individual_assessment.title grade=individual_assessment.option.label start_tag='<span class="answer__source__value">'|safe end_tag="</span>"|safe %}
-                                    {{ start_tag }}{{ title }}{{ end_tag }} - {{ grade }}
-                                {% endblocktrans %}
-                            {% else %}
-                                <span class="answer__source__value">
-                                    {{ individual_assessment.title }}
-                                </span>
-                            {% endif %}
-                        </span>
-                    </div>
-                    <div class="feedback__value">
-                        <p class="feedback__value__raw">{{ individual_assessment.feedback }}</p>
-                    </div>
-                {% endif %}
-            {% endfor %}
-        {% else %}
-            <div class="answer_title">
-                <span class="answer__source">
-                    <span class="answer__source__value">{{ title }}</span>
-                </span>
-            </div>
-            <div class="feedback__value">
-                <p class="feedback__value__raw">{{ assessment.feedback }}</p>
-            </div>
-        {% endif %}
-    </li>
+    {% if assessment.individual_assessments %}
+        {% for individual_assessment in assessment.individual_assessments %}
+            <div class="answer__source">
+                {% if individual_assessment.option %}
+                    {% blocktrans with title=individual_assessment.title grade=individual_assessment.option.label start_tag='<span class="answer__source__individual">'|safe end_tag="</span>"|safe start_grade_tag='<span class="answer__grade__individual">'|safe %}
+                        {{ start_tag }}{{ title }}{{ end_tag }}{{ start_grade_tag }} - {{ grade }}{{ end_tag }}
+                    {% endblocktrans %}
+                {% else %}
+                    <span class="answer__source__value">
+                        {{ individual_assessment.title }}
+                    </span>
+                {% endif %}
+            </div>
+            {% if individual_assessment.feedback %}
+                <div class="feedback__value">
+                    <p class="feedback__value__raw">{{ individual_assessment.feedback }}</p>
+                </div>
+            {% endif %}
+        {% endfor %}
+    {% elif assessment.feedback %}
+        <div class="answer__source">
+            <span class="answer__source__value">{{ title }}</span>
+        </div>
+        <div class="feedback__value">
+            <p class="feedback__value__raw">{{ assessment.feedback }}</p>
+        </div>
+    {% endif %}
 {% endif %}
 {% endspaceless %}
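For orientation, here is a minimal sketch of the context this reworked template appears to expect. It is not part of the commit; the key names are taken from the template variables above, and the values are invented.

# Hypothetical context for the feedback template after this change.
# Key names mirror the template variables; all values are illustrative only.
context = {
    "title": "Additional comments",  # used by the "elif assessment.feedback" branch
    "assessment": {
        "feedback": "Overall feedback on the response.",
        "individual_assessments": [
            {
                "title": "Peer 1",
                "option": {"label": "Poor"},       # absent for feedback-only criteria
                "feedback": "Needs more detail.",  # may be empty; the feedback block is then skipped
            },
        ],
    },
}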
openassessmentblock/grade/oa_assessment_title.html:

 {% load i18n %}
 {% spaceless %}
 {% if assessment %}
-    <li class="answer">
-        <h5 class="answer__title">
-            <span class="answer__source">
-                {% if assessment.points != None %}
-                    <span class="answer__source__value answer__source__value-with-points">
-                        {% blocktrans with assessment_title=assessment.title count points=assessment.points %}
-                            {{ assessment_title }} - {{ points }} point
-                        {% plural %}
-                            {{ assessment_title }} - {{ points }} points
-                        {% endblocktrans %}
-                    </span>
-                {% else %}
-                    <span class="answer__source__value">{{ assessment.title }}</span>
-                {% endif %}
-            </span>
-            <span class="answer__value">
-                <span class="answer__value__label sr">{{ assessment.title }}</span>
-                <span class="answer__value__value">
-                    {{ assessment.option.label }}
-                    {% if assessment.option.explanation %}
-                        <span class="ui-hint hint--top" data-hint="{{ assessment.option.explanation }}">
-                            <span class="icon fa fa-info-circle" aria-hidden="true"
-                                  title="{% blocktrans with name=assessment.option.label %}More information about {{ name }}{% endblocktrans %}">
-                            </span>
-                        </span>
-                    {% endif %}
-                </span>
-            </span>
-        </h5>
-    </li>
+    <div class="answer__source">
+        {% if assessment.points != None %}
+            <span class="answer__source__value answer__source__value-with-points">
+                {% blocktrans with assessment_title=assessment.title count points=assessment.points %}
+                    {{ assessment_title }} - {{ points }} point
+                {% plural %}
+                    {{ assessment_title }} - {{ points }} points
+                {% endblocktrans %}
+            </span>
+        {% else %}
+            <span class="answer__source__value">{{ assessment.title }}</span>
+        {% endif %}
+    </div>
+    <div class="answer__value">
+        <span class="answer__value__value">
+            {{ assessment.option.label }}
+            {% if assessment.option.explanation %}
+                <span class="ui-hint hint--top" data-hint="{{ assessment.option.explanation }}">
+                    <span class="icon fa fa-info-circle" aria-hidden="true"
+                          title="{% blocktrans with name=assessment.option.label %}More information about {{ name }}{% endblocktrans %}">
+                    </span>
+                </span>
+            {% endif %}
+        </span>
+    </div>
 {% endif %}
 {% endspaceless %}
@@ -62,21 +62,16 @@
     </div>
     <div class="ui-slidable__content" aria-labelledby="oa_grade_{{ xblock_id }}_criteria_{{ criterion_num }}" id="oa_grade_{{ xblock_id }}_criteria_{{ criterion_num }}_content">
-        {% if criterion.options %}
-            <ul class="question__answers has--hints">
-                {% for assessment in criterion.assessments %}
-                    {% include "openassessmentblock/grade/oa_assessment_title.html" with assessment=assessment %}
-                {% endfor %}
-            </ul>
-        {% endif %}
-        {% if criterion.has_feedback %}
-            <ul class="question__answers has--hints">
-                {% for assessment in criterion.assessments %}
-                    {% include "openassessmentblock/grade/oa_assessment_feedback.html" with title=assessment.feedback_title assessment=assessment %}
-                {% endfor %}
-            </ul>
-        {% endif %}
+        <ul class="question__answers has--hints">
+            {% for assessment in criterion.assessments %}
+                <li class="answer">
+                    {% if criterion.options %}
+                        {% include "openassessmentblock/grade/oa_assessment_title.html" with assessment=assessment %}
+                    {% endif %}
+                    {% include "openassessmentblock/grade/oa_assessment_feedback.html" with title=assessment.feedback_title assessment=assessment %}
+                </li>
+            {% endfor %}
+        </ul>
     </div>
 </li>
 {% endwith %}
@@ -92,7 +87,9 @@
 <ul class="question__answers ui-slidable__content" id="oa_grade_{{ xblock_id }}_feedback_content" aria-labelledby="oa_grade_{{ xblock_id }}_feedback">
     {% for feedback in grade_details.additional_feedback %}
-        {% include "openassessmentblock/grade/oa_assessment_feedback.html" with title=feedback.title assessment=feedback %}
+        <li class="answer feedback">
+            {% include "openassessmentblock/grade/oa_assessment_feedback.html" with title=feedback.title assessment=feedback %}
+        </li>
     {% endfor %}
 </ul>
 </li>
@@ -526,32 +526,29 @@ class GradePage(OpenAssessmentPage):
             pass
         return score_candidates[0] if len(score_candidates) > 0 else None

-    def grade_entry(self, question, column):
+    def grade_entry(self, question):
         """
-        Returns a tuple of source and value information for a specific grade source.
+        Returns a tuple of the text of all answer spans for a given question

         Args:
             question: the 0-based question for which to get grade information.
-            column: the 0-based column of data within a question. Each column corresponds
-                to a source of data (for example, staff, peer, or self).

-        Returns: the tuple of source and value information for the requested grade
+        Returns: a tuple containing all text elements.
         """
         self.wait_for_element_visibility(
-            self._bounded_selector('.question--{} .answer .answer__source__value'.format(question + 1)),
-            "Grade entry was present",
+            self._bounded_selector('.question--{} .answer'.format(question + 1)),
+            "Answers not present",
             2
         )
-        source = self.q(
-            css=self._bounded_selector('.question--{} .answer .answer__source__value'.format(question + 1))
-        )[column]
-        value = self.q(
-            css=self._bounded_selector('.question--{} .answer .answer__value__value'.format(question + 1))
-        )[column]
-        return source.text.strip(), value.text.strip()
+        selector_str = ".question--{} .answer div span".format(question + 1)
+        span_text = self.q(
+            css=self._bounded_selector(selector_str)
+        )
+        result = tuple(span_entry.text.strip() for span_entry in span_text if span_entry.text != '')
+        return result

     def feedback_entry(self, question, column):
         """
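The new grade_entry collapses every non-empty span under a question's answer rows into one flat tuple, instead of returning a single (source, value) pair per column. A self-contained sketch of that flattening, with invented span texts standing in for what the page query over ".question--N .answer div span" would scrape:

# Illustration only: the flattening performed by the new grade_entry().
# The span texts below are made up; in the page object they come from self.q(css=...).
span_texts = [u"STAFF GRADE - 1 POINT", u"", u"Fair",
              u"PEER MEDIAN GRADE", u"Poor",
              u"YOUR SELF ASSESSMENT", u"Excellent"]

# Empty spans are dropped, the rest are stripped and flattened into one tuple:
entry = tuple(text.strip() for text in span_texts if text != '')
# entry == ('STAFF GRADE - 1 POINT', 'Fair', 'PEER MEDIAN GRADE', 'Poor',
#           'YOUR SELF ASSESSMENT', 'Excellent')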
@@ -849,20 +849,21 @@ class FullWorkflowMixin(object):
         self.refresh_page()
         self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS)
         self.assertEqual(self.STAFF_OVERRIDE_SCORE, self.grade_page.wait_for_page().score)

         if peer_grades_me:
-            self.verify_grade_entries([
-                [(u"STAFF GRADE - 0 POINTS", u"Poor"), (u"STAFF GRADE - 1 POINT", u"Fair")],
-                [(u"PEER MEDIAN GRADE", u"Poor"), (u"PEER MEDIAN GRADE", u"Poor")],
-                [(u"YOUR SELF ASSESSMENT", u"Good"), (u"YOUR SELF ASSESSMENT", u"Excellent")],
-            ])
+            self.verify_grade_entries(
+                [(u"STAFF GRADE - 0 POINTS", u"Poor", u"PEER MEDIAN GRADE", u"Poor", u"PEER 1", u"- POOR",
+                  u"YOUR SELF ASSESSMENT", u"Good"),
+                 (u"STAFF GRADE - 1 POINT", u"Fair", u"PEER MEDIAN GRADE", u"Poor", u"PEER 1", u"- POOR",
+                  u"YOUR SELF ASSESSMENT", u"Excellent")]
+            )
         else:
-            self.verify_grade_entries([
-                [(u"STAFF GRADE - 0 POINTS", u"Poor"), (u"STAFF GRADE - 1 POINT", u"Fair")],
-                [(u'PEER MEDIAN GRADE', u'Waiting for peer reviews'),
-                 (u'PEER MEDIAN GRADE', u'Waiting for peer reviews')],
-                [(u"YOUR SELF ASSESSMENT", u"Good"), (u"YOUR SELF ASSESSMENT", u"Excellent")],
-            ])
+            self.verify_grade_entries(
+                [(u"STAFF GRADE - 0 POINTS", u"Poor", u'PEER MEDIAN GRADE',
+                  u'Waiting for peer reviews', u"YOUR SELF ASSESSMENT", u"Good"),
+                 (u"STAFF GRADE - 1 POINT", u"Fair", u'PEER MEDIAN GRADE',
+                  u'Waiting for peer reviews', u"YOUR SELF ASSESSMENT", u"Excellent")
+                ]
+            )

     def verify_staff_area_fields(self, username, peer_assessments, submitted_assessments, self_assessment):
         """
@@ -911,17 +912,14 @@ class FullWorkflowMixin(object):
     def verify_grade_entries(self, expected_entries):
         """
-        Verify the grade entries (sources and values) as shown in the
-        "Your Grade" section.
+        Verify the grade entries as shown in the "Your Grade" section.

         Args:
-            expected_entries: array of expected entries, with each entry being an array
-                consisting of the data for a particular source. Note that order is important.
+            expected_entries: array of expected entries, with each entry being a tuple
+                consisting of the data for a particular question. Note that order is important.
         """
         for index, expected_entry in enumerate(expected_entries):
-            self.assertEqual(expected_entry[0], self.grade_page.grade_entry(0, index))
-            self.assertEqual(expected_entry[1], self.grade_page.grade_entry(1, index))
+            self.assertEqual(expected_entry, self.grade_page.grade_entry(index))


 class MultipleOpenAssessmentMixin(FullWorkflowMixin):
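The shape of expected_entries changes to match. A sketch, with illustrative values loosely based on the tests below, of the old nested-list layout versus the new one-tuple-per-question layout that is now compared directly against grade_page.grade_entry(index):

# Old layout: one inner list per grade source (staff, peer, self), holding a
# (source, value) pair for each question.
old_expected = [
    [(u"STAFF GRADE - 0 POINTS", u"Poor"), (u"STAFF GRADE - 1 POINT", u"Fair")],
    [(u"YOUR SELF ASSESSMENT", u"Good"), (u"YOUR SELF ASSESSMENT", u"Excellent")],
]

# New layout: one flat tuple per question, matching what grade_entry(question) returns.
new_expected = [
    (u"STAFF GRADE - 0 POINTS", u"Poor", u"YOUR SELF ASSESSMENT", u"Good"),
    (u"STAFF GRADE - 1 POINT", u"Fair", u"YOUR SELF ASSESSMENT", u"Excellent"),
]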
@@ -992,10 +990,10 @@ class FullWorkflowOverrideTest(OpenAssessmentTest, FullWorkflowMixin):
             self.staff_area_page.learner_final_score_table_values
         )
-        self.verify_grade_entries([
-            [(u"PEER MEDIAN GRADE - 0 POINTS", u"Poor"), (u"PEER MEDIAN GRADE - 0 POINTS", u"Poor")],
-            [(u"YOUR SELF ASSESSMENT", u"Good"), (u"YOUR SELF ASSESSMENT", u"Excellent")]
-        ])
+        self.verify_grade_entries(
+            [(u"PEER MEDIAN GRADE - 0 POINTS", u"Poor", u"PEER 1", u"- POOR", u"YOUR SELF ASSESSMENT", u"Good"),
+             (u"PEER MEDIAN GRADE - 0 POINTS", u"Poor", u"PEER 1", u"- POOR", u"YOUR SELF ASSESSMENT", u"Excellent")]
+        )

         # Now do a staff override, changing the score (to 1).
         self.do_staff_override(learner)
@@ -1016,12 +1014,13 @@ class FullWorkflowOverrideTest(OpenAssessmentTest, FullWorkflowMixin):
              'Fair - 1 point', 'Peer 1 - Poor', 'Excellent'],
             self.staff_area_page.learner_final_score_table_values
         )
-        self.verify_grade_entries([
-            [(u"STAFF GRADE - 0 POINTS", u"Poor"), (u"STAFF GRADE - 1 POINT", u"Fair")],
-            [(u"PEER MEDIAN GRADE", u"Poor"), (u"PEER MEDIAN GRADE", u"Poor")],
-            [(u"YOUR SELF ASSESSMENT", u"Good"), (u"YOUR SELF ASSESSMENT", u"Excellent")]
-        ])
+        self.verify_grade_entries(
+            [(u"STAFF GRADE - 0 POINTS", u"Poor", u"PEER MEDIAN GRADE", u"Poor",
+              u"PEER 1", u"- POOR", u"YOUR SELF ASSESSMENT", u"Good"),
+             (u"STAFF GRADE - 1 POINT", u"Fair", u"PEER MEDIAN GRADE",
+              u"Poor", u"PEER 1", u"- POOR", u"YOUR SELF ASSESSMENT", u"Excellent")
+            ]
+        )

     @retry()
     @attr('acceptance')
@@ -1064,11 +1063,11 @@ class FullWorkflowOverrideTest(OpenAssessmentTest, FullWorkflowMixin):
              'Fair - 1 point', 'Waiting for peer reviews'],
             self.staff_area_page.learner_final_score_table_values
         )
-        self.verify_grade_entries([
-            [(u"STAFF GRADE - 0 POINTS", u"Poor"), (u"STAFF GRADE - 1 POINT", u"Fair")],
-            [(u'PEER MEDIAN GRADE', u'Waiting for peer reviews'), (u'PEER MEDIAN GRADE', u'Waiting for peer reviews')],
-        ])
+        self.verify_grade_entries(
+            [(u"STAFF GRADE - 0 POINTS", u"Poor", u'PEER MEDIAN GRADE', u'Waiting for peer reviews'),
+             (u"STAFF GRADE - 1 POINT", u"Fair", u'PEER MEDIAN GRADE', u'Waiting for peer reviews')
+            ]
+        )

 @ddt.ddt
@@ -1101,6 +1100,7 @@ class FullWorkflowRequiredTest(OpenAssessmentTest, FullWorkflowMixin):
         # Do staff assessment step
         self.staff_assessment(peer_grades_me)

+
 @ddt.ddt
 class FeedbackOnlyTest(OpenAssessmentTest, FullWorkflowMixin):
     """
@@ -1145,8 +1145,9 @@ class FeedbackOnlyTest(OpenAssessmentTest, FullWorkflowMixin):
         # Verify student-viewable grade report
         self.refresh_page()
         self.grade_page.wait_for_page()
-        self.assertEqual(self.grade_page.grade_entry(0, 0), (u'STAFF GRADE - 1 POINT', u'Yes'))  # Reported answer 1
-        self.assertEqual(self.grade_page.grade_entry(0, 1), (u'YOUR SELF ASSESSMENT', u'Yes'))  # Reported answer 2
+        self.verify_grade_entries(
+            [(u'STAFF GRADE - 1 POINT', u'Yes', u'YOUR SELF ASSESSMENT', u'Yes')]
+        )
         for i, assessment_type in enumerate(["staff", "self"]):
             # Criterion feedback first
             expected = self.generate_feedback(assessment_type, "criterion")