Commit fd00c5f3 by Albert (AJ) St. Aubin Committed by GitHub

Merge pull request #953 from edx/aj/TNL-6016_missing_peer_scores

Show feedback when no comment is provided.
parents a64fd58c 998edca0
{% load i18n %}
{% spaceless %}
{% if assessment %}
<li class="answer feedback">
{% if assessment.individual_assessments %}
{% for individual_assessment in assessment.individual_assessments %}
{% if individual_assessment.feedback %}
<div class="answer_title">
<span class="answer__source">
{% if individual_assessment.option %}
{% blocktrans with title=individual_assessment.title grade=individual_assessment.option.label start_tag='<span class="answer__source__value">'|safe end_tag="</span>"|safe %}
{{ start_tag }}{{ title }}{{ end_tag }} - {{ grade }}
{% endblocktrans %}
{% else %}
<span class="answer__source__value">
{{ individual_assessment.title }}
</span>
{% endif %}
</span>
</div>
<div class="feedback__value">
<p class="feedback__value__raw">{{ individual_assessment.feedback }}</p>
</div>
{% endif %}
{% endfor %}
{% if assessment.individual_assessments %}
{% for individual_assessment in assessment.individual_assessments %}
<div class="answer__source">
{% if individual_assessment.option %}
{% blocktrans with title=individual_assessment.title grade=individual_assessment.option.label start_tag='<span class="answer__source__individual">'|safe end_tag="</span>"|safe start_grade_tag='<span class="answer__grade__individual">'|safe%}
{{ start_tag }}{{ title }}{{ end_tag }}{{ start_grade_tag }} - {{ grade }}{{end_tag}}
{% endblocktrans %}
{% else %}
<div class="answer_title">
<span class="answer__source">
<span class="answer__source__value">{{ title }}</span>
</span>
</div>
<div class="feedback__value">
<p class="feedback__value__raw">{{ assessment.feedback }}</p>
</div>
<span class="answer__source__value">
{{ individual_assessment.title }}
</span>
{% endif %}
</li>
</div>
{% if individual_assessment.feedback %}
<div class="feedback__value">
<p class="feedback__value__raw">{{ individual_assessment.feedback }}</p>
</div>
{% endif %}
{% endfor %}
{% elif assessment.feedback %}
<div class="answer__source">
<span class="answer__source__value">{{ title }}</span>
</div>
<div class="feedback__value">
<p class="feedback__value__raw">{{ assessment.feedback }}</p>
</div>
{% endif %}
{% endif %}
{% endspaceless %}
{% load i18n %}
{% spaceless %}
{% if assessment %}
<li class="answer">
<h5 class="answer__title">
<span class="answer__source">
{% if assessment.points != None %}
<span class="answer__source__value answer__source__value-with-points">
{% blocktrans with assessment_title=assessment.title count points=assessment.points %}
{{ assessment_title }} - {{ points }} point
{% plural %}
{{ assessment_title }} - {{ points }} points
{% endblocktrans %}
</span>
{% else %}
<span class="answer__source__value">{{ assessment.title }}</span>
{% endif %}
</span>
<span class="answer__value">
<span class="answer__value__label sr">{{ assessment.title }}</span>
<span class="answer__value__value">
{{ assessment.option.label }}
<div class="answer__source">
{% if assessment.points != None %}
<span class="answer__source__value answer__source__value-with-points">
{% blocktrans with assessment_title=assessment.title count points=assessment.points %}
{{ assessment_title }} - {{ points }} point
{% plural %}
{{ assessment_title }} - {{ points }} points
{% endblocktrans %}
</span>
{% else %}
<span class="answer__source__value">{{ assessment.title }}</span>
{% endif %}
</div>
<div class="answer__value">
<span class="answer__value__value">
{{ assessment.option.label }}
{% if assessment.option.explanation %}
<span class="ui-hint hint--top" data-hint="{{ assessment.option.explanation }}">
<span class="icon fa fa-info-circle" aria-hidden="true"
title="{% blocktrans with name=assessment.option.label %}More information about {{ name }}{% endblocktrans %}">
</span>
</span>
{% endif %}
{% if assessment.option.explanation %}
<span class="ui-hint hint--top" data-hint="{{ assessment.option.explanation }}">
<span class="icon fa fa-info-circle" aria-hidden="true"
title="{% blocktrans with name=assessment.option.label %}More information about {{ name }}{% endblocktrans %}">
</span>
</span>
{% endif %}
</span>
</span>
</h5>
</li>
</span>
</div>
{% endif %}
{% endspaceless %}
......@@ -62,21 +62,16 @@
</div>
<div class="ui-slidable__content" aria-labelledby="oa_grade_{{ xblock_id }}_criteria_{{ criterion_num }}" id="oa_grade_{{ xblock_id }}_criteria_{{ criterion_num }}_content">
{% if criterion.options %}
<ul class="question__answers has--hints">
{% for assessment in criterion.assessments %}
{% include "openassessmentblock/grade/oa_assessment_title.html" with assessment=assessment %}
{% endfor %}
</ul>
{% endif %}
{% if criterion.has_feedback %}
<ul class="question__answers has--hints">
{% for assessment in criterion.assessments %}
<ul class="question__answers has--hints">
{% for assessment in criterion.assessments %}
<li class="answer">
{% if criterion.options %}
{% include "openassessmentblock/grade/oa_assessment_title.html" with assessment=assessment %}
{% endif %}
{% include "openassessmentblock/grade/oa_assessment_feedback.html" with title=assessment.feedback_title assessment=assessment %}
{% endfor %}
</ul>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
</li>
{% endwith %}
......@@ -92,7 +87,9 @@
<ul class="question__answers ui-slidable__content" id="oa_grade_{{ xblock_id }}_feedback_content" aria-labelledby="oa_grade_{{ xblock_id }}_feedback">
{% for feedback in grade_details.additional_feedback %}
{% include "openassessmentblock/grade/oa_assessment_feedback.html" with title=feedback.title assessment=feedback %}
<li class="answer feedback">
{% include "openassessmentblock/grade/oa_assessment_feedback.html" with title=feedback.title assessment=feedback %}
</li>
{% endfor %}
</ul>
</li>
......
......@@ -526,32 +526,29 @@ class GradePage(OpenAssessmentPage):
pass
return score_candidates[0] if len(score_candidates) > 0 else None
def grade_entry(self, question, column):
def grade_entry(self, question):
"""
Returns a tuple of source and value information for a specific grade source.
Returns a tuple of the text of all answer spans for a given question
Args:
question: the 0-based question for which to get grade information.
column: the 0-based column of data within a question. Each column corresponds
to a source of data (for example, staff, peer, or self).
Returns: the tuple of source and value information for the requested grade
Returns: a tuple containing all text elements.
"""
self.wait_for_element_visibility(
self._bounded_selector('.question--{} .answer .answer__source__value'.format(question + 1)),
"Grade entry was present",
self._bounded_selector('.question--{} .answer'.format(question + 1)),
"Answers not present",
2
)
source = self.q(
css=self._bounded_selector('.question--{} .answer .answer__source__value'.format(question + 1))
)[column]
value = self.q(
css=self._bounded_selector('.question--{} .answer .answer__value__value'.format(question + 1))
)[column]
selector_str = ".question--{} .answer div span".format(question + 1)
span_text = self.q(
css=self._bounded_selector(selector_str)
)
return source.text.strip(), value.text.strip()
result = tuple(span_entry.text.strip() for span_entry in span_text if span_entry.text != '')
return result
def feedback_entry(self, question, column):
"""
......
......@@ -849,20 +849,21 @@ class FullWorkflowMixin(object):
self.refresh_page()
self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS)
self.assertEqual(self.STAFF_OVERRIDE_SCORE, self.grade_page.wait_for_page().score)
if peer_grades_me:
self.verify_grade_entries([
[(u"STAFF GRADE - 0 POINTS", u"Poor"), (u"STAFF GRADE - 1 POINT", u"Fair")],
[(u"PEER MEDIAN GRADE", u"Poor"), (u"PEER MEDIAN GRADE", u"Poor")],
[(u"YOUR SELF ASSESSMENT", u"Good"), (u"YOUR SELF ASSESSMENT", u"Excellent")],
])
self.verify_grade_entries(
[(u"STAFF GRADE - 0 POINTS", u"Poor", u"PEER MEDIAN GRADE", u"Poor", u"PEER 1", u"- POOR",
u"YOUR SELF ASSESSMENT", u"Good"),
(u"STAFF GRADE - 1 POINT", u"Fair", u"PEER MEDIAN GRADE", u"Poor", u"PEER 1", u"- POOR",
u"YOUR SELF ASSESSMENT", u"Excellent")]
)
else:
self.verify_grade_entries([
[(u"STAFF GRADE - 0 POINTS", u"Poor"), (u"STAFF GRADE - 1 POINT", u"Fair")],
[(u'PEER MEDIAN GRADE', u'Waiting for peer reviews'),
(u'PEER MEDIAN GRADE', u'Waiting for peer reviews')],
[(u"YOUR SELF ASSESSMENT", u"Good"), (u"YOUR SELF ASSESSMENT", u"Excellent")],
])
self.verify_grade_entries(
[(u"STAFF GRADE - 0 POINTS", u"Poor", u'PEER MEDIAN GRADE',
u'Waiting for peer reviews', u"YOUR SELF ASSESSMENT", u"Good"),
(u"STAFF GRADE - 1 POINT", u"Fair", u'PEER MEDIAN GRADE',
u'Waiting for peer reviews', u"YOUR SELF ASSESSMENT", u"Excellent")
]
)
def verify_staff_area_fields(self, username, peer_assessments, submitted_assessments, self_assessment):
"""
......@@ -911,17 +912,14 @@ class FullWorkflowMixin(object):
def verify_grade_entries(self, expected_entries):
"""
Verify the grade entries (sources and values) as shown in the
"Your Grade" section.
Verify the grade entries as shown in the "Your Grade" section.
Args:
expected_entries: array of expected entries, with each entry being an array
consisting of the data for a particular source. Note that order is important.
expected_entries: array of expected entries, with each entry being a tuple
consisting of the data for a particular question. Note that order is important.
"""
for index, expected_entry in enumerate(expected_entries):
self.assertEqual(expected_entry[0], self.grade_page.grade_entry(0, index))
self.assertEqual(expected_entry[1], self.grade_page.grade_entry(1, index))
self.assertEqual(expected_entry, self.grade_page.grade_entry(index))
class MultipleOpenAssessmentMixin(FullWorkflowMixin):
......@@ -992,10 +990,10 @@ class FullWorkflowOverrideTest(OpenAssessmentTest, FullWorkflowMixin):
self.staff_area_page.learner_final_score_table_values
)
self.verify_grade_entries([
[(u"PEER MEDIAN GRADE - 0 POINTS", u"Poor"), (u"PEER MEDIAN GRADE - 0 POINTS", u"Poor")],
[(u"YOUR SELF ASSESSMENT", u"Good"), (u"YOUR SELF ASSESSMENT", u"Excellent")]
])
self.verify_grade_entries(
[(u"PEER MEDIAN GRADE - 0 POINTS", u"Poor", u"PEER 1", u"- POOR", u"YOUR SELF ASSESSMENT", u"Good"),
(u"PEER MEDIAN GRADE - 0 POINTS", u"Poor", u"PEER 1", u"- POOR", u"YOUR SELF ASSESSMENT", u"Excellent")]
)
# Now do a staff override, changing the score (to 1).
self.do_staff_override(learner)
......@@ -1016,12 +1014,13 @@ class FullWorkflowOverrideTest(OpenAssessmentTest, FullWorkflowMixin):
'Fair - 1 point', 'Peer 1 - Poor', 'Excellent'],
self.staff_area_page.learner_final_score_table_values
)
self.verify_grade_entries([
[(u"STAFF GRADE - 0 POINTS", u"Poor"), (u"STAFF GRADE - 1 POINT", u"Fair")],
[(u"PEER MEDIAN GRADE", u"Poor"), (u"PEER MEDIAN GRADE", u"Poor")],
[(u"YOUR SELF ASSESSMENT", u"Good"), (u"YOUR SELF ASSESSMENT", u"Excellent")]
])
self.verify_grade_entries(
[(u"STAFF GRADE - 0 POINTS", u"Poor", u"PEER MEDIAN GRADE", u"Poor",
u"PEER 1", u"- POOR", u"YOUR SELF ASSESSMENT", u"Good"),
(u"STAFF GRADE - 1 POINT", u"Fair", u"PEER MEDIAN GRADE",
u"Poor", u"PEER 1", u"- POOR", u"YOUR SELF ASSESSMENT", u"Excellent")
]
)
@retry()
@attr('acceptance')
......@@ -1064,11 +1063,11 @@ class FullWorkflowOverrideTest(OpenAssessmentTest, FullWorkflowMixin):
'Fair - 1 point', 'Waiting for peer reviews'],
self.staff_area_page.learner_final_score_table_values
)
self.verify_grade_entries([
[(u"STAFF GRADE - 0 POINTS", u"Poor"), (u"STAFF GRADE - 1 POINT", u"Fair")],
[(u'PEER MEDIAN GRADE', u'Waiting for peer reviews'), (u'PEER MEDIAN GRADE', u'Waiting for peer reviews')],
])
self.verify_grade_entries(
[(u"STAFF GRADE - 0 POINTS", u"Poor", u'PEER MEDIAN GRADE', u'Waiting for peer reviews'),
(u"STAFF GRADE - 1 POINT", u"Fair", u'PEER MEDIAN GRADE', u'Waiting for peer reviews')
]
)
@ddt.ddt
......@@ -1101,6 +1100,7 @@ class FullWorkflowRequiredTest(OpenAssessmentTest, FullWorkflowMixin):
# Do staff assessment step
self.staff_assessment(peer_grades_me)
@ddt.ddt
class FeedbackOnlyTest(OpenAssessmentTest, FullWorkflowMixin):
"""
......@@ -1145,8 +1145,9 @@ class FeedbackOnlyTest(OpenAssessmentTest, FullWorkflowMixin):
# Verify student-viewable grade report
self.refresh_page()
self.grade_page.wait_for_page()
self.assertEqual(self.grade_page.grade_entry(0, 0), (u'STAFF GRADE - 1 POINT', u'Yes')) # Reported answer 1
self.assertEqual(self.grade_page.grade_entry(0, 1), (u'YOUR SELF ASSESSMENT', u'Yes')) # Reported answer 2
self.verify_grade_entries(
[(u'STAFF GRADE - 1 POINT', u'Yes', u'YOUR SELF ASSESSMENT', u'Yes')]
)
for i, assessment_type in enumerate(["staff", "self"]):
# Criterion feedback first
expected = self.generate_feedback(assessment_type, "criterion")
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment