Commit d59be2f1 by cahrens

Add summary of grades in final grade section.

TNL-3930, TNL-3761
parent 0e6d1e63
@@ -83,21 +83,61 @@
</h2>
<div class="ui-toggle-visibility__content">
{% if workflow_status == "done" %}
<p>
<p class="staff-info__final__grade__score">
{% with points_earned_string=score.points_earned|stringformat:"s" points_possible_string=score.points_possible|stringformat:"s" %}
{% blocktrans with points_earned='<span class="grade__value__earned">'|safe|add:points_earned_string|add:'</span>'|safe points_possible='<span class="grade__value__potential">'|safe|add:points_possible_string|add:'</span>'|safe %}
Final grade: {{ points_earned }} out of {{ points_possible }}
{% endblocktrans %}
{% endwith %}
</p>
<table class="staff-info__status__table staff-info__final__grade__table" summary="{% trans "Final Grade Details" %}">
<thead>
<tr>
<th abbr="{% trans 'Criterion' %}" scope="col">{% trans "Criterion" %}</th>
{% with criterion=grade_details.criteria.0 %}
{% for assessment in criterion.assessments %}
<th abbr="{{ assessment.title }}" scope="col">{{ assessment.title }}</th>
{% endfor %}
{% endwith %}
</tr>
</thead>
<tbody>
{% for criterion in grade_details.criteria %}
<tr>
<td class="label">{{ criterion.label }}</td>
{% for assessment in criterion.assessments %}
<td class="value">
{% if assessment.points != None %}
<div>
{% blocktrans with assessment_label=assessment.option.label count points=assessment.points %}
{{ assessment_label }} - {{ points }} point
{% plural %}
{{ assessment_label }} - {{ points }} points
{% endblocktrans %}
</div>
{% endif %}
{% if assessment.individual_assessments %}
{% for individual_assessment in assessment.individual_assessments %}
<div>{{ individual_assessment.title }} - {{ individual_assessment.option.label }}</div>
{% endfor %}
{% elif assessment.points == None %}
<div>{{ assessment.option.label }}</div>
{% endif %}
</td>
{% endfor %}
</tr>
{% endfor %}
</tbody>
</table>
{% elif workflow_status == "waiting" %}
<p>{% trans "The submission is waiting for assessments." %}</p>
<p class="staff-info__final__grade__score">{% trans "The submission is waiting for assessments." %}</p>
{% elif workflow_status == "cancelled" %}
<p>{% trans "The learner's submission has been removed from peer assessment. The learner receives a grade of zero unless you delete the learner's state for the problem to allow them to resubmit a response." %}</p>
<p class="staff-info__final__grade__score">{% trans "The learner's submission has been removed from peer assessment. The learner receives a grade of zero unless you delete the learner's state for the problem to allow them to resubmit a response." %}</p>
{% elif workflow_status == None %}
<p>{% trans "The problem has not been started." %}</p>
<p class="staff-info__final__grade__score">{% trans "The problem has not been started." %}</p>
{% else %}
<p>{% trans "The problem has not been completed." %}</p>
<p class="staff-info__final__grade__score">{% trans "The problem has not been completed." %}</p>
{% endif %}
</div>
</div>
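For context, a minimal sketch of the grade_details structure this template appears to consume, inferred from the bindings above (values are illustrative, not taken from the serializer):

    # Illustrative only: shape inferred from the template bindings above.
    grade_details = {
        "criteria": [
            {
                "label": "Ideas",                        # "Criterion" column
                "assessments": [
                    {                                    # one entry per grade column
                        "title": "Staff Grade",          # becomes a column header
                        "points": 0,                     # None hides the "<label> - N points" line
                        "option": {"label": "Poor"},
                        "individual_assessments": None,  # per-peer rows, when present
                    },
                    {
                        "title": "Peer Median Grade",
                        "points": None,
                        "option": {"label": "Waiting for peer reviews"},
                        "individual_assessments": None,
                    },
                ],
            },
        ],
    }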
......
@@ -212,7 +212,8 @@ class GradeMixin(object):
return {'success': True, 'msg': self._(u"Feedback saved.")}
def grade_details(
self, submission_uuid, peer_assessments, self_assessment, example_based_assessment, staff_assessment
self, submission_uuid, peer_assessments, self_assessment, example_based_assessment, staff_assessment,
is_staff=False
):
"""
Returns details about the grade assigned to the submission.
@@ -223,6 +224,8 @@ class GradeMixin(object):
self_assessment (dict): Serialized assessment model from the self API
example_based_assessment (dict): Serialized assessment model from the example-based API
staff_assessment (dict): Serialized assessment model from the staff API
is_staff (bool): True if the grade details are being displayed to staff, else False.
Default value is False (meaning grade details are being shown to the learner).
Returns:
A dictionary with full details about the submission's grade.
@@ -280,10 +283,12 @@ class GradeMixin(object):
# Record assessment info for the current criterion
criterion['assessments'] = self._graded_assessments(
submission_uuid, criterion,
staff_assessment=staff_assessment,
peer_assessments=peer_assessments,
example_based_assessment=example_based_assessment,
self_assessment=self_assessment,
assessment_steps,
staff_assessment,
peer_assessments,
example_based_assessment,
self_assessment,
is_staff=is_staff,
)
# Record whether there is any feedback provided in the assessments
@@ -309,8 +314,8 @@ class GradeMixin(object):
}
def _graded_assessments(
self, submission_uuid, criterion, staff_assessment, peer_assessments,
example_based_assessment, self_assessment
self, submission_uuid, criterion, assessment_steps, staff_assessment, peer_assessments,
example_based_assessment, self_assessment, is_staff=False
):
"""
Returns an array of assessments with their associated grades.
@@ -343,12 +348,21 @@ class GradeMixin(object):
for index, peer_assessment in enumerate(peer_assessments)
],
}
elif "peer-assessment" in assessment_steps:
peer_assessment_part = {
'title': _('Peer Median Grade'),
'option': {'label': _('Waiting for peer reviews')}
}
else:
peer_assessment_part = None
example_based_assessment_part = _get_assessment_part(
_('Example-Based Grade'), criterion_name, example_based_assessment
)
self_assessment_part = _get_assessment_part(_('Your Self Assessment'), criterion_name, self_assessment)
self_assessment_part = _get_assessment_part(
_('Self Assessment Grade') if is_staff else _('Your Self Assessment'),
criterion_name,
self_assessment
)
# Now collect together all the assessments
assessments = []
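_get_assessment_part is referenced above but not shown in this diff. A rough sketch of what such a helper might do, based only on how it is called here (the three-argument signature matches the call sites; the field names are assumptions, not the actual implementation):

    # Assumed behaviour only; the real helper lives elsewhere in grade.py and
    # may also attach the points/feedback values used by the template.
    def _get_assessment_part(title, criterion_name, assessment):
        """Return one grade-column entry for a criterion, or None if the assessment is absent."""
        if not assessment:
            return None
        for part in assessment.get('parts', []):
            if part['criterion']['name'] == criterion_name:
                return {'title': title, 'option': part['option']}
        return None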
......
@@ -375,22 +375,56 @@ class StaffAreaMixin(object):
assessment_steps = self.assessment_steps
example_based_assessment = None
example_based_assessment_grade_context = None
self_assessment = None
self_assessment_grade_context = None
peer_assessments = None
submitted_assessments = None
peer_assessments_grade_context = []
staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid)
staff_assessment_grade_context = None
submitted_assessments = None
grade_details = None
workflow = self.get_workflow_info(submission_uuid=submission_uuid)
grade_exists = workflow.get('status') == "done"
if "peer-assessment" in assessment_steps:
peer_assessments = peer_api.get_assessments(submission_uuid)
submitted_assessments = peer_api.get_submitted_assessments(submission_uuid)
if grade_exists:
peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
peer_assessments_grade_context = [
self._assessment_grade_context(peer_assessment)
for peer_assessment in peer_assessments
]
if "self-assessment" in assessment_steps:
self_assessment = self_api.get_assessment(submission_uuid)
if grade_exists:
self_assessment_grade_context = self._assessment_grade_context(self_assessment)
if "example-based-assessment" in assessment_steps:
example_based_assessment = ai_api.get_latest_assessment(submission_uuid)
workflow = self.get_workflow_info(submission_uuid=submission_uuid)
if grade_exists:
example_based_assessment_grade_context = self._assessment_grade_context(example_based_assessment)
if grade_exists:
if staff_assessment:
staff_assessment_grade_context = self._assessment_grade_context(staff_assessment)
grade_details = self.grade_details(
submission_uuid,
peer_assessments_grade_context,
self_assessment_grade_context,
example_based_assessment_grade_context,
staff_assessment_grade_context,
is_staff=True,
)
workflow_cancellation = self.get_workflow_cancellation_info(submission_uuid)
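The net effect of this hunk, condensed into an illustrative sketch (inside the staff-area handler, hence self): the per-step grade contexts, and grade_details itself, are only built once the workflow status is "done", so grade_details stays None for in-progress submissions and the template above falls back to its waiting/cancelled messages.

    # Condensed, illustrative view of the control flow added above.
    grade_details = None
    if workflow.get('status') == "done":
        grade_details = self.grade_details(
            submission_uuid,
            peer_assessments_grade_context,
            self_assessment_grade_context,
            example_based_assessment_grade_context,
            staff_assessment_grade_context,
            is_staff=True,  # staff see "Self Assessment Grade" rather than "Your Self Assessment"
        )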
@@ -399,8 +433,9 @@ class StaffAreaMixin(object):
'example_based_assessment': [example_based_assessment] if example_based_assessment else None,
'self_assessment': [self_assessment] if self_assessment else None,
'peer_assessments': peer_assessments,
'submitted_assessments': submitted_assessments,
'staff_assessment': [staff_assessment] if staff_assessment else None,
'submitted_assessments': submitted_assessments,
'grade_details': grade_details,
'score': workflow.get('score'),
'workflow_status': workflow.get('status'),
'workflow_cancellation': workflow_cancellation,
......
@@ -208,6 +208,9 @@ class TestCourseStaff(XBlockHandlerTestCase):
self.assertIsNone(context['staff_assessment'])
self.assertEquals("openassessmentblock/staff_area/oa_student_info.html", path)
# Bob still needs to assess other learners
self.assertIsNone(context['grade_details'])
@scenario('data/self_only_scenario.xml', user_id='Bob')
def test_staff_area_student_info_self_only(self, xblock):
# Simulate that we are course staff
@@ -239,6 +242,10 @@ class TestCourseStaff(XBlockHandlerTestCase):
self.assertIsNone(context['staff_assessment'])
self.assertEquals("openassessmentblock/staff_area/oa_student_info.html", path)
grade_details = context['grade_details']
self.assertEquals(1, len(grade_details['criteria'][0]['assessments']))
self.assertEquals('Self Assessment Grade', grade_details['criteria'][0]['assessments'][0]['title'])
@scenario('data/staff_grade_scenario.xml', user_id='Bob')
def test_staff_area_student_info_staff_only(self, xblock):
# Simulate that we are course staff
@@ -270,6 +277,10 @@ class TestCourseStaff(XBlockHandlerTestCase):
self.assertIsNotNone(context['staff_assessment'])
self.assertEquals("openassessmentblock/staff_area/oa_student_info.html", path)
grade_details = context['grade_details']
self.assertEquals(1, len(grade_details['criteria'][0]['assessments']))
self.assertEquals('Staff Grade', grade_details['criteria'][0]['assessments'][0]['title'])
@scenario('data/basic_scenario.xml', user_id='Bob')
def test_staff_area_student_info_with_cancelled_submission(self, xblock):
requirements = {
......
@@ -579,7 +579,7 @@ class StaffAreaPage(OpenAssessmentPage, AssessmentMixin):
"""
Returns the final score displayed in the learner report.
"""
score = self.q(css=self._bounded_selector(".staff-info__student__grade .ui-toggle-visibility__content"))
score = self.q(css=self._bounded_selector(".staff-info__final__grade__score"))
if len(score) == 0:
return None
return score.text[0]
@@ -594,6 +594,27 @@ class StaffAreaPage(OpenAssessmentPage, AssessmentMixin):
).fulfill()
@property
def learner_final_score_table_headers(self):
"""
Return the final score table headers (as an array of strings) as shown in the staff area section.
Returns: array of strings representing the headers (for example,
['CRITERION', 'STAFF GRADE', 'PEER MEDIAN GRADE', 'SELF ASSESSMENT GRADE'])
"""
return self._get_table_text(".staff-info__final__grade__table th")
@property
def learner_final_score_table_values(self):
"""
Return the final score table values (as an array of strings) as shown in the staff area section.
Returns: array of strings representing the text (for example,
['Poor - 0 points', 'Waiting for peer reviews', 'Good',
'Fair - 1 point', 'Waiting for peer reviews', 'Excellent'])
"""
return self._get_table_text(".staff-info__final__grade__table .value")
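A usage sketch for these two properties, mirroring the assertions added to the acceptance tests further down in this commit (the expected values come from those tests):

    # Inside an acceptance test that already has the learner report open.
    self.staff_area_page.expand_learner_report_sections()
    self.assertEquals(
        ['CRITERION', 'SELF ASSESSMENT GRADE'],
        self.staff_area_page.learner_final_score_table_headers,
    )
    self.assertEquals(
        ['Fair - 3 points', 'Good - 3 points'],
        self.staff_area_page.learner_final_score_table_values,
    )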
@property
def learner_response(self):
return self.q(
css=self._bounded_selector(".staff-info__student__response .ui-toggle-visibility__content")
@@ -637,12 +658,16 @@ class StaffAreaPage(OpenAssessmentPage, AssessmentMixin):
Returns: array of strings representing the text (for example, ['Good', u'5', u'5', u'Excellent', u'3', u'3'])
"""
return self._get_table_text(".staff-info__{} .staff-info__status__table .value".format(section))
def _get_table_text(self, selector):
"""
Helper method for getting text out of a table.
"""
table_elements = self.q(
css=self._bounded_selector(".staff-info__{} .staff-info__status__table .value".format(section))
css=self._bounded_selector(selector)
)
text = []
for value in table_elements:
text.append(value.text)
for element in table_elements:
text.append(element.text)
return text
@@ -578,13 +578,28 @@ class StaffAreaTest(OpenAssessmentTest):
# Check the learner's current score.
self.staff_area_page.expand_learner_report_sections()
self.staff_area_page.verify_learner_final_score(self.STAFF_AREA_SCORE.format(self.EXPECTED_SCORE))
self.assertEquals(
['CRITERION', 'SELF ASSESSMENT GRADE'],
self.staff_area_page.learner_final_score_table_headers
)
self.assertEquals(
['Fair - 3 points', 'Good - 3 points'], self.staff_area_page.learner_final_score_table_values
)
# Do staff override and wait for final score to change.
self.staff_area_page.assess("staff-override", self.STAFF_OVERRIDE_OPTIONS_SELECTED)
# Verify that the new student score is different from the original one.
# Unfortunately there is no indication presently that this was a staff override.
self.staff_area_page.verify_learner_final_score(self.STAFF_AREA_SCORE.format(self.STAFF_OVERRIDE_SCORE))
self.assertEquals(
['CRITERION', 'STAFF GRADE', 'SELF ASSESSMENT GRADE'],
self.staff_area_page.learner_final_score_table_headers
)
self.assertEquals(
['Poor - 0 points', 'Fair',
'Fair - 1 point', 'Good'],
self.staff_area_page.learner_final_score_table_values
)
@retry()
@attr('acceptance')
@@ -906,6 +921,15 @@ class FullWorkflowOverrideTest(OpenAssessmentTest, FullWorkflowMixin):
learner, self.STAFF_AREA_PEER_ASSESSMENT, self.STAFF_AREA_SUBMITTED, self.STAFF_AREA_SELF_ASSESSMENT
)
self.staff_area_page.verify_learner_final_score(self.PEER_ASSESSMENT_STAFF_AREA_SCORE)
self.assertEquals(
['CRITERION', 'PEER MEDIAN GRADE', 'SELF ASSESSMENT GRADE'],
self.staff_area_page.learner_final_score_table_headers
)
self.assertEquals(
['Poor - 0 points\nPeer 1 - Poor', 'Good',
'Poor - 0 points\nPeer 1 - Poor', 'Excellent'],
self.staff_area_page.learner_final_score_table_values
)
self.verify_grade_entries([
[(u"PEER MEDIAN GRADE - 0 POINTS", u"Poor"), (u"PEER MEDIAN GRADE - 0 POINTS", u"Poor")],
@@ -922,6 +946,15 @@ class FullWorkflowOverrideTest(OpenAssessmentTest, FullWorkflowMixin):
learner, self.STAFF_AREA_PEER_ASSESSMENT, self.STAFF_AREA_SUBMITTED, self.STAFF_AREA_SELF_ASSESSMENT
)
self.staff_area_page.verify_learner_final_score(self.STAFF_AREA_SCORE.format(self.STAFF_OVERRIDE_SCORE))
self.assertEquals(
['CRITERION', 'STAFF GRADE', 'PEER MEDIAN GRADE', 'SELF ASSESSMENT GRADE'],
self.staff_area_page.learner_final_score_table_headers
)
self.assertEquals(
['Poor - 0 points', 'Peer 1 - Poor', 'Good',
'Fair - 1 point', 'Peer 1 - Poor', 'Excellent'],
self.staff_area_page.learner_final_score_table_values
)
self.verify_grade_entries([
[(u"STAFF GRADE - 0 POINTS", u"Poor"), (u"STAFF GRADE - 1 POINT", u"Fair")],
@@ -985,9 +1018,19 @@ class FullWorkflowOverrideTest(OpenAssessmentTest, FullWorkflowMixin):
self.assertEqual(self.STAFF_OVERRIDE_SCORE, self.grade_page.wait_for_page().score)
self.verify_staff_area_fields(learner, [], self.STAFF_AREA_SUBMITTED, self.STAFF_AREA_SELF_ASSESSMENT)
self.staff_area_page.verify_learner_final_score(self.STAFF_AREA_SCORE.format(self.STAFF_OVERRIDE_SCORE))
self.assertEquals(
['CRITERION', 'STAFF GRADE', 'PEER MEDIAN GRADE', 'SELF ASSESSMENT GRADE'],
self.staff_area_page.learner_final_score_table_headers
)
self.assertEquals(
['Poor - 0 points', 'Waiting for peer reviews', 'Good',
'Fair - 1 point', 'Waiting for peer reviews', 'Excellent'],
self.staff_area_page.learner_final_score_table_values
)
self.verify_grade_entries([
[(u"STAFF GRADE - 0 POINTS", u"Poor"), (u"STAFF GRADE - 1 POINT", u"Fair")],
[(u'PEER MEDIAN GRADE', u'Waiting for peer reviews'), (u'PEER MEDIAN GRADE', u'Waiting for peer reviews')],
[(u"YOUR SELF ASSESSMENT", u"Good"), (u"YOUR SELF ASSESSMENT", u"Excellent")]
])
@@ -1043,6 +1086,8 @@ class FullWorkflowRequiredTest(OpenAssessmentTest, FullWorkflowMixin):
else:
self.verify_grade_entries([
[(u"STAFF GRADE - 0 POINTS", u"Poor"), (u"STAFF GRADE - 1 POINT", u"Fair")],
[(u'PEER MEDIAN GRADE', u'Waiting for peer reviews'),
(u'PEER MEDIAN GRADE', u'Waiting for peer reviews')],
[(u"YOUR SELF ASSESSMENT", u"Good"), (u"YOUR SELF ASSESSMENT", u"Excellent")],
])
......