Commit 23deecdb by Eric Fischer

Merge pull request #903 from edx/efischer/no_options_cleanup

Feedback only cleanup
parents f71893fd 7080c7e4
@@ -46,28 +46,31 @@
     <i class="icon fa fa-caret-right" aria-hidden="true"></i>
     <span class="question__title__copy">{{ criterion.label }}</span>
-    <span class="question__score">
-        <span class="label sr">{% trans "Overall Grade" %}</span>
-        <span class="question__score__value">{{ criterion.median_score }}</span>
-        <span class="label label--divider sr">out of</span>
-        <span class="question__score__potential">
-            {{ criterion.total_value }}
-            <span class="unit">{% trans "Points" %}</span>
-        </span>
-    </span>
+    {% if criterion.total_value > 0 %}
+        <span class="question__score">
+            <span class="label sr">{% trans "Overall Grade" %}</span>
+            <span class="question__score__value">{{ criterion.median_score }}</span>
+            <span class="label label--divider sr">out of</span>
+            <span class="question__score__potential">
+                {{ criterion.total_value }}
+                <span class="unit">{% trans "Points" %}</span>
+            </span>
+        </span>
+    {% endif %}
 </h4>
-<ul class="question__answers ui-toggle-visibility__content has--hints">
-    {% for assessment in criterion.assessments %}
-        {% include "openassessmentblock/grade/oa_assessment_title.html" with assessment=assessment %}
-    {% endfor %}
-</ul>
+{% if criterion.options %}
+    <ul class="question__answers ui-toggle-visibility__content has--hints">
+        {% for assessment in criterion.assessments %}
+            {% include "openassessmentblock/grade/oa_assessment_title.html" with assessment=assessment %}
+        {% endfor %}
+    </ul>
+{% endif %}
 {% if criterion.has_feedback %}
 <ul class="question__answers ui-toggle-visibility__content has--hints">
-    {% trans "Comments" as feedback_title %}
     {% for assessment in criterion.assessments %}
-        {% include "openassessmentblock/grade/oa_assessment_feedback.html" with title=feedback_title assessment=assessment %}
+        {% include "openassessmentblock/grade/oa_assessment_feedback.html" with title=assessment.feedback_title assessment=assessment %}
     {% endfor %}
 </ul>
 {% endif %}
...
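For context on what the new guards key off: below is a minimal sketch of a feedback-only criterion as this grade template might receive it. The keys (label, options, total_value, median_score, has_feedback, assessments, feedback_title) are the ones referenced by the template and the grade mixin; the concrete values are illustrative, not taken from the commit.

# Hypothetical context for a feedback-only criterion (illustrative values only).
# With an empty options list and total_value == 0, the new "{% if criterion.options %}"
# and "{% if criterion.total_value > 0 %}" guards skip the answers list and the score
# span, so only the per-assessment feedback list is rendered.
feedback_only_criterion = {
    'label': 'Feedback only criterion',
    'options': [],           # no selectable options, so no option "answers" list
    'total_value': 0,        # no points possible, so no "Overall Grade" score block
    'median_score': 0,
    'has_feedback': True,    # the feedback list still renders
    'assessments': [
        {'title': 'Staff Grade', 'feedback_title': 'Staff Comments', 'feedback': 'staff: criterion feedback'},
        {'title': 'Your Self Assessment', 'feedback_title': 'Your Comments', 'feedback': 'self: criterion feedback'},
    ],
}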
@@ -84,13 +84,15 @@
     </button>
     <div class="ui-slidable__content" role="group" id="final_grade_content_{{ submission.uuid }}" aria-labelledby="final_grade_{{ submission.uuid }}">
         {% if workflow_status == "done" %}
-            <div class="staff-info__final__grade__score">
-                {% with points_earned_string=score.points_earned|stringformat:"s" points_possible_string=score.points_possible|stringformat:"s" %}
-                    {% blocktrans with points_earned='<span class="grade__value__earned">'|safe|add:points_earned_string|add:'</span>'|safe points_possible='<span class="grade__value__potential">'|safe|add:points_possible_string|add:'</span>'|safe %}
-                        Final grade: {{ points_earned }} out of {{ points_possible }}
-                    {% endblocktrans %}
-                {% endwith %}
-            </div>
+            {% if score != None %}
+                <div class="staff-info__final__grade__score">
+                    {% with points_earned_string=score.points_earned|stringformat:"s" points_possible_string=score.points_possible|stringformat:"s" %}
+                        {% blocktrans with points_earned='<span class="grade__value__earned">'|safe|add:points_earned_string|add:'</span>'|safe points_possible='<span class="grade__value__potential">'|safe|add:points_possible_string|add:'</span>'|safe %}
+                            Final grade: {{ points_earned }} out of {{ points_possible }}
+                        {% endblocktrans %}
+                    {% endwith %}
+                </div>
+            {% endif %}
             <table class="staff-info__status__table staff-info__final__grade__table">
                 <caption class="sr">{% trans "Final Grade Details" %}</caption>
                 <thead>
...
@@ -42,10 +42,11 @@
     <td class="value">{{ part.feedback }}</td>
     {% if part.option != None %}
         <td class="value">{{ part.option.points }}</td>
+        <td class="value">{{ criterion.total_value }}</td>
     {% else %}
         <td class="value"></td>
+        <td class="value"></td>
     {% endif %}
-    <td class="value">{{ criterion.total_value }}</td>
     </tr>
 {% endif %}
 {% endfor %}
...
@@ -319,7 +319,7 @@ class GradeMixin(object):
         """
         Returns an array of assessments with their associated grades.
        """
-        def _get_assessment_part(title, part_criterion_name, assessment):
+        def _get_assessment_part(title, feedback_title, part_criterion_name, assessment):
            """
             Returns the assessment part for the given criterion name.
             """
@@ -327,12 +327,18 @@ class GradeMixin(object):
             for part in assessment['parts']:
                 if part['criterion']['name'] == part_criterion_name:
                     part['title'] = title
+                    part['feedback_title'] = feedback_title
                     return part
             return None

         # Fetch all the unique assessment parts
         criterion_name = criterion['name']
-        staff_assessment_part = _get_assessment_part(_('Staff Grade'), criterion_name, staff_assessment)
+        staff_assessment_part = _get_assessment_part(
+            _('Staff Grade'),
+            _('Staff Comments'),
+            criterion_name,
+            staff_assessment
+        )
         if "peer-assessment" in assessment_steps:
             peer_assessment_part = {
                 'title': _('Peer Median Grade'),
@@ -341,6 +347,7 @@ class GradeMixin(object):
                 'individual_assessments': [
                     _get_assessment_part(
                         _('Peer {peer_index}').format(peer_index=index + 1),
+                        _('Peer Comments'),
                         criterion_name,
                         peer_assessment
                     )
@@ -350,10 +357,11 @@ class GradeMixin(object):
         else:
             peer_assessment_part = None
         example_based_assessment_part = _get_assessment_part(
-            _('Example-Based Grade'), criterion_name, example_based_assessment
+            _('Example-Based Grade'), _('Example-Based Comments'), criterion_name, example_based_assessment
         )
         self_assessment_part = _get_assessment_part(
             _('Self Assessment Grade') if is_staff else _('Your Self Assessment'),
+            _('Your Comments'),  # This is only used in the LMS student-facing view
             criterion_name,
             self_assessment
         )
...
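To make the new signature concrete, here is a hedged, self-contained sketch of how `_get_assessment_part` now tags each matching part with both a grade title and a feedback title. The sample assessment dict is illustrative only, and the `_()` gettext wrapper is omitted.

# Illustrative sketch only: mirrors the helper above without the _() gettext calls.
def _get_assessment_part(title, feedback_title, part_criterion_name, assessment):
    """Return the part of `assessment` scored against the named criterion, tagged with display titles."""
    for part in assessment['parts']:
        if part['criterion']['name'] == part_criterion_name:
            part['title'] = title
            part['feedback_title'] = feedback_title
            return part
    return None


# Hypothetical staff assessment containing one feedback-only part.
staff_assessment = {
    'parts': [
        {'criterion': {'name': 'feedback_only'}, 'feedback': 'staff: criterion feedback'},
    ],
}

part = _get_assessment_part('Staff Grade', 'Staff Comments', 'feedback_only', staff_assessment)
assert part['feedback_title'] == 'Staff Comments'
# The grade template now reads this per-assessment value as assessment.feedback_title,
# replacing the single shared "Comments" heading removed above.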
@@ -192,6 +192,24 @@ class AssessmentMixin(object):
             self.submit_assessment()
         return self

+    def provide_criterion_feedback(self, feedback):
+        """
+        Provides feedback for the first criterion on a given assessment, without submitting the assessment.
+
+        Args:
+            feedback (string): the feedback to be recorded
+        """
+        self.q(css=self._bounded_selector('.answer--feedback .answer__value')).first.fill(feedback)
+
+    def provide_overall_feedback(self, feedback):
+        """
+        Provides overall feedback for a given assessment, without submitting the assessment.
+
+        Args:
+            feedback (string): the feedback to be recorded
+        """
+        self.q(css=self._bounded_selector('.assessment__rubric__question--feedback__value')).first.fill(feedback)
+
     def submit_assessment(self):
         """
         Submit an assessment of the problem.
@@ -433,6 +451,11 @@ class GradePage(OpenAssessmentPage):
         Returns: the tuple of source and value information for the requested grade
         """
+        self.wait_for_element_visibility(
+            self._bounded_selector('.question--{} .answer .answer__source__value'.format(question + 1)),
+            "Grade entry was present",
+            2
+        )
         source = self.q(
             css=self._bounded_selector('.question--{} .answer .answer__source__value'.format(question + 1))
         )[column]
@@ -443,6 +466,48 @@ class GradePage(OpenAssessmentPage):
         return source.text.strip(), value.text.strip()

+    def feedback_entry(self, question, column):
+        """
+        Returns the recorded feedback for a specific grade source.
+
+        Args:
+            question: the 0-based question for which to get grade information. Note that overall feedback can
+                be acquired by using 'feedback' for this parameter.
+            column: the 0-based column of data within a question. Each column corresponds
+                to a source of data (for example, staff, peer, or self).
+
+        Returns: the recorded feedback for the requested grade source.
+        """
+        if isinstance(question, int):
+            question = question + 1
+
+        self.wait_for_element_visibility(
+            self._bounded_selector('.question--{} .feedback__value'.format(question)),
+            "Feedback is visible",
+        )
+        feedback = self.q(
+            css=self._bounded_selector('.question--{} .feedback__value'.format(question))
+        )
+        return feedback[column].text.strip()
+
+    @property
+    def total_reported_answers(self):
+        """
+        Returns the total number of reported answers. A "reported answer" is any option or feedback item for a
+        (criterion, assessment_type) pair. For example, if there are 2 criteria, each with options and feedback,
+        and 2 assessment types, the total number of reported answers will be 8 (2 for each of option/feedback, for
+        2 questions, for 2 assessment types = 2*2*2 = 8).
+        """
+        return len(self.q(css=self._bounded_selector('.answer')))
+
+    @property
+    def number_scored_criteria(self):
+        """
+        Returns the number of criteria with a score on the grade page.
+        """
+        return len(self.q(css=self._bounded_selector('.question__score')))
+

 class StaffAreaPage(OpenAssessmentPage, AssessmentMixin):
     """
@@ -519,7 +584,7 @@ class StaffAreaPage(OpenAssessmentPage, AssessmentMixin):
         Clicks the staff grade control to expand staff grading section for use in staff required workflows.
         """
         self.q(css=self._bounded_selector(".staff__grade__show-form")).first.click()
-        self.wait_for_element_visibility("#staff-full-grade__assessment__rubric__question--0__0", "staff grading is present")
+        self.wait_for_element_visibility("#staff-full-grade__assessment__rubric__question--0", "staff grading is present")

     @property
     def available_checked_out_numbers(self):
@@ -664,13 +729,25 @@ class StaffAreaPage(OpenAssessmentPage, AssessmentMixin):
         Args:
             section: the classname of the section for which text should be returned
-                (for example, 'peer__assessments', 'submitted__assessments', or 'self__assessment')
+                (for example, 'peer__assessments', 'submitted__assessments', or 'self__assessments')

         Returns: array of strings representing the text (for example, ['Good', u'5', u'5', u'Excellent', u'3', u'3'])
         """
         return self._get_table_text(".staff-info__{} .staff-info__status__table .value".format(section))

+    def overall_feedback(self, section):
+        """
+        Return the overall feedback (a string otherwise excluded from status_text) as shown in the staff area section.
+
+        Args:
+            section: the classname of the section for which text should be returned
+                (for example, 'peer__assessments', 'submitted__assessments', or 'self__assessments')
+
+        Returns: the text present in "Overall Feedback"
+        """
+        return self.q(css=self._bounded_selector(".staff-info__{} .student__answer__display__content".format(section))).text[0]
+
     def _get_table_text(self, selector):
         """
         Helper method for getting text out of a table.
...
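A short usage sketch of the new page-object helpers, under the assumption that GradePage is constructed the same way as StaffAreaPage is in the tests below; the snippet is illustrative rather than part of the commit.

# Illustrative: exercising the new GradePage accessors from a bok-choy test.
grade_page = GradePage(browser, problem_location)  # assumed constructor, matching StaffAreaPage usage
grade_page.visit()

# Option-backed entries still come back as (source, value) tuples.
source, value = grade_page.grade_entry(0, 0)             # e.g. (u'STAFF GRADE - 1 POINT', u'Yes')

# Feedback strings use the new accessor: an int selects a 0-based criterion,
# while the string 'feedback' selects the overall-feedback block.
criterion_feedback = grade_page.feedback_entry(1, 0)     # first column of the second criterion
overall_feedback = grade_page.feedback_entry("feedback", 0)

# Aggregate checks used by the feedback-only test to assert nothing extra is rendered.
assert grade_page.total_reported_answers == 6
assert grade_page.number_scored_criteria == 1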
@@ -75,6 +75,9 @@ class OpenAssessmentTest(WebAppTest):
         'full_workflow_staff_required':
             u'courses/{test_course_id}/courseware/'
             u'8d9584d242b44343bc270ea5ef04ab03/0b0dcc728abe45138c650732af178afb/'.format(test_course_id=TEST_COURSE_ID),
+        'feedback_only':
+            u'courses/{test_course_id}/courseware/'
+            u'8d9584d242b44343bc270ea5ef04ab03/a2875e0db1454d0b94728b9a7b28000b/'.format(test_course_id=TEST_COURSE_ID),
     }

     SUBMISSION = u"This is a test submission."
@@ -232,13 +235,16 @@
         self.staff_area_page.staff_assess(self.STAFF_OVERRIDE_OPTIONS_SELECTED, "override")
         self.staff_area_page.verify_learner_final_score(final_score)

-    def do_staff_assessment(self, number_to_assess=0, options_selected=OPTIONS_SELECTED):
+    def do_staff_assessment(self, number_to_assess=0, options_selected=OPTIONS_SELECTED, feedback=None):
         """
         Use staff tools to assess available responses.

         Args:
             number_to_assess: the number of submissions to assess. If not provided (or 0),
                 will grade all available submissions.
+            options_selected (dict): the options to choose when grading. Defaults to OPTIONS_SELECTED.
+            feedback (function(feedback_type)): if feedback is set, it will be used as a function that takes one
+                parameter to generate a feedback string.
         """
         self.staff_area_page.visit()
         self.staff_area_page.click_staff_toolbar_button("staff-grading")
@@ -253,7 +259,11 @@
         assessed = 0
         while number_to_assess == 0 or assessed < number_to_assess:
             continue_after = False if number_to_assess-1 == assessed else ungraded > 0
-            self.staff_area_page.staff_assess(options_selected, "full-grade", continue_after)
+            if feedback:
+                self.staff_area_page.provide_criterion_feedback(feedback("criterion"))
+                self.staff_area_page.provide_overall_feedback(feedback("overall"))
+            if options_selected:
+                self.staff_area_page.staff_assess(options_selected, "full-grade", continue_after)
             assessed += 1
             if not continue_after:
                 self.staff_area_page.verify_available_checked_out_numbers((ungraded, checked_out-1))
@@ -1101,6 +1111,95 @@ class FullWorkflowRequiredTest(OpenAssessmentTest, FullWorkflowMixin):
             [(u"YOUR SELF ASSESSMENT", u"Good"), (u"YOUR SELF ASSESSMENT", u"Excellent")],
         ])

+
+@ddt.ddt
+class FeedbackOnlyTest(OpenAssessmentTest, FullWorkflowMixin):
+    """
+    Test for a problem containing a criterion that only accepts feedback. Will make and verify self and staff assessments.
+    """
+    def setUp(self):
+        super(FeedbackOnlyTest, self).setUp("feedback_only", staff=True)
+        self.staff_area_page = StaffAreaPage(self.browser, self.problem_loc)
+
+    def generate_feedback(self, assessment_type, feedback_type):
+        return "{}: {} feedback".format(assessment_type, feedback_type)
+
+    def assess_feedback(self, self_or_peer=""):
+        if self_or_peer != "self" and self_or_peer != "peer":
+            raise AssertionError("assess_feedback only works for self or peer assessments")
+        page = self.self_asmnt_page if self_or_peer == "self" else self.peer_asmnt_page
+        page.wait_for_page()
+        page.submit_assessment()
+
+    @retry()
+    @attr('acceptance')
+    def test_feedback_only(self):
+        # Make submission
+        user, pwd = self.do_submission()
+
+        # Make self assessment
+        self.self_asmnt_page.visit()
+        self.self_asmnt_page.wait_for_page()
+        self.self_asmnt_page.provide_criterion_feedback(self.generate_feedback("self", "criterion"))
+        self.self_asmnt_page.provide_overall_feedback(self.generate_feedback("self", "overall"))
+        self.self_asmnt_page.assess("self", [0])
+        self.self_asmnt_page.wait_for_complete()
+        self.assertTrue(self.self_asmnt_page.is_complete)
+
+        # Staff assess all available submissions
+        self.do_staff_assessment(
+            options_selected=[0],  # Select the 0-th option (Yes) on the single scored criterion
+            feedback=lambda feedback_type: self.generate_feedback("staff", feedback_type)
+        )
+
+        # Verify student-viewable grade report
+        self.refresh_page()
+        self.grade_page.wait_for_page()
+        self.assertEqual(self.grade_page.grade_entry(0, 0), (u'STAFF GRADE - 1 POINT', u'Yes'))  # Reported answer 1
+        self.assertEqual(self.grade_page.grade_entry(0, 1), (u'YOUR SELF ASSESSMENT', u'Yes'))  # Reported answer 2
+        for i, assessment_type in enumerate(["staff", "self"]):
+            # Criterion feedback first
+            expected = self.generate_feedback(assessment_type, "criterion")
+            actual = self.grade_page.feedback_entry(1, i)
+            self.assertEqual(actual, expected)  # Reported answers 3 and 4
+
+            # Then overall
+            expected = self.generate_feedback(assessment_type, "overall")
+            actual = self.grade_page.feedback_entry("feedback", i)
+            self.assertEqual(actual, expected)  # Reported answers 5 and 6
+
+        # Verify that no reported answers other than the 6 we already verified are present
+        self.assertEqual(self.grade_page.total_reported_answers, 6)
+
+        # Verify that the feedback-only criterion has no score
+        self.assertEqual(self.grade_page.number_scored_criteria, 1)
+
+        # Verify feedback appears from all assessments in staff tools
+        self.staff_area_page.show_learner(user)
+        self.staff_area_page.expand_learner_report_sections()
+        self.assertEqual(
+            self.staff_area_page.learner_final_score_table_headers,
+            [u'CRITERION', u'STAFF GRADE', u'SELF ASSESSMENT GRADE']
+        )
+        self.assertEqual(
+            self.staff_area_page.learner_final_score_table_values,
+            [u'Yes - 1 point', u'Yes', u'Feedback Recorded', u'Feedback Recorded']
+        )
+        self.assertEqual(
+            self.staff_area_page.status_text('staff__assessments')[5],
+            self.generate_feedback("staff", "criterion")
+        )
+        self.assertEqual(
+            self.staff_area_page.overall_feedback('staff__assessments'),
+            self.generate_feedback("staff", "overall")
+        )
+        self.assertEqual(
+            self.staff_area_page.status_text('self__assessments')[5],
+            self.generate_feedback("self", "criterion")
+        )
+        self.assertEqual(
+            self.staff_area_page.overall_feedback('self__assessments'),
+            self.generate_feedback("self", "overall")
+        )
+
+        # Verify correct score is shown
+        self.staff_area_page.verify_learner_final_score("Final grade: 1 out of 1")
+

 if __name__ == "__main__":
...