Commit be5940e6 by Nimisha Asthagiri, committed by Eric Fischer

Robust Grades refactor

Adds the ENABLE_SUBSECTION_GRADES_SAVED feature flag to both the LMS and the
CMS, and installs the wiring that will allow robust grades to be used by
courses that enable it. This functionality is still gated by the feature flag
and should not be used until the remaining robust grades work is finished.
parent e641c0dc
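
In outline, the gating this commit introduces is a two-level check; a minimal sketch (the helper name is hypothetical, the diff below inlines the same test in _get_saved_grade and _update_saved_grade):

def _saved_grades_enabled(course):
    # Illustration only: both the platform-wide feature flag and the
    # per-course advanced setting must be true before any saved-grade
    # code path runs.
    return (
        settings.FEATURES.get('ENABLE_SUBSECTION_GRADES_SAVED') and
        course.enable_subsection_grades_saved
    )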
@@ -100,6 +100,9 @@ class CourseMetadata(object):
         if not XBlockStudioConfigurationFlag.is_enabled():
             filtered_list.append('allow_unsupported_xblocks')
+        if not settings.FEATURES.get('ENABLE_SUBSECTION_GRADES_SAVED'):
+            filtered_list.append('enable_subsection_grades_saved')
         return filtered_list

     @classmethod
...
@@ -208,6 +208,12 @@ FEATURES = {
     # Show Language selector
     'SHOW_LANGUAGE_SELECTOR': False,

+    # Temporary feature flag for disabling saving of subsection grades.
+    # There is also an advanced setting in the course module. The
+    # feature flag and the advanced setting must both be true for
+    # a course to use saved grades.
+    'ENABLE_SUBSECTION_GRADES_SAVED': False,
 }

 ENABLE_JASMINE = False
...
@@ -801,6 +801,17 @@ class CourseFields(object):
         scope=Scope.settings
     )

+    enable_subsection_grades_saved = Boolean(
+        display_name=_("Enable Subsection Grades Saved"),
+        help=_(
+            "Enter true or false. If this value is true, the robust "
+            "grades feature of saving subsection grades is enabled "
+            "for this course."
+        ),
+        default=False,
+        scope=Scope.settings
+    )
+
     learning_info = List(
         display_name=_("Course Learning Information"),
         help=_("Specify what student can learn from the course."),
...
"""
Code used to calculate learner grades.
"""
from __future__ import division
import abc import abc
import inspect import inspect
import logging import logging
...@@ -13,6 +19,13 @@ log = logging.getLogger("edx.courseware") ...@@ -13,6 +19,13 @@ log = logging.getLogger("edx.courseware")
Score = namedtuple("Score", "earned possible graded section module_id") Score = namedtuple("Score", "earned possible graded section module_id")
def float_sum(iterable):
"""
Sum the elements of the iterable, and return the result as a float.
"""
return float(sum(iterable))
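
A quick illustration (not part of the diff): combined with the from __future__ import division added above, float_sum keeps downstream ratios in floating point even when every input is an int.

# Illustration only:
float_sum([1, 2])                # 3.0 rather than 3
float_sum(s.earned for s in [])  # 0.0 for an empty generator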

 def aggregate_scores(scores, section_name="summary"):
     """
     scores: A list of Score objects
@@ -20,11 +33,11 @@ def aggregate_scores(scores, section_name="summary"):
     all_total: A Score representing the total score summed over all input scores
     graded_total: A Score representing the score summed over all graded input scores
     """
-    total_correct_graded = sum(score.earned for score in scores if score.graded)
-    total_possible_graded = sum(score.possible for score in scores if score.graded)
-    total_correct = sum(score.earned for score in scores)
-    total_possible = sum(score.possible for score in scores)
+    total_correct_graded = float_sum(score.earned for score in scores if score.graded)
+    total_possible_graded = float_sum(score.possible for score in scores if score.graded)
+    total_correct = float_sum(score.earned for score in scores)
+    total_possible = float_sum(score.possible for score in scores)

     #regardless of whether or not it is graded
     all_total = Score(
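
A worked example (values assumed, not part of the diff), with one graded and one ungraded score:

scores = [
    Score(1, 2, True, 'Problem 1', None),
    Score(3, 4, False, 'Problem 2', None),
]
all_total, graded_total = aggregate_scores(scores, 'Week 1')
# all_total sums every score:         earned 4.0 of possible 6.0
# graded_total sums graded scores only: earned 1.0 of possible 2.0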
@@ -207,7 +220,7 @@ class SingleSectionGrader(CourseGrader):
     If the name is not appropriate for the short short_label or category, they each may
     be specified individually.
     """
-    def __init__(self, type, name, short_label=None, category=None):
+    def __init__(self, type, name, short_label=None, category=None):  # pylint: disable=redefined-builtin
         self.type = type
         self.name = name
         self.short_label = short_label or name
@@ -229,7 +242,7 @@ class SingleSectionGrader(CourseGrader):
             earned = found_score.earned
             possible = found_score.possible
-            percent = earned / float(possible)
+            percent = earned / possible
             detail = u"{name} - {percent:.0%} ({earned:.3n}/{possible:.3n})".format(
                 name=self.name,
                 percent=percent,
@@ -244,10 +257,11 @@ class SingleSectionGrader(CourseGrader):
         breakdown = [{'percent': percent, 'label': self.short_label,
                       'detail': detail, 'category': self.category, 'prominent': True}]

-        return {'percent': percent,
-                'section_breakdown': breakdown,
-                #No grade_breakdown here
-                }
+        return {
+            'percent': percent,
+            'section_breakdown': breakdown,
+            #No grade_breakdown here
+        }

 class AssignmentFormatGrader(CourseGrader):
@@ -284,8 +298,18 @@ class AssignmentFormatGrader(CourseGrader):
     min_count = 2 would produce the labels "Assignment 3", "Assignment 4"
     """
-    def __init__(self, type, min_count, drop_count, category=None, section_type=None, short_label=None,
-                 show_only_average=False, hide_average=False, starting_index=1):
+    def __init__(
+            self,
+            type,  # pylint: disable=redefined-builtin
+            min_count,
+            drop_count,
+            category=None,
+            section_type=None,
+            short_label=None,
+            show_only_average=False,
+            hide_average=False,
+            starting_index=1
+    ):
         self.type = type
         self.min_count = min_count
         self.drop_count = drop_count
@@ -330,7 +354,7 @@ class AssignmentFormatGrader(CourseGrader):
                 possible = scores[i].possible
                 section_name = scores[i].section

-                percentage = earned / float(possible)
+                percentage = earned / possible
                 summary_format = u"{section_type} {index} - {name} - {percent:.0%} ({earned:.3n}/{possible:.3n})"
                 summary = summary_format.format(
                     index=i + self.starting_index,
@@ -341,7 +365,7 @@ class AssignmentFormatGrader(CourseGrader):
                     possible=float(possible)
                 )
             else:
-                percentage = 0
+                percentage = 0.0
                 summary = u"{section_type} {index} Unreleased - 0% (?/?)".format(
                     index=i + self.starting_index,
                     section_type=self.section_type
@@ -358,8 +382,12 @@ class AssignmentFormatGrader(CourseGrader):
         total_percent, dropped_indices = total_with_drops(breakdown, self.drop_count)

         for dropped_index in dropped_indices:
-            breakdown[dropped_index]['mark'] = {'detail': u"The lowest {drop_count} {section_type} scores are dropped."
-                                                .format(drop_count=self.drop_count, section_type=self.section_type)}
+            breakdown[dropped_index]['mark'] = {
+                'detail': u"The lowest {drop_count} {section_type} scores are dropped.".format(
+                    drop_count=self.drop_count,
+                    section_type=self.section_type
+                )
+            }

         if len(breakdown) == 1:
             # if there is only one entry in a section, suppress the existing individual entry and the average,
@@ -386,7 +414,8 @@ class AssignmentFormatGrader(CourseGrader):
             breakdown.append({'percent': total_percent, 'label': total_label,
                               'detail': total_detail, 'category': self.category, 'prominent': True})

-        return {'percent': total_percent,
-                'section_breakdown': breakdown,
-                #No grade_breakdown here
-                }
+        return {
+            'percent': total_percent,
+            'section_breakdown': breakdown,
+            # No grade_breakdown here
+        }
@@ -229,18 +229,18 @@ class TestFieldOverrideMongoPerformance(FieldOverridePerformanceTestCase):
         #     # of sql queries to default,
         #     # of mongo queries,
         # )
-        ('no_overrides', 1, True, False): (35, 6),
-        ('no_overrides', 2, True, False): (41, 6),
-        ('no_overrides', 3, True, False): (51, 6),
-        ('ccx', 1, True, False): (35, 6),
-        ('ccx', 2, True, False): (41, 6),
-        ('ccx', 3, True, False): (51, 6),
-        ('no_overrides', 1, False, False): (35, 6),
-        ('no_overrides', 2, False, False): (41, 6),
-        ('no_overrides', 3, False, False): (51, 6),
-        ('ccx', 1, False, False): (35, 6),
-        ('ccx', 2, False, False): (41, 6),
-        ('ccx', 3, False, False): (51, 6),
+        ('no_overrides', 1, True, False): (21, 6),
+        ('no_overrides', 2, True, False): (21, 6),
+        ('no_overrides', 3, True, False): (21, 6),
+        ('ccx', 1, True, False): (21, 6),
+        ('ccx', 2, True, False): (21, 6),
+        ('ccx', 3, True, False): (21, 6),
+        ('no_overrides', 1, False, False): (21, 6),
+        ('no_overrides', 2, False, False): (21, 6),
+        ('no_overrides', 3, False, False): (21, 6),
+        ('ccx', 1, False, False): (21, 6),
+        ('ccx', 2, False, False): (21, 6),
+        ('ccx', 3, False, False): (21, 6),
     }
@@ -252,19 +252,19 @@ class TestFieldOverrideSplitPerformance(FieldOverridePerformanceTestCase):
     __test__ = True

     TEST_DATA = {
-        ('no_overrides', 1, True, False): (35, 3),
-        ('no_overrides', 2, True, False): (41, 3),
-        ('no_overrides', 3, True, False): (51, 3),
-        ('ccx', 1, True, False): (35, 3),
-        ('ccx', 2, True, False): (41, 3),
-        ('ccx', 3, True, False): (51, 3),
-        ('ccx', 1, True, True): (36, 3),
-        ('ccx', 2, True, True): (42, 3),
-        ('ccx', 3, True, True): (52, 3),
-        ('no_overrides', 1, False, False): (35, 3),
-        ('no_overrides', 2, False, False): (41, 3),
-        ('no_overrides', 3, False, False): (51, 3),
-        ('ccx', 1, False, False): (35, 3),
-        ('ccx', 2, False, False): (41, 3),
-        ('ccx', 3, False, False): (51, 3),
+        ('no_overrides', 1, True, False): (21, 3),
+        ('no_overrides', 2, True, False): (21, 3),
+        ('no_overrides', 3, True, False): (21, 3),
+        ('ccx', 1, True, False): (21, 3),
+        ('ccx', 2, True, False): (21, 3),
+        ('ccx', 3, True, False): (21, 3),
+        ('ccx', 1, True, True): (22, 3),
+        ('ccx', 2, True, True): (22, 3),
+        ('ccx', 3, True, True): (22, 3),
+        ('no_overrides', 1, False, False): (21, 3),
+        ('no_overrides', 2, False, False): (21, 3),
+        ('no_overrides', 3, False, False): (21, 3),
+        ('ccx', 1, False, False): (21, 3),
+        ('ccx', 2, False, False): (21, 3),
+        ('ccx', 3, False, False): (21, 3),
     }
@@ -253,7 +253,7 @@ class TestCCXProgressChanges(CcxTestCase, LoginEnrollmentTestCase):
         grade_summary = progress_page_response.mako_context['courseware_summary']  # pylint: disable=no-member
         chapter = grade_summary[0]
         section = chapter['sections'][0]
-        progress_page_due_date = section['due'].strftime("%Y-%m-%d %H:%M")
+        progress_page_due_date = section.due.strftime("%Y-%m-%d %H:%M")
         self.assertEqual(progress_page_due_date, due)

     @patch('ccx.views.render_to_response', intercept_renderer)
...
@@ -268,7 +268,7 @@ class TestSubmittingProblems(ModuleStoreTestCase, LoginEnrollmentTestCase, Probl
         ungraded problems, and is good for displaying a course summary with due dates,
         etc.
         """
-        return progress.summary(self.student_user, self.course).chapters
+        return progress.summary(self.student_user, self.course).chapter_grades

     def check_grade_percent(self, percent):
         """
@@ -299,8 +299,8 @@ class TestSubmittingProblems(ModuleStoreTestCase, LoginEnrollmentTestCase, Probl
             sections_list.extend(chapter['sections'])

         # get the first section that matches the url (there should only be one)
-        hw_section = next(section for section in sections_list if section.get('url_name') == hw_url_name)
-        return [s.earned for s in hw_section['scores']]
+        hw_section = next(section for section in sections_list if section.url_name == hw_url_name)
+        return [s.earned for s in hw_section.scores]

 @attr('shard_3')
@@ -1168,7 +1168,7 @@ class TestConditionalContent(TestSubmittingProblems):
         self.assertEqual(self.score_for_hw('homework1'), [1.0])
         self.assertEqual(self.score_for_hw('homework2'), [])

-        self.assertEqual(self.earned_hw_scores(), [1.0, 0.0])
+        self.assertEqual(self.earned_hw_scores(), [1.0])

         # Grade percent is .25. Here is the calculation.
         homework_1_score = 1.0 / 2
...
@@ -21,7 +21,7 @@ from django.test import TestCase
 from django.test.client import RequestFactory
 from django.test.client import Client
 from django.test.utils import override_settings
-from mock import MagicMock, patch, create_autospec, Mock
+from mock import MagicMock, patch, create_autospec, PropertyMock
 from opaque_keys.edx.locations import Location, SlashSeparatedCourseKey
 from pytz import UTC
 from xblock.core import XBlock
@@ -1255,8 +1255,8 @@ class ProgressPageTests(ModuleStoreTestCase):
     @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True})
     @patch(
-        'lms.djangoapps.grades.course_grades.summary',
-        Mock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
+        'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
+        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []}),
     )
     def test_view_certificate_link(self):
         """
@@ -1318,8 +1318,8 @@
     @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': False})
     @patch(
-        'lms.djangoapps.grades.course_grades.summary',
-        Mock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
+        'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
+        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
     )
     def test_view_certificate_link_hidden(self):
         """
@@ -1346,7 +1346,7 @@
         self.assertContains(resp, u"Download Your Certificate")

     @ddt.data(
-        *itertools.product(((49, 4, True), (49, 4, False)), (True, False))
+        *itertools.product(((38, 4, True), (38, 4, False)), (True, False))
     )
     @ddt.unpack
     def test_query_counts(self, (sql_calls, mongo_calls, self_paced), self_paced_enabled):
@@ -1359,9 +1359,10 @@
         )
         self.assertEqual(resp.status_code, 200)

-    @patch('lms.djangoapps.grades.course_grades.summary', Mock(return_value={
-        'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []
-    }))
+    @patch(
+        'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
+        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
+    )
     @ddt.data(
         *itertools.product(
             (
@@ -1398,8 +1399,8 @@
     @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True})
     @patch(
-        'lms.djangoapps.grades.course_grades.summary',
-        Mock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
+        'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
+        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
     )
     def test_page_with_invalidated_certificate_with_html_view(self):
         """
@@ -1434,8 +1435,8 @@
         self.assert_invalidate_certificate(generated_certificate)

     @patch(
-        'lms.djangoapps.grades.course_grades.summary',
-        Mock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
+        'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
+        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
     )
     def test_page_with_invalidated_certificate_with_pdf(self):
         """
@@ -1453,8 +1454,8 @@
         self.assert_invalidate_certificate(generated_certificate)

     @patch(
-        'lms.djangoapps.grades.course_grades.summary',
-        Mock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
+        'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
+        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
     )
     def test_message_for_audit_mode(self):
         """ Verify that message appears on progress page, if learner is enrolled
@@ -1642,19 +1643,22 @@ class IsCoursePassedTests(ModuleStoreTestCase):
         # If user has not grade then false will return
         self.assertFalse(views.is_course_passed(self.course, None, self.student, self.request))

-    @patch('lms.djangoapps.grades.course_grades.summary', Mock(return_value={'percent': 0.9}))
+    @patch('lms.djangoapps.grades.new.course_grade.CourseGrade.summary', PropertyMock(return_value={'percent': 0.9}))
     def test_user_pass_if_percent_appears_above_passing_point(self):
         # Mocking the grades.grade
         # If user has above passing marks then True will return
         self.assertTrue(views.is_course_passed(self.course, None, self.student, self.request))

-    @patch('lms.djangoapps.grades.course_grades.summary', Mock(return_value={'percent': 0.2}))
+    @patch('lms.djangoapps.grades.new.course_grade.CourseGrade.summary', PropertyMock(return_value={'percent': 0.2}))
     def test_user_fail_if_percent_appears_below_passing_point(self):
         # Mocking the grades.grade
         # If user has below passing marks then False will return
         self.assertFalse(views.is_course_passed(self.course, None, self.student, self.request))

-    @patch('lms.djangoapps.grades.course_grades.summary', Mock(return_value={'percent': SUCCESS_CUTOFF}))
+    @patch(
+        'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
+        PropertyMock(return_value={'percent': SUCCESS_CUTOFF})
+    )
     def test_user_with_passing_marks_and_achieved_marks_equal(self):
         # Mocking the grades.grade
         # If user's achieved passing marks are equal to the required passing
@@ -1688,7 +1692,10 @@ class GenerateUserCertTests(ModuleStoreTestCase):
         self.assertEqual(resp.status_code, HttpResponseBadRequest.status_code)
         self.assertIn("Your certificate will be available when you pass the course.", resp.content)

-    @patch('lms.djangoapps.grades.course_grades.summary', Mock(return_value={'grade': 'Pass', 'percent': 0.75}))
+    @patch(
+        'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
+        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75})
+    )
     @override_settings(CERT_QUEUE='certificates', LMS_SEGMENT_KEY="foobar")
     def test_user_with_passing_grade(self):
         # If user has above passing grading then json will return cert generating message and
@@ -1720,7 +1727,10 @@
             )
             mock_tracker.reset_mock()

-    @patch('lms.djangoapps.grades.course_grades.summary', Mock(return_value={'grade': 'Pass', 'percent': 0.75}))
+    @patch(
+        'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
+        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75})
+    )
     def test_user_with_passing_existing_generating_cert(self):
         # If user has passing grade but also has existing generating cert
         # then json will return cert generating message with bad request code
@@ -1734,7 +1744,10 @@
         self.assertEqual(resp.status_code, HttpResponseBadRequest.status_code)
         self.assertIn("Certificate is being created.", resp.content)

-    @patch('lms.djangoapps.grades.course_grades.summary', Mock(return_value={'grade': 'Pass', 'percent': 0.75}))
+    @patch(
+        'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
+        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75})
+    )
     @override_settings(CERT_QUEUE='certificates', LMS_SEGMENT_KEY="foobar")
     def test_user_with_passing_existing_downloadable_cert(self):
         # If user has already downloadable certificate
...
@@ -41,12 +41,11 @@ import survey.views
 from lms.djangoapps.ccx.utils import prep_course_for_grading
 from certificates import api as certs_api
 from certificates.models import CertificateStatuses
-from course_blocks.api import get_course_blocks
 from openedx.core.djangoapps.models.course_details import CourseDetails
 from commerce.utils import EcommerceService
 from enrollment.api import add_enrollment
 from course_modes.models import CourseMode
-from lms.djangoapps.grades import course_grades, progress as grades_progress
+from lms.djangoapps.grades.new.course_grade import CourseGradeFactory
 from courseware.access import has_access, has_ccx_coach_role, _adjust_start_date_for_beta_testers
 from courseware.access_response import StartDateError
 from courseware.access_utils import in_preview_mode
@@ -720,15 +719,14 @@ def _progress(request, course_key, student_id):
     #  additional DB lookup (this kills the Progress page in particular).
     student = User.objects.prefetch_related("groups").get(id=student.id)

-    # Fetch course blocks once for performance reasons
-    course_structure = get_course_blocks(student, course.location)
-    courseware_summary = grades_progress.summary(student, course, course_structure).chapters
-    if courseware_summary is None:
+    course_grade = CourseGradeFactory(student).create(course)
+    if not course_grade.has_access_to_course:
         # This means the student didn't have access to the course (which the instructor requested)
         raise Http404
-    grade_summary = course_grades.summary(student, course, course_structure=course_structure)
+    courseware_summary = course_grade.chapter_grades
+    grade_summary = course_grade.summary

     studio_url = get_studio_url(course, 'settings/grading')

     # checking certificate generation configuration
@@ -1123,7 +1121,7 @@ def is_course_passed(course, grade_summary=None, student=None, request=None):
     success_cutoff = min(nonzero_cutoffs) if nonzero_cutoffs else None

     if grade_summary is None:
-        grade_summary = course_grades.summary(student, course)
+        grade_summary = CourseGradeFactory(student).create(course).summary

     return success_cutoff and grade_summary['percent'] >= success_cutoff
...
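
To make the cutoff logic concrete (cutoff values assumed, illustration only):

# grade_cutoffs = {'Pass': 0.5}  ->  nonzero_cutoffs = [0.5], success_cutoff = 0.5
# grade_summary = {'percent': 0.50}  ->  is_course_passed(...) is True
# grade_summary = {'percent': 0.49}  ->  is_course_passed(...) is False
# all cutoffs zero  ->  success_cutoff is None and the function returns falsy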
""" """
Functionality for course-level grades. Functionality for course-level grades.
""" """
from collections import namedtuple
from logging import getLogger from logging import getLogger
from django.conf import settings
import dogstats_wrapper as dog_stats_api import dogstats_wrapper as dog_stats_api
import random
from opaque_keys.edx.keys import CourseKey from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.signals.signals import GRADES_UPDATED
from course_blocks.api import get_course_blocks
from courseware.courses import get_course_by_id from courseware.courses import get_course_by_id
from courseware.model_data import ScoresClient from .new.course_grade import CourseGradeFactory
from student.models import anonymous_id_for_user
from util.db import outer_atomic
from xmodule import graders, block_metadata_utils
from xmodule.graders import Score
from .context import grading_context
from .scores import get_score
log = getLogger(__name__) log = getLogger(__name__)
def iterate_grades_for(course_or_id, students, keep_raw_scores=False): GradeResult = namedtuple('StudentGrade', ['student', 'gradeset', 'err_msg'])
"""Given a course_id and an iterable of students (User), yield a tuple of:
def iterate_grades_for(course_or_id, students):
"""
Given a course_id and an iterable of students (User), yield a GradeResult
for every student enrolled in the course. GradeResult is a named tuple of:
(student, gradeset, err_msg) for every student enrolled in the course. (student, gradeset, err_msg)
If an error occurred, gradeset will be an empty dict and err_msg will be an If an error occurred, gradeset will be an empty dict and err_msg will be an
exception message. If there was no error, err_msg is an empty string. exception message. If there was no error, err_msg is an empty string.
...@@ -50,8 +45,8 @@ def iterate_grades_for(course_or_id, students, keep_raw_scores=False): ...@@ -50,8 +45,8 @@ def iterate_grades_for(course_or_id, students, keep_raw_scores=False):
for student in students: for student in students:
with dog_stats_api.timer('lms.grades.iterate_grades_for', tags=[u'action:{}'.format(course.id)]): with dog_stats_api.timer('lms.grades.iterate_grades_for', tags=[u'action:{}'.format(course.id)]):
try: try:
gradeset = summary(student, course, keep_raw_scores) gradeset = summary(student, course)
yield student, gradeset, "" yield GradeResult(student, gradeset, "")
except Exception as exc: # pylint: disable=broad-except except Exception as exc: # pylint: disable=broad-except
# Keep marching on even if this student couldn't be graded for # Keep marching on even if this student couldn't be graded for
# some reason, but log it for future reference. # some reason, but log it for future reference.
...@@ -62,195 +57,13 @@ def iterate_grades_for(course_or_id, students, keep_raw_scores=False): ...@@ -62,195 +57,13 @@ def iterate_grades_for(course_or_id, students, keep_raw_scores=False):
course.id, course.id,
exc.message exc.message
) )
yield student, {}, exc.message yield GradeResult(student, {}, exc.message)
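
Callers can now unpack the result positionally, as before, or by field name; a sketch (course_id, enrolled_students, and process are assumed):

for result in iterate_grades_for(course_id, enrolled_students):
    if result.err_msg:
        log.warning('Could not grade %s: %s', result.student.username, result.err_msg)
    else:
        process(result.gradeset)  # hypothetical consumer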

-def summary(student, course, keep_raw_scores=False, course_structure=None):
+def summary(student, course):
     """
     Returns the grade summary of the student for the given course.
     Also sends a signal to update the minimum grade requirement status.
     """
+    return CourseGradeFactory(student).create(course).summary
-    grade_summary = _summary(student, course, keep_raw_scores, course_structure)
-    responses = GRADES_UPDATED.send_robust(
-        sender=None,
-        username=student.username,
-        grade_summary=grade_summary,
-        course_key=course.id,
-        deadline=course.end
-    )
-    for receiver, response in responses:
-        log.info('Signal fired when student grade is calculated. Receiver: %s. Response: %s', receiver, response)
-    return grade_summary
-
-
-def _summary(student, course, keep_raw_scores, course_structure=None):
-    """
-    This grades a student as quickly as possible. It returns the
-    output from the course grader, augmented with the final letter
-    grade. The keys in the output are:
-    - course: a CourseDescriptor
-    - keep_raw_scores : if True, then value for key 'raw_scores' contains scores
-      for every graded module
-    More information on the format is in the docstring for CourseGrader.
-    """
-    if course_structure is None:
-        course_structure = get_course_blocks(student, course.location)
-    grading_context_result = grading_context(course_structure)
-    scorable_locations = [block.location for block in grading_context_result['all_graded_blocks']]
-
-    with outer_atomic():
-        scores_client = ScoresClient.create_for_locations(course.id, student.id, scorable_locations)
-
-    # Dict of item_ids -> (earned, possible) point tuples. This *only* grabs
-    # scores that were registered with the submissions API, which for the moment
-    # means only openassessment (edx-ora2)
-    # We need to import this here to avoid a circular dependency of the form:
-    # XBlock --> submissions --> Django Rest Framework error strings -->
-    # Django translation --> ... --> courseware --> submissions
-    from submissions import api as sub_api  # installed from the edx-submissions repository
-    with outer_atomic():
-        submissions_scores = sub_api.get_scores(
-            course.id.to_deprecated_string(),
-            anonymous_id_for_user(student, course.id)
-        )
-
-    totaled_scores, raw_scores = _calculate_totaled_scores(
-        student, grading_context_result, submissions_scores, scores_client, keep_raw_scores
-    )
-
-    with outer_atomic():
-        # Grading policy might be overriden by a CCX, need to reset it
-        course.set_grading_policy(course.grading_policy)
-        grade_summary = course.grader.grade(totaled_scores, generate_random_scores=settings.GENERATE_PROFILE_SCORES)
-
-        # We round the grade here, to make sure that the grade is a whole percentage and
-        # doesn't get displayed differently than it gets grades
-        grade_summary['percent'] = round(grade_summary['percent'] * 100 + 0.05) / 100
-
-        letter_grade = _letter_grade(course.grade_cutoffs, grade_summary['percent'])
-        grade_summary['grade'] = letter_grade
-        grade_summary['totaled_scores'] = totaled_scores  # make this available, eg for instructor download & debugging
-        if keep_raw_scores:
-            # way to get all RAW scores out to instructor
-            # so grader can be double-checked
-            grade_summary['raw_scores'] = raw_scores
-    return grade_summary
-
-
-def _calculate_totaled_scores(
-        student,
-        grading_context_result,
-        submissions_scores,
-        scores_client,
-        keep_raw_scores,
-):
-    """
-    Returns a tuple of totaled scores and raw scores, which can be passed to the grader.
-    """
-    raw_scores = []
-    totaled_scores = {}
-    for section_format, sections in grading_context_result['all_graded_sections'].iteritems():
-        format_scores = []
-        for section_info in sections:
-            section = section_info['section_block']
-            section_name = block_metadata_utils.display_name_with_default(section)
-            with outer_atomic():
-                # Check to
-                # see if any of our locations are in the scores from the submissions
-                # API. If scores exist, we have to calculate grades for this section.
-                should_grade_section = any(
-                    unicode(descendant.location) in submissions_scores
-                    for descendant in section_info['scored_descendants']
-                )
-
-                if not should_grade_section:
-                    should_grade_section = any(
-                        descendant.location in scores_client
-                        for descendant in section_info['scored_descendants']
-                    )
-
-                # If we haven't seen a single problem in the section, we don't have
-                # to grade it at all! We can assume 0%
-                if should_grade_section:
-                    scores = []
-                    for descendant in section_info['scored_descendants']:
-                        (correct, total) = get_score(
-                            student,
-                            descendant,
-                            scores_client,
-                            submissions_scores,
-                        )
-                        if correct is None and total is None:
-                            continue
-
-                        if settings.GENERATE_PROFILE_SCORES:  # for debugging!
-                            if total > 1:
-                                correct = random.randrange(max(total - 2, 1), total + 1)
-                            else:
-                                correct = total
-
-                        graded = descendant.graded
-                        if not total > 0:
-                            # We simply cannot grade a problem that is 12/0, because we might need it as a percentage
-                            graded = False
-
-                        scores.append(
-                            Score(
-                                correct,
-                                total,
-                                graded,
-                                block_metadata_utils.display_name_with_default_escaped(descendant),
-                                descendant.location
-                            )
-                        )
-
-                    __, graded_total = graders.aggregate_scores(scores, section_name)
-                    if keep_raw_scores:
-                        raw_scores += scores
-                else:
-                    graded_total = Score(0.0, 1.0, True, section_name, None)
-
-            # Add the graded total to totaled_scores
-            if graded_total.possible > 0:
-                format_scores.append(graded_total)
-            else:
-                log.info(
-                    "Unable to grade a section with a total possible score of zero. " +
-                    str(section.location)
-                )
-
-        totaled_scores[section_format] = format_scores
-    return totaled_scores, raw_scores
-
-
-def _letter_grade(grade_cutoffs, percentage):
-    """
-    Returns a letter grade as defined in grading_policy (e.g. 'A' 'B' 'C' for 6.002x) or None.
-
-    Arguments
-    - grade_cutoffs is a dictionary mapping a grade to the lowest
-      possible percentage to earn that grade.
-    - percentage is the final percent across all problems in a course
-    """
-    letter_grade = None
-
-    # Possible grades, sorted in descending order of score
-    descending_grades = sorted(grade_cutoffs, key=lambda x: grade_cutoffs[x], reverse=True)
-    for possible_grade in descending_grades:
-        if percentage >= grade_cutoffs[possible_grade]:
-            letter_grade = possible_grade
-            break
-
-    return letter_grade
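
A worked example of the cutoff scan (policy values assumed; the same logic survives as CourseGrade._compute_letter_grade in the new module below):

# grade_cutoffs = {'A': 0.9, 'B': 0.7, 'C': 0.5}
# _letter_grade(grade_cutoffs, 0.73)  ->  'B'   (fails 0.9, passes 0.7)
# _letter_grade(grade_cutoffs, 0.95)  ->  'A'
# _letter_grade(grade_cutoffs, 0.40)  ->  None  (below every cutoff)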
""" """
Functionality for module-level grades. Functionality for module-level grades.
""" """
# TODO The code in this file needs to be updated to use BlockTransformers. (TNL-4448) # TODO The score computation in this file is not accurate
# TODO The code here needs to be validated - may not be calculating correctly. # since it is summing percentages instead of computing a
# final percentage of the individual sums.
# Regardless, this file and its code should be removed soon
# as part of TNL-5062.
from django.test.client import RequestFactory from django.test.client import RequestFactory
from courseware.model_data import FieldDataCache, ScoresClient from courseware.model_data import FieldDataCache, ScoresClient
......
"""
CourseGrade Class
"""
from collections import defaultdict
from django.conf import settings
from lazy import lazy
from logging import getLogger
from lms.djangoapps.course_blocks.api import get_course_blocks
from openedx.core.djangoapps.signals.signals import GRADES_UPDATED
from xmodule import block_metadata_utils
from .subsection_grade import SubsectionGradeFactory
log = getLogger(__name__)
class CourseGrade(object):
"""
Course Grade class
"""
def __init__(self, student, course, course_structure):
self.student = student
self.course = course
self.course_structure = course_structure
self.chapter_grades = []
@lazy
def subsection_grade_totals_by_format(self):
"""
Returns grades for the subsections in the course in
a dict keyed by subsection format types.
"""
subsections_by_format = defaultdict(list)
for chapter in self.chapter_grades:
for subsection_grade in chapter['sections']:
if subsection_grade.graded:
graded_total = subsection_grade.graded_total
if graded_total.possible > 0:
subsections_by_format[subsection_grade.format].append(graded_total)
return subsections_by_format
@lazy
def locations_to_scores(self):
"""
Returns a dict of problem scores keyed by their locations.
"""
locations_to_scores = {}
for chapter in self.chapter_grades:
for subsection_grade in chapter['sections']:
locations_to_scores.update(subsection_grade.locations_to_scores)
return locations_to_scores
@property
def has_access_to_course(self):
"""
Returns whether the course structure as seen by the
given student is non-empty.
"""
return len(self.course_structure) > 0
@lazy
def summary(self):
"""
Returns the grade summary as calculated by the course's grader.
"""
# Grading policy might be overriden by a CCX, need to reset it
self.course.set_grading_policy(self.course.grading_policy)
grade_summary = self.course.grader.grade(
self.subsection_grade_totals_by_format,
generate_random_scores=settings.GENERATE_PROFILE_SCORES
)
# We round the grade here, to make sure that the grade is a whole percentage and
# doesn't get displayed differently than it gets grades
grade_summary['percent'] = round(grade_summary['percent'] * 100 + 0.05) / 100
grade_summary['grade'] = self._compute_letter_grade(grade_summary['percent'])
grade_summary['totaled_scores'] = self.subsection_grade_totals_by_format
grade_summary['raw_scores'] = list(self.locations_to_scores.itervalues())
return grade_summary
def compute(self):
"""
Computes the grade for the given student and course.
"""
subsection_grade_factory = SubsectionGradeFactory(self.student)
for chapter_key in self.course_structure.get_children(self.course.location):
chapter = self.course_structure[chapter_key]
subsection_grades = []
for subsection_key in self.course_structure.get_children(chapter_key):
subsection_grades.append(
subsection_grade_factory.create(
self.course_structure[subsection_key],
self.course_structure, self.course
)
)
self.chapter_grades.append({
'display_name': block_metadata_utils.display_name_with_default_escaped(chapter),
'url_name': block_metadata_utils.url_name_for_block(chapter),
'sections': subsection_grades
})
self._signal_listeners_when_grade_computed()
def score_for_module(self, location):
"""
Calculate the aggregate weighted score for any location in the course.
This method returns a tuple containing (earned_score, possible_score).
If the location is of 'problem' type, this method will return the
possible and earned scores for that problem. If the location refers to a
composite module (a vertical or section ) the scores will be the sums of
all scored problems that are children of the chosen location.
"""
if location in self.locations_to_scores:
score = self.locations_to_scores[location]
return score.earned, score.possible
children = self.course_structure.get_children(location)
earned = 0.0
possible = 0.0
for child in children:
child_earned, child_possible = self.score_for_module(child)
earned += child_earned
possible += child_possible
return earned, possible
def _compute_letter_grade(self, percentage):
"""
Returns a letter grade as defined in grading_policy (e.g. 'A' 'B' 'C' for 6.002x) or None.
Arguments
- grade_cutoffs is a dictionary mapping a grade to the lowest
possible percentage to earn that grade.
- percentage is the final percent across all problems in a course
"""
letter_grade = None
grade_cutoffs = self.course.grade_cutoffs
# Possible grades, sorted in descending order of score
descending_grades = sorted(grade_cutoffs, key=lambda x: grade_cutoffs[x], reverse=True)
for possible_grade in descending_grades:
if percentage >= grade_cutoffs[possible_grade]:
letter_grade = possible_grade
break
return letter_grade
def _signal_listeners_when_grade_computed(self):
"""
Signal all listeners when grades are computed.
"""
responses = GRADES_UPDATED.send_robust(
sender=None,
username=self.student.username,
grade_summary=self.summary,
course_key=self.course.id,
deadline=self.course.end
)
for receiver, response in responses:
log.info(
'Signal fired when student grade is calculated. Receiver: %s. Response: %s',
receiver, response
)
class CourseGradeFactory(object):
"""
Factory class to create Course Grade objects
"""
def __init__(self, student):
self.student = student
def create(self, course):
"""
Returns the CourseGrade object for the given student and course.
"""
course_structure = get_course_blocks(self.student, course.location)
return (
self._get_saved_grade(course, course_structure) or
self._compute_and_update_grade(course, course_structure)
)
def _compute_and_update_grade(self, course, course_structure):
"""
Freshly computes and updates the grade for the student and course.
"""
course_grade = CourseGrade(self.student, course, course_structure)
course_grade.compute()
return course_grade
def _get_saved_grade(self, course, course_structure): # pylint: disable=unused-argument
"""
Returns the saved grade for the given course and student.
"""
if settings.FEATURES.get('ENABLE_SUBSECTION_GRADES_SAVED') and course.enable_subsection_grades_saved:
# TODO LATER Retrieve the saved grade for the course, if it exists.
_pretend_to_save_course_grades()
def _pretend_to_save_course_grades():
"""
Stub to facilitate testing feature flag until robust grade work lands.
"""
pass
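
Typical use mirrors the rewritten _progress view above; a minimal sketch (student and course are assumed to exist):

course_grade = CourseGradeFactory(student).create(course)
if course_grade.has_access_to_course:
    percent = course_grade.summary['percent']   # e.g. 0.75
    letter = course_grade.summary['grade']      # e.g. 'B', or None if failing
    earned, possible = course_grade.score_for_module(course.location)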
"""
SubsectionGrade Class
"""
from collections import OrderedDict
from lazy import lazy
from django.conf import settings
from courseware.model_data import ScoresClient
from lms.djangoapps.grades.scores import get_score, possibly_scored
from student.models import anonymous_id_for_user
from submissions import api as submissions_api
from xmodule import block_metadata_utils, graders
from xmodule.graders import Score
class SubsectionGrade(object):
"""
Class for Subsection Grades.
"""
def __init__(self, subsection):
self.location = subsection.location
self.display_name = block_metadata_utils.display_name_with_default_escaped(subsection)
self.url_name = block_metadata_utils.url_name_for_block(subsection)
self.format = getattr(subsection, 'format', '')
self.due = getattr(subsection, 'due', None)
self.graded = getattr(subsection, 'graded', False)
self.graded_total = None # aggregated grade for all graded problems
self.all_total = None # aggregated grade for all problems, regardless of whether they are graded
self.locations_to_scores = OrderedDict() # dict of problem locations to their Score objects
@lazy
def scores(self):
"""
List of all problem scores in the subsection.
"""
return list(self.locations_to_scores.itervalues())
def compute(self, student, course_structure, scores_client, submissions_scores):
"""
Compute the grade of this subsection for the given student and course.
"""
for descendant_key in course_structure.post_order_traversal(
filter_func=possibly_scored,
start_node=self.location,
):
descendant = course_structure[descendant_key]
if not getattr(descendant, 'has_score', False):
continue
(earned, possible) = get_score(
student,
descendant,
scores_client,
submissions_scores,
)
if earned is None and possible is None:
continue
# cannot grade a problem with a denominator of 0
descendant_graded = descendant.graded if possible > 0 else False
self.locations_to_scores[descendant.location] = Score(
earned,
possible,
descendant_graded,
block_metadata_utils.display_name_with_default_escaped(descendant),
descendant.location,
)
self.all_total, self.graded_total = graders.aggregate_scores(
self.scores, self.display_name,
)
class SubsectionGradeFactory(object):
"""
Factory for Subsection Grades.
"""
def __init__(self, student):
self.student = student
self._scores_client = None
self._submissions_scores = None
def create(self, subsection, course_structure, course):
"""
Returns the SubsectionGrade object for the student and subsection.
"""
return (
self._get_saved_grade(subsection, course) or
self._compute_and_update_grade(subsection, course_structure, course)
)
def _compute_and_update_grade(self, subsection, course_structure, course):
"""
Freshly computes and updates the grade for the student and subsection.
"""
self._prefetch_scores(course_structure, course)
subsection_grade = SubsectionGrade(subsection)
subsection_grade.compute(self.student, course_structure, self._scores_client, self._submissions_scores)
self._update_saved_grade(subsection_grade, subsection, course)
return subsection_grade
def _get_saved_grade(self, subsection, course): # pylint: disable=unused-argument
"""
Returns the saved grade for the given course and student.
"""
if settings.FEATURES.get('ENABLE_SUBSECTION_GRADES_SAVED') and course.enable_subsection_grades_saved:
# TODO Retrieve the saved grade for the subsection, if it exists.
pass
def _update_saved_grade(self, subsection_grade, subsection, course): # pylint: disable=unused-argument
"""
Returns the saved grade for the given course and student.
"""
if settings.FEATURES.get('ENABLE_SUBSECTION_GRADES_SAVED') and course.enable_subsection_grades_saved:
# TODO Update the saved grade for the subsection.
_pretend_to_save_subsection_grades()
def _prefetch_scores(self, course_structure, course):
"""
Returns the prefetched scores for the given student and course.
"""
if not self._scores_client:
scorable_locations = [block_key for block_key in course_structure if possibly_scored(block_key)]
self._scores_client = ScoresClient.create_for_locations(
course.id, self.student.id, scorable_locations
)
self._submissions_scores = submissions_api.get_scores(
unicode(course.id), anonymous_id_for_user(self.student, course.id)
)
def _pretend_to_save_subsection_grades():
"""
Stub to facilitate testing feature flag until robust grade work lands.
"""
pass
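
In this diff the only caller is CourseGrade.compute above; used in isolation, the factory looks like this sketch (all variables assumed):

factory = SubsectionGradeFactory(student)
subsection_grade = factory.create(subsection, course_structure, course)
# subsection_grade.all_total and subsection_grade.graded_total are the
# Score tuples produced by graders.aggregate_scores.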
""" """
Progress Summary of a learner's course grades. Progress Summary of a learner's course grades.
""" """
from course_blocks.api import get_course_blocks from .new.course_grade import CourseGradeFactory
from courseware.model_data import ScoresClient
from openedx.core.lib.gating import api as gating_api
from student.models import anonymous_id_for_user
from util.db import outer_atomic
from xmodule import graders, block_metadata_utils
from xmodule.graders import Score
from .scores import get_score, possibly_scored
def summary(student, course):
class ProgressSummary(object):
"""
Wrapper class for the computation of a user's scores across a course.
Attributes
chapters: a summary of all sections with problems in the course. It is
organized as an array of chapters, each containing an array of sections,
each containing an array of scores. This contains information for graded
and ungraded problems, and is good for displaying a course summary with
due dates, etc.
weighted_scores: a dictionary mapping module locations to weighted Score
objects.
locations_to_children: a function mapping locations to their
direct descendants.
"""
def __init__(self, chapters=None, weighted_scores=None, locations_to_children=None):
self.chapters = chapters
self.weighted_scores = weighted_scores
self.locations_to_children = locations_to_children
def score_for_module(self, location):
"""
Calculate the aggregate weighted score for any location in the course.
This method returns a tuple containing (earned_score, possible_score).
If the location is of 'problem' type, this method will return the
possible and earned scores for that problem. If the location refers to a
composite module (a vertical or section ) the scores will be the sums of
all scored problems that are children of the chosen location.
"""
if location in self.weighted_scores:
score = self.weighted_scores[location]
return score.earned, score.possible
children = self.locations_to_children[location]
earned = 0.0
possible = 0.0
for child in children:
child_earned, child_possible = self.score_for_module(child)
earned += child_earned
possible += child_possible
return earned, possible
def summary(student, course, course_structure=None):
""" """
This pulls a summary of all problems in the course. Returns the CourseGrade for the given course and student.
Returns
- courseware_summary is a summary of all sections with problems in the course.
It is organized as an array of chapters, each containing an array of sections,
each containing an array of scores. This contains information for graded and
ungraded problems, and is good for displaying a course summary with due dates,
etc.
- None if the student does not have access to load the course module.
Arguments:
student: A User object for the student to grade
course: A Descriptor containing the course to grade
""" """
if course_structure is None: return CourseGradeFactory(student).create(course)
course_structure = get_course_blocks(student, course.location)
if not len(course_structure):
return ProgressSummary()
scorable_locations = [block_key for block_key in course_structure if possibly_scored(block_key)]
with outer_atomic():
scores_client = ScoresClient.create_for_locations(course.id, student.id, scorable_locations)
# We need to import this here to avoid a circular dependency of the form:
# XBlock --> submissions --> Django Rest Framework error strings -->
# Django translation --> ... --> courseware --> submissions
from submissions import api as sub_api # installed from the edx-submissions repository
with outer_atomic():
submissions_scores = sub_api.get_scores(
unicode(course.id), anonymous_id_for_user(student, course.id)
)
# Check for gated content
gated_content = gating_api.get_gated_content(course, student)
chapters = []
locations_to_weighted_scores = {}
for chapter_key in course_structure.get_children(course_structure.root_block_usage_key):
chapter = course_structure[chapter_key]
sections = []
for section_key in course_structure.get_children(chapter_key):
if unicode(section_key) in gated_content:
continue
section = course_structure[section_key]
graded = getattr(section, 'graded', False)
scores = []
for descendant_key in course_structure.post_order_traversal(
filter_func=possibly_scored,
start_node=section_key,
):
descendant = course_structure[descendant_key]
(correct, total) = get_score(
student,
descendant,
scores_client,
submissions_scores,
)
if correct is None and total is None:
continue
weighted_location_score = Score(
correct,
total,
graded,
block_metadata_utils.display_name_with_default_escaped(descendant),
descendant.location
)
scores.append(weighted_location_score)
locations_to_weighted_scores[descendant.location] = weighted_location_score
escaped_section_name = block_metadata_utils.display_name_with_default_escaped(section)
section_total, _ = graders.aggregate_scores(scores, escaped_section_name)
sections.append({
'display_name': escaped_section_name,
'url_name': block_metadata_utils.url_name_for_block(section),
'scores': scores,
'section_total': section_total,
'format': getattr(section, 'format', ''),
'due': getattr(section, 'due', None),
'graded': graded,
})
chapters.append({
'course': course.display_name_with_default_escaped,
'display_name': block_metadata_utils.display_name_with_default_escaped(chapter),
'url_name': block_metadata_utils.url_name_for_block(chapter),
'sections': sections
})
return ProgressSummary(chapters, locations_to_weighted_scores, course_structure.get_children)
@@ -7,7 +7,7 @@ from .transformer import GradesTransformer

 @memoized
-def block_types_with_scores():
+def block_types_possibly_scored():
     """
     Returns the block types that could have a score.
@@ -27,20 +27,20 @@ def possibly_scored(usage_key):
     """
     Returns whether the given block could impact grading (i.e. scored, or has children).
     """
-    return usage_key.block_type in block_types_with_scores()
+    return usage_key.block_type in block_types_possibly_scored()


-def weighted_score(raw_correct, raw_total, weight):
+def weighted_score(raw_earned, raw_possible, weight):
     """Return a tuple that represents the weighted (correct, total) score."""
     # If there is no weighting, or weighting can't be applied, return input.
-    if weight is None or raw_total == 0:
-        return (raw_correct, raw_total)
-    return (float(raw_correct) * weight / raw_total, float(weight))
+    if weight is None or raw_possible == 0:
+        return (raw_earned, raw_possible)
+    return (float(raw_earned) * weight / raw_possible, float(weight))
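
A worked example (values assumed): a problem worth 7 raw points, weighted to 10, where the learner earned 5 raw points:

weighted_score(5, 7, 10)    # -> (7.142857142857143, 10.0), i.e. 5 * 10 / 7
weighted_score(5, 7, None)  # -> (5, 7): no weight, input returned unchanged
weighted_score(0, 0, 10)    # -> (0, 0): zero denominator, weight not applied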
 def get_score(user, block, scores_client, submissions_scores_cache):
     """
-    Return the score for a user on a problem, as a tuple (correct, total).
+    Return the score for a user on a problem, as a tuple (earned, possible).
     e.g. (5,7) if you got 5 out of 7 points.

     If this problem doesn't have a score, or we couldn't load it, returns (None,
...@@ -73,17 +73,17 @@ def get_score(user, block, scores_client, submissions_scores_cache):
     score = scores_client.get(block.location)
     if score and score.total is not None:
         # We have a valid score, just use it.
-        correct = score.correct if score.correct is not None else 0.0
-        total = score.total
+        earned = score.correct if score.correct is not None else 0.0
+        possible = score.total
     else:
         # This means we don't have a valid score entry and we don't have a
         # cached_max_score on hand. We know they've earned 0.0 points on this.
-        correct = 0.0
-        total = block.transformer_data[GradesTransformer].max_score
+        earned = 0.0
+        possible = block.transformer_data[GradesTransformer].max_score

     # Problem may be an error module (if something in the problem builder failed)
-    # In which case total might be None
-    if total is None:
+    # In which case possible might be None
+    if possible is None:
         return (None, None)

-    return weighted_score(correct, total, block.weight)
+    return weighted_score(earned, possible, block.weight)
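Note the fallback chain in get_score: a stored score wins; otherwise the learner has earned 0.0 out of the block's cached max score; and if even that max score is missing (an error module, for instance), the block reports (None, None), which callers such as the progress summary above skip. A condensed, illustration-only restatement of that chain, not the shipped function:

def get_score_sketch(score, cached_max_score, weight):
    # Prefer a real score row with a usable total...
    if score is not None and score.total is not None:
        earned = score.correct if score.correct is not None else 0.0
        possible = score.total
    else:
        # ...fall back to the cached max score with zero earned...
        earned = 0.0
        possible = cached_max_score
    # ...and give up when no usable 'possible' value exists.
    if possible is None:
        return (None, None)
    # Finally apply the same weighting rule as weighted_score above.
    if weight is None or possible == 0:
        return (earned, possible)
    return (float(earned) * weight / possible, float(weight))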
""" """
Test grade calculation. Test grade calculation.
""" """
import ddt
from django.conf import settings
from django.http import Http404 from django.http import Http404
from django.test import TestCase from django.test import TestCase
from mock import patch, MagicMock from mock import patch, MagicMock
from nose.plugins.attrib import attr from nose.plugins.attrib import attr
from opaque_keys.edx.locations import SlashSeparatedCourseKey from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import CourseLocator, BlockUsageLocator from opaque_keys.edx.locator import CourseLocator, BlockUsageLocator
from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
from courseware.module_render import get_module from courseware.module_render import get_module
from courseware.model_data import FieldDataCache, set_score from courseware.model_data import FieldDataCache, set_score
from courseware.tests.helpers import ( from courseware.tests.helpers import (
LoginEnrollmentTestCase, LoginEnrollmentTestCase,
get_request_for_user get_request_for_user
) )
from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory from lms.djangoapps.course_blocks.api import get_course_blocks
from student.tests.factories import UserFactory from student.tests.factories import UserFactory
from student.models import CourseEnrollment from student.models import CourseEnrollment
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
...@@ -24,10 +27,11 @@ from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase

 from .. import course_grades
 from ..course_grades import summary as grades_summary
 from ..module_grades import get_module_score
-from ..progress import ProgressSummary
+from ..new.course_grade import CourseGrade, CourseGradeFactory
+from ..new.subsection_grade import SubsectionGradeFactory


-def _grade_with_errors(student, course, keep_raw_scores=False):
+def _grade_with_errors(student, course):
     """This fake grade method will throw exceptions for student3 and
     student4, but allow any other students to go through normal grading.
...@@ -38,7 +42,7 @@ def _grade_with_errors(student, course):
     if student.username in ['student3', 'student4']:
         raise Exception("I don't like {}".format(student.username))

-    return grades_summary(student, course, keep_raw_scores=keep_raw_scores)
+    return grades_summary(student, course)
 @attr('shard_1')
...@@ -200,9 +204,11 @@ class TestProgressSummary(TestCase):
             self.loc_k: [],
             self.loc_m: [],
         }
-        self.progress_summary = ProgressSummary(
-            None, weighted_scores, locations_to_scored_children
-        )
+        course_structure = MagicMock()
+        course_structure.get_children = lambda location: locations_to_scored_children[location]
+        self.course_grade = CourseGrade(student=None, course=None, course_structure=course_structure)
+        self.course_grade.locations_to_scores = weighted_scores

     def create_score(self, earned, possible):
         """
...@@ -222,51 +228,135 @@ class TestProgressSummary(TestCase):
         )
     def test_score_chapter(self):
-        earned, possible = self.progress_summary.score_for_module(self.loc_a)
+        earned, possible = self.course_grade.score_for_module(self.loc_a)
         self.assertEqual(earned, 9)
         self.assertEqual(possible, 24)

     def test_score_section_many_leaves(self):
-        earned, possible = self.progress_summary.score_for_module(self.loc_b)
+        earned, possible = self.course_grade.score_for_module(self.loc_b)
         self.assertEqual(earned, 6)
         self.assertEqual(possible, 14)

     def test_score_section_one_leaf(self):
-        earned, possible = self.progress_summary.score_for_module(self.loc_c)
+        earned, possible = self.course_grade.score_for_module(self.loc_c)
         self.assertEqual(earned, 3)
         self.assertEqual(possible, 10)

     def test_score_vertical_two_leaves(self):
-        earned, possible = self.progress_summary.score_for_module(self.loc_d)
+        earned, possible = self.course_grade.score_for_module(self.loc_d)
         self.assertEqual(earned, 5)
         self.assertEqual(possible, 10)

     def test_score_vertical_two_leaves_one_unscored(self):
-        earned, possible = self.progress_summary.score_for_module(self.loc_e)
+        earned, possible = self.course_grade.score_for_module(self.loc_e)
         self.assertEqual(earned, 1)
         self.assertEqual(possible, 4)

     def test_score_vertical_no_score(self):
-        earned, possible = self.progress_summary.score_for_module(self.loc_f)
+        earned, possible = self.course_grade.score_for_module(self.loc_f)
         self.assertEqual(earned, 0)
         self.assertEqual(possible, 0)

     def test_score_vertical_one_leaf(self):
-        earned, possible = self.progress_summary.score_for_module(self.loc_g)
+        earned, possible = self.course_grade.score_for_module(self.loc_g)
         self.assertEqual(earned, 3)
         self.assertEqual(possible, 10)

     def test_score_leaf(self):
-        earned, possible = self.progress_summary.score_for_module(self.loc_h)
+        earned, possible = self.course_grade.score_for_module(self.loc_h)
         self.assertEqual(earned, 2)
         self.assertEqual(possible, 5)

     def test_score_leaf_no_score(self):
-        earned, possible = self.progress_summary.score_for_module(self.loc_m)
+        earned, possible = self.course_grade.score_for_module(self.loc_m)
         self.assertEqual(earned, 0)
         self.assertEqual(possible, 0)
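Taken together, these tests pin down score_for_module's aggregation rule: a scored leaf reports its own earned/possible pair, and any other location sums the pairs of its scored children recursively. A sketch of that rule (illustration only; the shipped method lives on CourseGrade):

def score_for_module_sketch(location, locations_to_scores, get_children):
    # A scored leaf contributes its own pair.
    if location in locations_to_scores:
        score = locations_to_scores[location]
        return score.earned, score.possible
    # Anything else sums its scored children, recursively.
    earned, possible = 0.0, 0.0
    for child in get_children(location):
        child_earned, child_possible = score_for_module_sketch(
            child, locations_to_scores, get_children
        )
        earned += child_earned
        possible += child_possible
    return earned, possible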
@ddt.ddt
class TestCourseGradeFactory(SharedModuleStoreTestCase):
    """
    Test that CourseGrades are calculated properly
    """
    @classmethod
    def setUpClass(cls):
        super(TestCourseGradeFactory, cls).setUpClass()
        cls.course = CourseFactory.create()
        cls.chapter = ItemFactory.create(
            parent=cls.course,
            category="chapter",
            display_name="Test Chapter"
        )
        cls.sequence = ItemFactory.create(
            parent=cls.chapter,
            category='sequential',
            display_name="Test Sequential 1",
            graded=True
        )
        cls.vertical = ItemFactory.create(
            parent=cls.sequence,
            category='vertical',
            display_name='Test Vertical 1'
        )
        problem_xml = MultipleChoiceResponseXMLFactory().build_xml(
            question_text='The correct answer is Choice 3',
            choices=[False, False, True, False],
            choice_names=['choice_0', 'choice_1', 'choice_2', 'choice_3']
        )
        cls.problem = ItemFactory.create(
            parent=cls.vertical,
            category="problem",
            display_name="Test Problem",
            data=problem_xml
        )

    def setUp(self):
        """
        Set up test course
        """
        super(TestCourseGradeFactory, self).setUp()
        self.request = get_request_for_user(UserFactory())
        self.client.login(username=self.request.user.username, password="test")
        CourseEnrollment.enroll(self.request.user, self.course.id)

    @ddt.data(
        (True, True),
        (True, False),
        (False, True),
        (False, False),
    )
    @ddt.unpack
    def test_course_grade_feature_gating(self, feature_flag, course_setting):
        # Grades are only saved if the feature flag and the advanced setting are
        # both set to True.
        grade_factory = CourseGradeFactory(self.request.user)
        with patch('lms.djangoapps.grades.new.course_grade._pretend_to_save_course_grades') as mock_save_grades:
            with patch.dict(settings.FEATURES, {'ENABLE_SUBSECTION_GRADES_SAVED': feature_flag}):
                with patch.object(self.course, 'enable_subsection_grades_saved', new=course_setting):
                    grade_factory.create(self.course)
        self.assertEqual(mock_save_grades.called, feature_flag and course_setting)

    @ddt.data(
        (True, True),
        (True, False),
        (False, True),
        (False, False),
    )
    @ddt.unpack
    def test_subsection_grade_feature_gating(self, feature_flag, course_setting):
        # Grades are only saved if the feature flag and the advanced setting are
        # both set to True.
        grade_factory = SubsectionGradeFactory(self.request.user)
        course_structure = get_course_blocks(self.request.user, self.course.location)
        with patch(
            'lms.djangoapps.grades.new.subsection_grade._pretend_to_save_subsection_grades'
        ) as mock_save_grades:
            with patch.dict(settings.FEATURES, {'ENABLE_SUBSECTION_GRADES_SAVED': feature_flag}):
                with patch.object(self.course, 'enable_subsection_grades_saved', new=course_setting):
                    grade_factory.create(self.sequence, course_structure, self.course)
        self.assertEqual(mock_save_grades.called, feature_flag and course_setting)
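Both ddt truth tables assert the same AND gate: grades persist only when the platform feature flag and the per-course advanced setting are both true. As a one-line predicate (the helper name is invented for illustration; the semantics come from the tests above):

def should_persist_grades(features, course):
    # Hypothetical helper; AND semantics taken from the gating tests above.
    return bool(features.get('ENABLE_SUBSECTION_GRADES_SAVED')) and \
        bool(getattr(course, 'enable_subsection_grades_saved', False))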
 class TestGetModuleScore(LoginEnrollmentTestCase, SharedModuleStoreTestCase):
     """
     Test get_module_score
...@@ -376,13 +466,16 @@ class TestGetModuleScore(LoginEnrollmentTestCase, SharedModuleStoreTestCase):
         self.client.login(username=self.request.user.username, password="test")
         CourseEnrollment.enroll(self.request.user, self.course.id)

+        # warm up the score cache to allow accurate query counts, even if tests are run in random order
+        get_module_score(self.request.user, self.course, self.seq1)
+
     def test_get_module_score(self):
         """
         Test test_get_module_score
         """
         # One query is for getting the list of disabled XBlocks (which is
         # then stored in the request).
-        with self.assertNumQueries(2):
+        with self.assertNumQueries(1):
             score = get_module_score(self.request.user, self.course, self.seq1)
         self.assertEqual(score, 0)
......
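The warm-up call added to setUp exists so that assertNumQueries measures only steady-state queries: whichever test ran first would otherwise pay a one-time, cached query and throw the count off. A generic sketch of the pattern in a plain Django test (the class and its cached lookup are invented for illustration):

from django.contrib.auth.models import User
from django.test import TestCase

class QueryCountExample(TestCase):
    """Illustration-only: warm a cache in setUp so query counts are stable."""
    _cached_count = None

    def lookup(self):
        # Hits the database only on the first call; later calls reuse the cache.
        if QueryCountExample._cached_count is None:
            QueryCountExample._cached_count = User.objects.count()
        return QueryCountExample._cached_count

    def setUp(self):
        super(QueryCountExample, self).setUp()
        QueryCountExample._cached_count = None
        self.lookup()  # warm the cache, as the grades test above does

    def test_steady_state_query_count(self):
        with self.assertNumQueries(0):
            self.lookup()  # cached: no further queries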
...@@ -51,7 +51,7 @@ def offline_grade_calculation(course_key):
         request.user = student
         request.session = {}

-        gradeset = course_grades.summary(student, course, keep_raw_scores=True)
+        gradeset = course_grades.summary(student, course)

         # Convert Score namedtuples to dicts:
         totaled_scores = gradeset['totaled_scores']
         for section in totaled_scores:
...@@ -84,13 +84,13 @@ def offline_grades_available(course_key):
     return ocgl.latest('created')


-def student_grades(student, request, course, keep_raw_scores=False, use_offline=False):
+def student_grades(student, request, course, use_offline=False):  # pylint: disable=unused-argument
     '''
     This is the main interface to get grades. It has the same parameters as grades.grade, as well
     as use_offline. If use_offline is True then this will look for an offline computed gradeset in the DB.
     '''
     if not use_offline:
-        return course_grades.summary(student, course, keep_raw_scores=keep_raw_scores)
+        return course_grades.summary(student, course)

     try:
         ocg = models.OfflineComputedGrade.objects.get(user=student, course_id=course.id)
......
...@@ -933,7 +933,7 @@ def upload_problem_grade_report(_xmodule_instance_args, _entry_id, course_id, _t
     error_rows = [list(header_row.values()) + ['error_msg']]
     current_step = {'step': 'Calculating Grades'}

-    for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students, keep_raw_scores=True):
+    for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students):
         student_fields = [getattr(student, field_name) for field_name in header_row]
         task_progress.attempted += 1
......
...@@ -635,7 +635,7 @@ class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
                 unicode(self.student_2.id),
                 self.student_2.email,
                 self.student_2.username,
-                '0.0', 'N/A', 'N/A'
+                '0.0', '0.0', '2'
             ]
         ))
     ])
...@@ -1671,7 +1671,7 @@ class TestCertificateGeneration(InstructorTaskModuleTestCase):
             'skipped': 2
         }

-        with self.assertNumQueries(214):
+        with self.assertNumQueries(150):
             self.assertCertificatesGenerated(task_input, expected_results)

     @ddt.data(
......
...@@ -356,6 +356,12 @@ FEATURES = {
     # lives in the Extended table, saving the frontend from
     # making multiple queries.
     'ENABLE_READING_FROM_MULTIPLE_HISTORY_TABLES': True,
+
+    # Temporary feature flag for disabling saving of subsection grades.
+    # There is also an advanced setting in the course module. The
+    # feature flag and the advanced setting must both be true for
+    # a course to use saved grades.
+    'ENABLE_SUBSECTION_GRADES_SAVED': False,
 }

 # Ignore static asset files on import which match this pattern
......
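To exercise the flag without editing this default, it can be flipped per-test, exactly as the gating tests above do (a sketch):

from django.conf import settings
from mock import patch

# Code under the with-block sees the feature flag enabled; the default
# settings file is left untouched.
with patch.dict(settings.FEATURES, {'ENABLE_SUBSECTION_GRADES_SAVED': True}):
    pass  # call the grading code under test here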
...@@ -150,13 +150,13 @@ from django.utils.http import urlquote_plus
     %for section in chapter['sections']:
       <div>
         <%
-          earned = section['section_total'].earned
-          total = section['section_total'].possible
+          earned = section.all_total.earned
+          total = section.all_total.possible
          percentageString = "{0:.0%}".format( float(earned)/total) if earned > 0 and total > 0 else ""
        %>
-        <h3><a href="${reverse('courseware_section', kwargs=dict(course_id=course.id.to_deprecated_string(), chapter=chapter['url_name'], section=section['url_name'])) | h}">
-          ${ section['display_name'] | h}
+        <h3><a href="${reverse('courseware_section', kwargs=dict(course_id=course.id.to_deprecated_string(), chapter=chapter['url_name'], section=section.url_name)) | h}">
+          ${ section.display_name | h}
          %if total > 0 or earned > 0:
            <span class="sr">
              ${_("{earned} of {total} possible points").format(earned='{:.3n}'.format(float(earned)), total='{:.3n}'.format(float(total))) | h}
...@@ -168,11 +168,11 @@ from django.utils.http import urlquote_plus
          %endif
        </h3>
        <p>
-          ${section['format'] | h}
-          %if section.get('due') is not None:
+          ${section.format | h}
+          %if section.due is not None:
            <%
-              formatted_string = get_time_display(section['due'], course.due_date_display_format, coerce_tz=settings.TIME_ZONE_DISPLAYED_FOR_DEADLINES)
+              formatted_string = get_time_display(section.due, course.due_date_display_format, coerce_tz=settings.TIME_ZONE_DISPLAYED_FOR_DEADLINES)
              due_date = '' if len(formatted_string)==0 else _(u'due {date}').format(date=formatted_string)
            %>
            <em>
...@@ -182,10 +182,10 @@ from django.utils.http import urlquote_plus
        </p>
        <div class="scores">
-          %if len(section['scores']) > 0:
-          <h3> ${ _("Problem Scores: ") if section['graded'] else _("Practice Scores: ")} </h3>
+          %if len(section.scores) > 0:
+          <h3> ${ _("Problem Scores: ") if section.graded else _("Practice Scores: ")} </h3>
           <ol>
-          %for score in section['scores']:
+          %for score in section.scores:
             <li>${"{0:.3n}/{1:.3n}".format(float(score.earned),float(score.possible)) | h}</li>
           %endfor
           </ol>
......
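Every template change above swaps a dict lookup such as section['display_name'] for attribute access such as section.display_name, so the per-section summaries handed to progress.html must now be objects. A stand-in showing the minimal attribute surface the template reads (the class itself is invented; only the attribute names come from the template):

class SectionSummarySketch(object):
    """Illustration-only: the attributes progress.html dereferences."""
    def __init__(self, display_name, url_name, format, due, graded, scores, all_total):
        self.display_name = display_name  # section heading text
        self.url_name = url_name          # builds the courseware_section link
        self.format = format              # assignment type, e.g. "Homework"
        self.due = due                    # datetime or None
        self.graded = graded              # picks "Problem Scores" vs "Practice Scores"
        self.scores = scores              # list of Score namedtuples
        self.all_total = all_total        # Score with .earned and .possible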