Commit e87388e2 by Nimisha Asthagiri

Grades cleanup before updating grade report

Remove deprecated SingleSectionGrader. TNL-5987
Remove display_name and module_id from Score objects
Update CourseGradeFactory.__init__ so it is not user-specific
Update some callers to use the CourseGrade class instead of the "summary" dict
Remove the no-longer-needed course_grades.py module
Rename Django signal from GRADES_UPDATED to COURSE_GRADE_CHANGED
parent 1956e2cf
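The change that ripples through most call sites below: CourseGradeFactory no longer binds a student at construction; the student is passed to each method instead. A minimal before/after sketch of the calling convention (user and course are the usual Django User and course descriptor objects):

    # before this commit: one factory per student
    #   persisted = CourseGradeFactory(user).get_persisted(course)
    # after: one stateless factory, student passed per call
    from lms.djangoapps.grades.new.course_grade import CourseGradeFactory

    factory = CourseGradeFactory()
    course_grade = factory.create(user, course)        # compute (and possibly persist) a grade
    persisted = factory.get_persisted(user, course)    # read the saved grade, which may be stale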
......@@ -404,7 +404,7 @@ def _cert_info(user, course_overview, cert_status, course_mode): # pylint: disa
)
if status in {'generating', 'ready', 'notpassing', 'restricted', 'auditing', 'unverified'}:
persisted_grade = CourseGradeFactory(user).get_persisted(course_overview)
persisted_grade = CourseGradeFactory().get_persisted(user, course_overview)
if persisted_grade is not None:
status_dict['grade'] = unicode(persisted_grade.percent)
elif 'grade' in cert_status:
......
......@@ -18,17 +18,13 @@ class ScoreBase(object):
"""
Abstract base class for encapsulating the fields of score values.
Fields common to all scores include:
display_name (string) - the display name of the module
module_id (UsageKey) - the location of the module
graded (boolean) - whether or not this module is graded
attempted (boolean) - whether the module was attempted
"""
__metaclass__ = abc.ABCMeta
def __init__(self, graded, display_name, module_id, attempted):
def __init__(self, graded, attempted):
self.graded = graded
self.display_name = display_name
self.module_id = module_id
self.attempted = attempted
def __eq__(self, other):
......@@ -55,10 +51,10 @@ class ProblemScore(ScoreBase):
"""
def __init__(self, raw_earned, raw_possible, weighted_earned, weighted_possible, weight, *args, **kwargs):
super(ProblemScore, self).__init__(*args, **kwargs)
self.raw_earned = raw_earned
self.raw_possible = raw_possible
self.earned = weighted_earned
self.possible = weighted_possible
self.raw_earned = float(raw_earned) if raw_earned is not None else None
self.raw_possible = float(raw_possible) if raw_possible is not None else None
self.earned = float(weighted_earned) if weighted_earned is not None else None
self.possible = float(weighted_possible) if weighted_possible is not None else None
self.weight = weight
......@@ -71,8 +67,8 @@ class AggregatedScore(ScoreBase):
"""
def __init__(self, tw_earned, tw_possible, *args, **kwargs):
super(AggregatedScore, self).__init__(*args, **kwargs)
self.earned = tw_earned
self.possible = tw_possible
self.earned = float(tw_earned) if tw_earned is not None else None
self.possible = float(tw_possible) if tw_possible is not None else None
def float_sum(iterable):
......@@ -82,11 +78,9 @@ def float_sum(iterable):
return float(sum(iterable))
def aggregate_scores(scores, display_name="summary", location=None):
def aggregate_scores(scores):
"""
scores: A list of ScoreBase objects
display_name: The display name for the score object
location: The location under which all objects in scores are located
returns: A tuple (all_total, graded_total).
all_total: A ScoreBase representing the total score summed over all input scores
graded_total: A ScoreBase representing the score summed over all graded input scores
......@@ -100,11 +94,11 @@ def aggregate_scores(scores, display_name="summary", location=None):
any_attempted = any(score.attempted for score in scores)
# regardless of whether it is graded
all_total = AggregatedScore(total_correct, total_possible, False, display_name, location, any_attempted)
all_total = AggregatedScore(total_correct, total_possible, False, any_attempted)
# selecting only graded things
graded_total = AggregatedScore(
total_correct_graded, total_possible_graded, True, display_name, location, any_attempted_graded,
total_correct_graded, total_possible_graded, True, any_attempted_graded,
)
return all_total, graded_total
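A sketch of the slimmed-down score API after this hunk, using only the constructor arguments shown above. Note that earned/possible values are now coerced to float, and aggregate_scores takes nothing but the list of scores:

    from xmodule.graders import AggregatedScore, ProblemScore, aggregate_scores

    scores = [
        ProblemScore(raw_earned=1, raw_possible=2, weighted_earned=2, weighted_possible=4,
                     weight=2, graded=True, attempted=True),
        ProblemScore(raw_earned=0, raw_possible=1, weighted_earned=0, weighted_possible=1,
                     weight=1, graded=False, attempted=False),
    ]
    all_total, graded_total = aggregate_scores(scores)  # no display_name/location arguments
    assert all_total.possible == 5.0     # earned/possible are floats now
    assert graded_total.possible == 4.0  # only the graded problem counts here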
......@@ -126,9 +120,8 @@ def grader_from_conf(conf):
This creates a CourseGrader from a configuration (such as in course_settings.py).
The conf can simply be an instance of CourseGrader, in which case no work is done.
More commonly, the conf is a list of dictionaries. A WeightedSubsectionsGrader
with AssignmentFormatGrader's or SingleSectionGrader's as subsections will be
generated. Every dictionary should contain the parameters for making either a
AssignmentFormatGrader or SingleSectionGrader, in addition to a 'weight' key.
with AssignmentFormatGraders as subsections will be generated. Every dictionary should
contain the parameters for making an AssignmentFormatGrader, in addition to a 'weight' key.
"""
if isinstance(conf, CourseGrader):
return conf
......@@ -137,27 +130,14 @@ def grader_from_conf(conf):
for subgraderconf in conf:
subgraderconf = subgraderconf.copy()
weight = subgraderconf.pop("weight", 0)
# NOTE: 'name' used to exist in SingleSectionGrader. We are deprecating SingleSectionGrader
# and converting everything into an AssignmentFormatGrader by adding 'min_count' and
# 'drop_count'. AssignmentFormatGrader does not expect 'name', so if it appears
# in bad_args, go ahead and remove it (this causes no errors). Eventually, SingleSectionGrader
# should be completely removed.
name = 'name'
try:
if 'min_count' in subgraderconf:
# This is an AssignmentFormatGrader
subgrader_class = AssignmentFormatGrader
elif name in subgraderconf:
# This is a SingleSectionGrader
subgrader_class = SingleSectionGrader
else:
raise ValueError("Configuration has no appropriate grader class.")
bad_args = invalid_args(subgrader_class.__init__, subgraderconf)
# See note above concerning 'name'.
if bad_args.issuperset({name}):
bad_args = bad_args - {name}
del subgraderconf[name]
if len(bad_args) > 0:
log.warning("Invalid arguments for a subgrader: %s", bad_args)
for key in bad_args:
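With SingleSectionGrader removed, a one-off section is configured as an AssignmentFormatGrader with min_count=0 and drop_count=0, the same conversion applied to the Midterm fixture in the tests below. A hedged configuration sketch:

    grading_policy = [
        {'type': 'Homework', 'min_count': 12, 'drop_count': 2, 'short_label': 'HW', 'weight': 0.5},
        # formerly SingleSectionGrader("Midterm", "Midterm Exam"); a leftover
        # 'name' key would now be logged as an invalid argument and dropped
        {'type': 'Midterm', 'min_count': 0, 'drop_count': 0, 'short_label': 'Midterm', 'weight': 0.5},
    ]
    grader = grader_from_conf(grading_policy)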
......@@ -264,57 +244,6 @@ class WeightedSubsectionsGrader(CourseGrader):
'grade_breakdown': grade_breakdown}
class SingleSectionGrader(CourseGrader):
"""
This grades a single section with the format 'type' and the name 'name'.
If the name is not appropriate for the short_label or category, they each may
be specified individually.
"""
def __init__(self, type, name, short_label=None, category=None): # pylint: disable=redefined-builtin
self.type = type
self.name = name
self.short_label = short_label or name
self.category = category or name
def grade(self, grade_sheet, generate_random_scores=False):
found_score = None
if self.type in grade_sheet:
for score in grade_sheet[self.type]:
if score.display_name == self.name:
found_score = score
break
if found_score or generate_random_scores:
if generate_random_scores: # for debugging!
earned = random.randint(2, 15)
possible = random.randint(earned, 15)
else: # We found the score
earned = found_score.earned
possible = found_score.possible
percent = earned / possible
detail = u"{name} - {percent:.0%} ({earned:.3n}/{possible:.3n})".format(
name=self.name,
percent=percent,
earned=float(earned),
possible=float(possible)
)
else:
percent = 0.0
detail = u"{name} - 0% (?/?)".format(name=self.name)
breakdown = [{'percent': percent, 'label': self.short_label,
'detail': detail, 'category': self.category, 'prominent': True}]
return {
'percent': percent,
'section_breakdown': breakdown,
#No grade_breakdown here
}
class AssignmentFormatGrader(CourseGrader):
"""
Grades all sections matching the format 'type' with an equal weight. A specified
......@@ -332,9 +261,9 @@ class AssignmentFormatGrader(CourseGrader):
hide_average is to suppress the display of the total score in this grader and instead
only show each assignment in this grader in the breakdown.
If there is only a single assignment in this grader, then it acts like a SingleSectionGrader
and returns only one entry for the grader. Since the assignment and the total are the same,
the total is returned but is not labeled as an average.
If there is only a single assignment in this grader, then it returns only one entry for the
grader. Since the assignment and the total are the same, the total is returned but is not
labeled as an average.
category should be presentable to the user, but may not appear. When the grade breakdown is
displayed, scores from the same category will be similar (for example, by color).
......@@ -401,8 +330,8 @@ class AssignmentFormatGrader(CourseGrader):
section_name = "Generated"
else:
earned = scores[i].earned
possible = scores[i].possible
earned = scores[i].graded_total.earned
possible = scores[i].graded_total.possible
section_name = scores[i].display_name
percentage = earned / possible
......@@ -442,8 +371,7 @@ class AssignmentFormatGrader(CourseGrader):
if len(breakdown) == 1:
# if there is only one entry in a section, suppress the existing individual entry and the average,
# and just display a single entry for the section. That way it acts automatically like a
# SingleSectionGrader.
# and just display a single entry for the section.
total_detail = u"{section_type} = {percent:.0%}".format(
percent=total_percent,
section_type=self.section_type,
......
"""Grading tests"""
import ddt
import unittest
from xmodule import graders
......@@ -12,13 +13,11 @@ class GradesheetTest(unittest.TestCase):
def test_weighted_grading(self):
scores = []
agg_fields = dict(display_name="aggregated_score", module_id=None, attempted=False)
prob_fields = dict(
display_name="problem_score", module_id=None, raw_earned=0, raw_possible=0, weight=0, attempted=False,
)
agg_fields = dict(attempted=False)
prob_fields = dict(raw_earned=0, raw_possible=0, weight=0, attempted=False)
# No scores
all_total, graded_total = aggregate_scores(scores, display_name=agg_fields['display_name'])
all_total, graded_total = aggregate_scores(scores)
self.assertEqual(
all_total,
AggregatedScore(tw_earned=0, tw_possible=0, graded=False, **agg_fields),
......@@ -30,7 +29,7 @@ class GradesheetTest(unittest.TestCase):
# (0/5 non-graded)
scores.append(ProblemScore(weighted_earned=0, weighted_possible=5, graded=False, **prob_fields))
all_total, graded_total = aggregate_scores(scores, display_name=agg_fields['display_name'])
all_total, graded_total = aggregate_scores(scores)
self.assertEqual(
all_total,
AggregatedScore(tw_earned=0, tw_possible=5, graded=False, **agg_fields),
......@@ -44,7 +43,7 @@ class GradesheetTest(unittest.TestCase):
prob_fields['attempted'] = True
agg_fields['attempted'] = True
scores.append(ProblemScore(weighted_earned=3, weighted_possible=5, graded=True, **prob_fields))
all_total, graded_total = aggregate_scores(scores, display_name=agg_fields['display_name'])
all_total, graded_total = aggregate_scores(scores)
self.assertAlmostEqual(
all_total,
AggregatedScore(tw_earned=3, tw_possible=10, graded=False, **agg_fields),
......@@ -56,7 +55,7 @@ class GradesheetTest(unittest.TestCase):
# (0/5 non-graded) + (3/5 graded) + (2/5 graded) = 5/15 total, 5/10 graded
scores.append(ProblemScore(weighted_earned=2, weighted_possible=5, graded=True, **prob_fields))
all_total, graded_total = aggregate_scores(scores, display_name=agg_fields['display_name'])
all_total, graded_total = aggregate_scores(scores)
self.assertAlmostEqual(
all_total,
AggregatedScore(tw_earned=5, tw_possible=15, graded=False, **agg_fields),
......@@ -67,6 +66,7 @@ class GradesheetTest(unittest.TestCase):
)
@ddt.ddt
class GraderTest(unittest.TestCase):
"""
Tests grader implementations
......@@ -81,50 +81,37 @@ class GraderTest(unittest.TestCase):
'Midterm': [],
}
common_fields = dict(graded=True, module_id=None, attempted=True)
class MockGrade(object):
"""
Mock class for SubsectionGrade object.
"""
def __init__(self, graded_total, display_name):
self.graded_total = graded_total
self.display_name = display_name
common_fields = dict(graded=True, attempted=True)
test_gradesheet = {
'Homework': [
AggregatedScore(tw_earned=2, tw_possible=20.0, display_name='hw1', **common_fields),
AggregatedScore(tw_earned=16, tw_possible=16.0, display_name='hw2', **common_fields),
MockGrade(AggregatedScore(tw_earned=2, tw_possible=20.0, **common_fields), display_name='hw1'),
MockGrade(AggregatedScore(tw_earned=16, tw_possible=16.0, **common_fields), display_name='hw2'),
],
# The dropped scores should be from the assignments that don't exist yet
'Lab': [
AggregatedScore(tw_earned=1, tw_possible=2.0, display_name='lab1', **common_fields), # Dropped
AggregatedScore(tw_earned=1, tw_possible=1.0, display_name='lab2', **common_fields),
AggregatedScore(tw_earned=1, tw_possible=1.0, display_name='lab3', **common_fields),
AggregatedScore(tw_earned=5, tw_possible=25.0, display_name='lab4', **common_fields), # Dropped
AggregatedScore(tw_earned=3, tw_possible=4.0, display_name='lab5', **common_fields), # Dropped
AggregatedScore(tw_earned=6, tw_possible=7.0, display_name='lab6', **common_fields),
AggregatedScore(tw_earned=5, tw_possible=6.0, display_name='lab7', **common_fields),
MockGrade(AggregatedScore(tw_earned=1, tw_possible=2.0, **common_fields), display_name='lab1'), # Dropped
MockGrade(AggregatedScore(tw_earned=1, tw_possible=1.0, **common_fields), display_name='lab2'),
MockGrade(AggregatedScore(tw_earned=1, tw_possible=1.0, **common_fields), display_name='lab3'),
MockGrade(AggregatedScore(tw_earned=5, tw_possible=25.0, **common_fields), display_name='lab4'), # Dropped
MockGrade(AggregatedScore(tw_earned=3, tw_possible=4.0, **common_fields), display_name='lab5'), # Dropped
MockGrade(AggregatedScore(tw_earned=6, tw_possible=7.0, **common_fields), display_name='lab6'),
MockGrade(AggregatedScore(tw_earned=5, tw_possible=6.0, **common_fields), display_name='lab7'),
],
'Midterm': [
AggregatedScore(tw_earned=50.5, tw_possible=100, display_name="Midterm Exam", **common_fields),
MockGrade(AggregatedScore(tw_earned=50.5, tw_possible=100, **common_fields), display_name="Midterm Exam"),
],
}
def test_single_section_grader(self):
midterm_grader = graders.SingleSectionGrader("Midterm", "Midterm Exam")
lab4_grader = graders.SingleSectionGrader("Lab", "lab4")
bad_lab_grader = graders.SingleSectionGrader("Lab", "lab42")
for graded in [
midterm_grader.grade(self.empty_gradesheet),
midterm_grader.grade(self.incomplete_gradesheet),
bad_lab_grader.grade(self.test_gradesheet),
]:
self.assertEqual(len(graded['section_breakdown']), 1)
self.assertEqual(graded['percent'], 0.0)
graded = midterm_grader.grade(self.test_gradesheet)
self.assertAlmostEqual(graded['percent'], 0.505)
self.assertEqual(len(graded['section_breakdown']), 1)
graded = lab4_grader.grade(self.test_gradesheet)
self.assertAlmostEqual(graded['percent'], 0.2)
self.assertEqual(len(graded['section_breakdown']), 1)
def test_assignment_format_grader(self):
homework_grader = graders.AssignmentFormatGrader("Homework", 12, 2)
no_drop_grader = graders.AssignmentFormatGrader("Homework", 12, 0)
......@@ -179,8 +166,6 @@ class GraderTest(unittest.TestCase):
# First, a few sub graders
homework_grader = graders.AssignmentFormatGrader("Homework", 12, 2)
lab_grader = graders.AssignmentFormatGrader("Lab", 7, 3)
# phasing out the use of SingleSectionGraders, and instead using AssignmentFormatGraders that
# will act like SingleSectionGraders on single sections.
midterm_grader = graders.AssignmentFormatGrader("Midterm", 1, 0)
weighted_grader = graders.WeightedSubsectionsGrader([
......@@ -268,6 +253,8 @@ class GraderTest(unittest.TestCase):
},
{
'type': "Midterm",
'min_count': 0,
'drop_count': 0,
'name': "Midterm Exam",
'short_label': "Midterm",
'weight': 0.5,
......@@ -294,5 +281,25 @@ class GraderTest(unittest.TestCase):
self.assertAlmostEqual(graded['percent'], 0.11)
self.assertEqual(len(graded['section_breakdown']), 12 + 1)
# TODO: How do we test failure cases? The parser only logs an error when
# it can't parse something. Maybe it should throw exceptions?
@ddt.data(
# empty
(
{},
u"Configuration has no appropriate grader class."
),
# no min_count
(
{'type': "Homework", 'drop_count': 0},
u"Configuration has no appropriate grader class."
),
# no drop_count
(
{'type': "Homework", 'min_count': 0},
u"__init__() takes at least 4 arguments (3 given)"
),
)
@ddt.unpack
def test_grader_with_invalid_conf(self, invalid_conf, expected_error_message):
with self.assertRaises(ValueError) as error:
graders.grader_from_conf([invalid_conf])
self.assertIn(expected_error_message, error.exception.message)
......@@ -33,7 +33,7 @@ from courseware.field_overrides import disable_overrides
from django_comment_common.models import FORUM_ROLE_ADMINISTRATOR, assign_role
from django_comment_common.utils import seed_permissions_roles
from edxmako.shortcuts import render_to_response
from lms.djangoapps.grades.course_grades import iterate_grades_for
from lms.djangoapps.grades.new.course_grade import CourseGradeFactory
from opaque_keys.edx.keys import CourseKey
from ccx_keys.locator import CCXLocator
from student.roles import CourseCcxCoachRole
......@@ -564,30 +564,30 @@ def ccx_grades_csv(request, course, ccx=None):
courseenrollment__course_id=ccx_key,
courseenrollment__is_active=1
).order_by('username').select_related("profile")
grades = iterate_grades_for(course, enrolled_students)
grades = CourseGradeFactory().iter(course, enrolled_students)
header = None
rows = []
for student, gradeset, __ in grades:
if gradeset:
for student, course_grade, __ in grades:
if course_grade:
# We were able to successfully grade this student for this
# course.
if not header:
# Encode the header row in utf-8 encoding in case there are
# unicode characters
header = [section['label'].encode('utf-8')
for section in gradeset[u'section_breakdown']]
for section in course_grade.summary[u'section_breakdown']]
rows.append(["id", "email", "username", "grade"] + header)
percents = {
section['label']: section.get('percent', 0.0)
for section in gradeset[u'section_breakdown']
for section in course_grade.summary[u'section_breakdown']
if 'label' in section
}
row_percents = [percents.get(label, 0.0) for label in header]
rows.append([student.id, student.email, student.username,
gradeset['percent']] + row_percents)
course_grade.percent] + row_percents)
buf = StringIO()
writer = csv.writer(buf)
......
......@@ -5,7 +5,7 @@ Management command which fixes ungraded certificates for students
from certificates.models import GeneratedCertificate
from courseware import courses
from lms.djangoapps.grades import course_grades
from lms.djangoapps.grades.new.course_grade import CourseGradeFactory
from django.test.client import RequestFactory
from django.core.management.base import BaseCommand
from optparse import make_option
......@@ -52,8 +52,8 @@ class Command(BaseCommand):
for cert in ungraded:
# grade the student
grade = course_grades.summary(cert.user, course)
print "grading {0} - {1}".format(cert.user, grade['percent'])
cert.grade = grade['percent']
grade = CourseGradeFactory().create(cert.user, course)
print "grading {0} - {1}".format(cert.user, grade.percent)
cert.grade = grade.percent
if not options['noop']:
cert.save()
......@@ -11,7 +11,7 @@ from django.conf import settings
from django.core.urlresolvers import reverse
from requests.auth import HTTPBasicAuth
from lms.djangoapps.grades import course_grades
from lms.djangoapps.grades.new.course_grade import CourseGradeFactory
from xmodule.modulestore.django import modulestore
from capa.xqueue_interface import XQueueInterface
from capa.xqueue_interface import make_xheader, make_hashkey
......@@ -271,7 +271,7 @@ class XQueueCertInterface(object):
self.request.session = {}
is_whitelisted = self.whitelist.filter(user=student, course_id=course_id, whitelist=True).exists()
grade = course_grades.summary(student, course)
grade = CourseGradeFactory().create(student, course).summary
enrollment_mode, __ = CourseEnrollment.enrollment_mode_for_user(student, course_id)
mode_is_verified = enrollment_mode in GeneratedCertificate.VERIFIED_CERTS_MODES
user_is_verified = SoftwareSecurePhotoVerification.user_is_verified(student)
......
......@@ -22,7 +22,7 @@ from capa.tests.response_xml_factory import (
from course_modes.models import CourseMode
from courseware.models import StudentModule, BaseStudentModuleHistory
from courseware.tests.helpers import LoginEnrollmentTestCase
from lms.djangoapps.grades import course_grades, progress
from lms.djangoapps.grades.new.course_grade import CourseGradeFactory
from openedx.core.djangoapps.credit.api import (
set_credit_requirements, get_credit_requirement_status
)
......@@ -270,39 +270,17 @@ class TestSubmittingProblems(ModuleStoreTestCase, LoginEnrollmentTestCase, Probl
self.update_course(self.course, self.student_user.id)
self.refresh_course()
def get_grade_summary(self):
def get_course_grade(self):
"""
calls course_grades.summary for current user and course.
the keywords for the returned object are
- grade : A final letter grade.
- percent : The final percent for the class (rounded up).
- section_breakdown : A breakdown of each section that makes
up the grade. (For display)
- grade_breakdown : A breakdown of the major components that
make up the final grade. (For display)
"""
return course_grades.summary(self.student_user, self.course)
def get_progress_summary(self):
"""
Return progress summary structure for current user and course.
Returns
- courseware_summary is a summary of all sections with problems in the course.
It is organized as an array of chapters, each containing an array of sections,
each containing an array of scores. This contains information for graded and
ungraded problems, and is good for displaying a course summary with due dates,
etc.
Return CourseGrade for current user and course.
"""
return progress.summary(self.student_user, self.course).chapter_grades
return CourseGradeFactory().create(self.student_user, self.course)
def check_grade_percent(self, percent):
"""
Assert that percent grade is as expected.
"""
grade_summary = self.get_grade_summary()
self.assertEqual(grade_summary['percent'], percent)
self.assertEqual(self.get_course_grade().percent, percent)
def earned_hw_scores(self):
"""
......@@ -310,7 +288,7 @@ class TestSubmittingProblems(ModuleStoreTestCase, LoginEnrollmentTestCase, Probl
Returns list of scores: [<points on hw_1>, <points on hw_2>, ..., <points on hw_n>]
"""
return [s.earned for s in self.get_grade_summary()['totaled_scores']['Homework']]
return [s.graded_total.earned for s in self.get_course_grade().graded_subsections_by_format['Homework']]
def hw_grade(self, hw_url_name):
"""
......@@ -318,7 +296,7 @@ class TestSubmittingProblems(ModuleStoreTestCase, LoginEnrollmentTestCase, Probl
"""
# list of grade summaries for each section
sections_list = []
for chapter in self.get_progress_summary():
for chapter in self.get_course_grade().chapter_grades:
sections_list.extend(chapter['sections'])
# get the first section that matches the url (there should only be one)
......@@ -431,8 +409,11 @@ class TestCourseGrader(TestSubmittingProblems):
"drop_count": 0,
"short_label": "HW",
"weight": hw_weight
}, {
},
{
"type": "Final",
"min_count": 0,
"drop_count": 0,
"name": "Final Section",
"short_label": "Final",
"weight": final_weight
......@@ -558,7 +539,7 @@ class TestCourseGrader(TestSubmittingProblems):
"""
self.basic_setup()
self.check_grade_percent(0)
self.assertEqual(self.get_grade_summary()['grade'], None)
self.assertEqual(self.get_course_grade().letter_grade, None)
def test_b_grade_exact(self):
"""
......@@ -567,7 +548,7 @@ class TestCourseGrader(TestSubmittingProblems):
self.basic_setup()
self.submit_question_answer('p1', {'2_1': 'Correct'})
self.check_grade_percent(0.33)
self.assertEqual(self.get_grade_summary()['grade'], 'B')
self.assertEqual(self.get_course_grade().letter_grade, 'B')
def test_b_grade_above(self):
"""
......@@ -577,7 +558,7 @@ class TestCourseGrader(TestSubmittingProblems):
self.submit_question_answer('p1', {'2_1': 'Correct'})
self.submit_question_answer('p2', {'2_1': 'Correct'})
self.check_grade_percent(0.67)
self.assertEqual(self.get_grade_summary()['grade'], 'B')
self.assertEqual(self.get_course_grade().letter_grade, 'B')
def test_a_grade(self):
"""
......@@ -588,7 +569,7 @@ class TestCourseGrader(TestSubmittingProblems):
self.submit_question_answer('p2', {'2_1': 'Correct'})
self.submit_question_answer('p3', {'2_1': 'Correct'})
self.check_grade_percent(1.0)
self.assertEqual(self.get_grade_summary()['grade'], 'A')
self.assertEqual(self.get_course_grade().letter_grade, 'A')
def test_wrong_answers(self):
"""
......@@ -599,7 +580,7 @@ class TestCourseGrader(TestSubmittingProblems):
self.submit_question_answer('p2', {'2_1': 'Correct'})
self.submit_question_answer('p3', {'2_1': 'Incorrect'})
self.check_grade_percent(0.67)
self.assertEqual(self.get_grade_summary()['grade'], 'B')
self.assertEqual(self.get_course_grade().letter_grade, 'B')
def test_submissions_api_overrides_scores(self):
"""
......@@ -610,7 +591,7 @@ class TestCourseGrader(TestSubmittingProblems):
self.submit_question_answer('p2', {'2_1': 'Correct'})
self.submit_question_answer('p3', {'2_1': 'Incorrect'})
self.check_grade_percent(0.67)
self.assertEqual(self.get_grade_summary()['grade'], 'B')
self.assertEqual(self.get_course_grade().letter_grade, 'B')
# But now, set the score with the submissions API and watch
# as it overrides the score read from StudentModule and our
......@@ -625,7 +606,7 @@ class TestCourseGrader(TestSubmittingProblems):
submission = submissions_api.create_submission(student_item, 'any answer')
submissions_api.set_score(submission['uuid'], 1, 1)
self.check_grade_percent(1.0)
self.assertEqual(self.get_grade_summary()['grade'], 'A')
self.assertEqual(self.get_course_grade().letter_grade, 'A')
def test_submissions_api_anonymous_student_id(self):
"""
......@@ -640,7 +621,7 @@ class TestCourseGrader(TestSubmittingProblems):
mock_get_scores.return_value = {
self.problem_location('p3').to_deprecated_string(): (1, 1)
}
self.get_grade_summary()
self.get_course_grade()
# Verify that the submissions API was sent an anonymized student ID
mock_get_scores.assert_called_with(
......@@ -752,9 +733,6 @@ class TestCourseGrader(TestSubmittingProblems):
# the Django student views, and does not update enrollment if it already exists.
CourseEnrollment.enroll(self.student_user, self.course.id, mode)
self.submit_question_answer('p1', {'2_1': 'Correct'})
self.submit_question_answer('p2', {'2_1': 'Correct'})
# Enable the course for credit
CreditCourse.objects.create(course_key=self.course.id, enabled=True)
......@@ -774,7 +752,15 @@ class TestCourseGrader(TestSubmittingProblems):
# Add a single credit requirement (final grade)
set_credit_requirements(self.course.id, requirements)
self.get_grade_summary()
# Credit requirement is not satisfied before passing grade
req_status = get_credit_requirement_status(self.course.id, self.student_user.username, 'grade', 'grade')
self.assertEqual(req_status[0]["status"], None)
self._stop_signal_patch()
self.submit_question_answer('p1', {'2_1': 'Correct'})
self.submit_question_answer('p2', {'2_1': 'Correct'})
# Credit requirement is now satisfied after passing grade
req_status = get_credit_requirement_status(self.course.id, self.student_user.username, 'grade', 'grade')
self.assertEqual(req_status[0]["status"], 'satisfied')
......
......@@ -723,7 +723,7 @@ def _progress(request, course_key, student_id):
# additional DB lookup (this kills the Progress page in particular).
student = User.objects.prefetch_related("groups").get(id=student.id)
course_grade = CourseGradeFactory(student).create(course)
course_grade = CourseGradeFactory().create(student, course)
courseware_summary = course_grade.chapter_grades
grade_summary = course_grade.summary
......@@ -1127,7 +1127,7 @@ def is_course_passed(course, grade_summary=None, student=None, request=None):
success_cutoff = min(nonzero_cutoffs) if nonzero_cutoffs else None
if grade_summary is None:
grade_summary = CourseGradeFactory(student).create(course).summary
grade_summary = CourseGradeFactory().create(student, course).summary
return success_cutoff and grade_summary['percent'] >= success_cutoff
......
......@@ -148,7 +148,7 @@ class UserGradeView(GradeViewMixin, GenericAPIView):
return course
prep_course_for_grading(course, request)
course_grade = CourseGradeFactory(request.user).create(course)
course_grade = CourseGradeFactory().create(request.user, course)
return Response([{
'username': username,
......
"""
Grading Context
"""
from collections import defaultdict
from collections import OrderedDict
from openedx.core.djangoapps.content.block_structure.api import get_course_in_cache
from .scores import possibly_scored
def grading_context_for_course(course):
def grading_context_for_course(course_key):
"""
Same as grading_context, but takes in a course key.
"""
course_structure = get_course_in_cache(course.id)
course_structure = get_course_in_cache(course_key)
return grading_context(course_structure)
......@@ -21,16 +21,13 @@ def grading_context(course_structure):
a student. They are used by grades.grade()
The grading context has two keys:
graded_sections - This contains the sections that are graded, as
well as all possible children modules that can affect the
grading. This allows some sections to be skipped if the student
hasn't seen any part of it.
all_graded_subsections_by_type - This contains all subsections that are
graded, keyed by subsection format (assignment type).
The format is a dictionary keyed by section-type. The values are
arrays of dictionaries containing
"section_block" : The section block
"scored_descendant_keys" : An array of usage keys for blocks
could possibly be in the section, for any student
The values are arrays of dictionaries containing
"subsection_block" : The subsection block
"scored_descendants" : An array of usage keys for blocks
could possibly be in the subsection, for any student
all_graded_blocks - This contains a list of all blocks that can
affect grading a student. This is used to efficiently fetch
......@@ -39,34 +36,36 @@ def grading_context(course_structure):
"""
all_graded_blocks = []
all_graded_sections = defaultdict(list)
all_graded_subsections_by_type = OrderedDict()
for chapter_key in course_structure.get_children(course_structure.root_block_usage_key):
for section_key in course_structure.get_children(chapter_key):
section = course_structure[section_key]
scored_descendants_of_section = [section]
if section.graded:
for subsection_key in course_structure.get_children(chapter_key):
subsection = course_structure[subsection_key]
scored_descendants_of_subsection = []
if subsection.graded:
for descendant_key in course_structure.post_order_traversal(
filter_func=possibly_scored,
start_node=section_key,
start_node=subsection_key,
):
scored_descendants_of_section.append(
scored_descendants_of_subsection.append(
course_structure[descendant_key],
)
# include only those blocks that have scores, not ones that are just parents
section_info = {
'section_block': section,
subsection_info = {
'subsection_block': subsection,
'scored_descendants': [
child for child in scored_descendants_of_section
child for child in scored_descendants_of_subsection
if getattr(child, 'has_score', None)
]
}
section_format = getattr(section, 'format', '')
all_graded_sections[section_format].append(section_info)
all_graded_blocks.extend(scored_descendants_of_section)
subsection_format = getattr(subsection, 'format', '')
if subsection_format not in all_graded_subsections_by_type:
all_graded_subsections_by_type[subsection_format] = []
all_graded_subsections_by_type[subsection_format].append(subsection_info)
all_graded_blocks.extend(scored_descendants_of_subsection)
return {
'all_graded_sections': all_graded_sections,
'all_graded_subsections_by_type': all_graded_subsections_by_type,
'all_graded_blocks': all_graded_blocks,
}
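A sketch of consuming the renamed grading context, using the key names defined above:

    context = grading_context_for_course(course_key)
    for assignment_type, subsection_infos in context['all_graded_subsections_by_type'].items():
        for info in subsection_infos:
            subsection = info['subsection_block']
            scored_blocks = info['scored_descendants']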
"""
Functionality for course-level grades.
"""
from collections import namedtuple
from logging import getLogger
import dogstats_wrapper as dog_stats_api
from opaque_keys.edx.keys import CourseKey
from courseware.courses import get_course_by_id
from .new.course_grade import CourseGradeFactory
log = getLogger(__name__)
GradeResult = namedtuple('GradeResult', ['student', 'gradeset', 'err_msg'])
def iterate_grades_for(course_or_id, students):
"""
Given a course_id and an iterable of students (User), yield a GradeResult
for every student enrolled in the course. GradeResult is a named tuple of:
(student, gradeset, err_msg)
If an error occurred, gradeset will be an empty dict and err_msg will be an
exception message. If there was no error, err_msg is an empty string.
The gradeset is a dictionary with the following fields:
- grade : A final letter grade.
- percent : The final percent for the class (rounded up).
- section_breakdown : A breakdown of each section that makes
up the grade. (For display)
- grade_breakdown : A breakdown of the major components that
make up the final grade. (For display)
- raw_scores: contains scores for every graded module
"""
if isinstance(course_or_id, (basestring, CourseKey)):
course = get_course_by_id(course_or_id)
else:
course = course_or_id
for student in students:
with dog_stats_api.timer('lms.grades.iterate_grades_for', tags=[u'action:{}'.format(course.id)]):
try:
gradeset = summary(student, course)
yield GradeResult(student, gradeset, "")
except Exception as exc: # pylint: disable=broad-except
# Keep marching on even if this student couldn't be graded for
# some reason, but log it for future reference.
log.exception(
'Cannot grade student %s (%s) in course %s because of exception: %s',
student.username,
student.id,
course.id,
exc.message
)
yield GradeResult(student, {}, exc.message)
def summary(student, course):
"""
Returns the grade summary of the student for the given course.
Also sends a signal to update the minimum grade requirement status.
"""
return CourseGradeFactory(student).create(course).summary
......@@ -7,7 +7,7 @@ from django.core.management.base import BaseCommand, CommandError
import os
from lms.djangoapps.courseware import courses
from lms.djangoapps.certificates.models import GeneratedCertificate
from lms.djangoapps.grades import course_grades
from lms.djangoapps.grades.new.course_grade import CourseGradeFactory
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
......@@ -124,18 +124,18 @@ class Command(BaseCommand):
count, total, hours, minutes)
start = datetime.datetime.now()
request.user = student
grade = course_grades.summary(student, course)
grade = CourseGradeFactory().create(student, course)
if not header:
header = [section['label'] for section in grade[u'section_breakdown']]
header = [section['label'] for section in grade.summary[u'section_breakdown']]
rows.append(["email", "username", "certificate-grade", "grade"] + header)
percents = {section['label']: section['percent'] for section in grade[u'section_breakdown']}
percents = {section['label']: section['percent'] for section in grade.summary[u'section_breakdown']}
row_percents = [percents[label] for label in header]
if student.username in cert_grades:
rows.append(
[student.email, student.username, cert_grades[student.username], grade['percent']] + row_percents,
[student.email, student.username, cert_grades[student.username], grade.percent] + row_percents,
)
else:
rows.append([student.email, student.username, "N/A", grade['percent']] + row_percents)
rows.append([student.email, student.username, "N/A", grade.percent] + row_percents)
with open(options['output'], 'wb') as f:
writer = csv.writer(f)
writer.writerows(rows)
......@@ -3,15 +3,17 @@ CourseGrade Class
"""
from collections import defaultdict
from collections import namedtuple
from logging import getLogger
from django.conf import settings
from django.core.exceptions import PermissionDenied
import dogstats_wrapper as dog_stats_api
from lazy import lazy
from lms.djangoapps.course_blocks.api import get_course_blocks
from lms.djangoapps.grades.config.models import PersistentGradesEnabledFlag
from openedx.core.djangoapps.signals.signals import GRADES_UPDATED
from openedx.core.djangoapps.signals.signals import COURSE_GRADE_CHANGED
from xmodule import block_metadata_utils
from ..models import PersistentCourseGrade
......@@ -37,7 +39,7 @@ class CourseGrade(object):
self._subsection_grade_factory = SubsectionGradeFactory(self.student, self.course, self.course_structure)
@lazy
def subsection_grade_totals_by_format(self):
def graded_subsections_by_format(self):
"""
Returns grades for the subsections in the course in
a dict keyed by subsection format types.
......@@ -48,7 +50,7 @@ class CourseGrade(object):
if subsection_grade.graded:
graded_total = subsection_grade.graded_total
if graded_total.possible > 0:
subsections_by_format[subsection_grade.format].append(graded_total)
subsections_by_format[subsection_grade.format].append(subsection_grade)
return subsections_by_format
@lazy
......@@ -70,7 +72,7 @@ class CourseGrade(object):
# Grading policy might be overridden by a CCX; need to reset it
self.course.set_grading_policy(self.course.grading_policy)
grade_value = self.course.grader.grade(
self.subsection_grade_totals_by_format,
self.graded_subsections_by_format,
generate_random_scores=settings.GENERATE_PROFILE_SCORES
)
# can't use the existing properties due to recursion issues caused by referencing self.grade_value
......@@ -137,8 +139,6 @@ class CourseGrade(object):
grade_summary = self.grade_value
grade_summary['percent'] = self.percent
grade_summary['grade'] = self.letter_grade
grade_summary['totaled_scores'] = self.subsection_grade_totals_by_format
grade_summary['raw_scores'] = list(self.locations_to_scores.itervalues())
return grade_summary
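After this hunk the summary dict keeps only its display-oriented keys; the structured data previously copied into it ('totaled_scores', 'raw_scores') is read from the CourseGrade object itself. A hedged sketch of the resulting access patterns:

    course_grade = CourseGradeFactory().create(student, course)
    course_grade.percent                          # float, e.g. 0.67
    course_grade.letter_grade                     # e.g. 'B', or None when failing
    course_grade.graded_subsections_by_format     # {'Homework': [SubsectionGrade, ...], ...}
    course_grade.summary['section_breakdown']     # display rows, unchanged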
......@@ -150,7 +150,7 @@ class CourseGrade(object):
"""
subsections_total = sum(len(chapter['sections']) for chapter in self.chapter_grades)
total_graded_subsections = sum(len(x) for x in self.subsection_grade_totals_by_format.itervalues())
total_graded_subsections = sum(len(x) for x in self.graded_subsections_by_format.itervalues())
subsections_created = len(self._subsection_grade_factory._unsaved_subsection_grades) # pylint: disable=protected-access
subsections_read = subsections_total - subsections_created
blocks_total = len(self.locations_to_scores)
......@@ -295,10 +295,10 @@ class CourseGrade(object):
"""
Signal all listeners when grades are computed.
"""
responses = GRADES_UPDATED.send_robust(
responses = COURSE_GRADE_CHANGED.send_robust(
sender=None,
user=self.student,
grade_summary=self.summary,
course_grade=self,
course_key=self.course.id,
deadline=self.course.end
)
......@@ -324,32 +324,60 @@ class CourseGradeFactory(object):
"""
Factory class to create Course Grade objects
"""
def __init__(self, student):
self.student = student
def create(self, course, read_only=True):
def create(self, student, course, read_only=True):
"""
Returns the CourseGrade object for the given student and course.
If read_only is True, doesn't save any updates to the grades.
Raises a PermissionDenied if the user does not have course access.
"""
course_structure = get_course_blocks(self.student, course.location)
course_structure = get_course_blocks(student, course.location)
# if user does not have access to this course, throw an exception
if not self._user_has_access_to_course(course_structure):
raise PermissionDenied("User does not have access to this course")
return (
self._get_saved_grade(course, course_structure) or
self._compute_and_update_grade(course, course_structure, read_only)
self._get_saved_grade(student, course, course_structure) or
self._compute_and_update_grade(student, course, course_structure, read_only)
)
def update(self, course, course_structure):
GradeResult = namedtuple('GradeResult', ['student', 'course_grade', 'err_msg'])
def iter(self, course, students):
"""
Given a course and an iterable of students (User), yield a GradeResult
for every student enrolled in the course. GradeResult is a named tuple of:
(student, course_grade, err_msg)
If an error occurred, course_grade will be None and err_msg will be an
exception message. If there was no error, err_msg is an empty string.
"""
for student in students:
with dog_stats_api.timer('lms.grades.CourseGradeFactory.iter', tags=[u'action:{}'.format(course.id)]):
try:
course_grade = CourseGradeFactory().create(student, course)
yield self.GradeResult(student, course_grade, "")
except Exception as exc: # pylint: disable=broad-except
# Keep marching on even if this student couldn't be graded for
# some reason, but log it for future reference.
log.exception(
'Cannot grade student %s (%s) in course %s because of exception: %s',
student.username,
student.id,
course.id,
exc.message
)
yield self.GradeResult(student, None, exc.message)
def update(self, student, course, course_structure):
"""
Updates the CourseGrade for the given student and course.
"""
self._compute_and_update_grade(course, course_structure)
self._compute_and_update_grade(student, course, course_structure)
def get_persisted(self, course):
def get_persisted(self, student, course):
"""
Returns the saved grade for the given course and student,
irrespective of whether the saved grade is up-to-date.
......@@ -357,9 +385,9 @@ class CourseGradeFactory(object):
if not PersistentGradesEnabledFlag.feature_enabled(course.id):
return None
return CourseGrade.get_persisted_grade(self.student, course)
return CourseGrade.get_persisted_grade(student, course)
def _get_saved_grade(self, course, course_structure):
def _get_saved_grade(self, student, course, course_structure):
"""
Returns the saved grade for the given course and student.
"""
......@@ -367,18 +395,18 @@ class CourseGradeFactory(object):
return None
return CourseGrade.load_persisted_grade(
self.student,
student,
course,
course_structure
)
def _compute_and_update_grade(self, course, course_structure, read_only=False):
def _compute_and_update_grade(self, student, course, course_structure, read_only=False):
"""
Freshly computes and updates the grade for the student and course.
If read_only is True, doesn't save any updates to the grades.
"""
course_grade = CourseGrade(self.student, course, course_structure)
course_grade = CourseGrade(student, course, course_structure)
course_grade.compute_and_update(read_only)
return course_grade
......
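The iter() method above replaces the deleted iterate_grades_for; a usage sketch matching the CCX CSV view earlier in this commit:

    for student, course_grade, err_msg in CourseGradeFactory().iter(course, enrolled_students):
        if course_grade is None:
            print "could not grade {0}: {1}".format(student.username, err_msg)
            continue
        print "{0}: {1:.0%}".format(student.username, course_grade.percent)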
......@@ -68,7 +68,7 @@ class SubsectionGrade(object):
):
self._compute_block_score(descendant_key, course_structure, submissions_scores, csm_scores)
self.all_total, self.graded_total = graders.aggregate_scores(self.scores, self.display_name, self.location)
self.all_total, self.graded_total = graders.aggregate_scores(self.scores)
self._log_event(log.debug, u"init_from_structure", student)
return self
......@@ -83,16 +83,12 @@ class SubsectionGrade(object):
tw_earned=model.earned_graded,
tw_possible=model.possible_graded,
graded=True,
display_name=self.display_name,
module_id=self.location,
attempted=model.first_attempted is not None,
)
self.all_total = AggregatedScore(
tw_earned=model.earned_all,
tw_possible=model.possible_all,
graded=False,
display_name=self.display_name,
module_id=self.location,
attempted=model.first_attempted is not None,
)
self._log_event(log.debug, u"init_from_model", student)
......
"""
Progress Summary of a learner's course grades.
"""
from .new.course_grade import CourseGradeFactory
def summary(student, course):
"""
Returns the CourseGrade for the given course and student.
"""
return CourseGradeFactory(student).create(course)
......@@ -122,8 +122,6 @@ def get_score(submissions_scores, csm_scores, persisted_block, block):
weighted_possible,
weight,
graded,
display_name=display_name_with_default_escaped(block),
module_id=block.location,
attempted=attempted,
)
......
......@@ -182,4 +182,4 @@ def recalculate_course_grade(sender, course, course_structure, user, **kwargs):
"""
Updates a saved course grade.
"""
CourseGradeFactory(user).update(course, course_structure)
CourseGradeFactory().update(user, course, course_structure)
......@@ -111,7 +111,7 @@ class TestCourseGradeFactory(GradeTestBase):
def test_course_grade_feature_gating(self, feature_flag, course_setting):
# Grades are only saved if the feature flag and the advanced setting are
# both set to True.
grade_factory = CourseGradeFactory(self.request.user)
grade_factory = CourseGradeFactory()
with persistent_grades_feature_flags(
global_flag=feature_flag,
enabled_for_all_courses=False,
......@@ -119,32 +119,32 @@ class TestCourseGradeFactory(GradeTestBase):
enabled_for_course=course_setting
):
with patch('lms.djangoapps.grades.new.course_grade.CourseGrade.load_persisted_grade') as mock_save_grades:
grade_factory.create(self.course)
grade_factory.create(self.request.user, self.course)
self.assertEqual(mock_save_grades.called, feature_flag and course_setting)
def test_course_grade_creation(self):
grade_factory = CourseGradeFactory(self.request.user)
grade_factory = CourseGradeFactory()
with mock_get_score(1, 2):
course_grade = grade_factory.create(self.course)
course_grade = grade_factory.create(self.request.user, self.course)
self.assertEqual(course_grade.letter_grade, u'Pass')
self.assertEqual(course_grade.percent, 0.5)
def test_zero_course_grade(self):
grade_factory = CourseGradeFactory(self.request.user)
grade_factory = CourseGradeFactory()
with mock_get_score(0, 2):
course_grade = grade_factory.create(self.course)
course_grade = grade_factory.create(self.request.user, self.course)
self.assertIsNone(course_grade.letter_grade)
self.assertEqual(course_grade.percent, 0.0)
def test_get_persisted(self):
grade_factory = CourseGradeFactory(self.request.user)
grade_factory = CourseGradeFactory()
# first, create a grade in the database
with mock_get_score(1, 2):
grade_factory.create(self.course, read_only=False)
grade_factory.create(self.request.user, self.course, read_only=False)
# retrieve the grade, ensuring it is as expected and take just one query
with self.assertNumQueries(1):
course_grade = grade_factory.get_persisted(self.course)
course_grade = grade_factory.get_persisted(self.request.user, self.course)
self.assertEqual(course_grade.letter_grade, u'Pass')
self.assertEqual(course_grade.percent, 0.5)
......@@ -168,7 +168,7 @@ class TestCourseGradeFactory(GradeTestBase):
# ensure the grade can still be retrieved via get_persisted
# despite its outdated grading policy
with self.assertNumQueries(1):
course_grade = grade_factory.get_persisted(self.course)
course_grade = grade_factory.get_persisted(self.request.user, self.course)
self.assertEqual(course_grade.letter_grade, u'Pass')
self.assertEqual(course_grade.percent, 0.5)
......@@ -587,7 +587,7 @@ class TestCourseGradeLogging(ProblemSubmissionTestMixin, SharedModuleStoreTestCa
Creates a course grade and asserts that the associated logging
matches the expected totals passed in to the function.
"""
factory.create(self.course, read_only=False)
factory.create(self.request.user, self.course, read_only=False)
log_mock.assert_called_with(
u"Persistent Grades: CourseGrade.{0}, course: {1}, user: {2}".format(
log_statement,
......@@ -597,7 +597,7 @@ class TestCourseGradeLogging(ProblemSubmissionTestMixin, SharedModuleStoreTestCa
)
def test_course_grade_logging(self):
grade_factory = CourseGradeFactory(self.request.user)
grade_factory = CourseGradeFactory()
with persistent_grades_feature_flags(
global_flag=True,
enabled_for_all_courses=False,
......
......@@ -175,9 +175,7 @@ class TestGetScore(TestCase):
self._create_persisted_block(persisted_block_value),
self._create_block(block_value),
)
expected_score = ProblemScore(
display_name=self.display_name, module_id=self.location, **expected_result._asdict()
)
expected_score = ProblemScore(**expected_result._asdict())
self.assertEquals(score, expected_score)
......
......@@ -9,13 +9,15 @@ from xmodule.graders import ProblemScore
@contextmanager
def mock_passing_grade(grade_pass='Pass', percent=0.75):
def mock_passing_grade(grade_pass='Pass', percent=0.75, ):
"""
Mock the grading function to always return a passing grade.
"""
with patch('lms.djangoapps.grades.course_grades.summary') as mock_grade:
mock_grade.return_value = {'grade': grade_pass, 'percent': percent}
yield
with patch('lms.djangoapps.grades.new.course_grade.CourseGrade._compute_letter_grade') as mock_letter_grade:
with patch('lms.djangoapps.grades.new.course_grade.CourseGrade._calc_percent') as mock_percent_grade:
mock_letter_grade.return_value = grade_pass
mock_percent_grade.return_value = percent
yield
@contextmanager
......@@ -31,8 +33,6 @@ def mock_get_score(earned=0, possible=1):
weighted_possible=possible,
weight=1,
graded=True,
display_name=None,
module_id=None,
attempted=True,
)
yield mock_score
......
......@@ -14,7 +14,7 @@ from opaque_keys.edx.keys import CourseKey
from edxmako.shortcuts import render_to_response
from courseware.courses import get_course_with_access
from lms.djangoapps.instructor.views.api import require_level
from lms.djangoapps.grades import course_grades
from lms.djangoapps.grades.new.course_grade import CourseGradeFactory
from xmodule.modulestore.django import modulestore
......@@ -91,7 +91,7 @@ def get_grade_book_page(request, course, course_key):
'username': student.username,
'id': student.id,
'email': student.email,
'grade_summary': course_grades.summary(student, course)
'grade_summary': CourseGradeFactory().create(student, course).summary
}
for student in enrolled_students
]
......
......@@ -491,14 +491,14 @@ def dump_grading_context(course):
msg += hbar
msg += "Listing grading context for course %s\n" % course.id.to_deprecated_string()
gcontext = grading_context_for_course(course)
gcontext = grading_context_for_course(course.id)
msg += "graded sections:\n"
msg += '%s\n' % gcontext['all_graded_sections'].keys()
for (gsomething, gsvals) in gcontext['all_graded_sections'].items():
msg += '%s\n' % gcontext['all_graded_subsections_by_type'].keys()
for (gsomething, gsvals) in gcontext['all_graded_subsections_by_type'].items():
msg += "--> Section %s:\n" % (gsomething)
for sec in gsvals:
sdesc = sec['section_block']
sdesc = sec['subsection_block']
frmat = getattr(sdesc, 'format', None)
aname = ''
if frmat in graders:
......
......@@ -134,9 +134,9 @@ class TestRescoringTask(TestIntegrationTask):
# are in sync.
expected_subsection_grade = expected_score
course_grade = CourseGradeFactory(user).create(self.course)
course_grade = CourseGradeFactory().create(user, self.course)
self.assertEquals(
course_grade.subsection_grade_totals_by_format['Homework'][0].earned,
course_grade.graded_subsections_by_format['Homework'][0].graded_total.earned,
expected_subsection_grade,
)
......
......@@ -7,7 +7,7 @@ from django.contrib.auth.models import User
from django.dispatch import receiver
import logging
from lms.djangoapps.grades import progress
from lms.djangoapps.grades.new.course_grade import CourseGradeFactory
from lms.djangoapps.grades.signals.signals import PROBLEM_WEIGHTED_SCORE_CHANGED
from lms import CELERY_APP
from lti_provider.models import GradedAssignment
......@@ -109,8 +109,8 @@ def send_composite_outcome(user_id, course_id, assignment_id, version):
mapped_usage_key = assignment.usage_key.map_into_course(course_key)
user = User.objects.get(id=user_id)
course = modulestore().get_course(course_key, depth=0)
progress_summary = progress.summary(user, course)
earned, possible = progress_summary.score_for_module(mapped_usage_key)
course_grade = CourseGradeFactory().create(user, course)
earned, possible = course_grade.score_for_module(mapped_usage_key)
if possible == 0:
weighted_score = 0
else:
......
......@@ -99,9 +99,9 @@ class SendCompositeOutcomeTest(BaseOutcomeTest):
block_type='problem',
block_id='problem',
)
self.weighted_scores = MagicMock()
self.weighted_scores_mock = self.setup_patch(
'lti_provider.tasks.progress.summary', self.weighted_scores
self.course_grade = MagicMock()
self.course_grade_mock = self.setup_patch(
'lti_provider.tasks.CourseGradeFactory.create', self.course_grade
)
self.module_store = MagicMock()
self.module_store.get_item = MagicMock(return_value=self.descriptor)
......@@ -117,7 +117,7 @@ class SendCompositeOutcomeTest(BaseOutcomeTest):
)
@ddt.unpack
def test_outcome_with_score_score(self, earned, possible, expected):
self.weighted_scores.score_for_module = MagicMock(return_value=(earned, possible))
self.course_grade.score_for_module = MagicMock(return_value=(earned, possible))
tasks.send_composite_outcome(
self.user.id, unicode(self.course_key), self.assignment.id, 1
)
......@@ -129,4 +129,4 @@ class SendCompositeOutcomeTest(BaseOutcomeTest):
tasks.send_composite_outcome(
self.user.id, unicode(self.course_key), self.assignment.id, 1
)
self.assertEqual(self.weighted_scores_mock.call_count, 0)
self.assertEqual(self.course_grade_mock.call_count, 0)
......@@ -10,7 +10,7 @@ from opaque_keys.edx.keys import CourseKey
from xmodule.modulestore.django import SignalHandler
from openedx.core.djangoapps.credit.verification_access import update_verification_partitions
from openedx.core.djangoapps.signals.signals import GRADES_UPDATED
from openedx.core.djangoapps.signals.signals import COURSE_GRADE_CHANGED
log = logging.getLogger(__name__)
......@@ -52,14 +52,14 @@ def on_pre_publish(sender, course_key, **kwargs): # pylint: disable=unused-argu
log.info(u"Finished updating in-course reverification access rules")
@receiver(GRADES_UPDATED)
def listen_for_grade_calculation(sender, user, grade_summary, course_key, deadline, **kwargs): # pylint: disable=unused-argument
@receiver(COURSE_GRADE_CHANGED)
def listen_for_grade_calculation(sender, user, course_grade, course_key, deadline, **kwargs): # pylint: disable=unused-argument
"""Receive 'MIN_GRADE_REQUIREMENT_STATUS' signal and update minimum grade requirement status.
Args:
sender: None
user(User): User Model object
grade_summary(dict): Dict containing output from the course grader
course_grade(CourseGrade): CourseGrade object
course_key(CourseKey): The key for the course
deadline(datetime): Course end date or None
......@@ -78,7 +78,7 @@ def listen_for_grade_calculation(sender, user, grade_summary, course_key, deadli
criteria = requirements[0].get('criteria')
if criteria:
min_grade = criteria.get('min_grade')
passing_grade = grade_summary['percent'] >= min_grade
passing_grade = course_grade.percent >= min_grade
now = timezone.now()
status = None
reason = None
......@@ -89,7 +89,7 @@ def listen_for_grade_calculation(sender, user, grade_summary, course_key, deadli
if passing_grade:
# Student received a passing grade
status = 'satisfied'
reason = {'final_grade': grade_summary['percent']}
reason = {'final_grade': course_grade.percent}
else:
# Submission after deadline
......@@ -104,7 +104,7 @@ def listen_for_grade_calculation(sender, user, grade_summary, course_key, deadli
# Student failed to receive minimum grade
status = 'failed'
reason = {
'final_grade': grade_summary['percent'],
'final_grade': course_grade.percent,
'minimum_grade': min_grade
}
......
......@@ -5,6 +5,7 @@ Tests for minimum grade requirement status
import ddt
import pytz
from datetime import timedelta, datetime
from mock import MagicMock
from unittest import skipUnless
from django.conf import settings
......@@ -73,7 +74,9 @@ class TestMinGradedRequirementStatus(ModuleStoreTestCase):
def assert_requirement_status(self, grade, due_date, expected_status):
""" Verify the user's credit requirement status is as expected after simulating a grading calculation. """
listen_for_grade_calculation(None, self.user, {'percent': grade}, self.course.id, due_date)
course_grade = MagicMock()
course_grade.percent = grade
listen_for_grade_calculation(None, self.user, course_grade, self.course.id, due_date)
req_status = get_credit_requirement_status(self.course.id, self.request.user.username, 'grade', 'grade')
self.assertEqual(req_status[0]['status'], expected_status)
......
......@@ -5,8 +5,8 @@ This module contains all signals.
from django.dispatch import Signal
# Signal that fires when a user is graded (in lms/grades/course_grades.py)
GRADES_UPDATED = Signal(providing_args=["user", "grade_summary", "course_key", "deadline"])
# Signal that fires when a user is graded
COURSE_GRADE_CHANGED = Signal(providing_args=["user", "course_grade", "course_key", "deadline"])
# Signal that fires when a user is awarded a certificate in a course (in the certificates django app)
# TODO: runtime coupling between apps will be reduced if this event is changed to carry a username
......
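A sketch of a receiver wired to the renamed signal; the handler name is hypothetical, and the arguments follow providing_args above:

    from django.dispatch import receiver
    from openedx.core.djangoapps.signals.signals import COURSE_GRADE_CHANGED

    @receiver(COURSE_GRADE_CHANGED)
    def on_course_grade_changed(sender, user, course_grade, course_key, deadline, **kwargs):
        # receivers now get the full CourseGrade object instead of a summary dict
        if course_grade.percent >= 0.5:
            pass  # e.g. mark a minimum-grade requirement satisfied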