Commit aca69f78 by Nimisha Asthagiri

Update Subsection Grade Report to differentiate Not Attempted and 0

TNL-5990
parent 3bb5617a
......@@ -10,7 +10,7 @@ class CorrectMap(object):
in a capa problem. The response evaluation result for each answer_id includes
(correctness, npoints, msg, hint, hintmode).
- correctness : 'correct', 'incorrect', or 'partially-correct'
- correctness : 'correct', 'incorrect', 'partially-correct', or 'incomplete'
- npoints : None, or integer specifying number of points awarded for this answer_id
- msg : string (may have HTML) giving extra message response
(displayed below textline or textbox)
......
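For reference, a minimal sketch of what a single CorrectMap entry might look like with the new 'incomplete' state, using only the fields documented above; the values are placeholders, not real defaults.

    # Illustrative only: one CorrectMap entry for a single answer_id.
    example_entry = {
        'correctness': 'incomplete',  # 'correct', 'incorrect', 'partially-correct', or 'incomplete'
        'npoints': None,              # None, or integer points awarded for this answer_id
        'msg': '',                    # extra message (may contain HTML), shown below the input
        'hint': '',
        'hintmode': None,
    }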
......@@ -5,6 +5,7 @@ Code used to calculate learner grades.
from __future__ import division
import abc
from collections import OrderedDict
import inspect
import logging
import random
......@@ -174,10 +175,10 @@ class CourseGrader(object):
- section_breakdown: This is a list of dictionaries which provide details on sections
that were graded. These are used for display in a graph or chart. The format for a
section_breakdown dictionary is explained below.
- grade_breakdown: This is a list of dictionaries which provide details on the contributions
of the final percentage grade. This is a higher level breakdown, for when the grade is constructed
of a few very large sections (such as Homeworks, Labs, a Midterm, and a Final). The format for
a grade_breakdown is explained below. This section is optional.
- grade_breakdown: This is a dict of dictionaries, keyed by category, which provide details on
the contributions of the final percentage grade. This is a higher level breakdown, for when the
grade is constructed of a few very large sections (such as Homeworks, Labs, a Midterm, and a Final).
The format for a grade_breakdown is explained below. This section is optional.
A dictionary in the section_breakdown list has the following keys:
percent: A float percentage for the section.
......@@ -188,7 +189,7 @@ class CourseGrader(object):
prominent: A boolean value indicating that this section should be displayed as more prominent
than other items.
A dictionary in the grade_breakdown list has the following keys:
A dictionary in the grade_breakdown dict has the following keys:
percent: A float percentage in the breakdown. All percents should add up to the final percentage.
detail: A string explanation of this breakdown. E.g. "Homework - 10% of a possible 15%"
category: A string identifying the category. Items with the same category are grouped together
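To make the shape change concrete, a hedged sketch of the summary a grader now returns, with made-up numbers; the only structural difference from before is that grade_breakdown is a dict keyed by assignment type rather than a list.

    # Illustrative values only.
    grade_summary = {
        'percent': 0.55,
        'section_breakdown': [
            {'percent': 0.25, 'detail': 'Homework 1 - 50% (1/2)', 'category': 'Homework'},
            # ... one entry per graded (or dropped) section ...
        ],
        'grade_breakdown': {
            'Homework': {'percent': 0.30, 'detail': 'Homework = 30.00% of a possible 60.00%', 'category': 'Homework'},
            'Final': {'percent': 0.25, 'detail': 'Final = 25.00% of a possible 40.00%', 'category': 'Final'},
        },
    }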
......@@ -221,27 +222,33 @@ class WeightedSubsectionsGrader(CourseGrader):
a value > 1, the student may end up with a percent > 100%. This allows for sections that
are extra credit.
"""
def __init__(self, sections):
self.sections = sections
def __init__(self, subgraders):
self.subgraders = subgraders
def grade(self, grade_sheet, generate_random_scores=False):
total_percent = 0.0
section_breakdown = []
grade_breakdown = []
grade_breakdown = OrderedDict()
for subgrader, category, weight in self.sections:
for subgrader, assignment_type, weight in self.subgraders:
subgrade_result = subgrader.grade(grade_sheet, generate_random_scores)
weighted_percent = subgrade_result['percent'] * weight
section_detail = u"{0} = {1:.2%} of a possible {2:.2%}".format(category, weighted_percent, weight)
section_detail = u"{0} = {1:.2%} of a possible {2:.2%}".format(assignment_type, weighted_percent, weight)
total_percent += weighted_percent
section_breakdown += subgrade_result['section_breakdown']
grade_breakdown.append({'percent': weighted_percent, 'detail': section_detail, 'category': category})
grade_breakdown[assignment_type] = {
'percent': weighted_percent,
'detail': section_detail,
'category': assignment_type,
}
return {'percent': total_percent,
'section_breakdown': section_breakdown,
'grade_breakdown': grade_breakdown}
return {
'percent': total_percent,
'section_breakdown': section_breakdown,
'grade_breakdown': grade_breakdown
}
class AssignmentFormatGrader(CourseGrader):
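Callers that used to index grade_breakdown by position need a small migration now that it is an OrderedDict keyed by assignment type; a sketch with an illustrative 'Homework' key (the Python 2 tests further down use the terser .values()[0] form).

    from collections import OrderedDict

    # Caller-side migration sketch; key and numbers are illustrative.
    grade_breakdown = OrderedDict()
    grade_breakdown['Homework'] = {'percent': 0.3, 'detail': 'Homework = 30.00% of a possible 60.00%', 'category': 'Homework'}

    by_type = grade_breakdown['Homework']['percent']            # preferred: key by assignment type
    by_position = list(grade_breakdown.values())[0]['percent']  # positional access, as the tests below do with .values()[0]
    assert by_type == by_position == 0.3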
......@@ -302,9 +309,12 @@ class AssignmentFormatGrader(CourseGrader):
def grade(self, grade_sheet, generate_random_scores=False):
def total_with_drops(breakdown, drop_count):
'''calculates total score for a section while dropping lowest scores'''
#create an array of tuples with (index, mark), sorted by mark['percent'] descending
"""
Calculates total score for a section while dropping lowest scores
"""
# Create an array of tuples with (index, mark), sorted by mark['percent'] descending
sorted_breakdown = sorted(enumerate(breakdown), key=lambda x: -x[1]['percent'])
# A list of the indices of the dropped scores
dropped_indices = []
if drop_count > 0:
......@@ -319,8 +329,7 @@ class AssignmentFormatGrader(CourseGrader):
return aggregate_score, dropped_indices
#Figure the homework scores
scores = grade_sheet.get(self.type, [])
scores = grade_sheet.get(self.type, {}).values()
breakdown = []
for i in range(max(self.min_count, len(scores))):
if i < len(scores) or generate_random_scores:
......
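For context, a self-contained sketch of the drop-the-lowest-N aggregation that total_with_drops performs; the elided middle of the hunk is not reproduced here, this is only the idea.

    # Sketch only: average the section percents after dropping the `drop_count` lowest.
    def total_with_drops(breakdown, drop_count):
        # breakdown: list of dicts, each with a 'percent' key
        sorted_breakdown = sorted(enumerate(breakdown), key=lambda x: -x[1]['percent'])
        keep_count = max(len(breakdown) - drop_count, 0)
        dropped_indices = [index for index, _ in sorted_breakdown[keep_count:]]
        kept = [mark['percent'] for index, mark in enumerate(breakdown) if index not in dropped_indices]
        aggregate_score = sum(kept) / len(kept) if kept else 0.0
        return aggregate_score, dropped_indices

    # total_with_drops([{'percent': 1.0}, {'percent': 0.5}, {'percent': 0.0}], 1) == (0.75, [2])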
......@@ -76,9 +76,9 @@ class GraderTest(unittest.TestCase):
}
incomplete_gradesheet = {
'Homework': [],
'Lab': [],
'Midterm': [],
'Homework': {},
'Lab': {},
'Midterm': {},
}
class MockGrade(object):
......@@ -91,25 +91,31 @@ class GraderTest(unittest.TestCase):
common_fields = dict(graded=True, attempted=True)
test_gradesheet = {
'Homework': [
MockGrade(AggregatedScore(tw_earned=2, tw_possible=20.0, **common_fields), display_name='hw1'),
MockGrade(AggregatedScore(tw_earned=16, tw_possible=16.0, **common_fields), display_name='hw2'),
],
'Homework': {
'hw1': MockGrade(AggregatedScore(tw_earned=2, tw_possible=20.0, **common_fields), display_name='hw1'),
'hw2': MockGrade(AggregatedScore(tw_earned=16, tw_possible=16.0, **common_fields), display_name='hw2'),
},
# The dropped scores should be from the assignments that don't exist yet
'Lab': [
MockGrade(AggregatedScore(tw_earned=1, tw_possible=2.0, **common_fields), display_name='lab1'), # Dropped
MockGrade(AggregatedScore(tw_earned=1, tw_possible=1.0, **common_fields), display_name='lab2'),
MockGrade(AggregatedScore(tw_earned=1, tw_possible=1.0, **common_fields), display_name='lab3'),
MockGrade(AggregatedScore(tw_earned=5, tw_possible=25.0, **common_fields), display_name='lab4'), # Dropped
MockGrade(AggregatedScore(tw_earned=3, tw_possible=4.0, **common_fields), display_name='lab5'), # Dropped
MockGrade(AggregatedScore(tw_earned=6, tw_possible=7.0, **common_fields), display_name='lab6'),
MockGrade(AggregatedScore(tw_earned=5, tw_possible=6.0, **common_fields), display_name='lab7'),
],
'Midterm': [
MockGrade(AggregatedScore(tw_earned=50.5, tw_possible=100, **common_fields), display_name="Midterm Exam"),
],
'Lab': {
# Dropped
'lab1': MockGrade(AggregatedScore(tw_earned=1, tw_possible=2.0, **common_fields), display_name='lab1'),
'lab2': MockGrade(AggregatedScore(tw_earned=1, tw_possible=1.0, **common_fields), display_name='lab2'),
'lab3': MockGrade(AggregatedScore(tw_earned=1, tw_possible=1.0, **common_fields), display_name='lab3'),
# Dropped
'lab4': MockGrade(AggregatedScore(tw_earned=5, tw_possible=25.0, **common_fields), display_name='lab4'),
# Dropped
'lab5': MockGrade(AggregatedScore(tw_earned=3, tw_possible=4.0, **common_fields), display_name='lab5'),
'lab6': MockGrade(AggregatedScore(tw_earned=6, tw_possible=7.0, **common_fields), display_name='lab6'),
'lab7': MockGrade(AggregatedScore(tw_earned=5, tw_possible=6.0, **common_fields), display_name='lab7'),
},
'Midterm': {
'midterm': MockGrade(
AggregatedScore(tw_earned=50.5, tw_possible=100, **common_fields),
display_name="Midterm Exam",
),
},
}
def test_assignment_format_grader(self):
......
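Why handing .values() to the grader is a safe drop-in for the old list: the production grade sheet is built from OrderedDicts (see the course_grade.py hunk below), so iteration order still follows course order. A small sketch, assuming Python 2 semantics like the surrounding code.

    from collections import OrderedDict

    scores_by_name = OrderedDict([('hw1', 2.0), ('hw2', 16.0)])
    scores = scores_by_name.values()       # a list on Python 2, a view on Python 3
    assert list(scores) == [2.0, 16.0]     # order preserved because the mapping is an OrderedDict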
......@@ -1174,11 +1174,8 @@ class TestCCXGrades(FieldOverrideTestMixin, SharedModuleStoreTestCase, LoginEnro
self.assertEqual(len(response.mako_context['students']), 1) # pylint: disable=no-member
student_info = response.mako_context['students'][0] # pylint: disable=no-member
self.assertEqual(student_info['grade_summary']['percent'], 0.5)
self.assertEqual(
student_info['grade_summary']['grade_breakdown'][0]['percent'],
0.5)
self.assertEqual(
len(student_info['grade_summary']['section_breakdown']), 4)
self.assertEqual(student_info['grade_summary']['grade_breakdown'].values()[0]['percent'], 0.5)
self.assertEqual(len(student_info['grade_summary']['section_breakdown']), 4)
def test_grades_csv(self):
self.course.enable_ccx = True
......@@ -1223,7 +1220,7 @@ class TestCCXGrades(FieldOverrideTestMixin, SharedModuleStoreTestCase, LoginEnro
self.assertEqual(response.status_code, 200)
grades = response.mako_context['grade_summary'] # pylint: disable=no-member
self.assertEqual(grades['percent'], 0.5)
self.assertEqual(grades['grade_breakdown'][0]['percent'], 0.5)
self.assertEqual(grades['grade_breakdown'].values()[0]['percent'], 0.5)
self.assertEqual(len(grades['section_breakdown']), 4)
......
......@@ -288,7 +288,9 @@ class TestSubmittingProblems(ModuleStoreTestCase, LoginEnrollmentTestCase, Probl
Returns list of scores: [<points on hw_1>, <points on hw_2>, ..., <points on hw_n>]
"""
return [s.graded_total.earned for s in self.get_course_grade().graded_subsections_by_format['Homework']]
return [
s.graded_total.earned for s in self.get_course_grade().graded_subsections_by_format['Homework'].itervalues()
]
def hw_grade(self, hw_url_name):
"""
......
......@@ -1235,7 +1235,7 @@ class ProgressPageTests(ModuleStoreTestCase):
@patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True})
@patch(
'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []}),
PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': {}}),
)
def test_view_certificate_link(self):
"""
......@@ -1294,7 +1294,7 @@ class ProgressPageTests(ModuleStoreTestCase):
@patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': False})
@patch(
'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': {}})
)
def test_view_certificate_link_hidden(self):
"""
......@@ -1341,7 +1341,7 @@ class ProgressPageTests(ModuleStoreTestCase):
@patch(
'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': {}})
)
@ddt.data(
*itertools.product(
......@@ -1381,7 +1381,7 @@ class ProgressPageTests(ModuleStoreTestCase):
@patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True})
@patch(
'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': {}})
)
def test_page_with_invalidated_certificate_with_html_view(self):
"""
......@@ -1415,7 +1415,7 @@ class ProgressPageTests(ModuleStoreTestCase):
@patch(
'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': {}})
)
def test_page_with_invalidated_certificate_with_pdf(self):
"""
......@@ -1432,7 +1432,7 @@ class ProgressPageTests(ModuleStoreTestCase):
@patch(
'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': {}})
)
def test_message_for_audit_mode(self):
""" Verify that message appears on progress page, if learner is enrolled
......
......@@ -18,7 +18,7 @@ def grading_context_for_course(course_key):
def grading_context(course_structure):
"""
This returns a dictionary with keys necessary for quickly grading
a student. They are used by grades.grade()
a student.
The grading context has two keys:
all_graded_subsections_by_type - This contains all subsections that are
......@@ -27,7 +27,7 @@ def grading_context(course_structure):
The values are arrays of dictionaries containing
"subsection_block" : The subsection block
"scored_descendants" : An array of usage keys for blocks
could possibly be in the subsection, for any student
that could possibly be in the subsection, for any student
all_graded_blocks - This contains a list of all blocks that can
affect grading a student. This is used to efficiently fetch
......
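A hedged sketch of the grading context shape described by this docstring; the block values and usage keys below are stand-ins, not real objects.

    # Illustrative only.
    grading_context = {
        'all_graded_subsections_by_type': {
            'Homework': [
                {
                    'subsection_block': '<hw1 sequential block>',
                    'scored_descendants': ['<problem block 1>', '<problem block 2>'],
                },
            ],
        },
        'all_graded_blocks': ['<problem block 1>', '<problem block 2>'],
    }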
......@@ -2,8 +2,7 @@
CourseGrade Class
"""
from collections import defaultdict
from collections import namedtuple
from collections import defaultdict, namedtuple, OrderedDict
from logging import getLogger
from django.conf import settings
......@@ -44,13 +43,13 @@ class CourseGrade(object):
Returns grades for the subsections in the course in
a dict keyed by subsection format types.
"""
subsections_by_format = defaultdict(list)
subsections_by_format = defaultdict(OrderedDict)
for chapter in self.chapter_grades:
for subsection_grade in chapter['sections']:
if subsection_grade.graded:
graded_total = subsection_grade.graded_total
if graded_total.possible > 0:
subsections_by_format[subsection_grade.format].append(subsection_grade)
subsections_by_format[subsection_grade.format][subsection_grade.location] = subsection_grade
return subsections_by_format
@lazy
......
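A minimal sketch of the new shape of graded_subsections_by_format: per assignment format, an OrderedDict keyed by subsection location, so course order is preserved and lookups by location (as in the rescoring test further down) become possible. The location strings and grade values are illustrative stand-ins.

    from collections import OrderedDict, defaultdict

    subsections_by_format = defaultdict(OrderedDict)
    subsections_by_format['Homework']['block-v1:edX+Demo+2017+type@sequential+block@hw1'] = {'earned': 2, 'possible': 20.0}
    subsections_by_format['Homework']['block-v1:edX+Demo+2017+type@sequential+block@hw2'] = {'earned': 16, 'possible': 16.0}

    # Positional access still works via .values(); keyed access by location is now also possible.
    hw_grades = list(subsections_by_format['Homework'].values())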
......@@ -483,7 +483,7 @@ def dump_grading_context(course):
if isinstance(course.grader, xmgraders.WeightedSubsectionsGrader):
msg += '\n'
msg += "Graded sections:\n"
for subgrader, category, weight in course.grader.sections:
for subgrader, category, weight in course.grader.subgraders:
msg += " subgrader=%s, type=%s, category=%s, weight=%s\n"\
% (subgrader.__class__, subgrader.type, category, weight)
subgrader.index = 1
......
......@@ -2,6 +2,7 @@
Base test classes for LMS instructor-initiated background tasks
"""
# pylint: disable=attribute-defined-outside-init
import os
import json
from mock import Mock, patch
......@@ -12,7 +13,6 @@ from uuid import uuid4
from celery.states import SUCCESS, FAILURE
from django.core.urlresolvers import reverse
from django.test.testcases import TestCase
from django.contrib.auth.models import User
from capa.tests.response_xml_factory import OptionResponseXMLFactory
......@@ -37,7 +37,8 @@ TEST_COURSE_ORG = 'edx'
TEST_COURSE_NAME = 'test_course'
TEST_COURSE_NUMBER = '1.23x'
TEST_COURSE_KEY = SlashSeparatedCourseKey(TEST_COURSE_ORG, TEST_COURSE_NUMBER, TEST_COURSE_NAME)
TEST_SECTION_NAME = "Problem"
TEST_CHAPTER_NAME = "Section"
TEST_SECTION_NAME = "Subsection"
TEST_FAILURE_MESSAGE = 'task failed horribly'
TEST_FAILURE_EXCEPTION = 'RandomCauseError'
......@@ -135,14 +136,18 @@ class InstructorTaskCourseTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase)
Add a chapter and a sequential to the current course.
"""
# Add a chapter to the course
chapter = ItemFactory.create(parent_location=self.course.location,
display_name=TEST_SECTION_NAME)
self.chapter = ItemFactory.create(
parent_location=self.course.location,
display_name=TEST_CHAPTER_NAME,
)
# add a sequence to the course to which the problems can be added
self.problem_section = ItemFactory.create(parent_location=chapter.location,
category='sequential',
metadata={'graded': True, 'format': 'Homework'},
display_name=TEST_SECTION_NAME)
self.problem_section = ItemFactory.create(
parent_location=self.chapter.location,
category='sequential',
metadata={'graded': True, 'format': 'Homework'},
display_name=TEST_SECTION_NAME,
)
@staticmethod
def get_user_email(username):
......@@ -335,10 +340,10 @@ class TestReportMixin(object):
file_index (int): Describes which report store file to
open. Files are ordered by last modified date, and 0
corresponds to the most recently modified file.
verify_order (boolean): When True, we verify that both the
content and order of `expected_rows` matches the
actual csv rows. When False (default), we only verify
that the content matches.
verify_order (boolean): When True (default), we verify that
both the content and order of `expected_rows` matches
the actual csv rows. When False, we only verify that
the content matches.
ignore_other_columns (boolean): When True, we verify that `expected_rows`
contains data that is a subset of the actual csv rows.
"""
......
......@@ -136,7 +136,7 @@ class TestRescoringTask(TestIntegrationTask):
course_grade = CourseGradeFactory().create(user, self.course)
self.assertEquals(
course_grade.graded_subsections_by_format['Homework'][0].graded_total.earned,
course_grade.graded_subsections_by_format['Homework'][self.problem_section.location].graded_total.earned,
expected_subsection_grade,
)
......@@ -574,13 +574,13 @@ class TestGradeReportConditionalContent(TestReportMixin, TestConditionalContent,
self.verify_rows_in_csv(
[
merge_dicts(
{'id': str(student.id), 'username': student.username, 'email': student.email},
{'Student ID': str(student.id), 'Username': student.username, 'Email': student.email},
grades,
user_partition_group(student)
)
for student_grades in students_grades for student, grades in student_grades.iteritems()
],
ignore_other_columns=ignore_other_columns
ignore_other_columns=ignore_other_columns,
)
def test_both_groups_problems(self):
......@@ -604,10 +604,20 @@ class TestGradeReportConditionalContent(TestReportMixin, TestConditionalContent,
self.verify_csv_task_success(result)
self.verify_grades_in_csv(
[
{self.student_a: {'grade': '1.0', 'HW': '1.0'}},
{self.student_b: {'grade': '0.5', 'HW': '0.5'}}
{
self.student_a: {
u'Grade': '1.0',
u'Homework': '1.0',
}
},
{
self.student_b: {
u'Grade': '0.5',
u'Homework': '0.5',
}
},
],
ignore_other_columns=True
ignore_other_columns=True,
)
def test_one_group_problem(self):
......@@ -627,8 +637,18 @@ class TestGradeReportConditionalContent(TestReportMixin, TestConditionalContent,
self.verify_csv_task_success(result)
self.verify_grades_in_csv(
[
{self.student_a: {'grade': '1.0', 'HW': '1.0'}},
{self.student_b: {'grade': '0.0', 'HW': '0.0'}}
{
self.student_a: {
u'Grade': '1.0',
u'Homework': '1.0',
},
},
{
self.student_b: {
u'Grade': '0.0',
u'Homework': u'Not Accessible',
}
},
],
ignore_other_columns=True
)
......@@ -590,7 +590,7 @@ class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
# technically possible in openedx.
self.student_1 = self.create_student(u'üser_1')
self.student_2 = self.create_student(u'üser_2')
self.csv_header_row = [u'Student ID', u'Email', u'Username', u'Final Grade']
self.csv_header_row = [u'Student ID', u'Email', u'Username', u'Grade']
@patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task')
def test_no_problems(self, _get_current_task):
......@@ -624,7 +624,7 @@ class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
self.submit_student_answer(self.student_1.username, u'Problem1', ['Option 1'])
result = upload_problem_grade_report(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset({'action_name': 'graded', 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
problem_name = u'Homework 1: Problem - Problem1'
problem_name = u'Homework 1: Subsection - Problem1'
header_row = self.csv_header_row + [problem_name + ' (Earned)', problem_name + ' (Possible)']
self.verify_rows_in_csv([
dict(zip(
......@@ -642,7 +642,7 @@ class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
unicode(self.student_2.id),
self.student_2.email,
self.student_2.username,
'0.0', 'Not Attempted', '2.0',
'0.0', u'Not Attempted', '2.0',
]
))
])
......@@ -714,8 +714,8 @@ class TestProblemReportSplitTestContent(TestReportMixin, TestConditionalContent,
{'action_name': 'graded', 'attempted': 2, 'succeeded': 2, 'failed': 0}, result
)
problem_names = [u'Homework 1: Problem - problem_a_url', u'Homework 1: Problem - problem_b_url']
header_row = [u'Student ID', u'Email', u'Username', u'Final Grade']
problem_names = [u'Homework 1: Subsection - problem_a_url', u'Homework 1: Subsection - problem_b_url']
header_row = [u'Student ID', u'Email', u'Username', u'Grade']
for problem in problem_names:
header_row += [problem + ' (Earned)', problem + ' (Possible)']
......@@ -796,7 +796,7 @@ class TestProblemReportSplitTestContent(TestReportMixin, TestConditionalContent,
title = 'Homework %d 1: Problem section %d - %s' % (i, i, problem_url)
problem_names.append(title)
header_row = [u'Student ID', u'Email', u'Username', u'Final Grade']
header_row = [u'Student ID', u'Email', u'Username', u'Grade']
for problem in problem_names:
header_row += [problem + ' (Earned)', problem + ' (Possible)']
......@@ -861,8 +861,8 @@ class TestProblemReportCohortedContent(TestReportMixin, ContentGroupTestCase, In
self.assertDictContainsSubset(
{'action_name': 'graded', 'attempted': 4, 'succeeded': 4, 'failed': 0}, result
)
problem_names = [u'Homework 1: Problem - Problem0', u'Homework 1: Problem - Problem1']
header_row = [u'Student ID', u'Email', u'Username', u'Final Grade']
problem_names = [u'Homework 1: Subsection - Problem0', u'Homework 1: Subsection - Problem1']
header_row = [u'Student ID', u'Email', u'Username', u'Grade']
for problem in problem_names:
header_row += [problem + ' (Earned)', problem + ' (Possible)']
......@@ -1507,6 +1507,90 @@ class TestCohortStudents(TestReportMixin, InstructorTaskCourseTestCase):
)
@patch('lms.djangoapps.instructor_task.tasks_helper.DefaultStorage', new=MockDefaultStorage)
class TestGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
"""
Test that grade report has correct grade values.
"""
def setUp(self):
super(TestGradeReport, self).setUp()
self.create_course()
self.student = self.create_student(u'üser_1')
def create_course(self):
"""
Creates a course with various subsections for testing
"""
self.course = CourseFactory.create(
grading_policy={
"GRADER": [
{
"type": "Homework",
"min_count": 4,
"drop_count": 0,
"weight": 1.0
},
],
},
)
self.chapter = ItemFactory.create(parent=self.course, category='chapter')
self.problem_section = ItemFactory.create(
parent=self.chapter,
category='sequential',
metadata={'graded': True, 'format': 'Homework'},
display_name='Subsection'
)
self.define_option_problem(u'Problem1', parent=self.problem_section, num_responses=1)
self.hidden_section = ItemFactory.create(
parent=self.chapter,
category='sequential',
metadata={'graded': True, 'format': 'Homework'},
visible_to_staff_only=True,
display_name='Hidden',
)
self.define_option_problem(u'Problem2', parent=self.hidden_section)
self.unattempted_section = ItemFactory.create(
parent=self.chapter,
category='sequential',
metadata={'graded': True, 'format': 'Homework'},
display_name='Unattempted',
)
self.define_option_problem(u'Problem3', parent=self.unattempted_section)
self.empty_section = ItemFactory.create(
parent=self.chapter,
category='sequential',
metadata={'graded': True, 'format': 'Homework'},
display_name='Empty',
)
def test_grade_report(self):
self.submit_student_answer(self.student.username, u'Problem1', ['Option 1'])
with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
result = upload_grades_csv(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset(
{'action_name': 'graded', 'attempted': 1, 'succeeded': 1, 'failed': 0},
result,
)
self.verify_rows_in_csv(
[
{
u'Student ID': unicode(self.student.id),
u'Email': self.student.email,
u'Username': self.student.username,
u'Grade': '0.13',
u'Homework 1: Subsection': '0.5',
u'Homework 2: Hidden': u'Not Accessible',
u'Homework 3: Unattempted': u'Not Attempted',
u'Homework 4: Empty': u'Not Accessible',
u'Homework (Avg)': '0.125',
},
],
ignore_other_columns=True,
)
@ddt.ddt
@patch('lms.djangoapps.instructor_task.tasks_helper.DefaultStorage', new=MockDefaultStorage)
class TestGradeReportEnrollmentAndCertificateInfo(TestReportMixin, InstructorTaskModuleTestCase):
......
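The new per-subsection cells ('Not Attempted', 'Not Accessible', or a score) imply a decision along these lines; the actual tasks_helper change is not part of this excerpt, so the helper below is a hypothetical reconstruction with stand-in types.

    # Hypothetical reconstruction; FakeTotal stands in for a real aggregated score object.
    class FakeTotal(object):
        def __init__(self, earned, possible, attempted):
            self.earned, self.possible, self.attempted = earned, possible, attempted

    def subsection_cell(graded_total):
        if graded_total is None:
            return u'Not Accessible'   # learner cannot see the subsection (hidden, group-gated, or empty)
        if not graded_total.attempted:
            return u'Not Attempted'    # visible and graded, but nothing submitted yet
        return u'{0}'.format(float(graded_total.earned) / graded_total.possible)

    print(subsection_cell(FakeTotal(1, 2.0, True)))    # 0.5
    print(subsection_cell(FakeTotal(0, 2.0, False)))   # Not Attempted
    print(subsection_cell(None))                       # Not Accessible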
......@@ -100,7 +100,7 @@ $(function () {
extraColorIndex = len(categories) #Keeping track of the next color to use for categories not in categories[]
if show_grade_breakdown:
for section in grade_summary['grade_breakdown']:
for section in grade_summary['grade_breakdown'].itervalues():
if section['percent'] > 0:
if section['category'] in categories:
color = categories[ section['category'] ]['color']
......
......@@ -104,7 +104,7 @@ class TestConditionalContent(ModuleStoreTestCase):
"""
Construct a course with graded problems that exist within a split test.
"""
TEST_SECTION_NAME = 'Problem'
TEST_SECTION_NAME = 'Subsection'
def setUp(self):
"""
......