Commit 382cd8bb by pmitros

Merge pull request #18 from MITx/grade-refactor

Grade refactor
parents 969e001a 3b882b01
"""
Course settings module. The settings are based on django.conf. All settings in
courseware.global_course_settings are first applied, and then any settings
in the settings.DATA_DIR/course_settings.py are applied. A setting must be
in ALL_CAPS.

Settings are accessed by importing the course_settings object:

from courseware import course_settings

Note that courseware.course_settings is not a module -- it's an object. So
importing individual settings is not possible:

from courseware.course_settings import GRADER # This won't work.
"""
import imp
import logging
import sys
import types
from django.conf import settings
from courseware import global_course_settings
from courseware import graders
_log = logging.getLogger("mitx.courseware")
class Settings(object):
    def __init__(self):
        # update this dict from global settings (but only for ALL_CAPS settings)
        for setting in dir(global_course_settings):
            if setting == setting.upper():
                setattr(self, setting, getattr(global_course_settings, setting))

        data_dir = settings.DATA_DIR

        fp = None
        try:
            fp, pathname, description = imp.find_module("course_settings", [data_dir])
            mod = imp.load_module("course_settings", fp, pathname, description)
        except Exception as e:
            _log.exception("Unable to import course settings file from " + data_dir + ". Error: " + str(e))
            mod = types.ModuleType('course_settings')
        finally:
            if fp:
                fp.close()

        for setting in dir(mod):
            if setting == setting.upper():
                setting_value = getattr(mod, setting)
                setattr(self, setting, setting_value)

        # Here is where we should parse any configurations, so that we can fail early
        self.GRADER = graders.grader_from_conf(self.GRADER)

course_settings = Settings()
GRADER = [
    {
        'type' : "Homework",
        'min_count' : 12,
        'drop_count' : 2,
        'short_label' : "HW",
        'weight' : 0.15,
    },
    {
        'type' : "Lab",
        'min_count' : 12,
        'drop_count' : 2,
        'category' : "Labs",
        'weight' : 0.15
    },
    {
        'type' : "Midterm",
        'name' : "Midterm Exam",
        'short_label' : "Midterm",
        'weight' : 0.3,
    },
    {
        'type' : "Final",
        'name' : "Final Exam",
        'short_label' : "Final",
        'weight' : 0.4,
    }
]
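# The weights above sum to 0.15 + 0.15 + 0.3 + 0.4 = 1.0. WeightedSubsectionsGrader
# (below) does not enforce that total, so a sanity check like this hypothetical one
# can catch configuration typos early:
total_weight = sum(entry.get('weight', 0) for entry in GRADER)
assert abs(total_weight - 1.0) < 1e-9, "GRADER weights sum to %s, not 1.0" % total_weight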
import abc
import logging
import random  # used by the settings.GENERATE_PROFILE_SCORES paths below

from collections import namedtuple

from django.conf import settings

log = logging.getLogger("mitx.courseware")
# This is a tuple for holding scores, either from problems or sections.
# 'section' holds either the name of the problem or the name of the section.
Score = namedtuple("Score", "earned possible graded section")
def grader_from_conf(conf):
    """
    This creates a CourseGrader from a configuration (such as in course_settings.py).
    The conf can simply be an instance of CourseGrader, in which case no work is done.
    More commonly, the conf is a list of dictionaries. A WeightedSubsectionsGrader
    with AssignmentFormatGraders or SingleSectionGraders as subsections will be
    generated. Every dictionary should contain the parameters for making either an
    AssignmentFormatGrader or a SingleSectionGrader, in addition to a 'weight' key.
    """
    if isinstance(conf, CourseGrader):
        return conf

    subgraders = []
    for subgraderconf in conf:
        subgraderconf = subgraderconf.copy()
        weight = subgraderconf.pop("weight", 0)
        try:
            if 'min_count' in subgraderconf:
                # This is an AssignmentFormatGrader
                subgrader = AssignmentFormatGrader(**subgraderconf)
                subgraders.append( (subgrader, subgrader.category, weight) )
            elif 'name' in subgraderconf:
                # This is a SingleSectionGrader
                subgrader = SingleSectionGrader(**subgraderconf)
                subgraders.append( (subgrader, subgrader.category, weight) )
            else:
                raise ValueError("Configuration has no appropriate grader class.")
        except (TypeError, ValueError) as error:
            errorString = "Unable to parse grader configuration:\n " + str(subgraderconf) + "\n Error was:\n " + str(error)
            log.critical(errorString)
            raise ValueError(errorString)

    return WeightedSubsectionsGrader( subgraders )
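# For illustration, a minimal conf and the grader it produces (the values are
# invented, not from a real course):
#
#     conf = [
#         {'type': "Homework", 'min_count': 2, 'drop_count': 0, 'weight': 0.5},  # -> AssignmentFormatGrader
#         {'type': "Final", 'name': "Final Exam", 'weight': 0.5},                # -> SingleSectionGrader
#     ]
#     grader = grader_from_conf(conf)  # a WeightedSubsectionsGrader over both subgraders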
class CourseGrader(object):
    """
    A course grader takes the totaled scores for each graded section (that a student has
    started) in the course. From these scores, the grader calculates an overall percentage
    grade. The grader should also generate information about how that score was calculated,
    to be displayed in graphs or charts.

    A grader has one required method, grade(), which is passed a grade_sheet. The grade_sheet
    contains scores for all graded sections that the student has started. If a student has
    a score of 0 for a section, it may be missing from the grade_sheet. The grade_sheet
    is keyed by section format. Each value is a list of Score namedtuples, one for each
    section that has the matching section format.

    The grader outputs a dictionary with the following keys:
    - percent: Contains a float value, which is the final percentage score for the student.
    - section_breakdown: This is a list of dictionaries which provide details on sections
    that were graded. These are used for display in a graph or chart. The format for a
    section_breakdown dictionary is explained below.
    - grade_breakdown: This is a list of dictionaries which provide details on the contributions
    of the final percentage grade. This is a higher-level breakdown, for when the grade is constructed
    of a few very large sections (such as Homeworks, Labs, a Midterm, and a Final). The format for
    a grade_breakdown is explained below. This key is optional.

    A dictionary in the section_breakdown list has the following keys:
    percent: A float percentage for the section.
    label: A short string identifying the section. Preferably fixed-length. E.g. "HW 3".
    detail: A string explanation of the score. E.g. "Homework 1 - Ohms Law - 83% (5/6)"
    category: A string identifying the category. Items with the same category are grouped together
    in the display (for example, by color).
    prominent: A boolean value indicating that this section should be displayed as more prominent
    than other items.

    A dictionary in the grade_breakdown list has the following keys:
    percent: A float percentage in the breakdown. All percents should add up to the final percentage.
    detail: A string explanation of this breakdown. E.g. "Homework - 10% of a possible 15%"
    category: A string identifying the category. Items with the same category are grouped together
    in the display (for example, by color).
    """
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def grade(self, grade_sheet):
        raise NotImplementedError
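# As a concrete (entirely invented) example, a grade() result matching the format
# described above might look like:
#
#     {'percent': 0.65,
#      'section_breakdown': [
#          {'percent': 0.83, 'label': "HW 01", 'detail': "Homework 1 - Ohms Law - 83% (5/6)",
#           'category': "Homework"},
#          {'percent': 0.65, 'label': "Midterm", 'detail': "Midterm Exam - 65% (65/100)",
#           'category': "Midterm", 'prominent': True}],
#      'grade_breakdown': [
#          {'percent': 0.125, 'detail': "Homework = 12.5% of a possible 15%",
#           'category': "Homework"}]}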
class WeightedSubsectionsGrader(CourseGrader):
    """
    This grader takes a list of tuples containing (grader, category_name, weight) and computes
    a final grade by totalling the contribution of each subgrader and multiplying it by the
    given weight. For example, the sections may be

    [ (homeworkGrader, "Homework", 0.15), (labGrader, "Labs", 0.15), (midtermGrader, "Midterm", 0.30), (finalGrader, "Final", 0.40) ]

    All items in section_breakdown for each subgrader will be combined. A grade_breakdown will be
    composed using the score from each grader.

    Note that the sum of the weights is not taken into consideration. If the weights add up to
    a value > 1, the student may end up with a percent > 100%. This allows for sections that
    are extra credit.
    """
    def __init__(self, sections):
        self.sections = sections

    def grade(self, grade_sheet):
        total_percent = 0.0
        section_breakdown = []
        grade_breakdown = []

        for subgrader, category, weight in self.sections:
            subgrade_result = subgrader.grade(grade_sheet)

            weightedPercent = subgrade_result['percent'] * weight
            section_detail = "{0} = {1:.1%} of a possible {2:.0%}".format(category, weightedPercent, weight)

            total_percent += weightedPercent
            section_breakdown += subgrade_result['section_breakdown']
            grade_breakdown.append( {'percent' : weightedPercent, 'detail' : section_detail, 'category' : category} )

        return {'percent' : total_percent,
                'section_breakdown' : section_breakdown,
                'grade_breakdown' : grade_breakdown}
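# Worked example with hypothetical subscores: using the weights from the
# configuration above (0.15, 0.15, 0.30, 0.40) and subgrader percents of
# 80% homework, 100% labs, 50% midterm and 75% final, the total is
# 0.8*0.15 + 1.0*0.15 + 0.5*0.30 + 0.75*0.40 = 0.12 + 0.15 + 0.15 + 0.30 = 0.72.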
class SingleSectionGrader(CourseGrader):
    """
    This grades a single section with the format 'type' and the name 'name'.
    If the name is not appropriate for the short_label or category, they each may
    be specified individually.
    """
    def __init__(self, type, name, short_label = None, category = None):
        self.type = type
        self.name = name
        self.short_label = short_label or name
        self.category = category or name

    def grade(self, grade_sheet):
        foundScore = None
        if self.type in grade_sheet:
            for score in grade_sheet[self.type]:
                if score.section == self.name:
                    foundScore = score
                    break

        if foundScore:
            percent = foundScore.earned / float(foundScore.possible)
            detail = "{name} - {percent:.0%} ({earned:.3n}/{possible:.3n})".format( name = self.name,
                                                                                    percent = percent,
                                                                                    earned = float(foundScore.earned),
                                                                                    possible = float(foundScore.possible))
        else:
            percent = 0.0
            detail = "{name} - 0% (?/?)".format(name = self.name)

        if settings.GENERATE_PROFILE_SCORES:
            points_possible = random.randrange(50, 100)
            points_earned = random.randrange(40, points_possible)
            percent = points_earned / float(points_possible)
            detail = "{name} - {percent:.0%} ({earned:.3n}/{possible:.3n})".format( name = self.name,
                                                                                    percent = percent,
                                                                                    earned = float(points_earned),
                                                                                    possible = float(points_possible))

        breakdown = [{'percent': percent, 'label': self.short_label, 'detail': detail, 'category': self.category, 'prominent': True}]

        return {'percent' : percent,
                'section_breakdown' : breakdown,
                #No grade_breakdown here
                }
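# A small usage sketch (hypothetical, and assuming settings.GENERATE_PROFILE_SCORES
# is off), mirroring the midterm fixture from the tests further down:
midterm_grader = SingleSectionGrader("Midterm", "Midterm Exam")
sheet = {'Midterm': [Score(earned=50.5, possible=100, graded=True, section="Midterm Exam")]}
result = midterm_grader.grade(sheet)
# result['percent'] == 0.505, with one entry in result['section_breakdown']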
class AssignmentFormatGrader(CourseGrader):
    """
    Grades all sections matching the format 'type' with an equal weight. A specified
    number of lowest scores can be dropped from the calculation. The minimum number of
    sections in this format must be specified (even if those sections haven't been
    written yet).

    min_count defines how many assignments are expected throughout the course. Placeholder
    scores (of 0) will be inserted if the number of matching sections in the course is < min_count.
    If the number of matching sections in the course is > min_count, min_count will be ignored.

    category should be presentable to the user, but may not appear. When the grade breakdown is
    displayed, scores from the same category will be similar (for example, by color).

    section_type is a string that is the type of a singular section. For example, for Labs it
    would be "Lab". This defaults to be the same as category.

    short_label is similar to section_type, but shorter. For example, for Homework it would be
    "HW".
    """
    def __init__(self, type, min_count, drop_count, category = None, section_type = None, short_label = None):
        self.type = type
        self.min_count = min_count
        self.drop_count = drop_count
        self.category = category or self.type
        self.section_type = section_type or self.type
        self.short_label = short_label or self.type

    def grade(self, grade_sheet):
        def totalWithDrops(breakdown, drop_count):
            # Create an array of tuples with (index, mark), sorted by mark['percent'] descending
            sorted_breakdown = sorted( enumerate(breakdown), key=lambda x: -x[1]['percent'] )
            # A list of the indices of the dropped scores
            dropped_indices = []
            if drop_count > 0:
                dropped_indices = [x[0] for x in sorted_breakdown[-drop_count:]]
            aggregate_score = 0
            for index, mark in enumerate(breakdown):
                if index not in dropped_indices:
                    aggregate_score += mark['percent']

            if (len(breakdown) - drop_count > 0):
                aggregate_score /= len(breakdown) - drop_count

            return aggregate_score, dropped_indices

        # Figure the scores for all sections matching this assignment format
        scores = grade_sheet.get(self.type, [])
        breakdown = []
        for i in range( max(self.min_count, len(scores)) ):
            if i < len(scores):
                percentage = scores[i].earned / float(scores[i].possible)
                summary = "{section_type} {index} - {name} - {percent:.0%} ({earned:.3n}/{possible:.3n})".format(index = i+1,
                                                                section_type = self.section_type,
                                                                name = scores[i].section,
                                                                percent = percentage,
                                                                earned = float(scores[i].earned),
                                                                possible = float(scores[i].possible) )
            else:
                percentage = 0
                summary = "{section_type} {index} Unreleased - 0% (?/?)".format(index = i+1, section_type = self.section_type)

            if settings.GENERATE_PROFILE_SCORES:
                points_possible = random.randrange(10, 50)
                points_earned = random.randrange(5, points_possible)
                percentage = points_earned / float(points_possible)
                summary = "{section_type} {index} - {name} - {percent:.0%} ({earned:.3n}/{possible:.3n})".format(index = i+1,
                                                                section_type = self.section_type,
                                                                name = "Randomly Generated",
                                                                percent = percentage,
                                                                earned = float(points_earned),
                                                                possible = float(points_possible) )

            short_label = "{short_label} {index:02d}".format(index = i+1, short_label = self.short_label)

            breakdown.append( {'percent': percentage, 'label': short_label, 'detail': summary, 'category': self.category} )

        total_percent, dropped_indices = totalWithDrops(breakdown, self.drop_count)

        for dropped_index in dropped_indices:
            breakdown[dropped_index]['mark'] = {'detail': "The lowest {drop_count} {section_type} scores are dropped.".format(drop_count = self.drop_count, section_type=self.section_type) }

        total_detail = "{section_type} Average = {percent:.0%}".format(percent = total_percent, section_type = self.section_type)
        total_label = "{short_label} Avg".format(short_label = self.short_label)
        breakdown.append( {'percent': total_percent, 'label': total_label, 'detail': total_detail, 'category': self.category, 'prominent': True} )

        return {'percent' : total_percent,
                'section_breakdown' : breakdown,
                #No grade_breakdown here
                }
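# To make the drop logic concrete (invented scores, GENERATE_PROFILE_SCORES off):
# four labs at 90%, 50%, 70% and 30%, dropping the single lowest, average to
# (0.9 + 0.5 + 0.7) / 3 = 0.7.
lab_grader = AssignmentFormatGrader("Lab", min_count=4, drop_count=1)
sheet = {'Lab': [Score(earned=9, possible=10.0, graded=True, section='lab1'),
                 Score(earned=5, possible=10.0, graded=True, section='lab2'),
                 Score(earned=7, possible=10.0, graded=True, section='lab3'),
                 Score(earned=3, possible=10.0, graded=True, section='lab4')]}  # lab4 is dropped
result = lab_grader.grade(sheet)
# result['percent'] == 0.7; the breakdown has 4 entries plus the "Lab Avg" summary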
import courseware.content_parser as content_parser
import courseware.modules
import logging
from lxml import etree
import random
import urllib
from collections import namedtuple
from django.conf import settings
from models import StudentModule
from student.models import UserProfile
log = logging.getLogger("mitx.courseware")
Score = namedtuple("Score", "earned possible weight graded section")
def get_grade(user, problem, cache):
## HACK: assumes max score is fixed per problem
id = problem.get('id')
correct = 0
# If the ID is not in the cache, add the item
if id not in cache:
module = StudentModule(module_type = 'problem', # TODO: Move into StudentModule.__init__?
module_id = id,
student = user,
state = None,
grade = 0,
max_grade = None,
done = 'i')
cache[id] = module
# Grab the # correct from cache
if id in cache:
response = cache[id]
if response.grade!=None:
correct=response.grade
# Grab max grade from cache, or if it doesn't exist, compute and save to DB
if id in cache and response.max_grade != None:
total = response.max_grade
else:
total=courseware.modules.capa_module.Module(etree.tostring(problem), "id").max_score()
response.max_grade = total
response.save()
return (correct, total)
from courseware import course_settings
import courseware.content_parser as content_parser
from courseware.graders import Score
import courseware.modules
from models import StudentModule
def grade_sheet(student):
"""
@@ -54,9 +17,7 @@ def grade_sheet(student):
each containing an array of sections, each containing an array of scores. This contains information for graded and ungraded
problems, and is good for displaying a course summary with due dates, etc.
- grade_summary is a summary of how the final grade breaks down. It is an array of "sections". Each section can either be
a conglomerate of scores (like labs or homeworks) which has subscores and a totalscore, or a section can be all from one assignment
(such as a midterm or final) and only has a totalscore. Each section has a weight that shows how it contributes to the total grade.
- grade_summary is the output from the course grader. More information on the format is in the docstring for CourseGrader.
"""
dom=content_parser.course_file(student)
course = dom.xpath('//course/@name')[0]
@@ -68,7 +29,6 @@ def grade_sheet(student):
response_by_id[response.module_id] = response
totaled_scores = {}
chapters=[]
for c in xmlChapters:
@@ -85,33 +45,29 @@ def grade_sheet(student):
scores=[]
if len(problems)>0:
for p in problems:
(correct,total) = get_grade(student, p, response_by_id)
# id = p.get('id')
# correct = 0
# if id in response_by_id:
# response = response_by_id[id]
# if response.grade!=None:
# correct=response.grade
# total=courseware.modules.capa_module.Module(etree.tostring(p), "id").max_score() # TODO: Add state. Not useful now, but maybe someday problems will have randomized max scores?
# print correct, total
(correct,total) = get_score(student, p, response_by_id)
if settings.GENERATE_PROFILE_SCORES:
if total > 1:
correct = random.randrange( max(total-2, 1) , total + 1 )
else:
correct = total
scores.append( Score(int(correct),total, float(p.get("weight", total)), graded, p.get("name")) )
if not total > 0:
#We simply cannot grade a problem that is 12/0, because we might need it as a percentage
graded = False
scores.append( Score(correct,total, graded, p.get("name")) )
section_total, graded_total = aggregate_scores(scores)
section_total, graded_total = aggregate_scores(scores, s.get("name"))
#Add the graded total to totaled_scores
format = s.get('format') if s.get('format') else ""
subtitle = s.get('subtitle') if s.get('subtitle') else format
format = s.get('format', "")
subtitle = s.get('subtitle', format)
if format and graded_total[1] > 0:
format_scores = totaled_scores.get(format, [])
format_scores.append( graded_total )
totaled_scores[ format ] = format_scores
score={'section':s.get("name"),
section_score={'section':s.get("name"),
'scores':scores,
'section_total' : section_total,
'format' : format,
@@ -119,154 +75,75 @@ def grade_sheet(student):
'due' : s.get("due") or "",
'graded' : graded,
}
sections.append(score)
sections.append(section_score)
chapters.append({'course':course,
'chapter' : c.get("name"),
'sections' : sections,})
grade_summary = grade_summary_6002x(totaled_scores)
return {'courseware_summary' : chapters, #all assessments as they appear in the course definition
'grade_summary' : grade_summary, #graded assessments only
}
grader = course_settings.GRADER
grade_summary = grader.grade(totaled_scores)
return {'courseware_summary' : chapters,
'grade_summary' : grade_summary}
def aggregate_scores(scores):
scores = filter( lambda score: score.possible > 0, scores )
def aggregate_scores(scores, section_name = "summary"):
total_correct_graded = sum(score.earned for score in scores if score.graded)
total_possible_graded = sum(score.possible for score in scores if score.graded)
total_correct_graded = sum((score.earned*1.0/score.possible)*score.weight for score in scores if score.graded)
total_possible_graded = sum(score.weight for score in scores if score.graded)
total_correct = sum((score.earned*1.0/score.possible)*score.weight for score in scores)
total_possible = sum(score.weight for score in scores)
total_correct = sum(score.earned for score in scores)
total_possible = sum(score.possible for score in scores)
#regardless of whether or not it is graded
all_total = Score(total_correct,
total_possible,
1,
False,
"summary")
section_name)
#selecting only graded things
graded_total = Score(total_correct_graded,
total_possible_graded,
1,
True,
"summary")
section_name)
return all_total, graded_total
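# A sketch of the refactored (unweighted) aggregate_scores with the new
# four-field Score; one ungraded and one graded problem roll up like this:
scores = [Score(earned=0, possible=5, graded=False, section="p1"),
          Score(earned=3, possible=5, graded=True, section="p2")]
all_total, graded_total = aggregate_scores(scores, "Ohms Law")
# all_total    == Score(earned=3, possible=10, graded=False, section="Ohms Law")
# graded_total == Score(earned=3, possible=5,  graded=True,  section="Ohms Law")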
def grade_summary_6002x(totaled_scores):
"""
This function takes a dictionary of (graded) section scores, and applies the course grading rules to create
the grade_summary. For 6.002x this means homeworks and labs all have equal weight, with the lowest 2 of each
being dropped. There is one midterm and one final.
"""
def totalWithDrops(scores, drop_count):
#Note that this key will sort the list descending
sorted_scores = sorted( enumerate(scores), key=lambda x: -x[1]['percentage'] )
# A list of the indices of the dropped scores
dropped_indices = [score[0] for score in sorted_scores[-drop_count:]]
aggregate_score = 0
for index, score in enumerate(scores):
if index not in dropped_indices:
aggregate_score += score['percentage']
aggregate_score /= len(scores) - drop_count
return aggregate_score, dropped_indices
#Figure the homework scores
homework_scores = totaled_scores['Homework'] if 'Homework' in totaled_scores else []
homework_percentages = []
for i in range(12):
if i < len(homework_scores):
percentage = homework_scores[i].earned / float(homework_scores[i].possible)
summary = "Homework {0} - {1} - {2:.0%} ({3:g}/{4:g})".format( i + 1, homework_scores[i].section , percentage, homework_scores[i].earned, homework_scores[i].possible )
else:
percentage = 0
summary = "Unreleased Homework {0} - 0% (?/?)".format(i + 1)
if settings.GENERATE_PROFILE_SCORES:
points_possible = random.randrange(10, 50)
points_earned = random.randrange(5, points_possible)
percentage = points_earned / float(points_possible)
summary = "Random Homework - {0:.0%} ({1:g}/{2:g})".format( percentage, points_earned, points_possible )
label = "HW {0:02d}".format(i + 1)
homework_percentages.append( {'percentage': percentage, 'summary': summary, 'label' : label} )
homework_total, homework_dropped_indices = totalWithDrops(homework_percentages, 2)
#Figure the lab scores
lab_scores = totaled_scores['Lab'] if 'Lab' in totaled_scores else []
lab_percentages = []
for i in range(12):
if i < len(lab_scores):
percentage = lab_scores[i].earned / float(lab_scores[i].possible)
summary = "Lab {0} - {1} - {2:.0%} ({3:g}/{4:g})".format( i + 1, lab_scores[i].section , percentage, lab_scores[i].earned, lab_scores[i].possible )
else:
percentage = 0
summary = "Unreleased Lab {0} - 0% (?/?)".format(i + 1)
if settings.GENERATE_PROFILE_SCORES:
points_possible = random.randrange(10, 50)
points_earned = random.randrange(5, points_possible)
percentage = points_earned / float(points_possible)
summary = "Random Lab - {0:.0%} ({1:g}/{2:g})".format( percentage, points_earned, points_possible )
label = "Lab {0:02d}".format(i + 1)
lab_percentages.append( {'percentage': percentage, 'summary': summary, 'label' : label} )
lab_total, lab_dropped_indices = totalWithDrops(lab_percentages, 2)
#TODO: Pull this data about the midterm and final from the database. It should be handled exactly like the above, but we aren't sure how exams will be done yet.
#This is a hack, but I have no intention of having this function be useful for anything but 6.002x anyway, so I don't want to make it pretty.
midterm_score = totaled_scores['Midterm'][0] if 'Midterm' in totaled_scores else Score('?', '?', '?', True, "?")
midterm_percentage = midterm_score.earned * 1.0 / midterm_score.possible if 'Midterm' in totaled_scores else 0
final_score = totaled_scores['Final'][0] if 'Final' in totaled_scores else Score('?', '?', '?', True, "?")
final_percentage = final_score.earned * 1.0 / final_score.possible if 'Final' in totaled_scores else 0
def get_score(user, problem, cache):
## HACK: assumes max score is fixed per problem
id = problem.get('id')
correct = 0.0
if settings.GENERATE_PROFILE_SCORES:
midterm_score = Score(random.randrange(50, 150), 150, 150, True, "?")
midterm_percentage = midterm_score.earned / float(midterm_score.possible)
# If the ID is not in the cache, add the item
if id not in cache:
module = StudentModule(module_type = 'problem', # TODO: Move into StudentModule.__init__?
module_id = id,
student = user,
state = None,
grade = 0,
max_grade = None,
done = 'i')
cache[id] = module
# Grab the # correct from cache
if id in cache:
response = cache[id]
if response.grade!=None:
correct=float(response.grade)
final_score = Score(random.randrange(100, 300), 300, 300, True, "?")
final_percentage = final_score.earned / float(final_score.possible)
# Grab max grade from cache, or if it doesn't exist, compute and save to DB
if id in cache and response.max_grade != None:
total = response.max_grade
else:
total=float(courseware.modules.capa_module.Module(etree.tostring(problem), "id").max_score())
response.max_grade = total
response.save()
grade_summary = [
{
'category': 'Homework',
'subscores' : homework_percentages,
'dropped_indices' : homework_dropped_indices,
'totalscore' : homework_total,
'totalscore_summary' : "Homework Average - {0:.0%}".format(homework_total),
'totallabel' : 'HW Avg',
'weight' : 0.15,
},
{
'category': 'Labs',
'subscores' : lab_percentages,
'dropped_indices' : lab_dropped_indices,
'totalscore' : lab_total,
'totalscore_summary' : "Lab Average - {0:.0%}".format(lab_total),
'totallabel' : 'Lab Avg',
'weight' : 0.15,
},
{
'category': 'Midterm',
'totalscore' : midterm_percentage,
'totalscore_summary' : "Midterm - {0:.0%} ({1}/{2})".format(midterm_percentage, midterm_score.earned, midterm_score.possible),
'totallabel' : 'Midterm',
'weight' : 0.30,
},
{
'category': 'Final',
'totalscore' : final_percentage,
'totalscore_summary' : "Final - {0:.0%} ({1}/{2})".format(final_percentage, final_score.earned, final_score.possible),
'totallabel' : 'Final',
'weight' : 0.40,
}
]
return grade_summary
#Now we re-weight the problem, if specified
weight = problem.get("weight", None)
if weight:
weight = float(weight)
correct = correct * weight / total
total = weight
return (correct, total)
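# Worked example of the re-weighting above (hypothetical numbers): a problem with
# max_score 6 and weight 10 on which a student earned 3 raw points returns
# correct = 3 * 10 / 6 = 5.0 and total = 10.0 -- the raw fraction is preserved
# at the new scale.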
@@ -4,7 +4,9 @@ import numpy
import courseware.modules
import courseware.capa.calc as calc
from grades import Score, aggregate_scores
import courseware.graders as graders
from courseware.graders import Score, CourseGrader, WeightedSubsectionsGrader, SingleSectionGrader, AssignmentFormatGrader
from courseware.grades import aggregate_scores
class ModelsTest(unittest.TestCase):
def setUp(self):
@@ -59,42 +61,218 @@ class ModelsTest(unittest.TestCase):
exception_happened = True
self.assertTrue(exception_happened)
class GraderTest(unittest.TestCase):
class GradesheetTest(unittest.TestCase):
def test_weighted_grading(self):
scores = []
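# Overriding __sub__ lets assertAlmostEqual compare Score tuples below: the
# difference is the sum of the earned and possible deltas, which is ~0 when
# the two scores agree.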
Score.__sub__=lambda me, other: (me.earned - other.earned) + (me.possible - other.possible)
all, graded = aggregate_scores(scores)
self.assertEqual(all, Score(earned=0, possible=0, weight=1, graded=False, section="summary"))
self.assertEqual(graded, Score(earned=0, possible=0, weight=1, graded=True, section="summary"))
self.assertEqual(all, Score(earned=0, possible=0, graded=False, section="summary"))
self.assertEqual(graded, Score(earned=0, possible=0, graded=True, section="summary"))
scores.append(Score(earned=0, possible=5, weight=1, graded=False, section="summary"))
scores.append(Score(earned=0, possible=5, graded=False, section="summary"))
all, graded = aggregate_scores(scores)
self.assertEqual(all, Score(earned=0, possible=1, weight=1, graded=False, section="summary"))
self.assertEqual(graded, Score(earned=0, possible=0, weight=1, graded=True, section="summary"))
self.assertEqual(all, Score(earned=0, possible=5, graded=False, section="summary"))
self.assertEqual(graded, Score(earned=0, possible=0, graded=True, section="summary"))
scores.append(Score(earned=3, possible=5, weight=1, graded=True, section="summary"))
scores.append(Score(earned=3, possible=5, graded=True, section="summary"))
all, graded = aggregate_scores(scores)
self.assertAlmostEqual(all, Score(earned=3.0/5, possible=2, weight=1, graded=False, section="summary"))
self.assertAlmostEqual(graded, Score(earned=3.0/5, possible=1, weight=1, graded=True, section="summary"))
self.assertAlmostEqual(all, Score(earned=3, possible=10, graded=False, section="summary"))
self.assertAlmostEqual(graded, Score(earned=3, possible=5, graded=True, section="summary"))
scores.append(Score(earned=2, possible=5, weight=2, graded=True, section="summary"))
scores.append(Score(earned=2, possible=5, graded=True, section="summary"))
all, graded = aggregate_scores(scores)
self.assertAlmostEqual(all, Score(earned=7.0/5, possible=4, weight=1, graded=False, section="summary"))
self.assertAlmostEqual(graded, Score(earned=7.0/5, possible=3, weight=1, graded=True, section="summary"))
self.assertAlmostEqual(all, Score(earned=5, possible=15, graded=False, section="summary"))
self.assertAlmostEqual(graded, Score(earned=5, possible=10, graded=True, section="summary"))
class GraderTest(unittest.TestCase):
scores.append(Score(earned=2, possible=5, weight=0, graded=True, section="summary"))
all, graded = aggregate_scores(scores)
self.assertAlmostEqual(all, Score(earned=7.0/5, possible=4, weight=1, graded=False, section="summary"))
self.assertAlmostEqual(graded, Score(earned=7.0/5, possible=3, weight=1, graded=True, section="summary"))
empty_gradesheet = {
}
incomplete_gradesheet = {
'Homework': [],
'Lab': [],
'Midterm' : [],
}
test_gradesheet = {
'Homework': [Score(earned=2, possible=20.0, graded=True, section='hw1'),
Score(earned=16, possible=16.0, graded=True, section='hw2')],
#The dropped scores should be from the assignments that don't exist yet
'Lab': [Score(earned=1, possible=2.0, graded=True, section='lab1'), #Dropped
Score(earned=1, possible=1.0, graded=True, section='lab2'),
Score(earned=1, possible=1.0, graded=True, section='lab3'),
Score(earned=5, possible=25.0, graded=True, section='lab4'), #Dropped
Score(earned=3, possible=4.0, graded=True, section='lab5'), #Dropped
Score(earned=6, possible=7.0, graded=True, section='lab6'),
Score(earned=5, possible=6.0, graded=True, section='lab7')],
'Midterm' : [Score(earned=50.5, possible=100, graded=True, section="Midterm Exam"),],
}
def test_SingleSectionGrader(self):
midtermGrader = graders.SingleSectionGrader("Midterm", "Midterm Exam")
lab4Grader = graders.SingleSectionGrader("Lab", "lab4")
badLabGrader = graders.SingleSectionGrader("Lab", "lab42")
for graded in [midtermGrader.grade(self.empty_gradesheet),
midtermGrader.grade(self.incomplete_gradesheet),
badLabGrader.grade(self.test_gradesheet)]:
self.assertEqual( len(graded['section_breakdown']), 1 )
self.assertEqual( graded['percent'], 0.0 )
graded = midtermGrader.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.505 )
self.assertEqual( len(graded['section_breakdown']), 1 )
graded = lab4Grader.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.2 )
self.assertEqual( len(graded['section_breakdown']), 1 )
def test_AssignmentFormatGrader(self):
homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2)
noDropGrader = graders.AssignmentFormatGrader("Homework", 12, 0)
#Even though the minimum number is 3, this should grade correctly when 7 assignments are found
overflowGrader = graders.AssignmentFormatGrader("Lab", 3, 2)
labGrader = graders.AssignmentFormatGrader("Lab", 7, 3)
#Test the grading of an empty gradesheet
for graded in [ homeworkGrader.grade(self.empty_gradesheet),
noDropGrader.grade(self.empty_gradesheet),
homeworkGrader.grade(self.incomplete_gradesheet),
noDropGrader.grade(self.incomplete_gradesheet) ]:
self.assertAlmostEqual( graded['percent'], 0.0 )
#Make sure the breakdown includes 12 sections, plus one summary
self.assertEqual( len(graded['section_breakdown']), 12 + 1 )
graded = homeworkGrader.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.11 ) # (100% + 10%) / 10 assignments
self.assertEqual( len(graded['section_breakdown']), 12 + 1 )
graded = noDropGrader.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.0916666666666666 ) # (100% + 10%) / 12 assignments
self.assertEqual( len(graded['section_breakdown']), 12 + 1 )
graded = overflowGrader.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.8880952380952382 ) # the average of the 5 best of 7 lab scores
self.assertEqual( len(graded['section_breakdown']), 7 + 1 )
graded = labGrader.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.9226190476190477 )
self.assertEqual( len(graded['section_breakdown']), 7 + 1 )
def test_WeightedSubsectionsGrader(self):
#First, a few sub graders
homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2)
labGrader = graders.AssignmentFormatGrader("Lab", 7, 3)
midtermGrader = graders.SingleSectionGrader("Midterm", "Midterm Exam")
weightedGrader = graders.WeightedSubsectionsGrader( [(homeworkGrader, homeworkGrader.category, 0.25), (labGrader, labGrader.category, 0.25),
(midtermGrader, midtermGrader.category, 0.5)] )
overOneWeightsGrader = graders.WeightedSubsectionsGrader( [(homeworkGrader, homeworkGrader.category, 0.5), (labGrader, labGrader.category, 0.5),
(midtermGrader, midtermGrader.category, 0.5)] )
#The midterm should have all weight on this one
zeroWeightsGrader = graders.WeightedSubsectionsGrader( [(homeworkGrader, homeworkGrader.category, 0.0), (labGrader, labGrader.category, 0.0),
(midtermGrader, midtermGrader.category, 0.5)] )
#This should always have a final percent of zero
allZeroWeightsGrader = graders.WeightedSubsectionsGrader( [(homeworkGrader, homeworkGrader.category, 0.0), (labGrader, labGrader.category, 0.0),
(midtermGrader, midtermGrader.category, 0.0)] )
emptyGrader = graders.WeightedSubsectionsGrader( [] )
graded = weightedGrader.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.5106547619047619 )
self.assertEqual( len(graded['section_breakdown']), (12 + 1) + (7+1) + 1 )
self.assertEqual( len(graded['grade_breakdown']), 3 )
graded = overOneWeightsGrader.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.7688095238095238 )
self.assertEqual( len(graded['section_breakdown']), (12 + 1) + (7+1) + 1 )
self.assertEqual( len(graded['grade_breakdown']), 3 )
graded = zeroWeightsGrader.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.2525 )
self.assertEqual( len(graded['section_breakdown']), (12 + 1) + (7+1) + 1 )
self.assertEqual( len(graded['grade_breakdown']), 3 )
graded = allZeroWeightsGrader.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.0 )
self.assertEqual( len(graded['section_breakdown']), (12 + 1) + (7+1) + 1 )
self.assertEqual( len(graded['grade_breakdown']), 3 )
for graded in [ weightedGrader.grade(self.empty_gradesheet),
weightedGrader.grade(self.incomplete_gradesheet),
zeroWeightsGrader.grade(self.empty_gradesheet),
allZeroWeightsGrader.grade(self.empty_gradesheet)]:
self.assertAlmostEqual( graded['percent'], 0.0 )
self.assertEqual( len(graded['section_breakdown']), (12 + 1) + (7+1) + 1 )
self.assertEqual( len(graded['grade_breakdown']), 3 )
graded = emptyGrader.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.0 )
self.assertEqual( len(graded['section_breakdown']), 0 )
self.assertEqual( len(graded['grade_breakdown']), 0 )
scores.append(Score(earned=2, possible=5, weight=3, graded=False, section="summary"))
all, graded = aggregate_scores(scores)
self.assertAlmostEqual(all, Score(earned=13.0/5, possible=7, weight=1, graded=False, section="summary"))
self.assertAlmostEqual(graded, Score(earned=7.0/5, possible=3, weight=1, graded=True, section="summary"))
def test_graderFromConf(self):
#Confs always produce a WeightedSubsectionsGrader, so we test this by repeating the test
#from test_WeightedSubsectionsGrader, but generating the graders from confs.
weightedGrader = graders.grader_from_conf([
{
'type' : "Homework",
'min_count' : 12,
'drop_count' : 2,
'short_label' : "HW",
'weight' : 0.25,
},
{
'type' : "Lab",
'min_count' : 7,
'drop_count' : 3,
'category' : "Labs",
'weight' : 0.25
},
{
'type' : "Midterm",
'name' : "Midterm Exam",
'short_label' : "Midterm",
'weight' : 0.5,
},
])
emptyGrader = graders.grader_from_conf([])
graded = weightedGrader.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.5106547619047619 )
self.assertEqual( len(graded['section_breakdown']), (12 + 1) + (7+1) + 1 )
self.assertEqual( len(graded['grade_breakdown']), 3 )
graded = emptyGrader.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.0 )
self.assertEqual( len(graded['section_breakdown']), 0 )
self.assertEqual( len(graded['grade_breakdown']), 0 )
#Test that graders can also be used instead of lists of dictionaries
homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2)
homeworkGrader2 = graders.grader_from_conf(homeworkGrader)
graded = homeworkGrader2.grade(self.test_gradesheet)
self.assertAlmostEqual( graded['percent'], 0.11 )
self.assertEqual( len(graded['section_breakdown']), 12 + 1 )
#TODO: How do we test failure cases? The parser only logs an error when it can't parse something. Maybe it should throw exceptions?
scores.append(Score(earned=2, possible=5, weight=.5, graded=True, section="summary"))
all, graded = aggregate_scores(scores)
self.assertAlmostEqual(all, Score(earned=14.0/5, possible=7.5, weight=1, graded=False, section="summary"))
self.assertAlmostEqual(graded, Score(earned=8.0/5, possible=3.5, weight=1, graded=True, section="summary"))
@@ -8,7 +8,8 @@
<style type="text/css">
.grade_a {color:green;}
.grade_b {color:Chocolate;}
.grade_c {color:DimGray;}
.grade_c {color:DarkSlateGray;}
.grade_f {color:DimGray;}
.grade_none {color:LightGray;}
</style>
@@ -29,16 +30,10 @@
<tr> <!-- Header Row -->
<th>Student</th>
%for section in templateSummary:
%if 'subscores' in section:
%for subsection in section['subscores']:
<th>${subsection['label']}</th>
%endfor
<th>${section['totallabel']}</th>
%else:
<th>${section['category']}</th>
%endif
%for section in templateSummary['section_breakdown']:
<th>${section['label']}</th>
%endfor
<th>Total</th>
</tr>
<%def name="percent_data(percentage)">
@@ -50,6 +45,8 @@
data_class = "grade_b"
elif percentage > .6:
data_class = "grade_c"
elif percentage > 0:
data_class = "grade_f"
%>
<td class="${data_class}">${ "{0:.0%}".format( percentage ) }</td>
</%def>
@@ -57,16 +54,10 @@
%for student in students:
<tr>
<td><a href="/profile/${student['id']}/">${student['username']}</a></td>
%for section in student['grade_info']['grade_summary']:
%if 'subscores' in section:
%for subsection in section['subscores']:
${percent_data( subsection['percentage'] )}
%endfor
${percent_data( section['totalscore'] )}
%else:
${percent_data( section['totalscore'] )}
%endif
%for section in student['grade_info']['grade_summary']['section_breakdown']:
${percent_data( section['percent'] )}
%endfor
<th>${percent_data( student['grade_info']['grade_summary']['percent'])}</th>
</tr>
%endfor
</table>
@@ -150,11 +150,11 @@ $(function() {
<%
earned = section['section_total'].earned
total = section['section_total'].possible
percentageString = "{0:.0%}".format( float(earned)/total) if earned > 0 else ""
percentageString = "{0:.0%}".format( float(earned)/total) if earned > 0 and total > 0 else ""
%>
<h3><a href="${reverse('courseware_section', args=format_url_params([chapter['course'], chapter['chapter'], section['section']])) }">
${ section['section'] }</a> ${"({0:g}/{1:g}) {2}".format( earned, total, percentageString )}</h3>
${ section['section'] }</a> ${"({0:.3n}/{1:.3n}) {2}".format( float(earned), float(total), percentageString )}</h3>
${section['subtitle']}
%if 'due' in section and section['due']!="":
due ${section['due']}
@@ -164,7 +164,7 @@ $(function() {
<ol class="scores">
${ "Problem Scores: " if section['graded'] else "Practice Scores: "}
%for score in section['scores']:
<li class="score">${"{0:g}/{1:g}".format(score.earned,score.possible)}</li>
<li class="score">${"{0:.3n}/{1:.3n}".format(float(score.earned),float(score.possible))}</li>
%endfor
</ol>
%endif
@@ -9,7 +9,7 @@ $(function () {
position: 'absolute',
display: 'none',
top: y + 5,
left: x + 5,
left: x + 15,
border: '1px solid #000',
padding: '4px 6px',
color: '#fff',
@@ -19,96 +19,81 @@
}
/* -------------------------------- Grade detail bars -------------------------------- */
<%
colors = ["#b72121", "#600101", "#666666", "#333333"]
categories = {}
tickIndex = 1
sectionSpacer = 0.5
sectionSpacer = 0.25
sectionIndex = 0
series = []
ticks = [] #These are the indices and x-axis labels for the data
bottomTicks = [] #Labels on the bottom
detail_tooltips = {} #This is a dictionary mapping from 'section' -> array of detail_tooltips
droppedScores = [] #These are the datapoints to indicate assignments which aren't factored into the total score
droppedScores = [] #These are the datapoints to indicate assignments which are not factored into the total score
dropped_score_tooltips = []
for section in grade_summary:
if 'subscores' in section: ##This is for sections like labs or homeworks, with several smaller components and a total
series.append({
'label' : section['category'],
'data' : [[i + tickIndex, score['percentage']] for i,score in enumerate(section['subscores'])],
'color' : colors[sectionIndex]
})
ticks += [[i + tickIndex, score['label'] ] for i,score in enumerate(section['subscores'])]
bottomTicks.append( [tickIndex + len(section['subscores'])/2, section['category']] )
detail_tooltips[ section['category'] ] = [score['summary'] for score in section['subscores']]
droppedScores += [[tickIndex + index, 0.05] for index in section['dropped_indices']]
dropExplanation = "The lowest {0} {1} scores are dropped".format( len(section['dropped_indices']), section['category'] )
dropped_score_tooltips += [dropExplanation] * len(section['dropped_indices'])
tickIndex += len(section['subscores']) + sectionSpacer
category_total_label = section['category'] + " Total"
series.append({
'label' : category_total_label,
'data' : [ [tickIndex, section['totalscore']] ],
'color' : colors[sectionIndex]
})
ticks.append( [tickIndex, section['totallabel']] )
detail_tooltips[category_total_label] = [section['totalscore_summary']]
else:
series.append({
'label' : section['category'],
'data' : [ [tickIndex, section['totalscore']] ],
'color' : colors[sectionIndex]
})
ticks.append( [tickIndex, section['totallabel']] )
detail_tooltips[section['category']] = [section['totalscore_summary']]
for section in grade_summary['section_breakdown']:
if section.get('prominent', False):
tickIndex += sectionSpacer
if section['category'] not in categories:
colorIndex = len(categories) % len(colors)
categories[ section['category'] ] = {'label' : section['category'],
'data' : [],
'color' : colors[colorIndex]}
categoryData = categories[ section['category'] ]
categoryData['data'].append( [tickIndex, section['percent']] )
ticks.append( [tickIndex, section['label'] ] )
if section['category'] in detail_tooltips:
detail_tooltips[ section['category'] ].append( section['detail'] )
else:
detail_tooltips[ section['category'] ] = [ section['detail'], ]
if 'mark' in section:
droppedScores.append( [tickIndex, 0.05] )
dropped_score_tooltips.append( section['mark']['detail'] )
tickIndex += 1 + sectionSpacer
sectionIndex += 1
detail_tooltips['Dropped Scores'] = dropped_score_tooltips
tickIndex += 1
if section.get('prominent', False):
tickIndex += sectionSpacer
## ----------------------------- Grade overview bar ------------------------- ##
totalWeight = 0.0
sectionIndex = 0
totalScore = 0.0
tickIndex += sectionSpacer
series = categories.values()
overviewBarX = tickIndex
for section in grade_summary:
weighted_score = section['totalscore'] * section['weight']
summary_text = "{0} - {1:.1%} of a possible {2:.0%}".format(section['category'], weighted_score, section['weight'])
weighted_category_label = section['category'] + " - Weighted"
if section['totalscore'] > 0:
extraColorIndex = len(categories) #Keeping track of the next color to use for categories not in categories[]
for section in grade_summary['grade_breakdown']:
if section['percent'] > 0:
if section['category'] in categories:
color = categories[ section['category'] ]['color']
else:
color = colors[ extraColorIndex % len(colors) ]
extraColorIndex += 1
series.append({
'label' : weighted_category_label,
'data' : [ [overviewBarX, weighted_score] ],
'color' : colors[sectionIndex]
'label' : section['category'] + "-grade_breakdown",
'data' : [ [overviewBarX, section['percent']] ],
'color' : color
})
detail_tooltips[weighted_category_label] = [ summary_text ]
sectionIndex += 1
totalWeight += section['weight']
totalScore += section['totalscore'] * section['weight']
detail_tooltips[section['category'] + "-grade_breakdown"] = [ section['detail'] ]
ticks += [ [overviewBarX, "Total"] ]
tickIndex += 1 + sectionSpacer
totalScore = grade_summary['percent']
detail_tooltips['Dropped Scores'] = dropped_score_tooltips
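# For reference, the structures built above end up shaped roughly like this
# (a two-assignment example with invented labels and numbers):
#   series          = [{'label': "Homework", 'color': "#b72121", 'data': [[1, 0.83], [2, 0.5]]}]
#   ticks           = [[1, "HW 01"], [2, "HW 02"]]
#   detail_tooltips = {'Homework': ["Homework 1 - Ohms Law - 83% (5/6)", "..."]}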
%>
var series = ${ json.dumps(series) };
var series = ${ json.dumps( series ) };
var ticks = ${ json.dumps(ticks) };
var bottomTicks = ${ json.dumps(bottomTicks) };
var detail_tooltips = ${ json.dumps(detail_tooltips) };
@@ -132,7 +117,7 @@ $(function () {
var $grade_detail_graph = $("#${graph_div_id}");
if ($grade_detail_graph.length > 0) {
var plot = $.plot($grade_detail_graph, series, options);
//We need to put back the plotting of the percent here
var o = plot.pointOffset({x: ${overviewBarX} , y: ${totalScore}});
$grade_detail_graph.append('<div style="position:absolute;left:' + (o.left - 12) + 'px;top:' + (o.top - 20) + 'px">${"{totalscore:.0%}".format(totalscore=totalScore)}</div>');
}