Commit eff95c26 by David Ormsbee

Merge pull request #15 from MITx/weighted_grading

added weighting system for the midterm

It'd be nice to move the Score.__sub__ definition as we discussed, but that shouldn't hold up merging the rest, so it can be put on the staging environments and tested more thoroughly.
parents a71340d3 7853af54
@@ -12,7 +12,7 @@ from student.models import UserProfile
 log = logging.getLogger("mitx.courseware")
 
-Score = namedtuple("Score", "earned possible graded section")
+Score = namedtuple("Score", "earned possible weight graded section")
 
 def get_grade(user, problem, cache):
     ## HACK: assumes max score is fixed per problem
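Reviewer note: Score gains a `weight` field between `possible` and `graded`, so every call site has to move to the five-argument form. A minimal sketch of the change in shape (the section name is illustrative):

```python
from collections import namedtuple

Score = namedtuple("Score", "earned possible weight graded section")

# Old four-field call: Score(3, 5, True, "Homework 1")
# New five-field call; grade_sheet defaults the weight to 1 when the
# course XML omits it, via float(p.get("weight", 1)).
s = Score(earned=3, possible=5, weight=1.0, graded=True, section="Homework 1")
```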
@@ -100,20 +100,9 @@ def grade_sheet(student):
             correct = random.randrange( max(total-2, 1) , total + 1 )
         else:
             correct = total
-        scores.append( Score(int(correct),total, graded, s.get("name")) )
+        scores.append( Score(int(correct),total, float(p.get("weight", 1)), graded, p.get("name")) )
 
-    section_total = Score(sum([score.earned for score in scores]),
-                          sum([score.possible for score in scores]),
-                          False,
-                          p.get("id"))
-    graded_total = Score(sum([score.earned for score in scores if score.graded]),
-                         sum([score.possible for score in scores if score.graded]),
-                         True,
-                         p.get("id"))
+    section_total, graded_total = aggregate_scores(scores)
 
     #Add the graded total to totaled_scores
     format = s.get('format') if s.get('format') else ""
     subtitle = s.get('subtitle') if s.get('subtitle') else format
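Reviewer note: this is a semantic change, not just a refactor. The old inline sums totaled raw points across problems; aggregate_scores normalizes each problem to a fraction before weighting. A small comparison with illustrative numbers:

```python
scores = [Score(earned=3, possible=5, weight=1.0, graded=True, section="p1"),
          Score(earned=2, possible=5, weight=2.0, graded=True, section="p2")]

# Old behavior: raw point totals, weight never consulted.
old_earned = sum(s.earned for s in scores)      # 5
old_possible = sum(s.possible for s in scores)  # 10 -> 50%

# New behavior: per-problem fraction scaled by weight.
new_earned = sum((s.earned * 1.0 / s.possible) * s.weight for s in scores)  # 0.6 + 0.8 = 1.4
new_possible = sum(s.weight for s in scores)                                # 3.0 -> ~46.7%
```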
@@ -136,12 +125,30 @@ def grade_sheet(student):
             'chapter' : c.get("name"),
             'sections' : sections,})
     grade_summary = grade_summary_6002x(totaled_scores)
-    return {'courseware_summary' : chapters,
-            'grade_summary' : grade_summary}
+    return {'courseware_summary' : chapters, #all assessments as they appear in the course definition
+            'grade_summary' : grade_summary, #graded assessments only
+            }
+
+def aggregate_scores(scores):
+    total_correct_graded = sum((score.earned*1.0/score.possible)*score.weight for score in scores if score.graded)
+    total_possible_graded = sum(score.weight for score in scores if score.graded)
+    total_correct = sum((score.earned*1.0/score.possible)*score.weight for score in scores)
+    total_possible = sum(score.weight for score in scores)
+
+    #regardless of whether or not it is graded
+    all_total = Score(total_correct,
+                      total_possible,
+                      1,
+                      False,
+                      "summary")
+
+    #selecting only graded things
+    graded_total = Score(total_correct_graded,
+                         total_possible_graded,
+                         1,
+                         True,
+                         "summary")
+
+    return all_total, graded_total
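Reviewer note: a quick check of aggregate_scores against a mixed list (values illustrative; this mirrors the new test below). One caveat: because each term divides by score.possible, a Score with possible == 0 would raise ZeroDivisionError here.

```python
scores = [Score(earned=3, possible=5, weight=1.0, graded=True,  section="HW1"),
          Score(earned=2, possible=5, weight=2.0, graded=True,  section="HW2"),
          Score(earned=2, possible=5, weight=3.0, graded=False, section="practice")]

all_total, graded_total = aggregate_scores(scores)
# all_total    -> earned = 0.6 + 0.8 + 1.2 = 2.6, possible = 6.0 (every score counts)
# graded_total -> earned = 0.6 + 0.8       = 1.4, possible = 3.0 (graded scores only)
```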
 def grade_summary_6002x(totaled_scores):
     """
@@ -210,11 +217,12 @@ def grade_summary_6002x(totaled_scores):
     #TODO: Pull this data about the midterm and final from the database. It should be exactly similar to above, but we aren't sure how exams will be done yet.
-    midterm_score = Score('?', '?', True, "?")
-    midterm_percentage = 0
-    final_score = Score('?', '?', True, "?")
-    final_percentage = 0
+    #This is a hack, but I have no intention of having this function be useful for anything but 6.002x anyway, so I don't want to make it pretty.
+    midterm_score = totaled_scores['Midterm'][0] if 'Midterm' in totaled_scores else Score('?', '?', '?', True, "?")
+    midterm_percentage = midterm_score.earned * 1.0 / midterm_score.possible if 'Midterm' in totaled_scores else 0
+    final_score = totaled_scores['Final'][0] if 'Final' in totaled_scores else Score('?', '?', '?', True, "?")
+    final_percentage = final_score.earned * 1.0 / final_score.possible if 'Final' in totaled_scores else 0
     if settings.GENERATE_PROFILE_SCORES:
         midterm_score = Score(random.randrange(50, 150), 150, True, "?")
...
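Reviewer note: the Midterm/Final lookup assumes totaled_scores maps a section format to a list of graded Score totals and takes the first entry; a sketch of the assumed shape (contents illustrative). Also note that the GENERATE_PROFILE_SCORES branch above still builds a four-field Score, which will raise a TypeError now that the namedtuple has five fields.

```python
# Assumed shape of totaled_scores (illustrative):
totaled_scores = {'Midterm': [Score(earned=1.4, possible=2.0, weight=1, graded=True, section="summary")]}

midterm_score = totaled_scores['Midterm'][0] if 'Midterm' in totaled_scores else Score('?', '?', '?', True, "?")
midterm_percentage = midterm_score.earned * 1.0 / midterm_score.possible if 'Midterm' in totaled_scores else 0
# midterm_percentage == 0.7
```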
@@ -71,7 +71,9 @@ class Module(XModule):
     def get_problem_html(self, encapsulate=True):
         html = self.lcp.get_html()
-        content={'name':self.name,
-                 'html':html}
+        content={'name':self.name,
+                 'html':html,
+                 'weight': self.weight,
+                 }
 
         # We're using strings as truthy values, because the terminology of the check button
         # is context-specific.
@@ -136,7 +138,7 @@ class Module(XModule):
             self.max_attempts = None
         dom2 = etree.fromstring(xml)
         self.explanation=content_parser.item(dom2.xpath('/problem/@explain'), default="closed")
         self.explain_available=content_parser.item(dom2.xpath('/problem/@explain_available'))
@@ -186,6 +188,7 @@ class Module(XModule):
         self.filename=content_parser.item(dom2.xpath('/problem/@filename'))
         filename=settings.DATA_DIR+"/problems/"+self.filename+".xml"
         self.name=content_parser.item(dom2.xpath('/problem/@name'))
+        self.weight=content_parser.item(dom2.xpath('/problem/@weight'))
         if self.rerandomize == 'never':
             seed = 1
         else:
...
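Reviewer note: the display weight comes from a weight attribute on the problem root element. A sketch of the expected markup and the raw lxml lookup behind content_parser.item (the XML is illustrative, and I'm assuming content_parser.item unwraps the single-element xpath result; note the attribute comes back as a string, which the template prints as-is):

```python
from lxml import etree

xml = '<problem name="Example Problem" weight="2">...</problem>'
dom2 = etree.fromstring(xml)
dom2.xpath('/problem/@weight')   # ['2']
```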
@@ -4,6 +4,7 @@ import numpy
 import courseware.modules
 import courseware.capa.calc as calc
+from grades import Score, aggregate_scores
 
 class ModelsTest(unittest.TestCase):
     def setUp(self):
@@ -53,3 +54,42 @@ class ModelsTest(unittest.TestCase):
             exception_happened = True
         self.assertTrue(exception_happened)
+
+class GraderTest(unittest.TestCase):
+    def test_weighted_grading(self):
+        scores = []
+        Score.__sub__ = lambda me, other: (me.earned - other.earned) + (me.possible - other.possible)
+
+        all, graded = aggregate_scores(scores)
+        self.assertEqual(all, Score(earned=0, possible=0, weight=1, graded=False, section="summary"))
+        self.assertEqual(graded, Score(earned=0, possible=0, weight=1, graded=True, section="summary"))
+
+        scores.append(Score(earned=0, possible=5, weight=1, graded=False, section="summary"))
+        all, graded = aggregate_scores(scores)
+        self.assertEqual(all, Score(earned=0, possible=1, weight=1, graded=False, section="summary"))
+        self.assertEqual(graded, Score(earned=0, possible=0, weight=1, graded=True, section="summary"))
+
+        scores.append(Score(earned=3, possible=5, weight=1, graded=True, section="summary"))
+        all, graded = aggregate_scores(scores)
+        self.assertAlmostEqual(all, Score(earned=3.0/5, possible=2, weight=1, graded=False, section="summary"))
+        self.assertAlmostEqual(graded, Score(earned=3.0/5, possible=1, weight=1, graded=True, section="summary"))
+
+        scores.append(Score(earned=2, possible=5, weight=2, graded=True, section="summary"))
+        all, graded = aggregate_scores(scores)
+        self.assertAlmostEqual(all, Score(earned=7.0/5, possible=4, weight=1, graded=False, section="summary"))
+        self.assertAlmostEqual(graded, Score(earned=7.0/5, possible=3, weight=1, graded=True, section="summary"))
+
+        scores.append(Score(earned=2, possible=5, weight=0, graded=True, section="summary"))
+        all, graded = aggregate_scores(scores)
+        self.assertAlmostEqual(all, Score(earned=7.0/5, possible=4, weight=1, graded=False, section="summary"))
+        self.assertAlmostEqual(graded, Score(earned=7.0/5, possible=3, weight=1, graded=True, section="summary"))
+
+        scores.append(Score(earned=2, possible=5, weight=3, graded=False, section="summary"))
+        all, graded = aggregate_scores(scores)
+        self.assertAlmostEqual(all, Score(earned=13.0/5, possible=7, weight=1, graded=False, section="summary"))
+        self.assertAlmostEqual(graded, Score(earned=7.0/5, possible=3, weight=1, graded=True, section="summary"))
+
+        scores.append(Score(earned=2, possible=5, weight=.5, graded=True, section="summary"))
+        all, graded = aggregate_scores(scores)
+        self.assertAlmostEqual(all, Score(earned=14.0/5, possible=7.5, weight=1, graded=False, section="summary"))
+        self.assertAlmostEqual(graded, Score(earned=8.0/5, possible=3.5, weight=1, graded=True, section="summary"))
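Reviewer note, on the Score.__sub__ point from the description: assertAlmostEqual(a, b) passes when round(a - b, 7) == 0, and namedtuples don't define subtraction, so the test monkeypatches a scalar difference onto the shared class. A hypothetical helper that compares fields directly would avoid mutating Score for every other user of the class:

```python
# Hypothetical replacement for the Score.__sub__ monkeypatch: compare the
# fields of interest directly instead of teaching namedtuples to subtract.
def assertScoreAlmostEqual(testcase, actual, expected):
    testcase.assertAlmostEqual(actual.earned, expected.earned)
    testcase.assertAlmostEqual(actual.possible, expected.possible)
    testcase.assertEqual(actual.graded, expected.graded)
```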
-<h2>${ problem['name'] }</h2>
+<h2 class="problem-header">${ problem['name'] }
+% if problem['weight']:
+    : ${ problem['weight'] } points
+% endif
+</h2>
 <section class="problem">
     ${ problem['html'] }
...
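Reviewer note: to sanity-check the template change, a sketch rendering the new heading directly with Mako (the template text is taken from the diff; the problem values are illustrative):

```python
from mako.template import Template

header = Template("""<h2 class="problem-header">${ problem['name'] }
% if problem['weight']:
    : ${ problem['weight'] } points
% endif
</h2>""")

print(header.render(problem={'name': 'Example Problem', 'weight': '2'}))
# <h2 class="problem-header">Example Problem
#     : 2 points
# </h2>
```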