diff --git a/djangoapps/courseware/grades.py b/djangoapps/courseware/grades.py
index c7e6714..144f926 100644
--- a/djangoapps/courseware/grades.py
+++ b/djangoapps/courseware/grades.py
@@ -12,7 +12,7 @@ from student.models import UserProfile
 
 log = logging.getLogger("mitx.courseware")
 
-Score = namedtuple("Score", "earned possible graded section")
+Score = namedtuple("Score", "earned possible weight graded section")
 
 def get_grade(user, problem, cache):
     ## HACK: assumes max score is fixed per problem
@@ -100,20 +100,10 @@ def grade_sheet(student):
                             correct = random.randrange( max(total-2, 1) , total + 1 )
                         else:
                             correct = total
-                    
-                    scores.append( Score(int(correct),total, graded, s.get("name")) )
-
-                
-                section_total = Score(sum([score.earned for score in scores]), 
-                                        sum([score.possible for score in scores]),
-                                        False,
-                                        p.get("id"))
-
-                graded_total = Score(sum([score.earned for score in scores if score.graded]), 
-                                sum([score.possible for score in scores if score.graded]),
-                                True,
-                                p.get("id"))
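+                    # weight defaults to 1; aggregate_scores normalizes each problem by earned/possible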
+                    scores.append(Score(int(correct), total, float(p.get("weight", 1)), graded, p.get("name")))
 
+                section_total, graded_total = aggregate_scores(scores)
                 #Add the graded total to totaled_scores
                 format = s.get('format') if s.get('format') else ""
                 subtitle = s.get('subtitle') if s.get('subtitle') else format
@@ -136,12 +125,37 @@
                          'chapter' : c.get("name"),
                          'sections' : sections,})
     
-    
     grade_summary = grade_summary_6002x(totaled_scores)
-    
-    return {'courseware_summary' : chapters,
-            'grade_summary' : grade_summary}
+    return {'courseware_summary' : chapters, # all assessments as they appear in the course definition
+            'grade_summary' : grade_summary, # graded assessments only
+            }
+
+def aggregate_scores(scores):
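+    """Aggregate a list of Scores into (all_total, graded_total).
+
+    Each Score contributes earned/possible of its weight, so e.g.
+    Score(earned=3, possible=5, weight=2, ...) adds 1.2 points out of a
+    possible 2.  graded_total counts only Scores with graded=True.
+    Assumes every Score has possible > 0.
+    """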
+    total_correct_graded = sum((score.earned*1.0/score.possible)*score.weight for score in scores if score.graded)
+    total_possible_graded = sum(score.weight for score in scores if score.graded)
+    total_correct = sum((score.earned*1.0/score.possible)*score.weight for score in scores)
+    total_possible = sum(score.weight for score in scores)
+    # every score, regardless of whether it is graded
+    all_total = Score(total_correct,
+                      total_possible,
+                      1,
+                      False,
+                      "summary")
+    # only the graded scores
+    graded_total = Score(total_correct_graded,
+                         total_possible_graded,
+                         1,
+                         True,
+                         "summary")
 
+    return all_total, graded_total
 
 def grade_summary_6002x(totaled_scores):
     """
@@ -210,11 +217,12 @@ def grade_summary_6002x(totaled_scores):
     
     
-    #TODO: Pull this data about the midterm and final from the databse. It should be exactly similar to above, but we aren't sure how exams will be done yet.
+    #TODO: Pull this data about the midterm and final from the database. It should work much like the section totals above, but we aren't sure how exams will be done yet.
-    midterm_score = Score('?', '?', True, "?")
-    midterm_percentage = 0
+    #This is a hack, but I have no intention of having this function be useful for anything but 6.002x anyway, so I don't want to make it pretty.
+    midterm_score = totaled_scores['Midterm'][0] if 'Midterm' in totaled_scores else Score('?', '?', '?', True, "?")
+    midterm_percentage = midterm_score.earned * 1.0 / midterm_score.possible if 'Midterm' in totaled_scores else 0
     
-    final_score = Score('?', '?', True, "?")
-    final_percentage = 0
+    final_score = totaled_scores['Final'][0] if 'Final' in totaled_scores else Score('?', '?', '?', True, "?")
+    final_percentage = final_score.earned * 1.0 / final_score.possible if 'Final' in totaled_scores else 0
     
     if settings.GENERATE_PROFILE_SCORES:
-        midterm_score = Score(random.randrange(50, 150), 150, True, "?")
+        midterm_score = Score(random.randrange(50, 150), 150, 1, True, "?")
diff --git a/djangoapps/courseware/modules/capa_module.py b/djangoapps/courseware/modules/capa_module.py
index 71f76fa..86958dc 100644
--- a/djangoapps/courseware/modules/capa_module.py
+++ b/djangoapps/courseware/modules/capa_module.py
@@ -71,7 +71,9 @@ class Module(XModule):
     def get_problem_html(self, encapsulate=True):
         html = self.lcp.get_html()
         content={'name':self.name, 
-                 'html':html}
+                 'html':html, 
+                 'weight': self.weight,
+                 }
         
         # We using strings as truthy values, because the terminology of the check button
         # is context-specific.
@@ -186,6 +188,8 @@
         self.filename=content_parser.item(dom2.xpath('/problem/@filename'))
         filename=settings.DATA_DIR+"/problems/"+self.filename+".xml"
         self.name=content_parser.item(dom2.xpath('/problem/@name'))
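+        # the raw @weight attribute; shown after the problem name by templates/problem.html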
+        self.weight=content_parser.item(dom2.xpath('/problem/@weight'))
         if self.rerandomize == 'never':
             seed = 1
         else:
diff --git a/djangoapps/courseware/tests.py b/djangoapps/courseware/tests.py
index 2b2b354..7eb6aa2 100644
--- a/djangoapps/courseware/tests.py
+++ b/djangoapps/courseware/tests.py
@@ -4,6 +4,7 @@ import numpy
 
 import courseware.modules
 import courseware.capa.calc as calc
+from courseware.grades import Score, aggregate_scores
 
 class ModelsTest(unittest.TestCase):
     def setUp(self):
@@ -53,3 +54,44 @@
             exception_happened = True
         self.assertTrue(exception_happened)
 
+class GraderTest(unittest.TestCase):
+
+    def test_weighted_grading(self):
+        scores = []
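+        # assertAlmostEqual compares via round(a - b, places), so give Score
+        # a crude __sub__ that sums the earned and possible deltas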
+        Score.__sub__ = lambda me, other: (me.earned - other.earned) + (me.possible - other.possible)
+
+        all_total, graded = aggregate_scores(scores)
+        self.assertEqual(all_total, Score(earned=0, possible=0, weight=1, graded=False, section="summary"))
+        self.assertEqual(graded, Score(earned=0, possible=0, weight=1, graded=True, section="summary"))
+
+        scores.append(Score(earned=0, possible=5, weight=1, graded=False, section="summary"))
+        all_total, graded = aggregate_scores(scores)
+        self.assertEqual(all_total, Score(earned=0, possible=1, weight=1, graded=False, section="summary"))
+        self.assertEqual(graded, Score(earned=0, possible=0, weight=1, graded=True, section="summary"))
+
+        scores.append(Score(earned=3, possible=5, weight=1, graded=True, section="summary"))
+        all_total, graded = aggregate_scores(scores)
+        self.assertAlmostEqual(all_total, Score(earned=3.0/5, possible=2, weight=1, graded=False, section="summary"))
+        self.assertAlmostEqual(graded, Score(earned=3.0/5, possible=1, weight=1, graded=True, section="summary"))
+
+        scores.append(Score(earned=2, possible=5, weight=2, graded=True, section="summary"))
+        all_total, graded = aggregate_scores(scores)
+        self.assertAlmostEqual(all_total, Score(earned=7.0/5, possible=4, weight=1, graded=False, section="summary"))
+        self.assertAlmostEqual(graded, Score(earned=7.0/5, possible=3, weight=1, graded=True, section="summary"))
+
+        scores.append(Score(earned=2, possible=5, weight=0, graded=True, section="summary"))
+        all_total, graded = aggregate_scores(scores)
+        self.assertAlmostEqual(all_total, Score(earned=7.0/5, possible=4, weight=1, graded=False, section="summary"))
+        self.assertAlmostEqual(graded, Score(earned=7.0/5, possible=3, weight=1, graded=True, section="summary"))
+
+        scores.append(Score(earned=2, possible=5, weight=3, graded=False, section="summary"))
+        all_total, graded = aggregate_scores(scores)
+        self.assertAlmostEqual(all_total, Score(earned=13.0/5, possible=7, weight=1, graded=False, section="summary"))
+        self.assertAlmostEqual(graded, Score(earned=7.0/5, possible=3, weight=1, graded=True, section="summary"))
+
+        scores.append(Score(earned=2, possible=5, weight=.5, graded=True, section="summary"))
+        all_total, graded = aggregate_scores(scores)
+        self.assertAlmostEqual(all_total, Score(earned=14.0/5, possible=7.5, weight=1, graded=False, section="summary"))
+        self.assertAlmostEqual(graded, Score(earned=8.0/5, possible=3.5, weight=1, graded=True, section="summary"))
diff --git a/templates/problem.html b/templates/problem.html
index f332dda..cd332f1 100644
--- a/templates/problem.html
+++ b/templates/problem.html
@@ -1,4 +1,9 @@
-<h2>${ problem['name'] }</h2>
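+## the point value is shown only when the problem XML sets @weight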
+<h2 class="problem-header">${ problem['name'] }
+% if problem['weight']:
+: ${ problem['weight'] } points
+% endif
+</h2>
 
 <section class="problem">
   ${ problem['html'] }