Commit b959469a by Matt Drayer Committed by Jonathan Piacenti

mattdrayer/api-proforma-realistic: Apply average score to unscored categories

parent 8d3650d7
...@@ -151,48 +151,57 @@ def get_course_child_content(request, user, course_key, child_descriptor): ...@@ -151,48 +151,57 @@ def get_course_child_content(request, user, course_key, child_descriptor):
def calculate_proforma_grade(grade_summary, grading_policy):
    """
    Calculate a projected (proforma) final grade from the current grade state.

    Categories equate to grading policy 'types' and have values such as
    'Homework', 'Lab', 'MidtermExam', and 'FinalExam'.  Category weights are
    assumed to total 100 percent, so a category's contribution to the final
    grade is its average recorded score times its weight.  Dropped
    assignments (e.g. lowest-two homeworks) are NOT taken into account.
    After all scored categories are processed, the average scored-category
    score is applied to every unscored category as a projection of the
    user's performance there.

    Example:
        - Scored:   Homework,    weight 15%, score 70%  -> 0.105
        - Scored:   MidtermExam, weight 30%, score 80%  -> 0.240
        - Scored:   FinalExam,   weight 40%, score 95%  -> 0.380
        - Average scored-category score: (70 + 80 + 95) / 3 = 81.7%
        - Unscored: Lab,         weight 15%, assume 81.7% -> 0.123
        - Proforma grade = 0.105 + 0.240 + 0.380 + 0.123 = 0.8475 (84.8%)

    Arguments:
        grade_summary -- dict with 'grade_breakdown' (list of dicts carrying
            a 'category' key) and 'totaled_scores' (dict mapping a category
            name to a list of score objects exposing .earned, .possible and
            .due attributes)
        grading_policy -- dict with a 'GRADER' list of policy dicts, each
            carrying 'type' and 'weight' keys

    Returns:
        float -- the projected final grade, normalized to [0, 1]
    """
    def _category_weight(category):
        """Return the policy weight for a category, or None if the category
        is absent from grading_policy['GRADER']."""
        policy = next(
            (entry for entry in grading_policy['GRADER'] if entry['type'] == category),
            None
        )
        return policy['weight'] if policy is not None else None

    proforma_grade = 0.00
    totaled_scores = grade_summary['totaled_scores']
    category_averages = []
    categories_to_estimate = []

    for grade_category in grade_summary['grade_breakdown']:
        category = grade_category['category']
        item_scores = totaled_scores.get(category)
        if not item_scores:
            # No recorded scores at all -- project this category below
            categories_to_estimate.append(category)
            continue
        total_item_score = 0.00
        items_considered = 0
        for item_score in item_scores:
            # Only count items where points have been scored, or where the
            # due date has passed (an overdue, unscored item counts as zero)
            if item_score.earned or (item_score.due and item_score.due < timezone.now()):
                if not item_score.possible:
                    # Guard: a zero-point item would divide by zero
                    continue
                total_item_score += item_score.earned / item_score.possible
                items_considered += 1
        if total_item_score:
            category_average_score = total_item_score / items_considered
            category_averages.append(category_average_score)
            weight = _category_weight(category)
            # Skip categories missing from the grading policy instead of
            # crashing on a None subscript
            if weight is not None:
                proforma_grade += category_average_score * weight
        else:
            categories_to_estimate.append(category)

    # Project every unscored category at the average of the scored ones.
    # If nothing has been scored yet, assume zero rather than raising
    # ZeroDivisionError on an empty average.
    if category_averages:
        assumed_category_average = sum(category_averages) / len(category_averages)
    else:
        assumed_category_average = 0.00
    for category in categories_to_estimate:
        weight = _category_weight(category)
        if weight is not None:
            proforma_grade += assumed_category_average * weight
    return proforma_grade
...@@ -1366,7 +1366,7 @@ class UsersApiTests(ModuleStoreTestCase): ...@@ -1366,7 +1366,7 @@ class UsersApiTests(ModuleStoreTestCase):
self.assertIsNotNone(grading_policy['GRADE_CUTOFFS']) self.assertIsNotNone(grading_policy['GRADE_CUTOFFS'])
self.assertEqual(response.data['current_grade'], 0.7) self.assertEqual(response.data['current_grade'], 0.7)
self.assertEqual(response.data['proforma_grade'], 0.95) self.assertEqual(response.data['proforma_grade'], 0.9375)
def is_user_profile_created_updated(self, response, data): def is_user_profile_created_updated(self, response, data):
"""This function compare response with user profile data """ """This function compare response with user profile data """
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment