Commit e24915d2 by Eric Fischer

Revert "Merge pull request #13614 from edx/efischer/revert_grades"

This reverts commit 9aa35cd8.
parent f0550b15
......@@ -10,13 +10,67 @@ import logging
import random
import sys
from collections import namedtuple
log = logging.getLogger("edx.courseware")
# This is a tuple for holding scores, either from problems or sections.
# Section either indicates the name of the problem or the name of the section
Score = namedtuple("Score", "earned possible graded section module_id")
class ScoreBase(object):
    """
    Abstract base for score value objects.

    Fields common to all scores:
        graded (boolean) - whether or not this module is graded
        display_name (string) - the display name of the module
        module_id (UsageKey) - the location of the module
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, graded, display_name, module_id):
        self.graded = graded
        self.display_name = display_name
        self.module_id = module_id

    def __eq__(self, other):
        # Equal only when the concrete types match exactly and every
        # attribute agrees; a subclass instance never equals its parent.
        return type(self) is type(other) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return u"{class_name}({fields})".format(
            class_name=self.__class__.__name__,
            fields=self.__dict__,
        )
class ProblemScore(ScoreBase):
    """
    Score earned on a single problem.

    In addition to the fields in ScoreBase:
        raw_earned (float) - raw points earned on this problem
        raw_possible (float) - raw points possible to earn on this problem
        weighted_earned = earned (float) - weighted value of the points earned
        weighted_possible = possible (float) - weighted possible points on this problem
        weight (float) - weight of this problem
    """
    def __init__(self, raw_earned, raw_possible, weighted_earned, weighted_possible, weight, *args, **kwargs):
        super(ProblemScore, self).__init__(*args, **kwargs)
        self.weight = weight
        self.raw_earned = raw_earned
        self.raw_possible = raw_possible
        # The weighted values are stored under the generic earned/possible
        # names that aggregation code reads uniformly across score types.
        self.earned = weighted_earned
        self.possible = weighted_possible
class AggregatedScore(ScoreBase):
    """
    Aggregated (total weighted) score over a subsection's problems.

    In addition to the fields in ScoreBase:
        tw_earned = earned - total aggregated sum of all weighted earned values
        tw_possible = possible - total aggregated sum of all weighted possible values
    """
    def __init__(self, tw_earned, tw_possible, *args, **kwargs):
        super(AggregatedScore, self).__init__(*args, **kwargs)
        # Stored under the generic names so graders can treat problem
        # scores and aggregate scores interchangeably.
        self.earned = tw_earned
        self.possible = tw_possible
def float_sum(iterable):
......@@ -26,13 +80,14 @@ def float_sum(iterable):
return float(sum(iterable))
def aggregate_scores(scores, section_name="summary", location=None):
def aggregate_scores(scores, display_name="summary", location=None):
"""
scores: A list of Score objects
scores: A list of ScoreBase objects
display_name: The display name for the score object
location: The location under which all objects in scores are located
returns: A tuple (all_total, graded_total).
all_total: A Score representing the total score summed over all input scores
graded_total: A Score representing the score summed over all graded input scores
all_total: A ScoreBase representing the total score summed over all input scores
graded_total: A ScoreBase representing the score summed over all graded input scores
"""
total_correct_graded = float_sum(score.earned for score in scores if score.graded)
total_possible_graded = float_sum(score.possible for score in scores if score.graded)
......@@ -41,10 +96,10 @@ def aggregate_scores(scores, section_name="summary", location=None):
total_possible = float_sum(score.possible for score in scores)
#regardless of whether it is graded
all_total = Score(total_correct, total_possible, False, section_name, location)
all_total = AggregatedScore(total_correct, total_possible, False, display_name, location)
#selecting only graded things
graded_total = Score(total_correct_graded, total_possible_graded, True, section_name, location)
graded_total = AggregatedScore(total_correct_graded, total_possible_graded, True, display_name, location)
return all_total, graded_total
......@@ -220,7 +275,7 @@ class SingleSectionGrader(CourseGrader):
found_score = None
if self.type in grade_sheet:
for score in grade_sheet[self.type]:
if score.section == self.name:
if score.display_name == self.name:
found_score = score
break
......@@ -342,7 +397,7 @@ class AssignmentFormatGrader(CourseGrader):
else:
earned = scores[i].earned
possible = scores[i].possible
section_name = scores[i].section
section_name = scores[i].display_name
percentage = earned / possible
summary_format = u"{section_type} {index} - {name} - {percent:.0%} ({earned:.3n}/{possible:.3n})"
......
......@@ -2,42 +2,67 @@
import unittest
from xmodule import graders
from xmodule.graders import Score, aggregate_scores
from xmodule.graders import ProblemScore, AggregatedScore, aggregate_scores
class GradesheetTest(unittest.TestCase):
'''Tests the aggregate_scores method'''
"""
Tests the aggregate_scores method
"""
def test_weighted_grading(self):
scores = []
Score.__sub__ = lambda me, other: (me.earned - other.earned) + (me.possible - other.possible)
agg_fields = dict(display_name="aggregated_score", module_id=None)
prob_fields = dict(display_name="problem_score", module_id=None, raw_earned=0, raw_possible=0, weight=0)
all_total, graded_total = aggregate_scores(scores)
self.assertEqual(all_total, Score(earned=0, possible=0, graded=False, section="summary", module_id=None))
self.assertEqual(graded_total, Score(earned=0, possible=0, graded=True, section="summary", module_id=None))
all_total, graded_total = aggregate_scores(scores, display_name=agg_fields['display_name'])
self.assertEqual(
all_total,
AggregatedScore(tw_earned=0, tw_possible=0, graded=False, **agg_fields),
)
self.assertEqual(
graded_total,
AggregatedScore(tw_earned=0, tw_possible=0, graded=True, **agg_fields),
)
scores.append(Score(earned=0, possible=5, graded=False, section="summary", module_id=None))
all_total, graded_total = aggregate_scores(scores)
self.assertEqual(all_total, Score(earned=0, possible=5, graded=False, section="summary", module_id=None))
self.assertEqual(graded_total, Score(earned=0, possible=0, graded=True, section="summary", module_id=None))
scores.append(ProblemScore(weighted_earned=0, weighted_possible=5, graded=False, **prob_fields))
all_total, graded_total = aggregate_scores(scores, display_name=agg_fields['display_name'])
self.assertEqual(
all_total,
AggregatedScore(tw_earned=0, tw_possible=5, graded=False, **agg_fields),
)
self.assertEqual(
graded_total,
AggregatedScore(tw_earned=0, tw_possible=0, graded=True, **agg_fields),
)
scores.append(Score(earned=3, possible=5, graded=True, section="summary", module_id=None))
all_total, graded_total = aggregate_scores(scores)
self.assertAlmostEqual(all_total, Score(earned=3, possible=10, graded=False, section="summary", module_id=None))
scores.append(ProblemScore(weighted_earned=3, weighted_possible=5, graded=True, **prob_fields))
all_total, graded_total = aggregate_scores(scores, display_name=agg_fields['display_name'])
self.assertAlmostEqual(
graded_total, Score(earned=3, possible=5, graded=True, section="summary", module_id=None)
all_total,
AggregatedScore(tw_earned=3, tw_possible=10, graded=False, **agg_fields),
)
self.assertAlmostEqual(
graded_total,
AggregatedScore(tw_earned=3, tw_possible=5, graded=True, **agg_fields),
)
scores.append(Score(earned=2, possible=5, graded=True, section="summary", module_id=None))
all_total, graded_total = aggregate_scores(scores)
self.assertAlmostEqual(all_total, Score(earned=5, possible=15, graded=False, section="summary", module_id=None))
scores.append(ProblemScore(weighted_earned=2, weighted_possible=5, graded=True, **prob_fields))
all_total, graded_total = aggregate_scores(scores, display_name=agg_fields['display_name'])
self.assertAlmostEqual(
all_total,
AggregatedScore(tw_earned=5, tw_possible=15, graded=False, **agg_fields),
)
self.assertAlmostEqual(
graded_total, Score(earned=5, possible=10, graded=True, section="summary", module_id=None)
graded_total,
AggregatedScore(tw_earned=5, tw_possible=10, graded=True, **agg_fields),
)
class GraderTest(unittest.TestCase):
'''Tests grader implementations'''
"""
Tests grader implementations
"""
empty_gradesheet = {
}
......@@ -49,19 +74,25 @@ class GraderTest(unittest.TestCase):
}
test_gradesheet = {
'Homework': [Score(earned=2, possible=20.0, graded=True, section='hw1', module_id=None),
Score(earned=16, possible=16.0, graded=True, section='hw2', module_id=None)],
# The dropped scores should be from the assignments that don't exist yet
'Lab': [Score(earned=1, possible=2.0, graded=True, section='lab1', module_id=None), # Dropped
Score(earned=1, possible=1.0, graded=True, section='lab2', module_id=None),
Score(earned=1, possible=1.0, graded=True, section='lab3', module_id=None),
Score(earned=5, possible=25.0, graded=True, section='lab4', module_id=None), # Dropped
Score(earned=3, possible=4.0, graded=True, section='lab5', module_id=None), # Dropped
Score(earned=6, possible=7.0, graded=True, section='lab6', module_id=None),
Score(earned=5, possible=6.0, graded=True, section='lab7', module_id=None)],
'Homework': [
AggregatedScore(tw_earned=2, tw_possible=20.0, graded=True, display_name='hw1', module_id=None),
AggregatedScore(tw_earned=16, tw_possible=16.0, graded=True, display_name='hw2', module_id=None)
],
'Midterm': [Score(earned=50.5, possible=100, graded=True, section="Midterm Exam", module_id=None), ],
# The dropped scores should be from the assignments that don't exist yet
'Lab': [
AggregatedScore(tw_earned=1, tw_possible=2.0, graded=True, display_name='lab1', module_id=None), # Dropped
AggregatedScore(tw_earned=1, tw_possible=1.0, graded=True, display_name='lab2', module_id=None),
AggregatedScore(tw_earned=1, tw_possible=1.0, graded=True, display_name='lab3', module_id=None),
AggregatedScore(tw_earned=5, tw_possible=25.0, graded=True, display_name='lab4', module_id=None), # Dropped
AggregatedScore(tw_earned=3, tw_possible=4.0, graded=True, display_name='lab5', module_id=None), # Dropped
AggregatedScore(tw_earned=6, tw_possible=7.0, graded=True, display_name='lab6', module_id=None),
AggregatedScore(tw_earned=5, tw_possible=6.0, graded=True, display_name='lab7', module_id=None),
],
'Midterm': [
AggregatedScore(tw_earned=50.5, tw_possible=100, graded=True, display_name="Midterm Exam", module_id=None),
],
}
def test_single_section_grader(self):
......@@ -69,9 +100,11 @@ class GraderTest(unittest.TestCase):
lab4_grader = graders.SingleSectionGrader("Lab", "lab4")
bad_lab_grader = graders.SingleSectionGrader("Lab", "lab42")
for graded in [midterm_grader.grade(self.empty_gradesheet),
midterm_grader.grade(self.incomplete_gradesheet),
bad_lab_grader.grade(self.test_gradesheet)]:
for graded in [
midterm_grader.grade(self.empty_gradesheet),
midterm_grader.grade(self.incomplete_gradesheet),
bad_lab_grader.grade(self.test_gradesheet),
]:
self.assertEqual(len(graded['section_breakdown']), 1)
self.assertEqual(graded['percent'], 0.0)
......@@ -91,10 +124,12 @@ class GraderTest(unittest.TestCase):
lab_grader = graders.AssignmentFormatGrader("Lab", 7, 3)
# Test the grading of an empty gradesheet
for graded in [homework_grader.grade(self.empty_gradesheet),
no_drop_grader.grade(self.empty_gradesheet),
homework_grader.grade(self.incomplete_gradesheet),
no_drop_grader.grade(self.incomplete_gradesheet)]:
for graded in [
homework_grader.grade(self.empty_gradesheet),
no_drop_grader.grade(self.empty_gradesheet),
homework_grader.grade(self.incomplete_gradesheet),
no_drop_grader.grade(self.incomplete_gradesheet),
]:
self.assertAlmostEqual(graded['percent'], 0.0)
# Make sure the breakdown includes 12 sections, plus one summary
self.assertEqual(len(graded['section_breakdown']), 12 + 1)
......@@ -118,8 +153,10 @@ class GraderTest(unittest.TestCase):
def test_assignment_format_grader_on_single_section_entry(self):
midterm_grader = graders.AssignmentFormatGrader("Midterm", 1, 0)
# Test the grading on a section with one item:
for graded in [midterm_grader.grade(self.empty_gradesheet),
midterm_grader.grade(self.incomplete_gradesheet)]:
for graded in [
midterm_grader.grade(self.empty_gradesheet),
midterm_grader.grade(self.incomplete_gradesheet),
]:
self.assertAlmostEqual(graded['percent'], 0.0)
# Make sure the breakdown includes just the one summary
self.assertEqual(len(graded['section_breakdown']), 0 + 1)
......@@ -137,23 +174,31 @@ class GraderTest(unittest.TestCase):
# will act like SingleSectionGraders on single sections.
midterm_grader = graders.AssignmentFormatGrader("Midterm", 1, 0)
weighted_grader = graders.WeightedSubsectionsGrader([(homework_grader, homework_grader.category, 0.25),
(lab_grader, lab_grader.category, 0.25),
(midterm_grader, midterm_grader.category, 0.5)])
weighted_grader = graders.WeightedSubsectionsGrader([
(homework_grader, homework_grader.category, 0.25),
(lab_grader, lab_grader.category, 0.25),
(midterm_grader, midterm_grader.category, 0.5),
])
over_one_weights_grader = graders.WeightedSubsectionsGrader([(homework_grader, homework_grader.category, 0.5),
(lab_grader, lab_grader.category, 0.5),
(midterm_grader, midterm_grader.category, 0.5)])
over_one_weights_grader = graders.WeightedSubsectionsGrader([
(homework_grader, homework_grader.category, 0.5),
(lab_grader, lab_grader.category, 0.5),
(midterm_grader, midterm_grader.category, 0.5),
])
# The midterm should have all weight on this one
zero_weights_grader = graders.WeightedSubsectionsGrader([(homework_grader, homework_grader.category, 0.0),
(lab_grader, lab_grader.category, 0.0),
(midterm_grader, midterm_grader.category, 0.5)])
zero_weights_grader = graders.WeightedSubsectionsGrader([
(homework_grader, homework_grader.category, 0.0),
(lab_grader, lab_grader.category, 0.0),
(midterm_grader, midterm_grader.category, 0.5),
])
# This should always have a final percent of zero
all_zero_weights_grader = graders.WeightedSubsectionsGrader([(homework_grader, homework_grader.category, 0.0),
(lab_grader, lab_grader.category, 0.0),
(midterm_grader, midterm_grader.category, 0.0)])
all_zero_weights_grader = graders.WeightedSubsectionsGrader([
(homework_grader, homework_grader.category, 0.0),
(lab_grader, lab_grader.category, 0.0),
(midterm_grader, midterm_grader.category, 0.0),
])
empty_grader = graders.WeightedSubsectionsGrader([])
......@@ -177,10 +222,12 @@ class GraderTest(unittest.TestCase):
self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
self.assertEqual(len(graded['grade_breakdown']), 3)
for graded in [weighted_grader.grade(self.empty_gradesheet),
weighted_grader.grade(self.incomplete_gradesheet),
zero_weights_grader.grade(self.empty_gradesheet),
all_zero_weights_grader.grade(self.empty_gradesheet)]:
for graded in [
weighted_grader.grade(self.empty_gradesheet),
weighted_grader.grade(self.incomplete_gradesheet),
zero_weights_grader.grade(self.empty_gradesheet),
all_zero_weights_grader.grade(self.empty_gradesheet),
]:
self.assertAlmostEqual(graded['percent'], 0.0)
self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
self.assertEqual(len(graded['grade_breakdown']), 3)
......
<problem url_name="capa-optionresponse">
<optionresponse>
<optioninput options="('Correct', 'Incorrect')" correct="Correct"></optioninput>
<optioninput options="('Correct', 'Incorrect')" correct="Correct"></optioninput>
</optionresponse>
</problem>
<problem display_name="Exercise: apply to each 3" markdown="null" weight="5.0">
<text>
<p>
<b>ESTIMATED TIME TO COMPLETE: 4 minutes</b>
</p>
<pre>
&gt;&gt;&gt; print testList
[1, 16, 64, 81]
</pre>
</text>
<coderesponse queuename="Watcher-MITx-6.00x">
<textbox rows="10" cols="80" mode="python" tabsize="4"/>
<codeparam>
<initial_display>
# Your Code Here
</initial_display>
<answer_display>
def square(a):
return a * a
applyToEach(testList, square)
</answer_display>
<grader_payload>{"grader": "finger_exercises/L6/applyToEach3/grade_ate3.py"}</grader_payload>
</codeparam>
</coderesponse>
</problem>
<library_content display_name="Final Exam" has_score="true" max_count="25" source_library_id="library-v1:MSX+msx_cld213xfinalexam" source_library_version="577b5aca45064f068278faa0">
<problem/>
<problem/>
</library_content>
<lti launch_url="http://www.imsglobal.org/developers/LTI/test/v1p1/tool.php" lti_id="ims"/>
<openassessment url_name="0e2bbf6cc89e45d98b028fa4e2d46314" allow_file_upload="False">
<title></title>
<assessments>
<assessment name="peer-assessment" must_grade="1" must_be_graded_by="1"/>
<assessment name="self-assessment"/>
</assessments>
<rubric>
<prompt>
Censorship in the Libraries
'All of us can think of a book that we hope none of our children or any
other children have taken off the shelf. But if I have the right to remove
that book from the shelf -- that work I abhor -- then you also have exactly
the same right and so does everyone else. And then we have no books left on
the shelf for any of us.' --Katherine Paterson, Author
Write a persuasive essay to a newspaper reflecting your views on censorship
in libraries. Do you believe that certain materials, such as books, music,
movies, magazines, etc., should be removed from the shelves if they are
found offensive? Support your position with convincing arguments from your
own experience, observations, and/or reading.
Read for conciseness, clarity of thought, and form.
</prompt>
<criterion>
<name>Ideas</name>
<prompt>Determine if there is a unifying theme or main idea.</prompt>
<option points="0">
<name>Poor</name>
<explanation>
Difficult for the reader to discern the main idea.
Too brief or too repetitive to establish or maintain a focus.
</explanation>
</option>
<option points="3">
<name>Fair</name>
<explanation>
Presents a unifying theme or main idea, but may
include minor tangents. Stays somewhat focused on topic and
task.
</explanation>
</option>
<option points="5">
<name>Good</name>
<explanation>
Presents a unifying theme or main idea without going
off on tangents. Stays completely focused on topic and task.
</explanation>
</option>
</criterion>
<criterion>
<name>Content</name>
<prompt>Assess the content of the submission</prompt>
<option points="0">
<name>Poor</name>
<explanation>
Includes little information with few or no details or
unrelated details. Unsuccessful in attempts to explore any
facets of the topic.
</explanation>
</option>
<option points="1">
<name>Fair</name>
<explanation>
Includes little information and few or no details.
Explores only one or two facets of the topic.
</explanation>
</option>
<option points="3">
<name>Good</name>
<explanation>
Includes sufficient information and supporting
details. (Details may not be fully developed; ideas may be
listed.) Explores some facets of the topic.
</explanation>
</option>
<option points="3">
<name>Excellent</name>
<explanation>
Includes in-depth information and exceptional
supporting details that are fully developed. Explores all
facets of the topic.
</explanation>
</option>
</criterion>
</rubric>
</openassessment>
......@@ -166,8 +166,8 @@ class TestGatedContent(GatingTestCase, MilestonesTestCaseMixin):
"""
course_grade = CourseGradeFactory(user).create(self.course)
for prob in [self.gating_prob1, self.gated_prob2, self.prob3]:
self.assertIn(prob.location, course_grade.locations_to_weighted_scores)
self.assertNotIn(self.orphan.location, course_grade.locations_to_weighted_scores)
self.assertIn(prob.location, course_grade.locations_to_scores)
self.assertNotIn(self.orphan.location, course_grade.locations_to_scores)
self.assertEquals(course_grade.percent, expected_percent)
......
......@@ -30,7 +30,7 @@ BLOCK_RECORD_LIST_VERSION = 1
# Used to serialize information about a block at the time it was used in
# grade calculation.
BlockRecord = namedtuple('BlockRecord', ['locator', 'weight', 'max_score'])
BlockRecord = namedtuple('BlockRecord', ['locator', 'weight', 'raw_possible', 'graded'])
class BlockRecordList(tuple):
......@@ -101,7 +101,8 @@ class BlockRecordList(tuple):
BlockRecord(
locator=UsageKey.from_string(block["locator"]).replace(course_key=course_key),
weight=block["weight"],
max_score=block["max_score"],
raw_possible=block["raw_possible"],
graded=block["graded"],
)
for block in block_dicts
)
......
......@@ -43,15 +43,15 @@ class CourseGrade(object):
return subsections_by_format
@lazy
def locations_to_weighted_scores(self):
def locations_to_scores(self):
"""
Returns a dict of problem scores keyed by their locations.
"""
locations_to_weighted_scores = {}
locations_to_scores = {}
for chapter in self.chapter_grades:
for subsection_grade in chapter['sections']:
locations_to_weighted_scores.update(subsection_grade.locations_to_weighted_scores)
return locations_to_weighted_scores
locations_to_scores.update(subsection_grade.locations_to_scores)
return locations_to_scores
@lazy
def grade_value(self):
......@@ -113,7 +113,7 @@ class CourseGrade(object):
grade_summary['percent'] = self.percent
grade_summary['grade'] = self.letter_grade
grade_summary['totaled_scores'] = self.subsection_grade_totals_by_format
grade_summary['raw_scores'] = list(self.locations_to_weighted_scores.itervalues())
grade_summary['raw_scores'] = list(self.locations_to_scores.itervalues())
return grade_summary
......@@ -141,7 +141,7 @@ class CourseGrade(object):
subsections_total = sum(len(x) for x in self.subsection_grade_totals_by_format.itervalues())
subsections_read = len(subsection_grade_factory._unsaved_subsection_grades) # pylint: disable=protected-access
subsections_created = subsections_total - subsections_read
blocks_total = len(self.locations_to_weighted_scores)
blocks_total = len(self.locations_to_scores)
if not read_only:
subsection_grade_factory.bulk_create_unsaved()
......@@ -166,8 +166,8 @@ class CourseGrade(object):
composite module (a vertical or section ) the scores will be the sums of
all scored problems that are children of the chosen location.
"""
if location in self.locations_to_weighted_scores:
score, _ = self.locations_to_weighted_scores[location]
if location in self.locations_to_scores:
score = self.locations_to_scores[location]
return score.earned, score.possible
children = self.course_structure.get_children(location)
earned = 0.0
......
......@@ -11,12 +11,11 @@ from courseware.model_data import ScoresClient
from lms.djangoapps.grades.scores import get_score, possibly_scored
from lms.djangoapps.grades.models import BlockRecord, PersistentSubsectionGrade
from lms.djangoapps.grades.config.models import PersistentGradesEnabledFlag
from lms.djangoapps.grades.transformer import GradesTransformer
from student.models import anonymous_id_for_user, User
from submissions import api as submissions_api
from traceback import format_exc
from xmodule import block_metadata_utils, graders
from xmodule.graders import Score
from xmodule.graders import AggregatedScore
log = getLogger(__name__)
......@@ -54,62 +53,47 @@ class SubsectionGrade(object):
self.graded_total = None # aggregated grade for all graded problems
self.all_total = None # aggregated grade for all problems, regardless of whether they are graded
self.locations_to_weighted_scores = OrderedDict() # dict of problem locations to (Score, weight) tuples
self._scores = None
self.locations_to_scores = OrderedDict() # dict of problem locations to ProblemScore
@property
def scores(self):
"""
List of all problem scores in the subsection.
"""
if self._scores is None:
self._scores = [score for score, _ in self.locations_to_weighted_scores.itervalues()]
return self._scores
return self.locations_to_scores.values()
def init_from_structure(self, student, course_structure, scores_client, submissions_scores):
def init_from_structure(self, student, course_structure, submissions_scores, csm_scores):
"""
Compute the grade of this subsection for the given student and course.
"""
assert self._scores is None
for descendant_key in course_structure.post_order_traversal(
filter_func=possibly_scored,
start_node=self.location,
):
self._compute_block_score(
student, descendant_key, course_structure, scores_client, submissions_scores, persisted_values={},
)
self._compute_block_score(descendant_key, course_structure, submissions_scores, csm_scores)
self.all_total, self.graded_total = graders.aggregate_scores(self.scores, self.display_name, self.location)
self._log_event(log.debug, u"init_from_structure", student)
def init_from_model(self, student, model, course_structure, scores_client, submissions_scores):
def init_from_model(self, student, model, course_structure, submissions_scores, csm_scores):
"""
Load the subsection grade from the persisted model.
"""
assert self._scores is None
for block in model.visible_blocks.blocks:
persisted_values = {'weight': block.weight, 'possible': block.max_score}
self._compute_block_score(
student,
block.locator,
course_structure,
scores_client,
submissions_scores,
persisted_values
)
self._compute_block_score(block.locator, course_structure, submissions_scores, csm_scores, block)
self.graded_total = Score(
earned=model.earned_graded,
possible=model.possible_graded,
self.graded_total = AggregatedScore(
tw_earned=model.earned_graded,
tw_possible=model.possible_graded,
graded=True,
section=self.display_name,
display_name=self.display_name,
module_id=self.location,
)
self.all_total = Score(
earned=model.earned_all,
possible=model.possible_all,
self.all_total = AggregatedScore(
tw_earned=model.earned_all,
tw_possible=model.possible_all,
graded=False,
section=self.display_name,
display_name=self.display_name,
module_id=self.location,
)
self._log_event(log.debug, u"init_from_model", student)
......@@ -140,12 +124,11 @@ class SubsectionGrade(object):
def _compute_block_score(
self,
student,
block_key,
course_structure,
scores_client,
submissions_scores,
persisted_values,
csm_scores,
persisted_block=None,
):
"""
Compute score for the given block. If persisted_values
......@@ -154,54 +137,14 @@ class SubsectionGrade(object):
block = course_structure[block_key]
if getattr(block, 'has_score', False):
possible = persisted_values.get('possible', None)
weight = persisted_values.get('weight', getattr(block, 'weight', None))
(earned, possible) = get_score(
student,
block,
scores_client,
problem_score = get_score(
submissions_scores,
weight,
possible,
csm_scores,
persisted_block,
block,
)
if earned is not None or possible is not None:
# There's a chance that the value of graded is not the same
# value when the problem was scored. Since we get the value
# from the block_structure.
#
# Cannot grade a problem with a denominator of 0.
# TODO: None > 0 is not python 3 compatible.
block_graded = self._get_explicit_graded(block, course_structure) if possible > 0 else False
self.locations_to_weighted_scores[block.location] = (
Score(
earned,
possible,
block_graded,
block_metadata_utils.display_name_with_default_escaped(block),
block.location,
),
weight,
)
def _get_explicit_graded(self, block, course_structure):
"""
Returns the explicit graded field value for the given block
"""
field_value = course_structure.get_transformer_block_field(
block.location,
GradesTransformer,
GradesTransformer.EXPLICIT_GRADED_FIELD_NAME
)
# Set to True if grading is not explicitly disabled for
# this block. This allows us to include the block's score
# in the aggregated self.graded_total, regardless of the
# inherited graded value from the subsection. (TNL-5560)
return True if field_value is None else field_value
if problem_score:
self.locations_to_scores[block_key] = problem_score
def _persisted_model_params(self, student):
"""
......@@ -226,9 +169,9 @@ class SubsectionGrade(object):
Returns the list of visible blocks.
"""
return [
BlockRecord(location, weight, score.possible)
for location, (score, weight) in
self.locations_to_weighted_scores.iteritems()
BlockRecord(location, score.weight, score.raw_possible, score.graded)
for location, score in
self.locations_to_scores.iteritems()
]
def _log_event(self, log_func, log_statement, student):
......@@ -283,7 +226,7 @@ class SubsectionGradeFactory(object):
if not subsection_grade:
subsection_grade = SubsectionGrade(subsection, self.course)
subsection_grade.init_from_structure(
self.student, block_structure, self._scores_client, self._submissions_scores
self.student, block_structure, self._submissions_scores, self._csm_scores,
)
if PersistentGradesEnabledFlag.feature_enabled(self.course.id):
if read_only:
......@@ -313,7 +256,7 @@ class SubsectionGradeFactory(object):
block_structure = self._get_block_structure(block_structure)
subsection_grade = SubsectionGrade(subsection, self.course)
subsection_grade.init_from_structure(
self.student, block_structure, self._scores_client, self._submissions_scores
self.student, block_structure, self._submissions_scores, self._csm_scores
)
if PersistentGradesEnabledFlag.feature_enabled(self.course.id):
......@@ -323,7 +266,7 @@ class SubsectionGradeFactory(object):
return subsection_grade
@lazy
def _scores_client(self):
def _csm_scores(self):
"""
Lazily queries and returns all the scores stored in the user
state (in CSM) for the course, while caching the result.
......@@ -351,7 +294,7 @@ class SubsectionGradeFactory(object):
if saved_subsection_grade:
subsection_grade = SubsectionGrade(subsection, self.course)
subsection_grade.init_from_model(
self.student, saved_subsection_grade, block_structure, self._scores_client, self._submissions_scores
self.student, saved_subsection_grade, block_structure, self._submissions_scores, self._csm_scores,
)
return subsection_grade
......
......@@ -5,106 +5,246 @@ from logging import getLogger
from openedx.core.lib.cache_utils import memoized
from xblock.core import XBlock
from xmodule.block_metadata_utils import display_name_with_default_escaped
from xmodule.graders import ProblemScore
from .transformer import GradesTransformer
log = getLogger(__name__)
@memoized
def block_types_possibly_scored():
def possibly_scored(usage_key):
"""
Returns the block types that could have a score.
Something might be a scored item if it is capable of storing a score
(has_score=True). We also have to include anything that can have children,
since those children might have scores. We can avoid things like Videos,
which have state but cannot ever impact someone's grade.
Returns whether the given block could impact grading (i.e.
has_score or has_children).
"""
return frozenset(
cat for (cat, xblock_class) in XBlock.load_classes() if (
getattr(xblock_class, 'has_score', False) or getattr(xblock_class, 'has_children', False)
)
)
return usage_key.block_type in _block_types_possibly_scored()
def possibly_scored(usage_key):
def get_score(submissions_scores, csm_scores, persisted_block, block):
    """
    Returns the score for a problem, as a ProblemScore object.  It is
    assumed that the provided storages have already been filtered for
    a single user in question and have user-specific values.

    The score is retrieved from the provided storages in the following
    order of precedence.  If no value for the block is found in a
    given storage, the next storage is checked.

    submissions_scores (dict of {unicode(usage_key): (earned, possible)}):
        A python dictionary of serialized UsageKeys to (earned, possible)
        tuples. These values, retrieved using the Submissions API by the
        caller (already filtered for the user and course), take precedence
        above all other score storages.  Typically used by ORA.

        When found here, the returned score has valid weighted_earned and
        weighted_possible; raw_earned and raw_possible are not required by
        the submissions API, so those values (and weight) are invalid.

    csm_scores (ScoresClient):
        The ScoresClient object (already filtered for the user and course),
        from which a courseware.models.StudentModule object can be retrieved
        for the block.  Typically used for CAPA problems, including scores
        calculated by external graders.

        When found here, raw_earned/raw_possible come from CSM;
        weighted values are calculated from the raw scores and weight;
        weight and graded come from the persisted block, if found, else
        from the latest block content.

    persisted_block (.models.BlockRecord):
        The block values as found in the grades persistence layer.  Used
        only if not found in an earlier storage; takes precedence over the
        latest content-version of the block.  Implies the user has not yet
        attempted this problem, but the user's grade _was_ persisted.

        raw_earned will equal 0.0; raw_possible, weight, and graded come
        from the persisted block; weighted values are calculated.

    block (block_structure.BlockData):
        Values from the latest content-version of the block, used only if
        not available from a prior storage.  Implies the user has not yet
        attempted this problem and the user's grade was _not_ yet persisted.

        raw_earned will equal 0.0; raw_possible, weight, and graded come
        from the latest block content; weighted values are calculated.
    """
    weight = _get_weight_from_block(persisted_block, block)

    # Priority order for retrieving the scores:
    # submissions API -> CSM -> grades persisted block -> latest block content
    raw_earned, raw_possible, weighted_earned, weighted_possible = (
        _get_score_from_submissions(submissions_scores, block) or
        _get_score_from_csm(csm_scores, block, weight) or
        _get_score_from_persisted_or_latest_block(persisted_block, block, weight)
    )

    # A block with a non-positive denominator cannot meaningfully count as
    # graded, regardless of its graded setting.
    assert weighted_possible is not None
    has_valid_denominator = weighted_possible > 0.0
    graded = _get_graded_from_block(persisted_block, block) if has_valid_denominator else False

    return ProblemScore(
        raw_earned,
        raw_possible,
        weighted_earned,
        weighted_possible,
        weight,
        graded,
        display_name=display_name_with_default_escaped(block),
        module_id=block.location,
    )
def weighted_score(raw_earned, raw_possible, weight=None):
    """
    Returns a tuple that represents the weighted (earned, possible) score.

    If weight is None or raw_possible is 0, returns the original values
    unchanged, since the weight cannot be meaningfully applied.

    When weight is used, it defines the weighted_possible.  This allows
    course authors to specify the exact maximum value for a problem when
    they provide a weight.

    Raises AssertionError if raw_possible is None.
    """
    assert raw_possible is not None
    cannot_compute_with_weight = weight is None or raw_possible == 0
    if cannot_compute_with_weight:
        return raw_earned, raw_possible
    else:
        return float(raw_earned) * weight / raw_possible, float(weight)
def get_score(user, block, scores_client, submissions_scores_cache, weight, possible=None):
def _get_score_from_submissions(submissions_scores, block):
    """
    Returns the score values from the submissions API if found, as a
    (raw_earned, raw_possible, weighted_earned, weighted_possible) tuple;
    otherwise, returns None so the caller falls through to the next storage.

    The raw values are None because the submissions API stores only
    weighted values.
    """
    if submissions_scores:
        submission_value = submissions_scores.get(unicode(block.location))
        if submission_value:
            weighted_earned, weighted_possible = submission_value
            assert weighted_earned >= 0.0 and weighted_possible > 0.0  # per contract from submissions API
            return (None, None) + (weighted_earned, weighted_possible)
def _get_score_from_csm(csm_scores, block, weight):
    """
    Returns the score values from the courseware student module, via
    ScoresClient, if found.
    """
    # If an entry exists and has raw_possible (total) associated with it, we
    # trust that value.  This matters when a student saw an older version of
    # the problem -- they remain graded on what was possible when they
    # attempted it, not what the problem is worth now.
    #
    # Note: Storing raw_possible in CSM predates the grades app's own
    # persistence layer, so the value lives in two places that may disagree
    # once a problem is attempted.  For backward compatibility, the CSM copy
    # keeps precedence over the one in the grades data model.
    csm_entry = csm_scores.get(block.location)
    if not csm_entry or csm_entry.total is None:
        return None
    earned = csm_entry.correct if csm_entry.correct is not None else 0.0
    possible = csm_entry.total
    return (earned, possible) + weighted_score(earned, possible, weight)
def _get_score_from_persisted_or_latest_block(persisted_block, block, weight):
    """
    Returns the score values, now assuming the earned score is 0.0 - since a
    score was not found in an earlier storage.

    Uses the raw_possible value from the persisted_block if found, else from
    the latest block content.
    """
    raw_earned = 0.0
    if persisted_block:
        raw_possible = persisted_block.raw_possible
    else:
        # NOTE(review): max_score may be None for error modules -- downstream
        # weighted_score asserts on None, so callers rely on the transformer
        # having computed a value; confirm against GradesTransformer.
        raw_possible = block.transformer_data[GradesTransformer].max_score
    return (raw_earned, raw_possible) + weighted_score(raw_earned, raw_possible, weight)
def _get_weight_from_block(persisted_block, block):
"""
Returns the weighted value from the persisted_block if found, else from
the latest block content.
"""
if persisted_block:
return persisted_block.weight
else:
# This means we don't have a valid score entry and we don't have a
# cached_max_score on hand. We know they've earned 0.0 points on this.
earned = 0.0
if possible is None:
possible = block.transformer_data[GradesTransformer].max_score
# Problem may be an error module (if something in the problem builder failed)
# In which case possible might be None
if possible is None:
return (None, None)
return weighted_score(earned, possible, weight)
return getattr(block, 'weight', None)
def _get_graded_from_block(persisted_block, block):
    """
    Returns the graded value from the persisted_block if found, else from
    the latest block content.
    """
    # The persisted value, when present, wins over the latest block content.
    return persisted_block.graded if persisted_block else _get_explicit_graded(block)
def _get_explicit_graded(block):
    """
    Returns the explicit graded field value for the given block.
    """
    transformer_data = block.transformer_data[GradesTransformer]
    field_value = getattr(transformer_data, GradesTransformer.EXPLICIT_GRADED_FIELD_NAME, None)

    # Treat "not explicitly disabled" as graded.  This lets the block's score
    # count toward the aggregated graded_total, regardless of the graded value
    # inherited from the subsection. (TNL-5560)
    if field_value is None:
        return True
    return field_value
@memoized
def _block_types_possibly_scored():
    """
    Returns the block types that could have a score.

    Something might be a scored item if it is capable of storing a score
    (has_score=True).  Anything that can have children is included as well,
    since those children might have scores.  Things like Videos, which have
    state but can never impact someone's grade, are excluded.
    """
    def _can_affect_grading(xblock_class):
        """Whether this XBlock class can itself score or hold scorable children."""
        return getattr(xblock_class, 'has_score', False) or getattr(xblock_class, 'has_children', False)

    return frozenset(
        category
        for category, xblock_class in XBlock.load_classes()
        if _can_affect_grading(xblock_class)
    )
......@@ -2,14 +2,21 @@
Test grade calculation.
"""
import ddt
from django.http import Http404
import itertools
from mock import patch
from nose.plugins.attrib import attr
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
from courseware.tests.helpers import get_request_for_user
from lms.djangoapps.course_blocks.api import get_course_blocks
from student.tests.factories import UserFactory
from student.models import CourseEnrollment
from xmodule.block_metadata_utils import display_name_with_default_escaped
from xmodule.graders import ProblemScore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
......@@ -17,6 +24,7 @@ from .utils import answer_problem
from .. import course_grades
from ..course_grades import summary as grades_summary
from ..new.course_grade import CourseGradeFactory
from ..new.subsection_grade import SubsectionGradeFactory
def _grade_with_errors(student, course):
......@@ -33,6 +41,17 @@ def _grade_with_errors(student, course):
return grades_summary(student, course)
def _create_problem_xml():
    """
    Creates and returns XML for a multiple choice response problem
    """
    factory = MultipleChoiceResponseXMLFactory()
    return factory.build_xml(
        question_text='The correct answer is Choice 3',
        choices=[False, False, True, False],
        choice_names=['choice_0', 'choice_1', 'choice_2', 'choice_3'],
    )
@attr(shard=1)
class TestGradeIteration(SharedModuleStoreTestCase):
"""
......@@ -134,6 +153,101 @@ class TestGradeIteration(SharedModuleStoreTestCase):
return students_to_gradesets, students_to_errors
@ddt.ddt
class TestWeightedProblems(SharedModuleStoreTestCase):
    """
    Test scores and grades with various problem weight values.
    """
    @classmethod
    def setUpClass(cls):
        # Build one course with a single chapter/sequential/vertical chain
        # and two identical multiple-choice problems under the vertical.
        super(TestWeightedProblems, cls).setUpClass()
        cls.course = CourseFactory.create()
        cls.chapter = ItemFactory.create(parent=cls.course, category="chapter", display_name="chapter")
        cls.sequential = ItemFactory.create(parent=cls.chapter, category="sequential", display_name="sequential")
        cls.vertical = ItemFactory.create(parent=cls.sequential, category="vertical", display_name="vertical1")
        problem_xml = _create_problem_xml()
        cls.problems = []
        for i in range(2):
            cls.problems.append(
                ItemFactory.create(
                    parent=cls.vertical,
                    category="problem",
                    display_name="problem_{}".format(i),
                    data=problem_xml,
                )
            )

    def setUp(self):
        super(TestWeightedProblems, self).setUp()
        self.user = UserFactory()
        self.request = get_request_for_user(self.user)

    def _verify_grades(self, raw_earned, raw_possible, weight, expected_score):
        """
        Verifies the computed grades are as expected.
        """
        # Re-weight the problems and publish, so the graded structure picks
        # up the new weight before grading.
        with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
            # pylint: disable=no-member
            for problem in self.problems:
                problem.weight = weight
                self.store.update_item(problem, self.user.id)
            self.store.publish(self.course.location, self.user.id)

        course_structure = get_course_blocks(self.request.user, self.course.location)

        # answer all problems
        for problem in self.problems:
            answer_problem(self.course, self.request, problem, score=raw_earned, max_value=raw_possible)

        # get grade
        subsection_grade = SubsectionGradeFactory(
            self.request.user, self.course, course_structure
        ).update(self.sequential)

        # verify all problem grades
        for problem in self.problems:
            problem_score = subsection_grade.locations_to_scores[problem.location]
            # Fill in the problem-specific fields the caller could not know.
            expected_score.display_name = display_name_with_default_escaped(problem)
            expected_score.module_id = problem.location
            self.assertEquals(problem_score, expected_score)

        # verify subsection grades
        self.assertEquals(subsection_grade.all_total.earned, expected_score.earned * len(self.problems))
        self.assertEquals(subsection_grade.all_total.possible, expected_score.possible * len(self.problems))

    @ddt.data(
        *itertools.product(
            (0.0, 0.5, 1.0, 2.0),  # raw_earned
            (-2.0, -1.0, 0.0, 0.5, 1.0, 2.0),  # raw_possible
            (-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0, 50.0, None),  # weight
        )
    )
    @ddt.unpack
    def test_problem_weight(self, raw_earned, raw_possible, weight):
        # Mirrors weighted_score: weight only applies when it is not None
        # and raw_possible is non-zero.
        use_weight = weight is not None and raw_possible != 0
        if use_weight:
            expected_w_earned = raw_earned / raw_possible * weight
            expected_w_possible = weight
        else:
            expected_w_earned = raw_earned
            expected_w_possible = raw_possible

        # graded requires a positive weighted denominator.
        expected_graded = expected_w_possible > 0

        expected_score = ProblemScore(
            raw_earned=raw_earned,
            raw_possible=raw_possible,
            weighted_earned=expected_w_earned,
            weighted_possible=expected_w_possible,
            weight=weight,
            graded=expected_graded,
            display_name=None,  # problem-specific, filled in by _verify_grades
            module_id=None,  # problem-specific, filled in by _verify_grades
        )
        self._verify_grades(raw_earned, raw_possible, weight, expected_score)
class TestScoreForModule(SharedModuleStoreTestCase):
"""
Test the method that calculates the score for a given block based on the
......
......@@ -73,8 +73,8 @@ class GradesModelTestCase(TestCase):
block_type='problem',
block_id='block_id_b'
)
self.record_a = BlockRecord(locator=self.locator_a, weight=1, max_score=10)
self.record_b = BlockRecord(locator=self.locator_b, weight=1, max_score=10)
self.record_a = BlockRecord(locator=self.locator_a, weight=1, raw_possible=10, graded=False)
self.record_b = BlockRecord(locator=self.locator_b, weight=1, raw_possible=10, graded=True)
@ddt.ddt
......@@ -90,29 +90,31 @@ class BlockRecordTest(GradesModelTestCase):
Tests creation of a BlockRecord.
"""
weight = 1
max_score = 10
raw_possible = 10
record = BlockRecord(
self.locator_a,
weight,
max_score,
raw_possible,
graded=False,
)
self.assertEqual(record.locator, self.locator_a)
@ddt.data(
    (0, 0, "0123456789abcdef", True),
    (1, 10, 'totally_a_real_block_key', False),
    ("BlockRecord is", "a dumb data store", "with no validation", None),
)
@ddt.unpack
def test_serialization(self, weight, raw_possible, block_key, graded):
    """
    Tests serialization of a BlockRecord using the _asdict() method.
    """
    record = BlockRecord(block_key, weight, raw_possible, graded)
    expected = OrderedDict([
        ("locator", block_key),
        ("weight", weight),
        ("raw_possible", raw_possible),
        ("graded", graded),
    ])
    self.assertEqual(expected, record._asdict())
......@@ -136,7 +138,12 @@ class VisibleBlocksTest(GradesModelTestCase):
for block_dict in list_of_block_dicts:
block_dict['locator'] = unicode(block_dict['locator']) # BlockUsageLocator is not json-serializable
expected_data = {
'blocks': [{'locator': unicode(self.record_a.locator), 'max_score': 10, 'weight': 1}],
'blocks': [{
'locator': unicode(self.record_a.locator),
'raw_possible': 10,
'weight': 1,
'graded': self.record_a.graded,
}],
'course_key': unicode(self.record_a.locator.course_key),
'version': BLOCK_RECORD_LIST_VERSION,
}
......
"""
Test saved subsection grade functionality.
"""
# pylint: disable=protected-access
import datetime
import ddt
from django.conf import settings
from django.db.utils import DatabaseError
from mock import patch
import pytz
from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
from courseware.tests.helpers import get_request_for_user
from courseware.tests.test_submitting_problems import ProblemSubmissionTestMixin
from lms.djangoapps.course_blocks.api import get_course_blocks
from lms.djangoapps.grades.config.tests.utils import persistent_grades_feature_flags
from openedx.core.lib.xblock_utils.test_utils import add_xml_block_from_file
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from ..models import PersistentSubsectionGrade
from ..new.course_grade import CourseGradeFactory
from ..new.subsection_grade import SubsectionGrade, SubsectionGradeFactory
from lms.djangoapps.grades.tests.utils import mock_get_score
from .utils import mock_get_score
class GradeTestBase(SharedModuleStoreTestCase):
......@@ -116,7 +122,7 @@ class SubsectionGradeFactoryTest(GradeTestBase):
) as mock_create_grade:
with patch(
'lms.djangoapps.grades.new.subsection_grade.SubsectionGradeFactory._get_saved_grade',
wraps=self.subsection_grade_factory._get_saved_grade # pylint: disable=protected-access
wraps=self.subsection_grade_factory._get_saved_grade
) as mock_get_saved_grade:
with self.assertNumQueries(14):
grade_a = self.subsection_grade_factory.create(self.sequence)
......@@ -205,8 +211,8 @@ class SubsectionGradeTest(GradeTestBase):
input_grade.init_from_structure(
self.request.user,
self.course_structure,
self.subsection_grade_factory._scores_client, # pylint: disable=protected-access
self.subsection_grade_factory._submissions_scores, # pylint: disable=protected-access
self.subsection_grade_factory._submissions_scores,
self.subsection_grade_factory._csm_scores,
)
self.assertEqual(PersistentSubsectionGrade.objects.count(), 0)
......@@ -224,9 +230,154 @@ class SubsectionGradeTest(GradeTestBase):
self.request.user,
saved_model,
self.course_structure,
self.subsection_grade_factory._scores_client, # pylint: disable=protected-access
self.subsection_grade_factory._submissions_scores, # pylint: disable=protected-access
self.subsection_grade_factory._submissions_scores,
self.subsection_grade_factory._csm_scores,
)
self.assertEqual(input_grade.url_name, loaded_grade.url_name)
self.assertEqual(input_grade.all_total, loaded_grade.all_total)
@ddt.ddt
class TestMultipleProblemTypesSubsectionScores(ModuleStoreTestCase, ProblemSubmissionTestMixin):
    """
    Test grading of different problem types.
    """
    # Baseline metadata applied to every problem; individual tests alter
    # specific keys via _get_altered_metadata.
    default_problem_metadata = {
        u'graded': True,
        u'weight': 2.5,
        u'max_score': 7.0,
        u'due': datetime.datetime(2099, 3, 15, 12, 30, 0, tzinfo=pytz.utc),
    }
    COURSE_NAME = u'Problem Type Test Course'
    COURSE_NUM = u'probtype'

    def setUp(self):
        super(TestMultipleProblemTypesSubsectionScores, self).setUp()
        password = u'test'
        self.student = UserFactory.create(is_staff=False, username=u'test_student', password=password)
        self.client.login(username=self.student.username, password=password)
        self.request = get_request_for_user(self.student)
        self.course = CourseFactory.create(
            display_name=self.COURSE_NAME,
            number=self.COURSE_NUM
        )
        self.chapter = ItemFactory.create(
            parent=self.course,
            category=u'chapter',
            display_name=u'Test Chapter'
        )
        self.seq1 = ItemFactory.create(
            parent=self.chapter,
            category=u'sequential',
            display_name=u'Test Sequential 1',
            graded=True
        )
        self.vert1 = ItemFactory.create(
            parent=self.seq1,
            category=u'vertical',
            display_name=u'Test Vertical 1'
        )

    def _get_fresh_subsection_score(self, course_structure, subsection):
        """
        Return a Score object for the specified subsection.

        Ensures that a stale cached value is not returned.
        """
        subsection_factory = SubsectionGradeFactory(
            self.student,
            course_structure=course_structure,
            course=self.course,
        )
        return subsection_factory.update(subsection)

    def _get_altered_metadata(self, alterations):
        """
        Returns a copy of the default_problem_metadata dict updated with the
        specified alterations.
        """
        metadata = self.default_problem_metadata.copy()
        metadata.update(alterations)
        return metadata

    def _get_score_with_alterations(self, alterations):
        """
        Given a dict of alterations to the default_problem_metadata, return
        the score when one correct problem (out of two) is submitted.
        """
        metadata = self._get_altered_metadata(alterations)
        add_xml_block_from_file(u'problem', u'capa.xml', parent=self.vert1, metadata=metadata)
        course_structure = get_course_blocks(self.student, self.course.location)
        self.submit_question_answer(u'problem', {u'2_1': u'Correct'})
        return self._get_fresh_subsection_score(course_structure, self.seq1)

    def test_score_submission_for_capa_problems(self):
        # With weight=2.5, one correct answer out of two is worth half the
        # weight: 1.25 out of 2.5.
        add_xml_block_from_file(u'problem', u'capa.xml', parent=self.vert1, metadata=self.default_problem_metadata)
        course_structure = get_course_blocks(self.student, self.course.location)
        score = self._get_fresh_subsection_score(course_structure, self.seq1)
        self.assertEqual(score.all_total.earned, 0.0)
        self.assertEqual(score.all_total.possible, 2.5)

        self.submit_question_answer(u'problem', {u'2_1': u'Correct'})
        score = self._get_fresh_subsection_score(course_structure, self.seq1)
        self.assertEqual(score.all_total.earned, 1.25)
        self.assertEqual(score.all_total.possible, 2.5)

    @ddt.data(
        (u'openassessment', u'openassessment.xml'),
        (u'coderesponse', u'coderesponse.xml'),
        (u'lti', u'lti.xml'),
        (u'library_content', u'library_content.xml'),
    )
    @ddt.unpack
    def test_loading_different_problem_types(self, block_type, filename):
        """
        Test that transformation works for various block types
        """
        metadata = self.default_problem_metadata.copy()
        if block_type == u'library_content':
            # Library content does not have a weight
            del metadata[u'weight']
        add_xml_block_from_file(block_type, filename, parent=self.vert1, metadata=metadata)

    @ddt.data(
        ({}, 1.25, 2.5),
        ({u'weight': 27}, 13.5, 27),
        ({u'weight': 1.0}, 0.5, 1.0),
        ({u'weight': 0.0}, 0.0, 0.0),
        ({u'weight': None}, 1.0, 2.0),
    )
    @ddt.unpack
    def test_weight_metadata_alterations(self, alterations, expected_earned, expected_possible):
        score = self._get_score_with_alterations(alterations)
        self.assertEqual(score.all_total.earned, expected_earned)
        self.assertEqual(score.all_total.possible, expected_possible)

    @ddt.data(
        ({u'graded': True}, 1.25, 2.5),
        ({u'graded': False}, 0.0, 0.0),
    )
    @ddt.unpack
    def test_graded_metadata_alterations(self, alterations, expected_earned, expected_possible):
        score = self._get_score_with_alterations(alterations)
        self.assertEqual(score.graded_total.earned, expected_earned)
        self.assertEqual(score.graded_total.possible, expected_possible)

    @ddt.data(
        {u'max_score': 99.3},
        {u'max_score': 1.0},
        {u'max_score': 0.0},
        {u'max_score': None},
    )
    def test_max_score_does_not_change_results(self, alterations):
        # max_score metadata is superseded by the CSM-stored total, so
        # altering it should not affect the computed score.
        expected_earned = 1.25
        expected_possible = 2.5
        score = self._get_score_with_alterations(alterations)
        self.assertEqual(score.all_total.earned, expected_earned)
        self.assertEqual(score.all_total.possible, expected_possible)
"""
Tests for grades.scores module.
"""
# pylint: disable=protected-access
from collections import namedtuple
import ddt
from django.test import TestCase
import itertools
from lms.djangoapps.grades.models import BlockRecord
import lms.djangoapps.grades.scores as scores
from lms.djangoapps.grades.transformer import GradesTransformer
from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator
from openedx.core.lib.block_structure.block_structure import BlockData
from xmodule.graders import ProblemScore
class TestScoredBlockTypes(TestCase):
    """
    Tests for the possibly_scored function.
    """
    # Expected categories: anything that can score or can contain children
    # that score.
    possibly_scored_block_types = {
        'course', 'chapter', 'sequential', 'vertical',
        'library_content', 'split_test', 'conditional', 'library', 'randomize',
        'problem', 'drag-and-drop-v2', 'openassessment', 'lti', 'lti_consumer',
        'videosequence', 'problemset', 'acid_parent', 'done', 'wrapper', 'edx_sga',
    }

    def test_block_types_possibly_scored(self):
        actual_types = scores._block_types_possibly_scored()
        self.assertSetEqual(self.possibly_scored_block_types, actual_types)

    def test_possibly_scored(self):
        course_key = CourseLocator(u'org', u'course', u'run')
        usage_keys = (
            BlockUsageLocator(course_key, block_type, 'mock_block_id')
            for block_type in self.possibly_scored_block_types
        )
        for usage_key in usage_keys:
            self.assertTrue(scores.possibly_scored(usage_key))
@ddt.ddt
class TestGetScore(TestCase):
    """
    Tests for get_score
    """
    display_name = 'test_name'
    location = 'test_location'

    # Lightweight stand-ins for each storage layer consulted by get_score,
    # plus the expected ProblemScore field values.
    SubmissionValue = namedtuple('SubmissionValue', 'exists, weighted_earned, weighted_possible')
    CSMValue = namedtuple('CSMValue', 'exists, raw_earned, raw_possible')
    PersistedBlockValue = namedtuple('PersistedBlockValue', 'exists, raw_possible, weight, graded')
    ContentBlockValue = namedtuple('ContentBlockValue', 'raw_possible, weight, explicit_graded')
    ExpectedResult = namedtuple(
        'ExpectedResult', 'raw_earned, raw_possible, weighted_earned, weighted_possible, weight, graded'
    )

    def _create_submissions_scores(self, submission_value):
        """
        Creates a stub result from the submissions API for the given values.
        """
        if submission_value.exists:
            return {self.location: (submission_value.weighted_earned, submission_value.weighted_possible)}
        else:
            return {}

    def _create_csm_scores(self, csm_value):
        """
        Creates a stub result from courseware student module for the given values.
        """
        if csm_value.exists:
            # Only the attributes get_score reads (correct, total) are stubbed.
            stub_csm_record = namedtuple('stub_csm_record', 'correct, total')
            return {self.location: stub_csm_record(correct=csm_value.raw_earned, total=csm_value.raw_possible)}
        else:
            return {}

    def _create_persisted_block(self, persisted_block_value):
        """
        Creates and returns a minimal BlockRecord object with the give values.
        """
        if persisted_block_value.exists:
            return BlockRecord(
                self.location,
                persisted_block_value.weight,
                persisted_block_value.raw_possible,
                persisted_block_value.graded,
            )
        else:
            return None

    def _create_block(self, content_block_value):
        """
        Creates and returns a minimal BlockData object with the give values.
        """
        block = BlockData(self.location)
        block.display_name = self.display_name
        block.weight = content_block_value.weight
        block_grades_transformer_data = block.transformer_data.get_or_create(GradesTransformer)
        block_grades_transformer_data.max_score = content_block_value.raw_possible
        setattr(
            block_grades_transformer_data,
            GradesTransformer.EXPLICIT_GRADED_FIELD_NAME,
            content_block_value.explicit_graded,
        )
        return block

    @ddt.data(
        # submissions _trumps_ other values; weighted and graded from persisted-block _trumps_ latest content values
        (
            SubmissionValue(exists=True, weighted_earned=50, weighted_possible=100),
            CSMValue(exists=True, raw_earned=10, raw_possible=40),
            PersistedBlockValue(exists=True, raw_possible=5, weight=40, graded=True),
            ContentBlockValue(raw_possible=1, weight=20, explicit_graded=False),
            ExpectedResult(
                raw_earned=None, raw_possible=None, weighted_earned=50, weighted_possible=100, weight=40, graded=True
            ),
        ),
        # same as above, except submissions doesn't exist; CSM values used
        (
            SubmissionValue(exists=False, weighted_earned=50, weighted_possible=100),
            CSMValue(exists=True, raw_earned=10, raw_possible=40),
            PersistedBlockValue(exists=True, raw_possible=5, weight=40, graded=True),
            ContentBlockValue(raw_possible=1, weight=20, explicit_graded=False),
            ExpectedResult(
                raw_earned=10, raw_possible=40, weighted_earned=10, weighted_possible=40, weight=40, graded=True
            ),
        ),
        # neither submissions nor CSM exist; Persisted values used
        (
            SubmissionValue(exists=False, weighted_earned=50, weighted_possible=100),
            CSMValue(exists=False, raw_earned=10, raw_possible=40),
            PersistedBlockValue(exists=True, raw_possible=5, weight=40, graded=True),
            ContentBlockValue(raw_possible=1, weight=20, explicit_graded=False),
            ExpectedResult(
                raw_earned=0, raw_possible=5, weighted_earned=0, weighted_possible=40, weight=40, graded=True
            ),
        ),
        # none of submissions, CSM, or persisted exist; Latest content values used
        (
            SubmissionValue(exists=False, weighted_earned=50, weighted_possible=100),
            CSMValue(exists=False, raw_earned=10, raw_possible=40),
            PersistedBlockValue(exists=False, raw_possible=5, weight=40, graded=True),
            ContentBlockValue(raw_possible=1, weight=20, explicit_graded=False),
            ExpectedResult(
                raw_earned=0, raw_possible=1, weighted_earned=0, weighted_possible=20, weight=20, graded=False
            ),
        ),
    )
    @ddt.unpack
    def test_get_score(self, submission_value, csm_value, persisted_block_value, block_value, expected_result):
        score = scores.get_score(
            self._create_submissions_scores(submission_value),
            self._create_csm_scores(csm_value),
            self._create_persisted_block(persisted_block_value),
            self._create_block(block_value),
        )
        expected_score = ProblemScore(
            display_name=self.display_name, module_id=self.location, **expected_result._asdict()
        )
        self.assertEquals(score, expected_score)
@ddt.ddt
class TestWeightedScore(TestCase):
    """
    Tests the helper method: weighted_score
    """
    @ddt.data(
        (0, 0, 1),
        (5, 0, 0),
        (10, 0, None),
        (0, 5, None),
        (5, 10, None),
        (10, 10, None),
    )
    @ddt.unpack
    def test_cannot_compute(self, earned, possible, weight):
        # When weight is missing or possible is 0, the raw values pass through.
        result = scores.weighted_score(earned, possible, weight)
        self.assertEquals(result, (earned, possible))

    @ddt.data(
        (0, 5, 0, (0, 0)),
        (5, 5, 0, (0, 0)),
        (2, 5, 1, (.4, 1)),
        (5, 5, 1, (1, 1)),
        (5, 5, 3, (3, 3)),
        (2, 4, 6, (3, 6)),
    )
    @ddt.unpack
    def test_computed(self, earned, possible, weight, expected):
        result = scores.weighted_score(earned, possible, weight)
        self.assertEquals(result, expected)

    def test_assert_on_invalid_r_possible(self):
        with self.assertRaises(AssertionError):
            scores.weighted_score(raw_earned=1, raw_possible=None, weight=1)
@ddt.ddt
class TestInternalGetGraded(TestCase):
    """
    Tests the internal helper method: _get_explicit_graded
    """
    def _create_block(self, explicit_graded_value):
        """
        Creates and returns a minimal BlockData object with the give value
        for explicit_graded.
        """
        block = BlockData('any_key')
        setattr(
            block.transformer_data.get_or_create(GradesTransformer),
            GradesTransformer.EXPLICIT_GRADED_FIELD_NAME,
            explicit_graded_value,
        )
        return block

    @ddt.data(None, True, False)
    def test_with_no_persisted_block(self, explicitly_graded_value):
        block = self._create_block(explicitly_graded_value)
        self.assertEquals(
            scores._get_graded_from_block(None, block),
            explicitly_graded_value is not False,  # defaults to True unless explicitly False
        )

    @ddt.data(
        *itertools.product((True, False), (True, False, None))
    )
    @ddt.unpack
    def test_with_persisted_block(self, persisted_block_value, block_value):
        block = self._create_block(block_value)
        # weight and raw_possible are irrelevant here; only graded matters.
        block_record = BlockRecord(block.location, 0, 0, persisted_block_value)
        self.assertEquals(
            scores._get_graded_from_block(block_record, block),
            block_record.graded,  # persisted value takes precedence
        )
@ddt.ddt
class TestInternalGetScoreFromBlock(TestCase):
    """
    Tests the internal helper method: _get_score_from_persisted_or_latest_block
    """
    def _create_block(self, raw_possible):
        """
        Creates and returns a minimal BlockData object with the give value
        for raw_possible.
        """
        block = BlockData('any_key')
        block.transformer_data.get_or_create(GradesTransformer).max_score = raw_possible
        return block

    def _verify_score_result(self, persisted_block, block, weight, expected_r_possible):
        """
        Verifies the result of _get_score_from_persisted_or_latest_block is as expected.
        """
        # pylint: disable=unbalanced-tuple-unpacking
        raw_earned, raw_possible, weighted_earned, weighted_possible = scores._get_score_from_persisted_or_latest_block(
            persisted_block, block, weight,
        )

        # Earned values are always 0.0: this storage is only consulted when
        # the user has no recorded attempt.
        self.assertEquals(raw_earned, 0.0)
        self.assertEquals(raw_possible, expected_r_possible)
        self.assertEquals(weighted_earned, 0.0)
        if weight is None or expected_r_possible == 0:
            # weight cannot be applied; weighted possible falls back to raw.
            self.assertEquals(weighted_possible, expected_r_possible)
        else:
            self.assertEquals(weighted_possible, weight)

    @ddt.data(
        *itertools.product((0, 1, 5), (None, 0, 1, 5))
    )
    @ddt.unpack
    def test_with_no_persisted_block(self, block_r_possible, weight):
        block = self._create_block(block_r_possible)
        self._verify_score_result(None, block, weight, block_r_possible)

    @ddt.data(
        *itertools.product((0, 1, 5), (None, 0, 1, 5), (None, 0, 1, 5))
    )
    @ddt.unpack
    def test_with_persisted_block(self, persisted_block_r_possible, block_r_possible, weight):
        block = self._create_block(block_r_possible)
        block_record = BlockRecord(block.location, 0, persisted_block_r_possible, False)
        self._verify_score_result(block_record, block, weight, persisted_block_r_possible)
......@@ -5,6 +5,7 @@ from contextlib import contextmanager
from mock import patch
from courseware.module_render import get_module
from courseware.model_data import FieldDataCache
from xmodule.graders import ProblemScore
@contextmanager
......@@ -23,7 +24,7 @@ def mock_get_score(earned=0, possible=1):
Mocks the get_score function to return a valid grade.
"""
with patch('lms.djangoapps.grades.new.subsection_grade.get_score') as mock_score:
mock_score.return_value = (earned, possible)
mock_score.return_value = ProblemScore(earned, possible, earned, possible, 1, True, None, None)
yield mock_score
......
......@@ -4,6 +4,7 @@ Enrollment operations for use by instructor APIs.
Does not include any access control, be sure to check access before calling.
"""
import crum
import json
import logging
from django.contrib.auth.models import User
......@@ -13,11 +14,14 @@ from django.core.mail import send_mail
from django.utils.translation import override as override_language
from course_modes.models import CourseMode
from student.models import CourseEnrollment, CourseEnrollmentAllowed
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from courseware.models import StudentModule
from edxmako.shortcuts import render_to_string
from grades.scores import weighted_score
from grades.signals.signals import SCORE_CHANGED
from lang_pref import LANGUAGE_KEY
from student.models import CourseEnrollment, CourseEnrollmentAllowed
from submissions import api as sub_api # installed from the edx-submissions repository
from student.models import anonymous_id_for_user
from openedx.core.djangoapps.user_api.models import UserPreference
......@@ -245,6 +249,7 @@ def reset_student_attempts(course_id, student, module_state_key, requesting_user
)
submission_cleared = True
except ItemNotFoundError:
block = None
log.warning("Could not find %s in modulestore when attempting to reset attempts.", module_state_key)
# Reset the student's score in the submissions API, if xblock.clear_student_state has not done so already.
......@@ -267,6 +272,7 @@ def reset_student_attempts(course_id, student, module_state_key, requesting_user
if delete_module:
module_to_reset.delete()
_fire_score_changed_for_block(course_id, student, block, module_state_key)
else:
_reset_module_attempts(module_to_reset)
......@@ -287,6 +293,43 @@ def _reset_module_attempts(studentmodule):
studentmodule.save()
def _fire_score_changed_for_block(course_id, student, block, module_state_key):
    """
    Fires a SCORE_CHANGED event for the given module. The earned points are
    always zero. We must retrieve the possible points from the XModule, as
    noted below.
    """
    # Default: a block with no score (or no block at all) reports 0/0.
    points_earned, points_possible = 0, 0
    if block and block.has_score:
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course_id=course_id,
            user=student,
            descriptor=block,
            depth=0
        )
        # For implementation reasons, max_score must be pulled from the
        # XModule even though the value is not user-specific, so bind the
        # descriptor's data to the current user first.
        bound_module = get_module_for_descriptor(
            user=student,
            request=crum.get_current_request(),
            descriptor=block,
            field_data_cache=field_data_cache,
            course_key=course_id
        )
        points_earned, points_possible = weighted_score(
            0, bound_module.max_score(), getattr(bound_module, 'weight', None)
        )
    SCORE_CHANGED.send(
        sender=None,
        points_possible=points_possible,
        points_earned=points_earned,
        user=student,
        course_id=course_id,
        usage_id=module_state_key
    )
def get_email_params(course, auto_enroll, secure=True, course_key=None, display_name=None):
"""
Generate parameters used when parsing email templates.
......
......@@ -8,6 +8,7 @@ import mock
from mock import patch
from abc import ABCMeta
from courseware.models import StudentModule
from courseware.tests.helpers import get_request_for_user
from django.conf import settings
from django.utils.translation import get_language
from django.utils.translation import override as override_language
......@@ -16,7 +17,11 @@ from ccx_keys.locator import CCXLocator
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
from grades.new.subsection_grade import SubsectionGradeFactory
from grades.tests.utils import answer_problem
from lms.djangoapps.ccx.tests.factories import CcxFactory
from lms.djangoapps.course_blocks.api import get_course_blocks
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
from student.models import CourseEnrollment, CourseEnrollmentAllowed
from student.roles import CourseCcxCoachRole
......@@ -494,6 +499,80 @@ class TestInstructorEnrollmentStudentModule(SharedModuleStoreTestCase):
self.assertEqual(unrelated_state['brains'], 'zombie')
class TestStudentModuleGrading(SharedModuleStoreTestCase):
    """
    Tests the effects of student module manipulations
    on student grades.
    """
    @classmethod
    def setUpClass(cls):
        # Builds a minimal graded course tree:
        #   course -> chapter -> sequential (graded) -> vertical -> problem
        # Creation order matters: each factory call references its parent.
        super(TestStudentModuleGrading, cls).setUpClass()
        cls.course = CourseFactory.create()
        cls.chapter = ItemFactory.create(
            parent=cls.course,
            category="chapter",
            display_name="Test Chapter"
        )
        cls.sequence = ItemFactory.create(
            parent=cls.chapter,
            category='sequential',
            display_name="Test Sequential 1",
            graded=True
        )
        cls.vertical = ItemFactory.create(
            parent=cls.sequence,
            category='vertical',
            display_name='Test Vertical 1'
        )
        # Single multiple-choice problem; choice_2 is the correct answer.
        problem_xml = MultipleChoiceResponseXMLFactory().build_xml(
            question_text='The correct answer is Choice 3',
            choices=[False, False, True, False],
            choice_names=['choice_0', 'choice_1', 'choice_2', 'choice_3']
        )
        cls.problem = ItemFactory.create(
            parent=cls.vertical,
            category="problem",
            display_name="Test Problem",
            data=problem_xml
        )
        # A request bound to a fresh user; answer_problem needs both below.
        cls.request = get_request_for_user(UserFactory())
        cls.user = cls.request.user
    def _get_subsection_grade_and_verify(self, all_earned, all_possible, graded_earned, graded_possible):
        """
        Retrieves the subsection grade and verifies that
        its scores match those expected.
        """
        # Recompute the grade from the current course blocks rather than
        # reading any cached value, so state changes are reflected.
        subsection_grade_factory = SubsectionGradeFactory(
            self.user,
            self.course,
            get_course_blocks(self.user, self.course.location)
        )
        grade = subsection_grade_factory.update(self.sequence)
        self.assertEqual(grade.all_total.earned, all_earned)
        self.assertEqual(grade.graded_total.earned, graded_earned)
        self.assertEqual(grade.all_total.possible, all_possible)
        self.assertEqual(grade.graded_total.possible, graded_possible)
    @patch('crum.get_current_request')
    def test_delete_student_state(self, _crum_mock):
        """
        Deleting a student's module state via reset_student_attempts
        (delete_module=True) should zero out the earned subsection grade.
        """
        problem_location = self.problem.location
        # Baseline: nothing answered yet, 0 earned out of 1 possible.
        self._get_subsection_grade_and_verify(0, 1, 0, 1)
        # Answer the problem correctly; grade becomes 1/1.
        answer_problem(course=self.course, request=self.request, problem=self.problem, score=1, max_value=1)
        self._get_subsection_grade_and_verify(1, 1, 1, 1)
        # Delete student state using the instructor dash
        reset_student_attempts(
            self.course.id,
            self.user,
            problem_location,
            requesting_user=self.user,
            delete_module=True,
        )
        # Verify that the student's grades are reset
        self._get_subsection_grade_and_verify(0, 1, 0, 1)
class EnrollmentObjects(object):
"""
Container for enrollment objects.
......
......@@ -948,7 +948,7 @@ def upload_problem_grade_report(_xmodule_instance_args, _entry_id, course_id, _t
final_grade = gradeset['percent']
# Only consider graded problems
problem_scores = {unicode(score.module_id): score for score, _ in gradeset['raw_scores'] if score.graded}
problem_scores = {unicode(score.module_id): score for score in gradeset['raw_scores'] if score.graded}
earned_possible_values = list()
for problem_id in problems:
try:
......
"""
Utilities for testing xblocks
"""
from django.conf import settings
from xmodule.modulestore.tests.factories import ItemFactory
TEST_DATA_DIR = settings.COMMON_ROOT / u'test/data'
def add_xml_block_from_file(block_type, filename, parent, metadata):
    """
    Create a block of the specified type with content included from the
    specified XML file.

    XML filenames are relative to common/test/data/blocks.

    Arguments:
        block_type: category of the block to create (e.g. 'problem').
        filename: name of the XML file, relative to common/test/data/blocks.
        parent: the block under which the new block is created.
        metadata: metadata settings for the new block.

    Returns:
        The created block.
    """
    # Open in binary mode so the UTF-8 bytes are decoded exactly once below,
    # without any platform-dependent newline translation in between.
    with open(TEST_DATA_DIR / u'blocks' / filename, 'rb') as datafile:
        return ItemFactory.create(
            parent=parent,
            category=block_type,
            data=datafile.read().decode('utf-8'),
            metadata=metadata,
            display_name=u'problem'
        )
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment