Commit aae9f9cc by Eric Fischer, committed by GitHub

Merge pull request #13676 from edx/release-10-06-conflict

Merge release to master
parents 6073ea2e 50d55bba
......@@ -4,7 +4,6 @@ Management commands for third_party_auth
"""
from django.core.management.base import BaseCommand, CommandError
import logging
from third_party_auth.models import SAMLConfiguration
from third_party_auth.tasks import fetch_saml_metadata
......@@ -16,9 +15,6 @@ class Command(BaseCommand):
parser.add_argument('--pull', action='store_true', help="Pull updated metadata from external IDPs")
def handle(self, *args, **options):
if not SAMLConfiguration.is_enabled():
raise CommandError("SAML support is disabled via SAMLConfiguration.")
if options['pull']:
log_handler = logging.StreamHandler(self.stdout)
log_handler.setLevel(logging.DEBUG)
......
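For reference, a hedged sketch of invoking this management command from test or shell code. The command name 'saml' is an assumption — the hunk shows only the module docstring, not the registered command name:

    from django.core.management import call_command

    # 'saml' is assumed to be the registered command name (not shown in this hunk);
    # pull=True maps onto the --pull store_true argument added above.
    call_command('saml', pull=True)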
......@@ -10,13 +10,67 @@ import logging
import random
import sys
from collections import namedtuple
log = logging.getLogger("edx.courseware")
# This is a tuple for holding scores, either from problems or sections.
# The section field holds either the name of the problem or the name of the section
Score = namedtuple("Score", "earned possible graded section module_id")
class ScoreBase(object):
"""
Abstract base class encapsulating the fields of a score value.
Fields common to all scores include:
display_name (string) - the display name of the module
module_id (UsageKey) - the location of the module
graded (boolean) - whether or not this module is graded
"""
__metaclass__ = abc.ABCMeta
def __init__(self, graded, display_name, module_id):
self.graded = graded
self.display_name = display_name
self.module_id = module_id
def __eq__(self, other):
if type(other) is type(self):
return self.__dict__ == other.__dict__
return False
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return u"{class_name}({fields})".format(class_name=self.__class__.__name__, fields=self.__dict__)
class ProblemScore(ScoreBase):
"""
Encapsulates the fields of a Problem's score.
In addition to the fields in ScoreBase, also includes:
raw_earned (float) - raw points earned on this problem
raw_possible (float) - raw points possible to earn on this problem
weighted_earned = earned (float) - weighted value of the points earned
weighted_possible = possible (float) - weighted possible points on this problem
weight (float) - weight of this problem
"""
def __init__(self, raw_earned, raw_possible, weighted_earned, weighted_possible, weight, *args, **kwargs):
super(ProblemScore, self).__init__(*args, **kwargs)
self.raw_earned = raw_earned
self.raw_possible = raw_possible
self.earned = weighted_earned
self.possible = weighted_possible
self.weight = weight
class AggregatedScore(ScoreBase):
"""
Encapsulates the fields of a Subsection's score.
In addition to the fields in ScoreBase, also includes:
tw_earned = earned - sum of all weighted earned values in the subsection
tw_possible = possible - sum of all weighted possible values in the subsection
"""
def __init__(self, tw_earned, tw_possible, *args, **kwargs):
super(AggregatedScore, self).__init__(*args, **kwargs)
self.earned = tw_earned
self.possible = tw_possible
def float_sum(iterable):
......@@ -26,13 +80,14 @@ def float_sum(iterable):
return float(sum(iterable))
def aggregate_scores(scores, section_name="summary", location=None):
def aggregate_scores(scores, display_name="summary", location=None):
"""
scores: A list of Score objects
scores: A list of ScoreBase objects
display_name: The display name for the score object
location: The location under which all objects in scores are located
returns: A tuple (all_total, graded_total).
all_total: A Score representing the total score summed over all input scores
graded_total: A Score representing the score summed over all graded input scores
all_total: A ScoreBase representing the total score summed over all input scores
graded_total: A ScoreBase representing the score summed over all graded input scores
"""
total_correct_graded = float_sum(score.earned for score in scores if score.graded)
total_possible_graded = float_sum(score.possible for score in scores if score.graded)
......@@ -41,10 +96,10 @@ def aggregate_scores(scores, section_name="summary", location=None):
total_possible = float_sum(score.possible for score in scores)
# regardless of whether it is graded
all_total = Score(total_correct, total_possible, False, section_name, location)
all_total = AggregatedScore(total_correct, total_possible, False, display_name, location)
# selecting only graded things
graded_total = Score(total_correct_graded, total_possible_graded, True, section_name, location)
graded_total = AggregatedScore(total_correct_graded, total_possible_graded, True, display_name, location)
return all_total, graded_total
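A minimal usage sketch of the reworked aggregation, assuming ProblemScore and aggregate_scores are importable from xmodule.graders and following the constructor orders above (all values are illustrative):

    from xmodule.graders import ProblemScore, aggregate_scores

    # ScoreBase positional args are (graded, display_name, module_id);
    # the weighted values land on the earned/possible attributes.
    scores = [
        ProblemScore(1, 2, 2.5, 5.0, 2.5, True, u'Problem 1', None),
        ProblemScore(1, 1, 1.0, 1.0, 1.0, False, u'Problem 2', None),
    ]
    all_total, graded_total = aggregate_scores(scores, display_name=u'summary')
    assert (all_total.earned, all_total.possible) == (3.5, 6.0)
    assert (graded_total.earned, graded_total.possible) == (2.5, 5.0)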
......@@ -220,7 +275,7 @@ class SingleSectionGrader(CourseGrader):
found_score = None
if self.type in grade_sheet:
for score in grade_sheet[self.type]:
if score.section == self.name:
if score.display_name == self.name:
found_score = score
break
......@@ -342,7 +397,7 @@ class AssignmentFormatGrader(CourseGrader):
else:
earned = scores[i].earned
possible = scores[i].possible
section_name = scores[i].section
section_name = scores[i].display_name
percentage = earned / possible
summary_format = u"{section_type} {index} - {name} - {percent:.0%} ({earned:.3n}/{possible:.3n})"
......
......@@ -95,8 +95,8 @@ class StaffDebugPage(PageObject):
This deletes a student's state for the problem
"""
if user:
self.q(css='input[id^=sd_fu_]').fill(user)
self.q(css='.staff-modal .staff-debug-sdelete').click()
self.q(css='input[id^=sd_fu_]').first.fill(user)
self.q(css='.staff-modal .staff-debug-sdelete').first.click()
def rescore(self, user=None):
"""
......
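A brief bok-choy sketch of the fixed page object in use, mirroring the acceptance test added below (self.browser comes from the test harness; the username is a placeholder):

    staff_debug_page = StaffDebugPage(self.browser)
    staff_debug_page.wait_for_page()
    # .first ensures only the first matching input/button is used,
    # even if the selector matches several elements on the page.
    staff_debug_page.delete_state(u'test_student')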
......@@ -15,6 +15,7 @@ from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.instructor_dashboard import InstructorDashboardPage
from ...pages.lms.problem import ProblemPage
from ...pages.lms.progress import ProgressPage
from ...pages.lms.staff_view import StaffPage, StaffDebugPage
from ...pages.studio.component_editor import ComponentEditorView
from ...pages.studio.utils import type_in_codemirror
from ...pages.studio.overview import CourseOutlinePage
......@@ -192,6 +193,22 @@ class PersistentGradesTest(ProgressPageBaseTest):
type_in_codemirror(self, 0, modified_content)
modal.q(css='.action-save').click()
def _delete_student_state_for_problem(self):
"""
As staff, clicks the "delete student state" button,
deleting the student user's state for the problem.
"""
with self._logged_in_session(staff=True):
self.courseware_page.visit()
staff_page = StaffPage(self.browser, self.course_id)
self.assertEqual(staff_page.staff_view_mode, "Staff")
staff_page.q(css='a.instructor-info-action').nth(1).click()
staff_debug_page = StaffDebugPage(self.browser)
staff_debug_page.wait_for_page()
staff_debug_page.delete_state(self.USERNAME)
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully deleted student state for user {0}'.format(self.USERNAME), msg)
@ddt.data(
_edit_problem_content,
_change_subsection_structure,
......@@ -223,6 +240,13 @@ class PersistentGradesTest(ProgressPageBaseTest):
self.assertEqual(self._get_problem_scores(), [(1, 1), (0, 1)])
self.assertEqual(self._get_section_score(), (1, 2))
def test_progress_page_updates_when_student_state_deleted(self):
self._check_progress_page_with_scored_problem()
self._delete_student_state_for_problem()
with self._logged_in_session():
self.assertEqual(self._get_problem_scores(), [(0, 1), (0, 1)])
self.assertEqual(self._get_section_score(), (0, 2))
class SubsectionGradingPolicyTest(ProgressPageBaseTest):
"""
......
<problem url_name="capa-optionresponse">
<optionresponse>
<optioninput options="('Correct', 'Incorrect')" correct="Correct"></optioninput>
<optioninput options="('Correct', 'Incorrect')" correct="Correct"></optioninput>
</optionresponse>
</problem>
<problem display_name="Exercise: apply to each 3" markdown="null" weight="5.0">
<text>
<p>
<b>ESTIMATED TIME TO COMPLETE: 4 minutes</b>
</p>
<pre>
&gt;&gt;&gt; print testList
[1, 16, 64, 81]
</pre>
</text>
<coderesponse queuename="Watcher-MITx-6.00x">
<textbox rows="10" cols="80" mode="python" tabsize="4"/>
<codeparam>
<initial_display>
# Your Code Here
</initial_display>
<answer_display>
def square(a):
return a * a
applyToEach(testList, square)
</answer_display>
<grader_payload>{"grader": "finger_exercises/L6/applyToEach3/grade_ate3.py"}</grader_payload>
</codeparam>
</coderesponse>
</problem>
<library_content display_name="Final Exam" has_score="true" max_count="25" source_library_id="library-v1:MSX+msx_cld213xfinalexam" source_library_version="577b5aca45064f068278faa0">
<problem/>
<problem/>
</library_content>
<lti launch_url="http://www.imsglobal.org/developers/LTI/test/v1p1/tool.php" lti_id="ims"/>
<openassessment url_name="0e2bbf6cc89e45d98b028fa4e2d46314" allow_file_upload="False">
<title></title>
<assessments>
<assessment name="peer-assessment" must_grade="1" must_be_graded_by="1"/>
<assessment name="self-assessment"/>
</assessments>
<rubric>
<prompt>
Censorship in the Libraries
'All of us can think of a book that we hope none of our children or any
other children have taken off the shelf. But if I have the right to remove
that book from the shelf -- that work I abhor -- then you also have exactly
the same right and so does everyone else. And then we have no books left on
the shelf for any of us.' --Katherine Paterson, Author
Write a persuasive essay to a newspaper reflecting your views on censorship
in libraries. Do you believe that certain materials, such as books, music,
movies, magazines, etc., should be removed from the shelves if they are
found offensive? Support your position with convincing arguments from your
own experience, observations, and/or reading.
Read for conciseness, clarity of thought, and form.
</prompt>
<criterion>
<name>Ideas</name>
<prompt>Determine if there is a unifying theme or main idea.</prompt>
<option points="0">
<name>Poor</name>
<explanation>
Difficult for the reader to discern the main idea.
Too brief or too repetitive to establish or maintain a focus.
</explanation>
</option>
<option points="3">
<name>Fair</name>
<explanation>
Presents a unifying theme or main idea, but may
include minor tangents. Stays somewhat focused on topic and
task.
</explanation>
</option>
<option points="5">
<name>Good</name>
<explanation>
Presents a unifying theme or main idea without going
off on tangents. Stays completely focused on topic and task.
</explanation>
</option>
</criterion>
<criterion>
<name>Content</name>
<prompt>Assess the content of the submission</prompt>
<option points="0">
<name>Poor</name>
<explanation>
Includes little information with few or no details or
unrelated details. Unsuccessful in attempts to explore any
facets of the topic.
</explanation>
</option>
<option points="1">
<name>Fair</name>
<explanation>
Includes little information and few or no details.
Explores only one or two facets of the topic.
</explanation>
</option>
<option points="3">
<name>Good</name>
<explanation>
Includes sufficient information and supporting
details. (Details may not be fully developed; ideas may be
listed.) Explores some facets of the topic.
</explanation>
</option>
<option points="3">
<name>Excellent</name>
<explanation>
Includes in-depth information and exceptional
supporting details that are fully developed. Explores all
facets of the topic.
</explanation>
</option>
</criterion>
</rubric>
</openassessment>
"""
Tests for MilestonesTransformer.
"""
from mock import patch
from mock import patch, Mock
from nose.plugins.attrib import attr
import ddt
......@@ -53,8 +53,7 @@ class MilestonesTransformerTestCase(CourseStructureTestCase, MilestonesTestCaseM
'course', 'A', 'B', 'C', 'ProctoredExam', 'D', 'E', 'PracticeExam', 'F', 'G', 'H', 'I', 'TimedExam', 'J', 'K'
)
# The special exams (proctored, practice, timed) are not visible to
# students via the Courses API.
# The special exams (proctored, practice, timed) should never be visible to students
ALL_BLOCKS_EXCEPT_SPECIAL = ('course', 'A', 'B', 'C', 'H', 'I')
def get_course_hierarchy(self):
......@@ -135,27 +134,27 @@ class MilestonesTransformerTestCase(CourseStructureTestCase, MilestonesTestCaseM
(
'H',
'A',
('course', 'A', 'B', 'C'),
'B',
('course', 'A', 'B', 'C',)
),
(
'H',
'ProctoredExam',
'D',
('course', 'A', 'B', 'C'),
),
)
@ddt.unpack
def test_gated(self, gated_block_ref, gating_block_ref, expected_blocks_before_completion):
def test_gated(self, gated_block_ref, gating_block_ref, gating_block_child, expected_blocks_before_completion):
"""
First, checks that a student cannot see the gated block when it is gated
by the gating block and no attempt has been made to complete the gating
block. Then, checks that the student can see the gated block after the
gating block has been completed.
First, checks that a student cannot see the gated block when it is gated by the gating block and no
attempt has been made to complete the gating block.
Then, checks that the student can see the gated block after the gating block has been completed.
expected_blocks_before_completion is the set of blocks we expect to be
visible to the student before the student has completed the gating block.
expected_blocks_before_completion is the set of blocks we expect to be visible to the student
before the student has completed the gating block.
The test data includes one special exam and one non-special block as the
gating blocks.
The test data includes one special exam and one non-special block as the gating blocks.
"""
self.course.enable_subsection_gating = True
self.setup_gated_section(self.blocks[gated_block_ref], self.blocks[gating_block_ref])
......@@ -166,16 +165,16 @@ class MilestonesTransformerTestCase(CourseStructureTestCase, MilestonesTestCaseM
# clear the request cache to simulate a new request
self.clear_caches()
# this call triggers reevaluation of prerequisites fulfilled by the
# gating block.
lms_gating_api.evaluate_prerequisite(
self.course,
self.user,
self.blocks[gating_block_ref].location,
100.0,
)
# mock the API that the LMS gating API calls to get each block's score, so that it always returns 1 (i.e. 100%)
with patch('gating.api.get_module_score', Mock(return_value=1)):
with self.assertNumQueries(3):
# this call triggers reevaluation of prerequisites fulfilled by the parent of the
# block passed in, so we pass in a child of the gating block
lms_gating_api.evaluate_prerequisite(
self.course,
UsageKey.from_string(unicode(self.blocks[gating_block_child].location)),
self.user.id)
with self.assertNumQueries(2):
self.get_blocks_and_check_against_expected(self.user, self.ALL_BLOCKS_EXCEPT_SPECIAL)
def test_staff_access(self):
......
......@@ -6,14 +6,35 @@ import json
from collections import defaultdict
from django.contrib.auth.models import User
from xmodule.modulestore.django import modulestore
from openedx.core.lib.gating import api as gating_api
from lms.djangoapps.grades.module_grades import get_module_score
from util import milestones_helpers
log = logging.getLogger(__name__)
def _get_xblock_parent(xblock, category=None):
"""
Returns the parent of the given XBlock. If an optional category is supplied,
traverses the ancestors of the XBlock and returns the first with the
given category.
Arguments:
xblock (XBlock): Get the parent of this XBlock
category (str): Find an ancestor with this category (e.g. sequential)
"""
parent = xblock.get_parent()
if parent and category:
if parent.category == category:
return parent
else:
return _get_xblock_parent(parent, category)
return parent
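A self-contained illustration of the traversal with stub objects (the _Stub class is purely hypothetical; real callers pass XBlocks):

    class _Stub(object):
        """Minimal stand-in for an XBlock with a category and a parent."""
        def __init__(self, category, parent=None):
            self.category = category
            self._parent = parent

        def get_parent(self):
            return self._parent

    course = _Stub('course')
    sequential = _Stub('sequential', parent=course)
    vertical = _Stub('vertical', parent=sequential)
    problem = _Stub('problem', parent=vertical)

    assert _get_xblock_parent(problem, 'sequential') is sequential  # walks up to the sequential
    assert _get_xblock_parent(problem) is vertical                  # no category: immediate parent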
@gating_api.gating_enabled(default=False)
def evaluate_prerequisite(course, user, subsection_usage_key, new_score):
def evaluate_prerequisite(course, prereq_content_key, user_id):
"""
Finds the parent subsection of the content in the course and evaluates
any milestone relationships attached to that subsection. If the calculated
......@@ -21,40 +42,44 @@ def evaluate_prerequisite(course, user, subsection_usage_key, new_score):
dependent subsections, the related milestone will be fulfilled for the user.
Arguments:
user (User): User for which evaluation should occur
user_id (int): ID of User for which evaluation should occur
course (CourseModule): The course
subsection_usage_key (UsageKey): Usage key of the updated subsection
new_score (float): New score of the given subsection, in percentage.
prereq_content_key (UsageKey): The prerequisite content usage key
Returns:
None
"""
prereq_milestone = gating_api.get_gating_milestone(
course.id,
subsection_usage_key,
'fulfills'
)
if prereq_milestone:
gated_content_milestones = defaultdict(list)
for milestone in gating_api.find_gating_milestones(course.id, None, 'requires'):
gated_content_milestones[milestone['id']].append(milestone)
gated_content = gated_content_milestones.get(prereq_milestone['id'])
if gated_content:
for milestone in gated_content:
# Default minimum score to 100
min_score = 100.0
requirements = milestone.get('requirements')
if requirements:
try:
min_score = float(requirements.get('min_score'))
except (ValueError, TypeError):
log.warning(
'Failed to find minimum score for gating milestone %s, defaulting to 100',
json.dumps(milestone)
)
if new_score >= min_score:
milestones_helpers.add_user_milestone({'id': user.id}, prereq_milestone)
else:
milestones_helpers.remove_user_milestone({'id': user.id}, prereq_milestone)
xblock = modulestore().get_item(prereq_content_key)
sequential = _get_xblock_parent(xblock, 'sequential')
if sequential:
prereq_milestone = gating_api.get_gating_milestone(
course.id,
sequential.location.for_branch(None),
'fulfills'
)
if prereq_milestone:
gated_content_milestones = defaultdict(list)
for milestone in gating_api.find_gating_milestones(course.id, None, 'requires'):
gated_content_milestones[milestone['id']].append(milestone)
gated_content = gated_content_milestones.get(prereq_milestone['id'])
if gated_content:
user = User.objects.get(id=user_id)
score = get_module_score(user, course, sequential) * 100
for milestone in gated_content:
# Default minimum score to 100
min_score = 100
requirements = milestone.get('requirements')
if requirements:
try:
min_score = int(requirements.get('min_score'))
except (ValueError, TypeError):
log.warning(
'Failed to find minimum score for gating milestone %s, defaulting to 100',
json.dumps(milestone)
)
if score >= min_score:
milestones_helpers.add_user_milestone({'id': user_id}, prereq_milestone)
else:
milestones_helpers.remove_user_milestone({'id': user_id}, prereq_milestone)
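A hedged sketch of the new call pattern (course and user are placeholders from the caller's context; the usage key string is hypothetical):

    from opaque_keys.edx.keys import UsageKey
    from gating import api as gating_api

    # The new signature takes the course, the usage key of the scored block
    # (any descendant of the gating subsection), and the user's id; the
    # subsection score is recomputed internally via get_module_score.
    scored_block = UsageKey.from_string(
        u'block-v1:edX+DemoX+Demo_Course+type@problem+block@sample'
    )
    gating_api.evaluate_prerequisite(course, scored_block, user.id)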
......@@ -2,12 +2,14 @@
Signal handlers for the gating djangoapp
"""
from django.dispatch import receiver
from lms.djangoapps.grades.signals.signals import SUBSECTION_SCORE_UPDATED
from opaque_keys.edx.keys import CourseKey, UsageKey
from xmodule.modulestore.django import modulestore
from lms.djangoapps.grades.signals.signals import SCORE_CHANGED
from gating import api as gating_api
@receiver(SUBSECTION_SCORE_UPDATED)
def handle_subsection_score_updated(**kwargs):
@receiver(SCORE_CHANGED)
def handle_score_changed(**kwargs):
"""
Receives the SCORE_CHANGED signal sent by LMS when a student's score has changed
for a given component and triggers the evaluation of any milestone relationships
......@@ -19,13 +21,10 @@ def handle_subsection_score_updated(**kwargs):
Returns:
None
"""
course = kwargs['course']
course = modulestore().get_course(CourseKey.from_string(kwargs.get('course_id')))
if course.enable_subsection_gating:
subsection_grade = kwargs['subsection_grade']
new_score = subsection_grade.graded_total.earned / subsection_grade.graded_total.possible * 100.0
gating_api.evaluate_prerequisite(
course,
kwargs['user'],
subsection_grade.location,
new_score,
UsageKey.from_string(kwargs.get('usage_id')),
kwargs.get('user').id,
)
"""
Unit tests for gating.signals module
"""
from mock import patch, MagicMock
from mock import patch
from opaque_keys.edx.keys import UsageKey
from student.tests.factories import UserFactory
......@@ -9,7 +9,7 @@ from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.django import modulestore
from gating.signals import handle_subsection_score_updated
from gating.signals import handle_score_changed
class TestHandleScoreChanged(ModuleStoreTestCase):
......@@ -24,22 +24,28 @@ class TestHandleScoreChanged(ModuleStoreTestCase):
@patch('gating.signals.gating_api.evaluate_prerequisite')
def test_gating_enabled(self, mock_evaluate):
""" Test evaluate_prerequisite is called when course.enable_subsection_gating is True """
self.course.enable_subsection_gating = True
modulestore().update_item(self.course, 0)
handle_subsection_score_updated(
handle_score_changed(
sender=None,
course=self.course,
points_possible=1,
points_earned=1,
user=self.user,
subsection_grade=MagicMock(),
course_id=unicode(self.course.id),
usage_id=unicode(self.test_usage_key)
)
mock_evaluate.assert_called()
mock_evaluate.assert_called_with(self.course, self.test_usage_key, self.user.id) # pylint: disable=no-member
@patch('gating.signals.gating_api.evaluate_prerequisite')
def test_gating_disabled(self, mock_evaluate):
handle_subsection_score_updated(
""" Test evaluate_prerequisite is not called when course.enable_subsection_gating is False """
handle_score_changed(
sender=None,
course=self.course,
points_possible=1,
points_earned=1,
user=self.user,
subsection_grade=MagicMock(),
course_id=unicode(self.course.id),
usage_id=unicode(self.test_usage_key)
)
mock_evaluate.assert_not_called()
......@@ -30,7 +30,7 @@ BLOCK_RECORD_LIST_VERSION = 1
# Used to serialize information about a block at the time it was used in
# grade calculation.
BlockRecord = namedtuple('BlockRecord', ['locator', 'weight', 'max_score'])
BlockRecord = namedtuple('BlockRecord', ['locator', 'weight', 'raw_possible', 'graded'])
class BlockRecordList(tuple):
......@@ -101,7 +101,8 @@ class BlockRecordList(tuple):
BlockRecord(
locator=UsageKey.from_string(block["locator"]).replace(course_key=course_key),
weight=block["weight"],
max_score=block["max_score"],
raw_possible=block["raw_possible"],
graded=block["graded"],
)
for block in block_dicts
)
......
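A small sketch of the updated record shape (assuming BlockRecord is imported from lms.djangoapps.grades.models; the locator string is hypothetical):

    from lms.djangoapps.grades.models import BlockRecord

    record = BlockRecord(
        locator=u'block-v1:Org+Course+Run+type@problem+block@abc',
        weight=1,
        raw_possible=10,  # renamed from max_score
        graded=True,      # newly tracked per block
    )
    assert record._asdict()['raw_possible'] == 10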
"""
Functionality for module-level grades.
"""
# TODO The score computation in this file is not accurate
# since it is summing percentages instead of computing a
# final percentage of the individual sums.
# Regardless, this file and its code should be removed soon
# as part of TNL-5062.
from django.test.client import RequestFactory
from courseware.model_data import FieldDataCache, ScoresClient
from courseware.module_render import get_module_for_descriptor
from opaque_keys.edx.locator import BlockUsageLocator
from util.module_utils import yield_dynamic_descriptor_descendants
def _get_mock_request(student):
"""
Make a fake request because grading code expects to be able to look at
the request. We have to attach the correct user to the request before
grading that student.
"""
request = RequestFactory().get('/')
request.user = student
return request
def _calculate_score_for_modules(user_id, course, modules):
"""
Calculates the cumulative score of the given modules, as a fraction between 0 and 1
"""
# Remove branch and version from the module locators; otherwise the student
# module would not return scores, since the usage keys would not match.
modules = [m for m in modules]  # materialize the generator so it can be iterated more than once
locations = [
BlockUsageLocator(
course_key=course.id,
block_type=module.location.block_type,
block_id=module.location.block_id
)
if isinstance(module.location, BlockUsageLocator) and module.location.version
else module.location
for module in modules
]
scores_client = ScoresClient(course.id, user_id)
scores_client.fetch_scores(locations)
# Iterate over all of the modules to get the user's score percentage for each of them
module_percentages = []
ignore_categories = ['course', 'chapter', 'sequential', 'vertical', 'randomize', 'library_content']
for index, module in enumerate(modules):
if module.category not in ignore_categories and (module.graded or module.has_score):
module_score = scores_client.get(locations[index])
if module_score:
correct = module_score.correct or 0
total = module_score.total or 1
module_percentages.append(correct / total)
return sum(module_percentages) / float(len(module_percentages)) if module_percentages else 0
def get_module_score(user, course, module):
"""
Collects all children of the given module and calculates the cumulative
score for this set of modules for the given user.
Arguments:
user (User): The user
course (CourseModule): The course
module (XBlock): The module
Returns:
float: The cumulative score
"""
def inner_get_module(descriptor):
"""
Delegate to get_module_for_descriptor
"""
field_data_cache = FieldDataCache([descriptor], course.id, user)
return get_module_for_descriptor(
user,
_get_mock_request(user),
descriptor,
field_data_cache,
course.id,
course=course
)
modules = yield_dynamic_descriptor_descendants(
module,
user.id,
inner_get_module
)
return _calculate_score_for_modules(user.id, course, modules)
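A hedged call sketch (user, course, and sequential are placeholders obtained elsewhere; the function returns a 0-1 fraction, which the gating code above multiplies by 100):

    from lms.djangoapps.grades.module_grades import get_module_score

    fraction = get_module_score(user, course, sequential)  # e.g. 0.75
    percent = fraction * 100  # as consumed by gating's evaluate_prerequisite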
......@@ -43,15 +43,15 @@ class CourseGrade(object):
return subsections_by_format
@lazy
def locations_to_weighted_scores(self):
def locations_to_scores(self):
"""
Returns a dict of problem scores keyed by their locations.
"""
locations_to_weighted_scores = {}
locations_to_scores = {}
for chapter in self.chapter_grades:
for subsection_grade in chapter['sections']:
locations_to_weighted_scores.update(subsection_grade.locations_to_weighted_scores)
return locations_to_weighted_scores
locations_to_scores.update(subsection_grade.locations_to_scores)
return locations_to_scores
@lazy
def grade_value(self):
......@@ -113,7 +113,7 @@ class CourseGrade(object):
grade_summary['percent'] = self.percent
grade_summary['grade'] = self.letter_grade
grade_summary['totaled_scores'] = self.subsection_grade_totals_by_format
grade_summary['raw_scores'] = list(self.locations_to_weighted_scores.itervalues())
grade_summary['raw_scores'] = list(self.locations_to_scores.itervalues())
return grade_summary
......@@ -141,7 +141,7 @@ class CourseGrade(object):
subsections_total = sum(len(x) for x in self.subsection_grade_totals_by_format.itervalues())
subsections_read = len(subsection_grade_factory._unsaved_subsection_grades) # pylint: disable=protected-access
subsections_created = subsections_total - subsections_read
blocks_total = len(self.locations_to_weighted_scores)
blocks_total = len(self.locations_to_scores)
if not read_only:
subsection_grade_factory.bulk_create_unsaved()
......@@ -166,8 +166,8 @@ class CourseGrade(object):
composite module (a vertical or section) the scores will be the sums of
all scored problems that are children of the chosen location.
"""
if location in self.locations_to_weighted_scores:
score, _ = self.locations_to_weighted_scores[location]
if location in self.locations_to_scores:
score = self.locations_to_scores[location]
return score.earned, score.possible
children = self.course_structure.get_children(location)
earned = 0.0
......
......@@ -13,7 +13,7 @@ from openedx.core.djangoapps.content.block_structure.api import get_course_in_ca
from student.models import user_by_anonymous_id
from submissions.models import score_set, score_reset
from .signals import SCORE_CHANGED, SUBSECTION_SCORE_UPDATED
from .signals import SCORE_CHANGED
from ..config.models import PersistentGradesEnabledFlag
from ..transformer import GradesTransformer
from ..new.subsection_grade import SubsectionGradeFactory
......@@ -95,6 +95,8 @@ def recalculate_subsection_grade_handler(sender, **kwargs): # pylint: disable=u
"""
student = kwargs['user']
course_key = CourseLocator.from_string(kwargs['course_id'])
if not PersistentGradesEnabledFlag.feature_enabled(course_key):
return
scored_block_usage_key = UsageKey.from_string(kwargs['usage_id']).replace(course_key=course_key)
collected_block_structure = get_course_in_cache(course_key)
......@@ -113,12 +115,6 @@ def recalculate_subsection_grade_handler(sender, **kwargs): # pylint: disable=u
subsection_usage_key,
collected_block_structure=collected_block_structure,
)
subsection_grade = subsection_grade_factory.update(
subsection_grade_factory.update(
transformed_subsection_structure[subsection_usage_key], transformed_subsection_structure
)
SUBSECTION_SCORE_UPDATED.send(
sender=None,
course=course,
user=student,
subsection_grade=subsection_grade,
)
......@@ -14,20 +14,8 @@ SCORE_CHANGED = Signal(
providing_args=[
'points_possible', # Maximum score available for the exercise
'points_earned', # Score obtained by the user
'user', # User object
'user_id', # Integer User ID
'course_id', # Unicode string representing the course
'usage_id' # Unicode string indicating the courseware instance
]
)
# Signal that indicates that a user's score for a subsection has been updated.
# This is a downstream signal of SCORE_CHANGED sent for each affected containing
# subsection.
SUBSECTION_SCORE_UPDATED = Signal(
providing_args=[
'course', # Course object
'user', # User object
'subsection_grade', # SubsectionGrade object
]
)
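A minimal receiver sketch against the surviving SCORE_CHANGED signal (handler name and logging are illustrative only):

    import logging

    from django.dispatch import receiver

    from lms.djangoapps.grades.signals.signals import SCORE_CHANGED

    log = logging.getLogger(__name__)

    @receiver(SCORE_CHANGED)
    def log_score_changed(sender, **kwargs):
        # kwargs carries points_earned/points_possible plus unicode course_id/usage_id
        log.info(
            u'score changed to %s/%s in %s',
            kwargs.get('points_earned'),
            kwargs.get('points_possible'),
            kwargs.get('usage_id'),
        )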
......@@ -73,8 +73,8 @@ class GradesModelTestCase(TestCase):
block_type='problem',
block_id='block_id_b'
)
self.record_a = BlockRecord(locator=self.locator_a, weight=1, max_score=10)
self.record_b = BlockRecord(locator=self.locator_b, weight=1, max_score=10)
self.record_a = BlockRecord(locator=self.locator_a, weight=1, raw_possible=10, graded=False)
self.record_b = BlockRecord(locator=self.locator_b, weight=1, raw_possible=10, graded=True)
@ddt.ddt
......@@ -90,29 +90,31 @@ class BlockRecordTest(GradesModelTestCase):
Tests creation of a BlockRecord.
"""
weight = 1
max_score = 10
raw_possible = 10
record = BlockRecord(
self.locator_a,
weight,
max_score,
raw_possible,
graded=False,
)
self.assertEqual(record.locator, self.locator_a)
@ddt.data(
(0, 0, "0123456789abcdef"),
(1, 10, 'totally_a_real_block_key'),
("BlockRecord is", "a dumb data store", "with no validation"),
(0, 0, "0123456789abcdef", True),
(1, 10, 'totally_a_real_block_key', False),
("BlockRecord is", "a dumb data store", "with no validation", None),
)
@ddt.unpack
def test_serialization(self, weight, max_score, block_key):
def test_serialization(self, weight, raw_possible, block_key, graded):
"""
Tests serialization of a BlockRecord using the _asdict() method.
"""
record = BlockRecord(block_key, weight, max_score)
record = BlockRecord(block_key, weight, raw_possible, graded)
expected = OrderedDict([
("locator", block_key),
("weight", weight),
("max_score", max_score),
("raw_possible", raw_possible),
("graded", graded),
])
self.assertEqual(expected, record._asdict())
......@@ -136,7 +138,12 @@ class VisibleBlocksTest(GradesModelTestCase):
for block_dict in list_of_block_dicts:
block_dict['locator'] = unicode(block_dict['locator']) # BlockUsageLocator is not json-serializable
expected_data = {
'blocks': [{'locator': unicode(self.record_a.locator), 'max_score': 10, 'weight': 1}],
'blocks': [{
'locator': unicode(self.record_a.locator),
'raw_possible': 10,
'weight': 1,
'graded': self.record_a.graded,
}],
'course_key': unicode(self.record_a.locator.course_key),
'version': BLOCK_RECORD_LIST_VERSION,
}
......
"""
Test saved subsection grade functionality.
"""
# pylint: disable=protected-access
import datetime
import ddt
from django.conf import settings
from django.db.utils import DatabaseError
from mock import patch
import pytz
from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
from courseware.tests.helpers import get_request_for_user
from courseware.tests.test_submitting_problems import ProblemSubmissionTestMixin
from lms.djangoapps.course_blocks.api import get_course_blocks
from lms.djangoapps.grades.config.tests.utils import persistent_grades_feature_flags
from openedx.core.lib.xblock_utils.test_utils import add_xml_block_from_file
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from ..models import PersistentSubsectionGrade
from ..new.course_grade import CourseGradeFactory
from ..new.subsection_grade import SubsectionGrade, SubsectionGradeFactory
from lms.djangoapps.grades.tests.utils import mock_get_score
from .utils import mock_get_score
class GradeTestBase(SharedModuleStoreTestCase):
......@@ -116,7 +122,7 @@ class SubsectionGradeFactoryTest(GradeTestBase):
) as mock_create_grade:
with patch(
'lms.djangoapps.grades.new.subsection_grade.SubsectionGradeFactory._get_saved_grade',
wraps=self.subsection_grade_factory._get_saved_grade # pylint: disable=protected-access
wraps=self.subsection_grade_factory._get_saved_grade
) as mock_get_saved_grade:
with self.assertNumQueries(14):
grade_a = self.subsection_grade_factory.create(self.sequence)
......@@ -205,8 +211,8 @@ class SubsectionGradeTest(GradeTestBase):
input_grade.init_from_structure(
self.request.user,
self.course_structure,
self.subsection_grade_factory._scores_client, # pylint: disable=protected-access
self.subsection_grade_factory._submissions_scores, # pylint: disable=protected-access
self.subsection_grade_factory._submissions_scores,
self.subsection_grade_factory._csm_scores,
)
self.assertEqual(PersistentSubsectionGrade.objects.count(), 0)
......@@ -224,9 +230,154 @@ class SubsectionGradeTest(GradeTestBase):
self.request.user,
saved_model,
self.course_structure,
self.subsection_grade_factory._scores_client, # pylint: disable=protected-access
self.subsection_grade_factory._submissions_scores, # pylint: disable=protected-access
self.subsection_grade_factory._submissions_scores,
self.subsection_grade_factory._csm_scores,
)
self.assertEqual(input_grade.url_name, loaded_grade.url_name)
self.assertEqual(input_grade.all_total, loaded_grade.all_total)
@ddt.ddt
class TestMultipleProblemTypesSubsectionScores(ModuleStoreTestCase, ProblemSubmissionTestMixin):
"""
Test grading of different problem types.
"""
default_problem_metadata = {
u'graded': True,
u'weight': 2.5,
u'max_score': 7.0,
u'due': datetime.datetime(2099, 3, 15, 12, 30, 0, tzinfo=pytz.utc),
}
COURSE_NAME = u'Problem Type Test Course'
COURSE_NUM = u'probtype'
def setUp(self):
super(TestMultipleProblemTypesSubsectionScores, self).setUp()
password = u'test'
self.student = UserFactory.create(is_staff=False, username=u'test_student', password=password)
self.client.login(username=self.student.username, password=password)
self.request = get_request_for_user(self.student)
self.course = CourseFactory.create(
display_name=self.COURSE_NAME,
number=self.COURSE_NUM
)
self.chapter = ItemFactory.create(
parent=self.course,
category=u'chapter',
display_name=u'Test Chapter'
)
self.seq1 = ItemFactory.create(
parent=self.chapter,
category=u'sequential',
display_name=u'Test Sequential 1',
graded=True
)
self.vert1 = ItemFactory.create(
parent=self.seq1,
category=u'vertical',
display_name=u'Test Vertical 1'
)
def _get_fresh_subsection_score(self, course_structure, subsection):
"""
Return a Score object for the specified subsection.
Ensures that a stale cached value is not returned.
"""
subsection_factory = SubsectionGradeFactory(
self.student,
course_structure=course_structure,
course=self.course,
)
return subsection_factory.update(subsection)
def _get_altered_metadata(self, alterations):
"""
Returns a copy of the default_problem_metadata dict updated with the
specified alterations.
"""
metadata = self.default_problem_metadata.copy()
metadata.update(alterations)
return metadata
def _get_score_with_alterations(self, alterations):
"""
Given a dict of alterations to the default_problem_metadata, return
the score when one correct problem (out of two) is submitted.
"""
metadata = self._get_altered_metadata(alterations)
add_xml_block_from_file(u'problem', u'capa.xml', parent=self.vert1, metadata=metadata)
course_structure = get_course_blocks(self.student, self.course.location)
self.submit_question_answer(u'problem', {u'2_1': u'Correct'})
return self._get_fresh_subsection_score(course_structure, self.seq1)
def test_score_submission_for_capa_problems(self):
add_xml_block_from_file(u'problem', u'capa.xml', parent=self.vert1, metadata=self.default_problem_metadata)
course_structure = get_course_blocks(self.student, self.course.location)
score = self._get_fresh_subsection_score(course_structure, self.seq1)
self.assertEqual(score.all_total.earned, 0.0)
self.assertEqual(score.all_total.possible, 2.5)
self.submit_question_answer(u'problem', {u'2_1': u'Correct'})
score = self._get_fresh_subsection_score(course_structure, self.seq1)
self.assertEqual(score.all_total.earned, 1.25)
self.assertEqual(score.all_total.possible, 2.5)
@ddt.data(
(u'openassessment', u'openassessment.xml'),
(u'coderesponse', u'coderesponse.xml'),
(u'lti', u'lti.xml'),
(u'library_content', u'library_content.xml'),
)
@ddt.unpack
def test_loading_different_problem_types(self, block_type, filename):
"""
Test that transformation works for various block types
"""
metadata = self.default_problem_metadata.copy()
if block_type == u'library_content':
# Library content does not have a weight
del metadata[u'weight']
add_xml_block_from_file(block_type, filename, parent=self.vert1, metadata=metadata)
@ddt.data(
({}, 1.25, 2.5),
({u'weight': 27}, 13.5, 27),
({u'weight': 1.0}, 0.5, 1.0),
({u'weight': 0.0}, 0.0, 0.0),
({u'weight': None}, 1.0, 2.0),
)
@ddt.unpack
def test_weight_metadata_alterations(self, alterations, expected_earned, expected_possible):
score = self._get_score_with_alterations(alterations)
self.assertEqual(score.all_total.earned, expected_earned)
self.assertEqual(score.all_total.possible, expected_possible)
@ddt.data(
({u'graded': True}, 1.25, 2.5),
({u'graded': False}, 0.0, 0.0),
)
@ddt.unpack
def test_graded_metadata_alterations(self, alterations, expected_earned, expected_possible):
score = self._get_score_with_alterations(alterations)
self.assertEqual(score.graded_total.earned, expected_earned)
self.assertEqual(score.graded_total.possible, expected_possible)
@ddt.data(
{u'max_score': 99.3},
{u'max_score': 1.0},
{u'max_score': 0.0},
{u'max_score': None},
)
def test_max_score_does_not_change_results(self, alterations):
expected_earned = 1.25
expected_possible = 2.5
score = self._get_score_with_alterations(alterations)
self.assertEqual(score.all_total.earned, expected_earned)
self.assertEqual(score.all_total.possible, expected_possible)
......@@ -244,7 +244,7 @@ class ScoreChangedUpdatesSubsectionGradeTest(ModuleStoreTestCase):
with self.store.default_store(default_store):
self.set_up_course(enable_subsection_grades=False)
self.assertFalse(PersistentGradesEnabledFlag.feature_enabled(self.course.id))
with check_mongo_calls(2) and self.assertNumQueries(3):
with check_mongo_calls(2) and self.assertNumQueries(0):
recalculate_subsection_grade_handler(None, **self.score_changed_kwargs)
@skip("Pending completion of TNL-5089")
......
......@@ -5,6 +5,7 @@ from contextlib import contextmanager
from mock import patch
from courseware.module_render import get_module
from courseware.model_data import FieldDataCache
from xmodule.graders import ProblemScore
@contextmanager
......@@ -23,20 +24,18 @@ def mock_get_score(earned=0, possible=1):
Mocks the get_score function to return a valid grade.
"""
with patch('lms.djangoapps.grades.new.subsection_grade.get_score') as mock_score:
mock_score.return_value = (earned, possible)
mock_score.return_value = ProblemScore(earned, possible, earned, possible, 1, True, None, None)
yield mock_score
def answer_problem(course, request, problem, score=1, max_value=1):
"""
Records an answer for the given problem.
Records a correct answer for the given problem.
Arguments:
course (Course): Course object, the course the required problem is in
request (Request): request Object
problem (xblock): xblock object, the problem to be answered
score (float): The new score for the problem
max_value (float): The new maximum score for the problem
"""
user = request.user
......@@ -47,10 +46,11 @@ def answer_problem(course, request, problem, score=1, max_value=1):
course,
depth=2
)
# pylint: disable=protected-access
module = get_module(
user,
request,
problem.location,
problem.scope_ids.usage_id,
field_data_cache,
)
module.runtime.publish(problem, 'grade', grade_dict)
)._xmodule
module.system.publish(problem, 'grade', grade_dict)
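A hedged sketch of the updated helper in use (subsection_grade_factory and sequence would come from a test case like GradeTestBase above):

    from lms.djangoapps.grades.tests.utils import mock_get_score

    with mock_get_score(earned=1, possible=2):
        # get_score now returns a full ProblemScore instead of a bare (earned, possible) tuple
        grade = subsection_grade_factory.update(sequence)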
......@@ -3,6 +3,7 @@ Grades Transformer
"""
from django.test.client import RequestFactory
from functools import reduce as functools_reduce
from logging import getLogger
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
......@@ -11,6 +12,9 @@ from openedx.core.lib.block_structure.transformer import BlockStructureTransform
from openedx.core.djangoapps.util.user_utils import SystemUser
log = getLogger(__name__)
class GradesTransformer(BlockStructureTransformer):
"""
The GradesTransformer collects grading information and stores it on
......@@ -119,8 +123,10 @@ class GradesTransformer(BlockStructureTransformer):
Collect the `max_score` from the given module, storing it as a
`transformer_block_field` associated with the `GradesTransformer`.
"""
score = module.max_score()
block_structure.set_transformer_block_field(module.location, cls, 'max_score', score)
max_score = module.max_score()
block_structure.set_transformer_block_field(module.location, cls, 'max_score', max_score)
if max_score is None:
log.warning("GradesTransformer: max_score is None for {}".format(module.location))
@staticmethod
def _iter_scorable_xmodules(block_structure):
......
......@@ -4,6 +4,7 @@ Enrollment operations for use by instructor APIs.
Does not include any access control, be sure to check access before calling.
"""
import crum
import json
import logging
from django.contrib.auth.models import User
......@@ -13,11 +14,14 @@ from django.core.mail import send_mail
from django.utils.translation import override as override_language
from course_modes.models import CourseMode
from student.models import CourseEnrollment, CourseEnrollmentAllowed
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from courseware.models import StudentModule
from edxmako.shortcuts import render_to_string
from lms.djangoapps.grades.scores import weighted_score
from lms.djangoapps.grades.signals.signals import SCORE_CHANGED
from lang_pref import LANGUAGE_KEY
from student.models import CourseEnrollment, CourseEnrollmentAllowed
from submissions import api as sub_api # installed from the edx-submissions repository
from student.models import anonymous_id_for_user
from openedx.core.djangoapps.user_api.models import UserPreference
......@@ -245,6 +249,7 @@ def reset_student_attempts(course_id, student, module_state_key, requesting_user
)
submission_cleared = True
except ItemNotFoundError:
block = None
log.warning("Could not find %s in modulestore when attempting to reset attempts.", module_state_key)
# Reset the student's score in the submissions API, if xblock.clear_student_state has not done so already.
......@@ -267,6 +272,7 @@ def reset_student_attempts(course_id, student, module_state_key, requesting_user
if delete_module:
module_to_reset.delete()
_fire_score_changed_for_block(course_id, student, block, module_state_key)
else:
_reset_module_attempts(module_to_reset)
......@@ -287,6 +293,47 @@ def _reset_module_attempts(studentmodule):
studentmodule.save()
def _fire_score_changed_for_block(course_id, student, block, module_state_key):
"""
Fires a SCORE_CHANGED event for the given module. The earned points are
always zero. We must retrieve the possible points from the XModule, as
noted below.
"""
if block and block.has_score:
cache = FieldDataCache.cache_for_descriptor_descendents(
course_id=course_id,
user=student,
descriptor=block,
depth=0
)
# For implementation reasons, we need to pull the max_score from the XModule,
# even though the data is not user-specific. Here we bind the data to the
# current user.
request = crum.get_current_request()
module = get_module_for_descriptor(
user=student,
request=request,
descriptor=block,
field_data_cache=cache,
course_key=course_id
)
max_score = module.max_score()
if max_score is None:
return
else:
points_earned, points_possible = weighted_score(0, max_score, getattr(module, 'weight', None))
else:
points_earned, points_possible = 0, 0
SCORE_CHANGED.send(
sender=None,
points_possible=points_possible,
points_earned=points_earned,
user=student,
course_id=unicode(course_id),
usage_id=unicode(module_state_key)
)
def get_email_params(course, auto_enroll, secure=True, course_key=None, display_name=None):
"""
Generate parameters used when parsing email templates.
......
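For context, a sketch of the weighted_score call used above, under the assumed (raw_earned, raw_possible, weight) -> (earned, possible) semantics:

    from lms.djangoapps.grades.scores import weighted_score

    # Zero raw points earned out of a raw max of 7.0 with weight 2.5
    # is assumed to yield (0.0, 2.5).
    points_earned, points_possible = weighted_score(0, 7.0, 2.5)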
......@@ -3190,7 +3190,8 @@ class TestInstructorAPIRegradeTask(SharedModuleStoreTestCase, LoginEnrollmentTes
})
self.assertEqual(response.status_code, 400)
def test_reset_student_attempts_delete(self):
@patch('courseware.module_render.SCORE_CHANGED.send')
def test_reset_student_attempts_delete(self, _mock_signal):
""" Test delete single student state. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
......
......@@ -8,6 +8,7 @@ import mock
from mock import patch
from abc import ABCMeta
from courseware.models import StudentModule
from courseware.tests.helpers import get_request_for_user
from django.conf import settings
from django.utils.translation import get_language
from django.utils.translation import override as override_language
......@@ -16,7 +17,11 @@ from ccx_keys.locator import CCXLocator
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
from grades.new.subsection_grade import SubsectionGradeFactory
from grades.tests.utils import answer_problem
from lms.djangoapps.ccx.tests.factories import CcxFactory
from lms.djangoapps.course_blocks.api import get_course_blocks
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
from student.models import CourseEnrollment, CourseEnrollmentAllowed
from student.roles import CourseCcxCoachRole
......@@ -373,7 +378,8 @@ class TestInstructorEnrollmentStudentModule(SharedModuleStoreTestCase):
reset_student_attempts(self.course_key, self.user, msk, requesting_user=self.user)
self.assertEqual(json.loads(module().state)['attempts'], 0)
def test_delete_student_attempts(self):
@mock.patch('courseware.module_render.SCORE_CHANGED.send')
def test_delete_student_attempts(self, _mock_signal):
msk = self.course_key.make_usage_key('dummy', 'module')
original_state = json.dumps({'attempts': 32, 'otherstuff': 'alsorobots'})
StudentModule.objects.create(
......@@ -399,7 +405,7 @@ class TestInstructorEnrollmentStudentModule(SharedModuleStoreTestCase):
# Disable the score change signal to prevent other components from being
# pulled into tests.
@mock.patch('courseware.module_render.SCORE_CHANGED.send')
def test_delete_submission_scores(self, _lti_mock):
def test_delete_submission_scores(self, _mock_signal):
user = UserFactory()
problem_location = self.course_key.make_usage_key('dummy', 'module')
......@@ -494,6 +500,80 @@ class TestInstructorEnrollmentStudentModule(SharedModuleStoreTestCase):
self.assertEqual(unrelated_state['brains'], 'zombie')
class TestStudentModuleGrading(SharedModuleStoreTestCase):
"""
Tests the effects of student module manipulations
on student grades.
"""
@classmethod
def setUpClass(cls):
super(TestStudentModuleGrading, cls).setUpClass()
cls.course = CourseFactory.create()
cls.chapter = ItemFactory.create(
parent=cls.course,
category="chapter",
display_name="Test Chapter"
)
cls.sequence = ItemFactory.create(
parent=cls.chapter,
category='sequential',
display_name="Test Sequential 1",
graded=True
)
cls.vertical = ItemFactory.create(
parent=cls.sequence,
category='vertical',
display_name='Test Vertical 1'
)
problem_xml = MultipleChoiceResponseXMLFactory().build_xml(
question_text='The correct answer is Choice 3',
choices=[False, False, True, False],
choice_names=['choice_0', 'choice_1', 'choice_2', 'choice_3']
)
cls.problem = ItemFactory.create(
parent=cls.vertical,
category="problem",
display_name="Test Problem",
data=problem_xml
)
cls.request = get_request_for_user(UserFactory())
cls.user = cls.request.user
def _get_subsection_grade_and_verify(self, all_earned, all_possible, graded_earned, graded_possible):
"""
Retrieves the subsection grade and verifies that
its scores match those expected.
"""
subsection_grade_factory = SubsectionGradeFactory(
self.user,
self.course,
get_course_blocks(self.user, self.course.location)
)
grade = subsection_grade_factory.create(self.sequence)
self.assertEqual(grade.all_total.earned, all_earned)
self.assertEqual(grade.graded_total.earned, graded_earned)
self.assertEqual(grade.all_total.possible, all_possible)
self.assertEqual(grade.graded_total.possible, graded_possible)
@patch('crum.get_current_request')
def test_delete_student_state(self, _crum_mock):
problem_location = self.problem.location
self._get_subsection_grade_and_verify(0, 1, 0, 1)
answer_problem(course=self.course, request=self.request, problem=self.problem, score=1, max_value=1)
self._get_subsection_grade_and_verify(1, 1, 1, 1)
# Delete student state using the instructor dash
reset_student_attempts(
self.course.id,
self.user,
problem_location,
requesting_user=self.user,
delete_module=True,
)
# Verify that the student's grades are reset
self._get_subsection_grade_and_verify(0, 1, 0, 1)
class EnrollmentObjects(object):
"""
Container for enrollment objects.
......
......@@ -49,7 +49,8 @@ class InstructorServiceTests(SharedModuleStoreTestCase):
state=json.dumps({'attempts': 2}),
)
def test_reset_student_attempts_delete(self):
@mock.patch('courseware.module_render.SCORE_CHANGED.send')
def test_reset_student_attempts_delete(self, _mock_signal):
"""
Test delete student state.
"""
......
......@@ -948,7 +948,7 @@ def upload_problem_grade_report(_xmodule_instance_args, _entry_id, course_id, _t
final_grade = gradeset['percent']
# Only consider graded problems
problem_scores = {unicode(score.module_id): score for score, _ in gradeset['raw_scores'] if score.graded}
problem_scores = {unicode(score.module_id): score for score in gradeset['raw_scores'] if score.graded}
earned_possible_values = list()
for problem_id in problems:
try:
......
"""
Utilities for testing xblocks
"""
from django.conf import settings
from xmodule.modulestore.tests.factories import ItemFactory
TEST_DATA_DIR = settings.COMMON_ROOT / u'test/data'
def add_xml_block_from_file(block_type, filename, parent, metadata):
"""
Create a block of the specified type with content included from the
specified XML file.
XML filenames are relative to common/test/data/blocks.
"""
with open(TEST_DATA_DIR / u'blocks' / filename) as datafile:
return ItemFactory.create(
parent=parent,
category=block_type,
data=datafile.read().decode('utf-8'),
metadata=metadata,
display_name=u'problem'
)
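A usage sketch matching the tests earlier in this commit (vert1 is a placeholder vertical created via ItemFactory; metadata keys mirror default_problem_metadata):

    from openedx.core.lib.xblock_utils.test_utils import add_xml_block_from_file

    block = add_xml_block_from_file(
        u'problem',
        u'capa.xml',  # resolved under common/test/data/blocks
        parent=vert1,
        metadata={u'graded': True, u'weight': 2.5},
    )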