Commit ddb383c3 by J. Cliff Dyer

Test different problem types.

parent 9ba97722
<problem url_name="capa-optionresponse">
  <optionresponse>
    <optioninput options="('Correct', 'Incorrect')" correct="Correct"></optioninput>
    <optioninput options="('Correct', 'Incorrect')" correct="Correct"></optioninput>
  </optionresponse>
</problem>
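A note on the fixture above: the single optionresponse holds two optioninput fields, and capa answer dicts key each input as <response_index>_<input_index>, with response numbering starting at 2. The tests below therefore answer only the first input, earning half of the raw score:

# Excerpt from the tests in this commit: answer only the first of the
# two optioninputs ('2_1'), leaving '2_2' blank, for a raw score of 1/2.
self.submit_question_answer(u'problem', {u'2_1': u'Correct'})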
<problem display_name="Exercise: apply to each 3" markdown="null" weight="5.0">
  <text>
    <p>
      <b>ESTIMATED TIME TO COMPLETE: 4 minutes</b>
    </p>
    <pre>
&gt;&gt;&gt; print testList
[1, 16, 64, 81]
    </pre>
  </text>
  <coderesponse queuename="Watcher-MITx-6.00x">
    <textbox rows="10" cols="80" mode="python" tabsize="4"/>
    <codeparam>
      <initial_display>
# Your Code Here
      </initial_display>
      <answer_display>
def square(a):
    return a * a

applyToEach(testList, square)
      </answer_display>
      <grader_payload>{"grader": "finger_exercises/L6/applyToEach3/grade_ate3.py"}</grader_payload>
    </codeparam>
  </coderesponse>
</problem>
<library_content display_name="Final Exam" has_score="true" max_count="25" source_library_id="library-v1:MSX+msx_cld213xfinalexam" source_library_version="577b5aca45064f068278faa0">
  <problem/>
  <problem/>
</library_content>
<lti launch_url="http://www.imsglobal.org/developers/LTI/test/v1p1/tool.php" lti_id="ims"/>
<openassessment url_name="0e2bbf6cc89e45d98b028fa4e2d46314" allow_file_upload="False">
  <title></title>
  <assessments>
    <assessment name="peer-assessment" must_grade="1" must_be_graded_by="1"/>
    <assessment name="self-assessment"/>
  </assessments>
  <rubric>
    <prompt>
      Censorship in the Libraries

      'All of us can think of a book that we hope none of our children or any
      other children have taken off the shelf. But if I have the right to remove
      that book from the shelf -- that work I abhor -- then you also have exactly
      the same right and so does everyone else. And then we have no books left on
      the shelf for any of us.' --Katherine Paterson, Author

      Write a persuasive essay to a newspaper reflecting your views on censorship
      in libraries. Do you believe that certain materials, such as books, music,
      movies, magazines, etc., should be removed from the shelves if they are
      found offensive? Support your position with convincing arguments from your
      own experience, observations, and/or reading.

      Read for conciseness, clarity of thought, and form.
    </prompt>
    <criterion>
      <name>Ideas</name>
      <prompt>Determine if there is a unifying theme or main idea.</prompt>
      <option points="0">
        <name>Poor</name>
        <explanation>
          Difficult for the reader to discern the main idea.
          Too brief or too repetitive to establish or maintain a focus.
        </explanation>
      </option>
      <option points="3">
        <name>Fair</name>
        <explanation>
          Presents a unifying theme or main idea, but may
          include minor tangents. Stays somewhat focused on topic and
          task.
        </explanation>
      </option>
      <option points="5">
        <name>Good</name>
        <explanation>
          Presents a unifying theme or main idea without going
          off on tangents. Stays completely focused on topic and task.
        </explanation>
      </option>
    </criterion>
    <criterion>
      <name>Content</name>
      <prompt>Assess the content of the submission.</prompt>
      <option points="0">
        <name>Poor</name>
        <explanation>
          Includes little information with few or no details or
          unrelated details. Unsuccessful in attempts to explore any
          facets of the topic.
        </explanation>
      </option>
      <option points="1">
        <name>Fair</name>
        <explanation>
          Includes little information and few or no details.
          Explores only one or two facets of the topic.
        </explanation>
      </option>
      <option points="3">
        <name>Good</name>
        <explanation>
          Includes sufficient information and supporting
          details. (Details may not be fully developed; ideas may be
          listed.) Explores some facets of the topic.
        </explanation>
      </option>
      <option points="3">
        <name>Excellent</name>
        <explanation>
          Includes in-depth information and exceptional
          supporting details that are fully developed. Explores all
          facets of the topic.
        </explanation>
      </option>
    </criterion>
  </rubric>
</openassessment>
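The five fixtures above (capa.xml, coderesponse.xml, library_content.xml, lti.xml, openassessment.xml) are test data checked in under common/test/data/blocks; the Python diffs that follow load them into a test course through the new add_xml_block_from_file helper defined at the bottom of this commit. A minimal sketch of one such call, inside a ModuleStoreTestCase as in the tests below (the wrapper function here is illustrative, not part of the commit):

from openedx.core.lib.xblock_utils.test_utils import add_xml_block_from_file

def _load_capa_fixture(vertical):
    # Illustrative wrapper: load the capa fixture into the given vertical.
    # The metadata mirrors default_problem_metadata in the test class below.
    return add_xml_block_from_file(
        u'problem',
        u'capa.xml',
        parent=vertical,
        metadata={u'graded': True, u'weight': 2.5},
    )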
@@ -15,10 +15,10 @@ from lms.djangoapps.course_blocks.api import get_course_blocks
from student.tests.factories import UserFactory
from student.models import CourseEnrollment
from xmodule.block_metadata_utils import display_name_with_default_escaped
from xmodule.graders import ProblemScore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.graders import ProblemScore
from .utils import answer_problem
from .. import course_grades
@@ -2,24 +2,30 @@
Test saved subsection grade functionality.
"""
# pylint: disable=protected-access
import datetime
import ddt
from django.conf import settings
from django.db.utils import DatabaseError
from mock import patch
import pytz
from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
from courseware.tests.helpers import get_request_for_user
from courseware.tests.test_submitting_problems import ProblemSubmissionTestMixin
from lms.djangoapps.course_blocks.api import get_course_blocks
from lms.djangoapps.grades.config.tests.utils import persistent_grades_feature_flags
from openedx.core.lib.xblock_utils.test_utils import add_xml_block_from_file
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from ..models import PersistentSubsectionGrade
from ..new.course_grade import CourseGradeFactory
from ..new.subsection_grade import SubsectionGrade, SubsectionGradeFactory
from lms.djangoapps.grades.tests.utils import mock_get_score
from .utils import mock_get_score
class GradeTestBase(SharedModuleStoreTestCase):
@@ -230,3 +236,148 @@ class SubsectionGradeTest(GradeTestBase):
        self.assertEqual(input_grade.url_name, loaded_grade.url_name)
        self.assertEqual(input_grade.all_total, loaded_grade.all_total)

@ddt.ddt
class TestMultipleProblemTypesSubsectionScores(ModuleStoreTestCase, ProblemSubmissionTestMixin):
    """
    Test grading of different problem types.
    """
    default_problem_metadata = {
        u'graded': True,
        u'weight': 2.5,
        u'max_score': 7.0,
        u'due': datetime.datetime(2099, 3, 15, 12, 30, 0, tzinfo=pytz.utc),
    }

    COURSE_NAME = u'Problem Type Test Course'
    COURSE_NUM = u'probtype'

    def setUp(self):
        super(TestMultipleProblemTypesSubsectionScores, self).setUp()
        password = u'test'
        self.student = UserFactory.create(is_staff=False, username=u'test_student', password=password)
        self.client.login(username=self.student.username, password=password)
        self.request = get_request_for_user(self.student)
        self.course = CourseFactory.create(
            display_name=self.COURSE_NAME,
            number=self.COURSE_NUM
        )
        self.chapter = ItemFactory.create(
            parent=self.course,
            category=u'chapter',
            display_name=u'Test Chapter'
        )
        self.seq1 = ItemFactory.create(
            parent=self.chapter,
            category=u'sequential',
            display_name=u'Test Sequential 1',
            graded=True
        )
        self.vert1 = ItemFactory.create(
            parent=self.seq1,
            category=u'vertical',
            display_name=u'Test Vertical 1'
        )

    def _get_fresh_subsection_score(self, course_structure, subsection):
        """
        Return a Score object for the specified subsection.

        Ensures that a stale cached value is not returned.
        """
        subsection_factory = SubsectionGradeFactory(
            self.student,
            course_structure=course_structure,
            course=self.course,
        )
        return subsection_factory.update(subsection)

    def _get_altered_metadata(self, alterations):
        """
        Returns a copy of the default_problem_metadata dict updated with the
        specified alterations.
        """
        metadata = self.default_problem_metadata.copy()
        metadata.update(alterations)
        return metadata

    def _get_score_with_alterations(self, alterations):
        """
        Given a dict of alterations to the default_problem_metadata, return
        the score when one correct problem (out of two) is submitted.
        """
        metadata = self._get_altered_metadata(alterations)
        add_xml_block_from_file(u'problem', u'capa.xml', parent=self.vert1, metadata=metadata)

        course_structure = get_course_blocks(self.student, self.course.location)
        self.submit_question_answer(u'problem', {u'2_1': u'Correct'})
        return self._get_fresh_subsection_score(course_structure, self.seq1)

    def test_score_submission_for_capa_problems(self):
        add_xml_block_from_file(u'problem', u'capa.xml', parent=self.vert1, metadata=self.default_problem_metadata)

        course_structure = get_course_blocks(self.student, self.course.location)
        score = self._get_fresh_subsection_score(course_structure, self.seq1)
        self.assertEqual(score.all_total.earned, 0.0)
        self.assertEqual(score.all_total.possible, 2.5)

        self.submit_question_answer(u'problem', {u'2_1': u'Correct'})
        score = self._get_fresh_subsection_score(course_structure, self.seq1)
        self.assertEqual(score.all_total.earned, 1.25)
        self.assertEqual(score.all_total.possible, 2.5)

    @ddt.data(
        (u'openassessment', u'openassessment.xml'),
        (u'coderesponse', u'coderesponse.xml'),
        (u'lti', u'lti.xml'),
        (u'library_content', u'library_content.xml'),
    )
    @ddt.unpack
    def test_loading_different_problem_types(self, block_type, filename):
        """
        Test that transformation works for various block types.
        """
        metadata = self.default_problem_metadata.copy()
        if block_type == u'library_content':
            # Library content does not have a weight.
            del metadata[u'weight']
        add_xml_block_from_file(block_type, filename, parent=self.vert1, metadata=metadata)

    @ddt.data(
        ({}, 1.25, 2.5),
        ({u'weight': 27}, 13.5, 27),
        ({u'weight': 1.0}, 0.5, 1.0),
        ({u'weight': 0.0}, 0.0, 0.0),
        ({u'weight': None}, 1.0, 2.0),
    )
    @ddt.unpack
    def test_weight_metadata_alterations(self, alterations, expected_earned, expected_possible):
        score = self._get_score_with_alterations(alterations)
        self.assertEqual(score.all_total.earned, expected_earned)
        self.assertEqual(score.all_total.possible, expected_possible)

    @ddt.data(
        ({u'graded': True}, 1.25, 2.5),
        ({u'graded': False}, 0.0, 0.0),
    )
    @ddt.unpack
    def test_graded_metadata_alterations(self, alterations, expected_earned, expected_possible):
        score = self._get_score_with_alterations(alterations)
        self.assertEqual(score.graded_total.earned, expected_earned)
        self.assertEqual(score.graded_total.possible, expected_possible)

    @ddt.data(
        {u'max_score': 99.3},
        {u'max_score': 1.0},
        {u'max_score': 0.0},
        {u'max_score': None},
    )
    def test_max_score_does_not_change_results(self, alterations):
        expected_earned = 1.25
        expected_possible = 2.5
        score = self._get_score_with_alterations(alterations)
        self.assertEqual(score.all_total.earned, expected_earned)
        self.assertEqual(score.all_total.possible, expected_possible)
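The expected values in the weight tests follow the weighted-score rule used by edx-platform's graders: when a weight is set, possible becomes the weight and earned is scaled by the raw ratio; a weight of None leaves the raw score (here 1 earned of 2 possible) untouched. A minimal sketch of that rule, written to match the ddt data above rather than quoting the production implementation:

def weighted(raw_earned, raw_possible, weight):
    # Sketch of the weighting rule implied by test_weight_metadata_alterations.
    if weight is None or raw_possible == 0:
        # No weight (or nothing possible): keep the raw score.
        return raw_earned, raw_possible
    return raw_earned * weight / raw_possible, weight

# One of two option inputs answered correctly: raw score is 1/2.
assert weighted(1.0, 2.0, 2.5) == (1.25, 2.5)
assert weighted(1.0, 2.0, 27) == (13.5, 27)
assert weighted(1.0, 2.0, None) == (1.0, 2.0)

The tests above load their fixtures through the new helper module below (openedx.core.lib.xblock_utils.test_utils, per the import near the top of the diff).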
"""
Utilities for testing xblocks
"""
from django.conf import settings
from xmodule.modulestore.tests.factories import ItemFactory
TEST_DATA_DIR = settings.COMMON_ROOT / u'test/data'
def add_xml_block_from_file(block_type, filename, parent, metadata):
"""
Create a block of the specified type with content included from the
specified XML file.
XML filenames are relative to common/test/data/blocks.
"""
with open(TEST_DATA_DIR / u'blocks' / filename) as datafile:
return ItemFactory.create(
parent=parent,
category=block_type,
data=datafile.read().decode('utf-8'),
metadata=metadata,
display_name=u'problem'
)
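settings.COMMON_ROOT in edx-platform is a path.py path object, which is why the / operator joins path components here, and the explicit .decode('utf-8') reflects the Python 2 codebase. For comparison only, a stdlib-flavored sketch of the same helper (an illustration, not part of this commit):

import io
import os

from django.conf import settings
from xmodule.modulestore.tests.factories import ItemFactory

def add_xml_block_from_file_stdlib(block_type, filename, parent, metadata):
    # Same behavior as the helper above, using os.path and io.open
    # instead of path.py division and str.decode.
    filepath = os.path.join(settings.COMMON_ROOT, u'test/data/blocks', filename)
    with io.open(filepath, encoding='utf-8') as datafile:
        return ItemFactory.create(
            parent=parent,
            category=block_type,
            data=datafile.read(),
            metadata=metadata,
            display_name=u'problem',
        )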