"""
Integration Tests for LMS instructor-initiated background tasks.

Runs tasks on answers to course problems to validate that code
paths actually work.

"""
import logging
import json
from mock import patch
import textwrap

from celery.states import SUCCESS, FAILURE
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse

from capa.tests.response_xml_factory import (CodeResponseXMLFactory,
                                             CustomResponseXMLFactory)
from xmodule.modulestore.tests.factories import ItemFactory

from courseware.models import StudentModule

from instructor_task.api import (submit_rescore_problem_for_all_students,
                                 submit_rescore_problem_for_student,
                                 submit_reset_problem_attempts_for_all_students,
                                 submit_delete_problem_state_for_all_students)
from instructor_task.models import InstructorTask
from instructor_task.tests.test_base import (InstructorTaskModuleTestCase, TEST_COURSE_ORG, TEST_COURSE_NUMBER,
                                             OPTION_1, OPTION_2)
from capa.responsetypes import StudentInputError


log = logging.getLogger(__name__)


class TestIntegrationTask(InstructorTaskModuleTestCase):
    """
    Base class to provide general methods used for "integration" testing of particular tasks.
    """

    def submit_student_answer(self, username, problem_url_name, responses):
        """
        Use ajax interface to submit a student answer.

        Assumes the input list of responses has two values.
        """
        def get_input_id(response_id):
            """Creates input id using information about the test course and the current problem."""
            # Note that this is a capa-specific convention.  The form is a version of the problem's
            # URL, modified so that it can be easily stored in html, prepended with "input-" and
            # appended with a sequence identifier for the particular response the input goes to.
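            # For example, assuming the test_base values TEST_COURSE_ORG = 'edx' and
            # TEST_COURSE_NUMBER = '1.23x' (assumed here for illustration), get_input_id('2_1')
            # on problem 'H1P1' would yield 'input_i4x-edx-1_23x-problem-H1P1_2_1'.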
            return 'input_i4x-{0}-{1}-problem-{2}_{3}'.format(TEST_COURSE_ORG.lower(),
                                                              TEST_COURSE_NUMBER.replace('.', '_'),
                                                              problem_url_name, response_id)

        # make sure that the requested user is logged in, so that the ajax call works
        # on the right problem:
        self.login_username(username)
        # make ajax call:
        modx_url = reverse('modx_dispatch',
                           kwargs={'course_id': self.course.id,
                                   'location': InstructorTaskModuleTestCase.problem_location(problem_url_name),
                                   'dispatch': 'problem_check', })

        # we assume we have two responses, so assign them the correct identifiers.
        resp = self.client.post(modx_url, {
            get_input_id('2_1'): responses[0],
            get_input_id('3_1'): responses[1],
        })
        return resp

    def _assert_task_failure(self, entry_id, task_type, problem_url_name, expected_message):
        """Confirm that expected values are stored in InstructorTask on task failure."""
        instructor_task = InstructorTask.objects.get(id=entry_id)
        self.assertEqual(instructor_task.task_state, FAILURE)
        self.assertEqual(instructor_task.requester.username, 'instructor')
        self.assertEqual(instructor_task.task_type, task_type)
        task_input = json.loads(instructor_task.task_input)
        self.assertFalse('student' in task_input)
        self.assertEqual(task_input['problem_url'], InstructorTaskModuleTestCase.problem_location(problem_url_name))
        status = json.loads(instructor_task.task_output)
        self.assertEqual(status['exception'], 'ZeroDivisionError')
        self.assertEqual(status['message'], expected_message)
        # check status returned:
        status = InstructorTaskModuleTestCase.get_task_status(instructor_task.task_id)
        self.assertEqual(status['message'], expected_message)


class TestRescoringTask(TestIntegrationTask):
    """
    Integration-style tests for rescoring problems in a background task.

    Exercises real problems with a minimum of patching.
    """

    def setUp(self):
        self.initialize_course()
        self.create_instructor('instructor')
        self.create_student('u1')
        self.create_student('u2')
        self.create_student('u3')
        self.create_student('u4')
        self.logout()

    def render_problem(self, username, problem_url_name):
        """
        Use ajax interface to request html for a problem.
        """
        # make sure that the requested user is logged in, so that the ajax call works
        # on the right problem:
        self.login_username(username)
        # make ajax call:
        modx_url = reverse('modx_dispatch',
                           kwargs={'course_id': self.course.id,
                                   'location': InstructorTaskModuleTestCase.problem_location(problem_url_name),
                                   'dispatch': 'problem_get', })
        resp = self.client.post(modx_url, {})
        return resp

    def check_state(self, username, descriptor, expected_score, expected_max_score, expected_attempts):
        """
        Check that the StudentModule state contains the expected values.

        The student module is found for the test course, given the `username` and problem `descriptor`.

        Values checked include the number of attempts, the score, and the max score for a problem.
        """
        module = self.get_student_module(username, descriptor)
        self.assertEqual(module.grade, expected_score)
        self.assertEqual(module.max_grade, expected_max_score)
        state = json.loads(module.state)
        attempts = state['attempts']
        self.assertEqual(attempts, expected_attempts)
        if attempts > 0:
            self.assertTrue('correct_map' in state)
            self.assertTrue('student_answers' in state)
            self.assertGreater(len(state['correct_map']), 0)
            self.assertGreater(len(state['student_answers']), 0)

    def submit_rescore_all_student_answers(self, instructor, problem_url_name):
        """Submits the particular problem for rescoring"""
        return submit_rescore_problem_for_all_students(self.create_task_request(instructor), self.course.id,
                                                       InstructorTaskModuleTestCase.problem_location(problem_url_name))

    def submit_rescore_one_student_answer(self, instructor, problem_url_name, student):
        """Submits the particular problem for rescoring for a particular student"""
        return submit_rescore_problem_for_student(self.create_task_request(instructor), self.course.id,
                                                  InstructorTaskModuleTestCase.problem_location(problem_url_name),
                                                  student)

    def test_rescoring_option_problem(self):
        """Run rescore scenario on option problem"""
        # get descriptor:
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        descriptor = self.module_store.get_instance(self.course.id, location)

        # first store answers for each of the separate users:
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])
        self.submit_student_answer('u2', problem_url_name, [OPTION_1, OPTION_2])
        self.submit_student_answer('u3', problem_url_name, [OPTION_2, OPTION_1])
        self.submit_student_answer('u4', problem_url_name, [OPTION_2, OPTION_2])

        self.check_state('u1', descriptor, 2, 2, 1)
        self.check_state('u2', descriptor, 1, 2, 1)
        self.check_state('u3', descriptor, 1, 2, 1)
        self.check_state('u4', descriptor, 0, 2, 1)

        # update the data in the problem definition
        self.redefine_option_problem(problem_url_name)
        # confirm that simply rendering the problem again does not result in a change
        # in the grade:
        self.render_problem('u1', problem_url_name)
        self.check_state('u1', descriptor, 2, 2, 1)

        # rescore the problem for only one student -- only that student's grade should change:
        self.submit_rescore_one_student_answer('instructor', problem_url_name, User.objects.get(username='u1'))
        self.check_state('u1', descriptor, 0, 2, 1)
        self.check_state('u2', descriptor, 1, 2, 1)
        self.check_state('u3', descriptor, 1, 2, 1)
        self.check_state('u4', descriptor, 0, 2, 1)

        # rescore the problem for all students
        self.submit_rescore_all_student_answers('instructor', problem_url_name)
        self.check_state('u1', descriptor, 0, 2, 1)
        self.check_state('u2', descriptor, 1, 2, 1)
        self.check_state('u3', descriptor, 1, 2, 1)
        self.check_state('u4', descriptor, 2, 2, 1)

    def test_rescoring_failure(self):
        """Simulate a failure in rescoring a problem"""
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])

        expected_message = "bad things happened"
        with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
            mock_rescore.side_effect = ZeroDivisionError(expected_message)
            instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)
        self._assert_task_failure(instructor_task.id, 'rescore_problem', problem_url_name, expected_message)

    def test_rescoring_bad_unicode_input(self):
        """Generate a real failure in rescoring a problem, with an answer including unicode"""
        # At one point, the student answers that resulted in StudentInputErrors were being
        # persisted (even though they were not counted as an attempt).  That is not possible
        # now, so it's harder to generate a test for how such input is handled.
        problem_url_name = 'H1P1'
        # set up an option problem -- it doesn't really matter which problem, but we need
        # it to have an answer.
        self.define_option_problem(problem_url_name)
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])

        # return an input error as if it were a numerical response, with an embedded unicode character:
        expected_message = u"Could not interpret '2/3\u03a9' as a number"
        with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
            mock_rescore.side_effect = StudentInputError(expected_message)
            instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)

        # check instructor_task returned
        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        self.assertEqual(instructor_task.task_state, SUCCESS)
        self.assertEqual(instructor_task.requester.username, 'instructor')
        self.assertEqual(instructor_task.task_type, 'rescore_problem')
        task_input = json.loads(instructor_task.task_input)
        self.assertFalse('student' in task_input)
        self.assertEqual(task_input['problem_url'], InstructorTaskModuleTestCase.problem_location(problem_url_name))
        status = json.loads(instructor_task.task_output)
        self.assertEqual(status['attempted'], 1)
        self.assertEqual(status['updated'], 0)
        self.assertEqual(status['total'], 1)

    def define_code_response_problem(self, problem_url_name):
        """
        Define an arbitrary code-response problem.

        We'll end up mocking its evaluation later.
        """
        factory = CodeResponseXMLFactory()
        grader_payload = json.dumps({"grader": "ps04/grade_square.py"})
        problem_xml = factory.build_xml(initial_display="def square(x):",
                                        answer_display="answer",
                                        grader_payload=grader_payload,
                                        num_responses=2)
        ItemFactory.create(parent_location=self.problem_section.location,
                           category="problem",
                           display_name=str(problem_url_name),
                           data=problem_xml)

    def test_rescoring_code_problem(self):
        """Run rescore scenario on problem with code submission"""
        problem_url_name = 'H1P2'
        self.define_code_response_problem(problem_url_name)
        # we fully create the CodeResponse problem, but just pretend that we're queuing it:
        with patch('capa.xqueue_interface.XQueueInterface.send_to_queue') as mock_send_to_queue:
            mock_send_to_queue.return_value = (0, "Successfully queued")
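            # send_to_queue is expected to return an (error_code, message) pair; 0 signals
            # success, so the submission proceeds as if the answer had really been queued.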
            self.submit_student_answer('u1', problem_url_name, ["answer1", "answer2"])

        instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)

        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        self.assertEqual(instructor_task.task_state, FAILURE)
        status = json.loads(instructor_task.task_output)
        self.assertEqual(status['exception'], 'NotImplementedError')
        self.assertEqual(status['message'], "Problem's definition does not support rescoring")

        status = InstructorTaskModuleTestCase.get_task_status(instructor_task.task_id)
        self.assertEqual(status['message'], "Problem's definition does not support rescoring")

    def define_randomized_custom_response_problem(self, problem_url_name, redefine=False):
        """
        Defines a custom response problem that uses a random value to determine correctness.

        Generated answer is also returned as the `msg`, so that the value can be used as a
        correct answer by a test.

        If the `redefine` flag is set, then change the definition of correctness (from equals
        to not-equals).
        """
        factory = CustomResponseXMLFactory()
        script = textwrap.dedent("""
                def check_func(expect, answer_given):
                    expected = str(random.randint(0, 100))
                    return {'ok': answer_given %s expected, 'msg': expected}
            """ % ('!=' if redefine else '=='))
        problem_xml = factory.build_xml(script=script, cfn="check_func", expect="42", num_responses=1)
        if redefine:
            self.module_store.update_item(InstructorTaskModuleTestCase.problem_location(problem_url_name), problem_xml)
        else:
            # Use "per-student" rerandomization so that check-problem can be called more than once.
            # Using "always" means we cannot check a problem twice, but we want to call once to get the
            # correct answer, and call a second time with that answer to confirm it's graded as correct.
            # Per-student rerandomization will at least generate different seeds for different users, so
            # we get a little more test coverage.
            ItemFactory.create(parent_location=self.problem_section.location,
                               category="problem",
                               display_name=str(problem_url_name),
                               data=problem_xml,
                               metadata={"rerandomize": "per_student"})

    def test_rescoring_randomized_problem(self):
        """Run rescore scenario on custom problem that uses randomize"""
        # First define the custom response problem:
        problem_url_name = 'H1P1'
        self.define_randomized_custom_response_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        descriptor = self.module_store.get_instance(self.course.id, location)
        # run with more than one user
        userlist = ['u1', 'u2', 'u3', 'u4']
        for username in userlist:
            # first render the problem, so that a seed will be created for this user
            self.render_problem(username, problem_url_name)
            # submit a bogus answer, in order to get the problem to tell us its real answer
            dummy_answer = "1000"
            self.submit_student_answer(username, problem_url_name, [dummy_answer, dummy_answer])
            # we should have gotten the problem wrong, since we're way out of range:
            self.check_state(username, descriptor, 0, 1, 1)
            # dig the correct answer out of the problem's message
            module = self.get_student_module(username, descriptor)
            state = json.loads(module.state)
            correct_map = state['correct_map']
            log.info("Correct Map: %s", correct_map)
            # only one response, so pull it out:
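            # correct_map is assumed to follow capa's CorrectMap layout, mapping each
            # answer_id to a dict like {'correctness': 'incorrect', 'msg': '42', ...};
            # indexing .values() directly works because this code targets Python 2.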
            answer = correct_map.values()[0]['msg']
            self.submit_student_answer(username, problem_url_name, [answer, answer])
            # we should now get the problem right, with a second attempt:
            self.check_state(username, descriptor, 1, 1, 2)

        # redefine the problem (as stored in Mongo) so that the definition of correct changes
        self.define_randomized_custom_response_problem(problem_url_name, redefine=True)
        # confirm that simply rendering the problem again does not result in a change
        # in the grade (or the attempts):
        self.render_problem('u1', problem_url_name)
        self.check_state('u1', descriptor, 1, 1, 2)

        # rescore the problem for only one student -- only that student's grade should change
        # (and none of the attempts):
        self.submit_rescore_one_student_answer('instructor', problem_url_name, User.objects.get(username='u1'))
        for username in userlist:
            self.check_state(username, descriptor, 0 if username == 'u1' else 1, 1, 2)

        # rescore the problem for all students
        self.submit_rescore_all_student_answers('instructor', problem_url_name)

        # all grades should change to being wrong (with no change in attempts)
        for username in userlist:
            self.check_state(username, descriptor, 0, 1, 2)


class TestResetAttemptsTask(TestIntegrationTask):
    """
    Integration-style tests for resetting problem attempts in a background task.

    Exercises real problems with a minimum of patching.
    """
    userlist = ['u1', 'u2', 'u3', 'u4']

    def setUp(self):
        self.initialize_course()
        self.create_instructor('instructor')
        for username in self.userlist:
            self.create_student(username)
        self.logout()

    def get_num_attempts(self, username, descriptor):
        """returns number of attempts stored for `username` on problem `descriptor` for test course"""
        module = self.get_student_module(username, descriptor)
        state = json.loads(module.state)
        return state['attempts']

    def reset_problem_attempts(self, instructor, problem_url_name):
        """Submits the current problem for resetting"""
        return submit_reset_problem_attempts_for_all_students(self.create_task_request(instructor), self.course.id,
                                                              InstructorTaskModuleTestCase.problem_location(problem_url_name))

    def test_reset_attempts_on_problem(self):
        """Run reset-attempts scenario on option problem"""
        # get descriptor:
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        descriptor = self.module_store.get_instance(self.course.id, location)
        num_attempts = 3
        # first store answers for each of the separate users:
        for _ in range(num_attempts):
            for username in self.userlist:
                self.submit_student_answer(username, problem_url_name, [OPTION_1, OPTION_1])

        for username in self.userlist:
            self.assertEqual(self.get_num_attempts(username, descriptor), num_attempts)

        self.reset_problem_attempts('instructor', problem_url_name)

        for username in self.userlist:
            self.assertEqual(self.get_num_attempts(username, descriptor), 0)

    def test_reset_failure(self):
        """Simulate a failure in resetting attempts on a problem"""
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])

        expected_message = "bad things happened"
        with patch('courseware.models.StudentModule.save') as mock_save:
            mock_save.side_effect = ZeroDivisionError(expected_message)
            instructor_task = self.reset_problem_attempts('instructor', problem_url_name)
        self._assert_task_failure(instructor_task.id, 'reset_problem_attempts', problem_url_name, expected_message)

    def test_reset_non_problem(self):
        """confirm that a non-problem can still be successfully reset"""
        problem_url_name = self.problem_section.location.url()
        instructor_task = self.reset_problem_attempts('instructor', problem_url_name)
        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        self.assertEqual(instructor_task.task_state, SUCCESS)


class TestDeleteProblemTask(TestIntegrationTask):
    """
    Integration-style tests for deleting problem state in a background task.

    Exercises real problems with a minimum of patching.
    """
    userlist = ['u1', 'u2', 'u3', 'u4']

    def setUp(self):
        self.initialize_course()
        self.create_instructor('instructor')
        for username in self.userlist:
            self.create_student(username)
        self.logout()

    def delete_problem_state(self, instructor, problem_url_name):
        """Submits the current problem for deletion"""
        return submit_delete_problem_state_for_all_students(self.create_task_request(instructor), self.course.id,
                                                            InstructorTaskModuleTestCase.problem_location(problem_url_name))

    def test_delete_problem_state(self):
        """Run delete-state scenario on option problem"""
        # get descriptor:
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        descriptor = self.module_store.get_instance(self.course.id, location)
        # first store answers for each of the separate users:
        for username in self.userlist:
            self.submit_student_answer(username, problem_url_name, [OPTION_1, OPTION_1])
        # confirm that state exists:
        for username in self.userlist:
            self.assertIsNotNone(self.get_student_module(username, descriptor))
        # run delete task:
        self.delete_problem_state('instructor', problem_url_name)
        # confirm that no state can be found:
        for username in self.userlist:
            with self.assertRaises(StudentModule.DoesNotExist):
                self.get_student_module(username, descriptor)

    def test_delete_failure(self):
        """Simulate a failure in deleting state of a problem"""
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])

        expected_message = "bad things happened"
        with patch('courseware.models.StudentModule.delete') as mock_delete:
            mock_delete.side_effect = ZeroDivisionError(expected_message)
            instructor_task = self.delete_problem_state('instructor', problem_url_name)
        self._assert_task_failure(instructor_task.id, 'delete_problem_state', problem_url_name, expected_message)

    def test_delete_non_problem(self):
        """confirm that a non-problem can still be successfully deleted"""
        problem_url_name = self.problem_section.location.url()
        instructor_task = self.delete_problem_state('instructor', problem_url_name)
        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        self.assertEqual(instructor_task.task_state, SUCCESS)