"""
Integration Tests for LMS instructor-initiated background tasks.

Runs tasks on answers to course problems to validate that code
paths actually work.

"""
import json
import logging
from mock import patch
import textwrap

from celery.states import SUCCESS, FAILURE
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse

from capa.tests.response_xml_factory import (CodeResponseXMLFactory,
                                             CustomResponseXMLFactory)
from openedx.core.djangoapps.user_api.tests.factories import UserCourseTagFactory
from xmodule.modulestore.tests.factories import ItemFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.partitions.partitions import Group, UserPartition

from courseware.models import StudentModule

from instructor_task.api import (submit_rescore_problem_for_all_students,
                                 submit_rescore_problem_for_student,
                                 submit_reset_problem_attempts_for_all_students,
                                 submit_delete_problem_state_for_all_students)
from instructor_task.models import InstructorTask
from instructor_task.tasks_helper import upload_grades_csv
from instructor_task.tests.test_base import (InstructorTaskModuleTestCase, TestReportMixin, TEST_COURSE_ORG,
                                             TEST_COURSE_NUMBER, OPTION_1, OPTION_2)
from capa.responsetypes import StudentInputError
from lms.djangoapps.lms_xblock.runtime import quote_slashes


log = logging.getLogger(__name__)


class TestIntegrationTask(InstructorTaskModuleTestCase):
    """
    Base class to provide general methods used for "integration" testing of particular tasks.
    """

    def submit_student_answer(self, username, problem_url_name, responses):
        """
        Use ajax interface to submit a student answer.

        Assumes the input list of responses has two values.
        """
        def get_input_id(response_id):
            """Creates input id using information about the test course and the current problem."""
            # Note that this is a capa-specific convention.  The form is a version of the problem's
            # URL, modified so that it can be easily stored in html, prepended with "input-" and
            # appended with a sequence identifier for the particular response the input goes to.
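            # For example, with problem "H1P1" and response id "2_1", this produces something
            # like "input_i4x-<org>-<number>-problem-H1P1_2_1", using the test course's org and number.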
            return 'input_i4x-{0}-{1}-problem-{2}_{3}'.format(TEST_COURSE_ORG.lower(),
                                                              TEST_COURSE_NUMBER.replace('.', '_'),
                                                              problem_url_name, response_id)

        # make sure that the requested user is logged in, so that the ajax call works
        # on the right problem:
        self.login_username(username)
        # make ajax call:
        modx_url = reverse('xblock_handler', kwargs={
            'course_id': self.course.id.to_deprecated_string(),
            'usage_id': quote_slashes(InstructorTaskModuleTestCase.problem_location(problem_url_name).to_deprecated_string()),
            'handler': 'xmodule_handler',
            'suffix': 'problem_check',
        })

        # we assume we have two responses, so assign them the correct identifiers.
        resp = self.client.post(modx_url, {
            get_input_id('2_1'): responses[0],
            get_input_id('3_1'): responses[1],
        })
        return resp

    def _assert_task_failure(self, entry_id, task_type, problem_url_name, expected_message):
        """Confirm that expected values are stored in InstructorTask on task failure."""
        instructor_task = InstructorTask.objects.get(id=entry_id)
        self.assertEqual(instructor_task.task_state, FAILURE)
        self.assertEqual(instructor_task.requester.username, 'instructor')
        self.assertEqual(instructor_task.task_type, task_type)
        task_input = json.loads(instructor_task.task_input)
        self.assertFalse('student' in task_input)
        self.assertEqual(task_input['problem_url'], InstructorTaskModuleTestCase.problem_location(problem_url_name).to_deprecated_string())
        status = json.loads(instructor_task.task_output)
        self.assertEqual(status['exception'], 'ZeroDivisionError')
        self.assertEqual(status['message'], expected_message)
        # check status returned:
        status = InstructorTaskModuleTestCase.get_task_status(instructor_task.task_id)
        self.assertEqual(status['message'], expected_message)


class TestRescoringTask(TestIntegrationTask):
    """
    Integration-style tests for rescoring problems in a background task.

    Exercises real problems with a minimum of patching.
    """

    def setUp(self):
        super(TestRescoringTask, self).setUp()

        self.initialize_course()
        self.create_instructor('instructor')
        self.create_student('u1')
        self.create_student('u2')
        self.create_student('u3')
        self.create_student('u4')
        self.logout()

        # set up test user for performing test operations
        self.setup_user()

    def render_problem(self, username, problem_url_name):
        """
        Use ajax interface to request html for a problem.
        """
        # make sure that the requested user is logged in, so that the ajax call works
        # on the right problem:
        self.login_username(username)
        # make ajax call:
        modx_url = reverse('xblock_handler', kwargs={
            'course_id': self.course.id.to_deprecated_string(),
            'usage_id': quote_slashes(InstructorTaskModuleTestCase.problem_location(problem_url_name).to_deprecated_string()),
            'handler': 'xmodule_handler',
            'suffix': 'problem_get',
        })
        resp = self.client.post(modx_url, {})
        return resp

    def check_state(self, username, descriptor, expected_score, expected_max_score, expected_attempts):
        """
        Check that the StudentModule state contains the expected values.

        The student module is found for the test course, given the `username` and problem `descriptor`.

        Values checked include the number of attempts, the score, and the max score for a problem.
        """
        module = self.get_student_module(username, descriptor)
        self.assertEqual(module.grade, expected_score)
        self.assertEqual(module.max_grade, expected_max_score)
        state = json.loads(module.state)
        attempts = state['attempts']
        self.assertEqual(attempts, expected_attempts)
        if attempts > 0:
            self.assertTrue('correct_map' in state)
            self.assertTrue('student_answers' in state)
            self.assertGreater(len(state['correct_map']), 0)
            self.assertGreater(len(state['student_answers']), 0)

    def submit_rescore_all_student_answers(self, instructor, problem_url_name):
        """Submits the particular problem for rescoring"""
        return submit_rescore_problem_for_all_students(self.create_task_request(instructor),
                                                       InstructorTaskModuleTestCase.problem_location(problem_url_name))

    def submit_rescore_one_student_answer(self, instructor, problem_url_name, student):
        """Submits the particular problem for rescoring for a particular student"""
        return submit_rescore_problem_for_student(self.create_task_request(instructor),
                                                  InstructorTaskModuleTestCase.problem_location(problem_url_name),
                                                  student)

    def test_rescoring_option_problem(self):
        """Run rescore scenario on option problem"""
        # get descriptor:
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        descriptor = self.module_store.get_item(location)

        # first store answers for each of the separate users:
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])
        self.submit_student_answer('u2', problem_url_name, [OPTION_1, OPTION_2])
        self.submit_student_answer('u3', problem_url_name, [OPTION_2, OPTION_1])
        self.submit_student_answer('u4', problem_url_name, [OPTION_2, OPTION_2])
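        # OPTION_1 is the correct choice in this problem definition, so each score checked
        # below equals the number of responses for which the student chose OPTION_1.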

        self.check_state('u1', descriptor, 2, 2, 1)
        self.check_state('u2', descriptor, 1, 2, 1)
        self.check_state('u3', descriptor, 1, 2, 1)
        self.check_state('u4', descriptor, 0, 2, 1)

        # update the data in the problem definition
        self.redefine_option_problem(problem_url_name)
        # confirm that simply rendering the problem again does not result in a change
        # in the grade:
        self.render_problem('u1', problem_url_name)
        self.check_state('u1', descriptor, 2, 2, 1)

        # rescore the problem for only one student -- only that student's grade should change:
        self.submit_rescore_one_student_answer('instructor', problem_url_name, User.objects.get(username='u1'))
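        # the redefined problem treats OPTION_2 as correct, so rescoring drops u1 to zero
        # while the other students' grades stay put until they too are rescored below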
        self.check_state('u1', descriptor, 0, 2, 1)
        self.check_state('u2', descriptor, 1, 2, 1)
        self.check_state('u3', descriptor, 1, 2, 1)
        self.check_state('u4', descriptor, 0, 2, 1)

        # rescore the problem for all students
        self.submit_rescore_all_student_answers('instructor', problem_url_name)
        self.check_state('u1', descriptor, 0, 2, 1)
        self.check_state('u2', descriptor, 1, 2, 1)
        self.check_state('u3', descriptor, 1, 2, 1)
        self.check_state('u4', descriptor, 2, 2, 1)

    def test_rescoring_failure(self):
        """Simulate a failure in rescoring a problem"""
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])

        expected_message = "bad things happened"
        with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
            mock_rescore.side_effect = ZeroDivisionError(expected_message)
            instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)
        self._assert_task_failure(instructor_task.id, 'rescore_problem', problem_url_name, expected_message)

    def test_rescoring_bad_unicode_input(self):
        """Generate a real failure in rescoring a problem, with an answer including unicode"""
        # At one point, the student answers that resulted in StudentInputErrors were being
        # persisted (even though they were not counted as an attempt).  That is not possible
        # now, so it's harder to generate a test for how such input is handled.
        problem_url_name = 'H1P1'
        # set up an option problem -- doesn't matter really what problem it is, but we need
        # it to have an answer.
        self.define_option_problem(problem_url_name)
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])

        # return an input error as if it were a numerical response, with an embedded unicode character:
        expected_message = u"Could not interpret '2/3\u03a9' as a number"
        with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
            mock_rescore.side_effect = StudentInputError(expected_message)
            instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)

        # check instructor_task returned
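        # (a StudentInputError is recorded as a per-student failure, so the task itself still
        # completes in the SUCCESS state but reports zero successful rescores)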
        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        self.assertEqual(instructor_task.task_state, SUCCESS)
        self.assertEqual(instructor_task.requester.username, 'instructor')
        self.assertEqual(instructor_task.task_type, 'rescore_problem')
        task_input = json.loads(instructor_task.task_input)
        self.assertFalse('student' in task_input)
        self.assertEqual(task_input['problem_url'], InstructorTaskModuleTestCase.problem_location(problem_url_name).to_deprecated_string())
        status = json.loads(instructor_task.task_output)
        self.assertEqual(status['attempted'], 1)
        self.assertEqual(status['succeeded'], 0)
        self.assertEqual(status['total'], 1)

    def define_code_response_problem(self, problem_url_name):
        """
        Define an arbitrary code-response problem.

        We'll end up mocking its evaluation later.
        """
        factory = CodeResponseXMLFactory()
        grader_payload = json.dumps({"grader": "ps04/grade_square.py"})
        problem_xml = factory.build_xml(initial_display="def square(x):",
                                        answer_display="answer",
                                        grader_payload=grader_payload,
                                        num_responses=2)
        ItemFactory.create(parent_location=self.problem_section.location,
                           category="problem",
                           display_name=str(problem_url_name),
                           data=problem_xml)

    def test_rescoring_code_problem(self):
        """Run rescore scenario on problem with code submission"""
        problem_url_name = 'H1P2'
        self.define_code_response_problem(problem_url_name)
        # we fully create the CodeResponse problem, but just pretend that we're queuing it:
        with patch('capa.xqueue_interface.XQueueInterface.send_to_queue') as mock_send_to_queue:
            mock_send_to_queue.return_value = (0, "Successfully queued")
            self.submit_student_answer('u1', problem_url_name, ["answer1", "answer2"])

        instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)
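        # CodeResponse problems are graded by an external grader, so capa cannot rescore
        # them locally; the task is expected to fail with NotImplementedError.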

        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        self.assertEqual(instructor_task.task_state, FAILURE)
        status = json.loads(instructor_task.task_output)
        self.assertEqual(status['exception'], 'NotImplementedError')
        self.assertEqual(status['message'], "Problem's definition does not support rescoring.")

        status = InstructorTaskModuleTestCase.get_task_status(instructor_task.task_id)
        self.assertEqual(status['message'], "Problem's definition does not support rescoring.")

    def define_randomized_custom_response_problem(self, problem_url_name, redefine=False):
        """
        Defines a custom response problem that uses a random value to determine correctness.

        Generated answer is also returned as the `msg`, so that the value can be used as a
        correct answer by a test.

        If the `redefine` flag is set, then change the definition of correctness (from equals
        to not-equals).
        """
        factory = CustomResponseXMLFactory()
        script = textwrap.dedent("""
                def check_func(expect, answer_given):
                    expected = str(random.randint(0, 100))
                    return {'ok': answer_given %s expected, 'msg': expected}
            """ % ('!=' if redefine else '=='))
        problem_xml = factory.build_xml(script=script, cfn="check_func", expect="42", num_responses=1)
        if redefine:
            descriptor = self.module_store.get_item(
                InstructorTaskModuleTestCase.problem_location(problem_url_name)
            )
            descriptor.data = problem_xml
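            # update the existing problem on the draft branch and publish it, so the
            # flipped correctness check becomes the live definition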
            with self.module_store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, descriptor.location.course_key):
                self.module_store.update_item(descriptor, self.user.id)
                self.module_store.publish(descriptor.location, self.user.id)
        else:
            # Use "per-student" rerandomization so that check-problem can be called more than once.
            # Using "always" means we cannot check a problem twice, but we want to call once to get the
            # correct answer, and call a second time with that answer to confirm it's graded as correct.
            # Per-student rerandomization will at least generate different seeds for different users, so
            # we get a little more test coverage.
            ItemFactory.create(parent_location=self.problem_section.location,
                               category="problem",
                               display_name=str(problem_url_name),
                               data=problem_xml,
                               metadata={"rerandomize": "per_student"})

    def test_rescoring_randomized_problem(self):
        """Run rescore scenario on custom problem that uses randomize"""
        # First define the custom response problem:
        problem_url_name = 'H1P1'
        self.define_randomized_custom_response_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        descriptor = self.module_store.get_item(location)
        # run with more than one user
        userlist = ['u1', 'u2', 'u3', 'u4']
        for username in userlist:
            # first render the problem, so that a seed will be created for this user
            self.render_problem(username, problem_url_name)
            # submit a bogus answer, in order to get the problem to tell us its real answer
            dummy_answer = "1000"
            self.submit_student_answer(username, problem_url_name, [dummy_answer, dummy_answer])
            # we should have gotten the problem wrong, since we're way out of range:
            self.check_state(username, descriptor, 0, 1, 1)
            # dig the correct answer out of the problem's message
            module = self.get_student_module(username, descriptor)
            state = json.loads(module.state)
            correct_map = state['correct_map']
            log.info("Correct Map: %s", correct_map)
            # only one response, so pull it out:
            answer = correct_map.values()[0]['msg']
            self.submit_student_answer(username, problem_url_name, [answer, answer])
            # we should now get the problem right, with a second attempt:
            self.check_state(username, descriptor, 1, 1, 2)

        # redefine the problem (as stored in Mongo) so that the definition of correct changes
        self.define_randomized_custom_response_problem(problem_url_name, redefine=True)
        # confirm that simply rendering the problem again does not result in a change
        # in the grade (or the attempts):
        self.render_problem('u1', problem_url_name)
        self.check_state('u1', descriptor, 1, 1, 2)

        # rescore the problem for only one student -- only that student's grade should change
        # (and none of the attempts):
        self.submit_rescore_one_student_answer('instructor', problem_url_name, User.objects.get(username='u1'))
        for username in userlist:
            self.check_state(username, descriptor, 0 if username == 'u1' else 1, 1, 2)

        # rescore the problem for all students
        self.submit_rescore_all_student_answers('instructor', problem_url_name)

        # all grades should change to being wrong (with no change in attempts)
        for username in userlist:
            self.check_state(username, descriptor, 0, 1, 2)


class TestResetAttemptsTask(TestIntegrationTask):
    """
    Integration-style tests for resetting problem attempts in a background task.

    Exercises real problems with a minimum of patching.
    """
    userlist = ['u1', 'u2', 'u3', 'u4']

    def setUp(self):
        super(TestResetAttemptsTask, self).setUp()
        self.initialize_course()
        self.create_instructor('instructor')
        for username in self.userlist:
            self.create_student(username)
        self.logout()

    def get_num_attempts(self, username, descriptor):
        """returns number of attempts stored for `username` on problem `descriptor` for test course"""
        module = self.get_student_module(username, descriptor)
        state = json.loads(module.state)
        return state['attempts']

    def reset_problem_attempts(self, instructor, location):
        """Submits the current problem for resetting"""
        return submit_reset_problem_attempts_for_all_students(self.create_task_request(instructor),
                                                              location)

    def test_reset_attempts_on_problem(self):
        """Run reset-attempts scenario on option problem"""
        # get descriptor:
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        descriptor = self.module_store.get_item(location)
        num_attempts = 3
        # first store answers for each of the separate users:
        for _ in range(num_attempts):
            for username in self.userlist:
                self.submit_student_answer(username, problem_url_name, [OPTION_1, OPTION_1])

        for username in self.userlist:
            self.assertEquals(self.get_num_attempts(username, descriptor), num_attempts)

        self.reset_problem_attempts('instructor', location)

        for username in self.userlist:
            self.assertEquals(self.get_num_attempts(username, descriptor), 0)

    def test_reset_failure(self):
        """Simulate a failure in resetting attempts on a problem"""
        problem_url_name = 'H1P1'
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        self.define_option_problem(problem_url_name)
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])

        expected_message = "bad things happened"
        with patch('courseware.models.StudentModule.save') as mock_save:
            mock_save.side_effect = ZeroDivisionError(expected_message)
            instructor_task = self.reset_problem_attempts('instructor', location)
        self._assert_task_failure(instructor_task.id, 'reset_problem_attempts', problem_url_name, expected_message)

    def test_reset_non_problem(self):
        """confirm that a non-problem can still be successfully reset"""
        location = self.problem_section.location
        instructor_task = self.reset_problem_attempts('instructor', location)
        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        self.assertEqual(instructor_task.task_state, SUCCESS)


class TestDeleteProblemTask(TestIntegrationTask):
    """
    Integration-style tests for deleting problem state in a background task.

    Exercises real problems with a minimum of patching.
    """
    userlist = ['u1', 'u2', 'u3', 'u4']

    def setUp(self):
        super(TestDeleteProblemTask, self).setUp()

        self.initialize_course()
        self.create_instructor('instructor')
        for username in self.userlist:
            self.create_student(username)
        self.logout()

    def delete_problem_state(self, instructor, location):
        """Submits the current problem for deletion"""
        return submit_delete_problem_state_for_all_students(self.create_task_request(instructor), location)

    def test_delete_problem_state(self):
        """Run delete-state scenario on option problem"""
        # get descriptor:
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        descriptor = self.module_store.get_item(location)
        # first store answers for each of the separate users:
        for username in self.userlist:
            self.submit_student_answer(username, problem_url_name, [OPTION_1, OPTION_1])
        # confirm that state exists:
        for username in self.userlist:
            self.assertTrue(self.get_student_module(username, descriptor) is not None)
        # run delete task:
        self.delete_problem_state('instructor', location)
        # confirm that no state can be found:
        for username in self.userlist:
            with self.assertRaises(StudentModule.DoesNotExist):
                self.get_student_module(username, descriptor)

    def test_delete_failure(self):
        """Simulate a failure in deleting state of a problem"""
        problem_url_name = 'H1P1'
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        self.define_option_problem(problem_url_name)
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])

        expected_message = "bad things happened"
        with patch('courseware.models.StudentModule.delete') as mock_delete:
            mock_delete.side_effect = ZeroDivisionError(expected_message)
            instructor_task = self.delete_problem_state('instructor', location)
        self._assert_task_failure(instructor_task.id, 'delete_problem_state', problem_url_name, expected_message)

    def test_delete_non_problem(self):
        """confirm that a non-problem can still be successfully deleted"""
        location = self.problem_section.location
        instructor_task = self.delete_problem_state('instructor', location)
        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        self.assertEqual(instructor_task.task_state, SUCCESS)


class TestGradeReportConditionalContent(TestReportMixin, TestIntegrationTask):
    """
    Check that grade export works when graded content exists within
    split modules.
    """
    def setUp(self):
        """
        Set up a course with graded problems within a split test.

        Course hierarchy is as follows (modeled after how split tests
        are created in studio):
        -> course
            -> chapter
                -> sequential (graded)
                    -> vertical
                        -> split_test
                            -> vertical (Group A)
                                -> problem
                            -> vertical (Group B)
                                -> problem
        """
        super(TestGradeReportConditionalContent, self).setUp()

        # Create user partitions
        self.user_partition_group_a = 0
        self.user_partition_group_b = 1
        self.partition = UserPartition(
            0,
            'first_partition',
            'First Partition',
            [
                Group(self.user_partition_group_a, 'Group A'),
                Group(self.user_partition_group_b, 'Group B')
            ]
        )

        # Create course with group configurations and grading policy
        self.initialize_course(
            course_factory_kwargs={
                'user_partitions': [self.partition],
                'grading_policy': {
                    "GRADER": [{
                        "type": "Homework",
                        "min_count": 1,
                        "drop_count": 0,
                        "short_label": "HW",
                        "weight": 1.0
                    }]
                }
            }
        )

        # Create users and partition them
        self.student_a = self.create_student('student_a')
        self.student_b = self.create_student('student_b')
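        # assign each student to a group by writing the course tag that the partition
        # service reads when resolving a user's group for this partition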
        UserCourseTagFactory(
            user=self.student_a,
            course_id=self.course.id,
            key='xblock.partition_service.partition_{0}'.format(self.partition.id),  # pylint: disable=no-member
            value=str(self.user_partition_group_a)
        )
        UserCourseTagFactory(
            user=self.student_b,
            course_id=self.course.id,
            key='xblock.partition_service.partition_{0}'.format(self.partition.id),  # pylint: disable=no-member
            value=str(self.user_partition_group_b)
        )

        # Create a vertical to contain our split test
        problem_vertical = ItemFactory.create(
            parent_location=self.problem_section.location,
            category='vertical',
            display_name='Problem Unit'
        )

        # Create the split test and child vertical containers
        vertical_a_url = self.course.id.make_usage_key('vertical', 'split_test_vertical_a')
        vertical_b_url = self.course.id.make_usage_key('vertical', 'split_test_vertical_b')
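        # group_id_to_child maps each partition group id to the vertical its group members see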
        self.split_test = ItemFactory.create(
            parent_location=problem_vertical.location,
            category='split_test',
            display_name='Split Test',
            user_partition_id=self.partition.id,  # pylint: disable=no-member
            group_id_to_child={str(index): url for index, url in enumerate([vertical_a_url, vertical_b_url])}
        )
        self.vertical_a = ItemFactory.create(
            parent_location=self.split_test.location,
            category='vertical',
            display_name='Group A problem container',
            location=vertical_a_url
        )
        self.vertical_b = ItemFactory.create(
            parent_location=self.split_test.location,
            category='vertical',
            display_name='Group B problem container',
            location=vertical_b_url
        )

    def verify_csv_task_success(self, task_result):
        """
        Verify that all students were successfully graded by
        `upload_grades_csv`.

        Arguments:
            task_result (dict): Return value of `upload_grades_csv`.
        """
        self.assertDictContainsSubset({'attempted': 2, 'succeeded': 2, 'failed': 0}, task_result)

    def verify_grades_in_csv(self, students_grades):
        """
        Verify that the grades CSV contains the expected grades data.

        Arguments:
            students_grades (iterable): An iterable of dictionaries,
                where each dict maps a student to another dict
                representing their grades we expect to see in the CSV.
                For example: [{student_a: {'grade': 1.0, 'HW': 1.0}}]
        """
        def merge_dicts(*dicts):
            """
            Return the union of dicts

            Arguments:
                dicts: tuple of dicts
            """
            return dict([item for d in dicts for item in d.items()])

        def user_partition_group(user):
            """Return a dict with a single key (the experiment group column header) mapped to the student's group in the partition"""
            group_config_hdr_tpl = 'Experiment Group ({})'
            return {
                group_config_hdr_tpl.format(self.partition.name): self.partition.scheme.get_group_for_user(   # pylint: disable=E1101
                    self.course.id, user, self.partition, track_function=None
                ).name
            }

        self.verify_rows_in_csv(
            [
                merge_dicts(
                    {'id': str(student.id), 'username': student.username, 'email': student.email},
                    grades,
                    user_partition_group(student)
                )
                for student_grades in students_grades for student, grades in student_grades.iteritems()
            ]
        )

    def test_both_groups_problems(self):
        """
        Verify that grade export works when each user partition
        receives (different) problems.  Each user's grade on their
        particular problem should show up in the grade report.
        """
        problem_a_url = 'problem_a_url'
        problem_b_url = 'problem_b_url'
        self.define_option_problem(problem_a_url, parent=self.vertical_a)
        self.define_option_problem(problem_b_url, parent=self.vertical_b)
        # student A will get 100%, student B will get 50% because
        # OPTION_1 is the correct option, and OPTION_2 is the
        # incorrect option
        self.submit_student_answer(self.student_a.username, problem_a_url, [OPTION_1, OPTION_1])
        self.submit_student_answer(self.student_b.username, problem_b_url, [OPTION_1, OPTION_2])

        with patch('instructor_task.tasks_helper._get_current_task'):
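            # the grade CSV helper reports progress via the current celery task, which does not
            # exist when calling it directly here, so it is patched out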
            result = upload_grades_csv(None, None, self.course.id, None, 'graded')
            self.verify_csv_task_success(result)
            self.verify_grades_in_csv(
                [
                    {self.student_a: {'grade': '1.0', 'HW': '1.0'}},
                    {self.student_b: {'grade': '0.5', 'HW': '0.5'}}
                ]
            )

    def test_one_group_problem(self):
        """
        Verify that grade export works when only the Group A user
        partition receives a problem.  We expect to see a column for
        the homework where student_a's entry includes their grade, and
        student b's entry shows a 0.
        """
        problem_a_url = 'problem_a_url'
        self.define_option_problem(problem_a_url, parent=self.vertical_a)

        self.submit_student_answer(self.student_a.username, problem_a_url, [OPTION_1, OPTION_1])

        with patch('instructor_task.tasks_helper._get_current_task'):
            result = upload_grades_csv(None, None, self.course.id, None, 'graded')
            self.verify_csv_task_success(result)
            self.verify_grades_in_csv(
                [
                    {self.student_a: {'grade': '1.0', 'HW': '1.0'}},
                    {self.student_b: {'grade': '0.0', 'HW': '0.0'}}
                ]
            )