"""
Integration Tests for LMS instructor-initiated background tasks.

Runs tasks on answers to course problems to validate that code
paths actually work.
"""
from collections import namedtuple
import ddt
import json
import logging
from mock import patch
from nose.plugins.attrib import attr
import textwrap

from celery.states import SUCCESS, FAILURE
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse

from openedx.core.djangoapps.util.testing import TestConditionalContent
from capa.tests.response_xml_factory import (CodeResponseXMLFactory,
                                             CustomResponseXMLFactory)
from xmodule.modulestore.tests.factories import ItemFactory
from xmodule.modulestore import ModuleStoreEnum

from courseware.model_data import StudentModule

from lms.djangoapps.instructor_task.api import (
    submit_rescore_problem_for_all_students,
    submit_rescore_problem_for_student,
    submit_reset_problem_attempts_for_all_students,
    submit_delete_problem_state_for_all_students
)
from lms.djangoapps.instructor_task.models import InstructorTask
from lms.djangoapps.instructor_task.tasks_helper import upload_grades_csv
from lms.djangoapps.instructor_task.tests.test_base import (
    InstructorTaskModuleTestCase,
    TestReportMixin,
    OPTION_1,
    OPTION_2,
)
from capa.responsetypes import StudentInputError
from lms.djangoapps.grades.new.course_grade import CourseGradeFactory
from openedx.core.lib.url_utils import quote_slashes


log = logging.getLogger(__name__)


class TestIntegrationTask(InstructorTaskModuleTestCase):
    """
    Base class to provide general methods used for "integration" testing of particular tasks.
    """

    def _assert_task_failure(self, entry_id, task_type, problem_url_name, expected_message):
        """Confirm that expected values are stored in InstructorTask on task failure."""
        instructor_task = InstructorTask.objects.get(id=entry_id)
        self.assertEqual(instructor_task.task_state, FAILURE)
        self.assertEqual(instructor_task.requester.username, 'instructor')
        self.assertEqual(instructor_task.task_type, task_type)
        task_input = json.loads(instructor_task.task_input)
        self.assertNotIn('student', task_input)
        self.assertEqual(task_input['problem_url'], InstructorTaskModuleTestCase.problem_location(problem_url_name).to_deprecated_string())
        status = json.loads(instructor_task.task_output)
        self.assertEqual(status['exception'], 'ZeroDivisionError')
        self.assertEqual(status['message'], expected_message)
        # check status returned:
        status = InstructorTaskModuleTestCase.get_task_status(instructor_task.task_id)
        self.assertEqual(status['message'], expected_message)


@attr(shard=3)
@ddt.ddt
class TestRescoringTask(TestIntegrationTask):
    """
    Integration-style tests for rescoring problems in a background task.

    Exercises real problems with a minimum of patching.
    """

    def setUp(self):
        super(TestRescoringTask, self).setUp()

        self.initialize_course()
        self.create_instructor('instructor')
        self.user1 = self.create_student('u1')
        self.user2 = self.create_student('u2')
        self.user3 = self.create_student('u3')
        self.user4 = self.create_student('u4')
        self.users = [self.user1, self.user2, self.user3, self.user4]
        self.logout()

        # set up test user for performing test operations
        self.setup_user()

    def render_problem(self, username, problem_url_name):
        """
        Use ajax interface to request html for a problem.
        """
        # make sure that the requested user is logged in, so that the ajax call works
        # on the right problem:
        self.login_username(username)
        # make ajax call:
        modx_url = reverse('xblock_handler', kwargs={
            'course_id': self.course.id.to_deprecated_string(),
            'usage_id': quote_slashes(InstructorTaskModuleTestCase.problem_location(problem_url_name).to_deprecated_string()),
            'handler': 'xmodule_handler',
            'suffix': 'problem_get',
        })
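        # Posting to the 'problem_get' handler renders the problem HTML for this
        # user.  The randomized-problem test below also relies on this render to
        # create the per-user problem state (including the random seed) before
        # any answer is submitted.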
        resp = self.client.post(modx_url, {})
        return resp

    def check_state(self, user, descriptor, expected_score, expected_max_score, expected_attempts=1):
        """
        Check that the StudentModule state contains the expected values.

        The student module is found for the test course, given the `username` and problem `descriptor`.

        Values checked include the number of attempts, the score, and the max score for a problem.
        """
        module = self.get_student_module(user.username, descriptor)
        self.assertEqual(module.grade, expected_score)
        self.assertEqual(module.max_grade, expected_max_score)
        state = json.loads(module.state)
        attempts = state['attempts']
        self.assertEqual(attempts, expected_attempts)
        if attempts > 0:
            self.assertIn('correct_map', state)
            self.assertIn('student_answers', state)
            self.assertGreater(len(state['correct_map']), 0)
            self.assertGreater(len(state['student_answers']), 0)

        # assume only one problem in the subsection and the grades
        # are in sync.
        expected_subsection_grade = expected_score
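        # 'Homework' is the assignment format used by the test course fixture, so
        # the earned score of the problem's subsection should track the problem
        # score checked above.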

        course_grade = CourseGradeFactory().create(user, self.course)
        self.assertEqual(
            course_grade.graded_subsections_by_format['Homework'][self.problem_section.location].graded_total.earned,
            expected_subsection_grade,
        )

    def submit_rescore_all_student_answers(self, instructor, problem_url_name, only_if_higher=False):
        """Submits the particular problem for rescoring"""
        return submit_rescore_problem_for_all_students(
            self.create_task_request(instructor),
            InstructorTaskModuleTestCase.problem_location(problem_url_name),
            only_if_higher,
        )

    def submit_rescore_one_student_answer(self, instructor, problem_url_name, student, only_if_higher=False):
        """Submits the particular problem for rescoring for a particular student"""
        return submit_rescore_problem_for_student(
            self.create_task_request(instructor),
            InstructorTaskModuleTestCase.problem_location(problem_url_name),
            student,
            only_if_higher,
        )

    def verify_rescore_results(self, problem_edit, new_expected_scores, new_expected_max, rescore_if_higher):
        """
        Common helper to verify that the results of rescoring a single
        student and all students are as expected.
        """
        # get descriptor:
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        descriptor = self.module_store.get_item(location)

        # first store answers for each of the separate users:
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])
        self.submit_student_answer('u2', problem_url_name, [OPTION_1, OPTION_2])
        self.submit_student_answer('u3', problem_url_name, [OPTION_2, OPTION_1])
        self.submit_student_answer('u4', problem_url_name, [OPTION_2, OPTION_2])

        # verify each user's grade
        expected_original_scores = (2, 1, 1, 0)
        expected_original_max = 2
        for i, user in enumerate(self.users):
            self.check_state(user, descriptor, expected_original_scores[i], expected_original_max)

        # update the data in the problem definition so the answer changes.
        self.redefine_option_problem(problem_url_name, **problem_edit)

        # confirm that simply rendering the problem again does not change the grade
        self.render_problem('u1', problem_url_name)
        self.check_state(self.user1, descriptor, expected_original_scores[0], expected_original_max)

        # rescore the problem for only one student -- only that student's grade should change:
        self.submit_rescore_one_student_answer('instructor', problem_url_name, self.user1, rescore_if_higher)
        self.check_state(self.user1, descriptor, new_expected_scores[0], new_expected_max)
        for i, user in enumerate(self.users[1:], start=1):  # everyone other than user1
            self.check_state(user, descriptor, expected_original_scores[i], expected_original_max)

        # rescore the problem for all students
        self.submit_rescore_all_student_answers('instructor', problem_url_name, rescore_if_higher)
        for i, user in enumerate(self.users):
            self.check_state(user, descriptor, new_expected_scores[i], new_expected_max)

    RescoreTestData = namedtuple('RescoreTestData', 'edit, new_expected_scores, new_expected_max')
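    # Worked example for the first rescore case below: the problem initially
    # marks OPTION_1 correct, so u1-u4 start at scores (2, 1, 1, 0) out of 2.
    # After the edit makes OPTION_2 the correct answer, rescoring flips those
    # results to (0, 1, 1, 2) with the same maximum of 2.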

    @ddt.data(
        RescoreTestData(edit=dict(correct_answer=OPTION_2), new_expected_scores=(0, 1, 1, 2), new_expected_max=2),
        RescoreTestData(edit=dict(num_inputs=2), new_expected_scores=(2, 1, 1, 0), new_expected_max=4),
        RescoreTestData(edit=dict(num_inputs=4), new_expected_scores=(2, 1, 1, 0), new_expected_max=8),
        RescoreTestData(edit=dict(num_responses=4), new_expected_scores=(2, 1, 1, 0), new_expected_max=4),
        RescoreTestData(edit=dict(num_inputs=2, num_responses=4), new_expected_scores=(2, 1, 1, 0), new_expected_max=8),
    )
    @ddt.unpack
    def test_rescoring_option_problem(self, problem_edit, new_expected_scores, new_expected_max):
        """
        Run rescore scenario on option problem.
        Verify rescoring updates grade after content change.
        Original problem definition has:
            num_inputs = 1
            num_responses = 2
            correct_answer = OPTION_1
        """
        self.verify_rescore_results(
            problem_edit, new_expected_scores, new_expected_max, rescore_if_higher=False,
        )

    @ddt.data(
        RescoreTestData(edit=dict(), new_expected_scores=(2, 1, 1, 0), new_expected_max=2),
        RescoreTestData(edit=dict(correct_answer=OPTION_2), new_expected_scores=(2, 1, 1, 2), new_expected_max=2),
        RescoreTestData(edit=dict(num_inputs=2), new_expected_scores=(2, 1, 1, 0), new_expected_max=2),
    )
    @ddt.unpack
    def test_rescoring_if_higher(self, problem_edit, new_expected_scores, new_expected_max):
        """
        Run the rescore-only-if-higher scenario on the option problem.
        Verify that a student's score changes only when rescoring would raise it.
        """
        self.verify_rescore_results(
            problem_edit, new_expected_scores, new_expected_max, rescore_if_higher=True,
        )

    def test_rescoring_failure(self):
        """Simulate a failure in rescoring a problem"""
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])

        expected_message = "bad things happened"
        with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
            mock_rescore.side_effect = ZeroDivisionError(expected_message)
            instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)
        self._assert_task_failure(instructor_task.id, 'rescore_problem', problem_url_name, expected_message)

    def test_rescoring_bad_unicode_input(self):
        """Generate a real failure in rescoring a problem, with an answer including unicode"""
        # At one point, the student answers that resulted in StudentInputErrors were being
        # persisted (even though they were not counted as an attempt).  That is not possible
        # now, so it's harder to generate a test for how such input is handled.
        problem_url_name = 'H1P1'
        # set up an option problem -- doesn't matter really what problem it is, but we need
        # it to have an answer.
        self.define_option_problem(problem_url_name)
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])

        # return an input error as if it were a numerical response, with an embedded unicode character:
        expected_message = u"Could not interpret '2/3\u03a9' as a number"
        with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
            mock_rescore.side_effect = StudentInputError(expected_message)
            instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)

        # check instructor_task returned
        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        self.assertEqual(instructor_task.task_state, 'SUCCESS')
        self.assertEqual(instructor_task.requester.username, 'instructor')
        self.assertEqual(instructor_task.task_type, 'rescore_problem')
        task_input = json.loads(instructor_task.task_input)
        self.assertNotIn('student', task_input)
        self.assertEqual(task_input['problem_url'], InstructorTaskModuleTestCase.problem_location(problem_url_name).to_deprecated_string())
        status = json.loads(instructor_task.task_output)
        self.assertEqual(status['attempted'], 1)
        self.assertEqual(status['succeeded'], 0)
        self.assertEqual(status['total'], 1)

    def define_code_response_problem(self, problem_url_name):
        """
        Define an arbitrary code-response problem.

        We'll end up mocking its evaluation later.
        """
        factory = CodeResponseXMLFactory()
        grader_payload = json.dumps({"grader": "ps04/grade_square.py"})
        problem_xml = factory.build_xml(initial_display="def square(x):",
                                        answer_display="answer",
                                        grader_payload=grader_payload,
                                        num_responses=2)
        ItemFactory.create(parent_location=self.problem_section.location,
                           category="problem",
                           display_name=str(problem_url_name),
                           data=problem_xml)

    def test_rescoring_code_problem(self):
        """Run rescore scenario on problem with code submission"""
        problem_url_name = 'H1P2'
        self.define_code_response_problem(problem_url_name)
        # we fully create the CodeResponse problem, but just pretend that we're queuing it:
        with patch('capa.xqueue_interface.XQueueInterface.send_to_queue') as mock_send_to_queue:
            mock_send_to_queue.return_value = (0, "Successfully queued")
            self.submit_student_answer('u1', problem_url_name, ["answer1", "answer2"])

        instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)

        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        self.assertEqual(instructor_task.task_state, FAILURE)
        status = json.loads(instructor_task.task_output)
        self.assertEqual(status['exception'], 'NotImplementedError')
        self.assertEqual(status['message'], "Problem's definition does not support rescoring.")

        status = InstructorTaskModuleTestCase.get_task_status(instructor_task.task_id)
        self.assertEqual(status['message'], "Problem's definition does not support rescoring.")

    def define_randomized_custom_response_problem(self, problem_url_name, redefine=False):
        """
        Defines a custom response problem that uses a random value to determine correctness.

        Generated answer is also returned as the `msg`, so that the value can be used as a
        correct answer by a test.

        If the `redefine` flag is set, then change the definition of correctness (from equals
        to not-equals).
        """
        factory = CustomResponseXMLFactory()
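        # The %s placeholder below becomes '==' when the problem is first defined
        # and '!=' when it is redefined, so answers graded correct under the
        # original definition become incorrect once the problem is rescored.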
        script = textwrap.dedent("""
                def check_func(expect, answer_given):
                    expected = str(random.randint(0, 100))
                    return {'ok': answer_given %s expected, 'msg': expected}
            """ % ('!=' if redefine else '=='))
        problem_xml = factory.build_xml(script=script, cfn="check_func", expect="42", num_responses=1)
        if redefine:
            descriptor = self.module_store.get_item(
                InstructorTaskModuleTestCase.problem_location(problem_url_name)
            )
            descriptor.data = problem_xml
            with self.module_store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, descriptor.location.course_key):
                self.module_store.update_item(descriptor, self.user.id)
                self.module_store.publish(descriptor.location, self.user.id)
        else:
            # Use "per-student" rerandomization so that check-problem can be called more than once.
            # Using "always" means we cannot check a problem twice, but we want to call once to get the
            # correct answer, and call a second time with that answer to confirm it's graded as correct.
            # Per-student rerandomization will at least generate different seeds for different users, so
            # we get a little more test coverage.
            ItemFactory.create(parent_location=self.problem_section.location,
                               category="problem",
                               display_name=str(problem_url_name),
                               data=problem_xml,
                               metadata={"rerandomize": "per_student"})

    def test_rescoring_randomized_problem(self):
        """Run rescore scenario on custom problem that uses randomize"""
        # First define the custom response problem:
        problem_url_name = 'H1P1'
        self.define_randomized_custom_response_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        descriptor = self.module_store.get_item(location)
        # run with more than one user
        for user in self.users:
            # first render the problem, so that a seed will be created for this user
            self.render_problem(user.username, problem_url_name)
            # submit a bogus answer, in order to get the problem to tell us its real answer
            dummy_answer = "1000"
            self.submit_student_answer(user.username, problem_url_name, [dummy_answer, dummy_answer])
            # we should have gotten the problem wrong, since we're way out of range:
            self.check_state(user, descriptor, 0, 1, expected_attempts=1)
            # dig the correct answer out of the problem's message
            module = self.get_student_module(user.username, descriptor)
            state = json.loads(module.state)
            correct_map = state['correct_map']
            log.info("Correct Map: %s", correct_map)
            # only one response, so pull it out:
            answer = correct_map.values()[0]['msg']
            self.submit_student_answer(user.username, problem_url_name, [answer, answer])
            # we should now get the problem right, with a second attempt:
            self.check_state(user, descriptor, 1, 1, expected_attempts=2)

        # redefine the problem (as stored in Mongo) so that the definition of correct changes
        self.define_randomized_custom_response_problem(problem_url_name, redefine=True)
        # confirm that simply rendering the problem again does not result in a change
        # in the grade (or the attempts):
        self.render_problem('u1', problem_url_name)
        self.check_state(self.user1, descriptor, 1, 1, expected_attempts=2)

        # rescore the problem for only one student -- only that student's grade should change
        # (and none of the attempts):
        self.submit_rescore_one_student_answer('instructor', problem_url_name, User.objects.get(username='u1'))
        for user in self.users:
            expected_score = 0 if user.username == 'u1' else 1
            self.check_state(user, descriptor, expected_score, 1, expected_attempts=2)

        # rescore the problem for all students
        self.submit_rescore_all_student_answers('instructor', problem_url_name)

        # all grades should change to being wrong (with no change in attempts)
        for user in self.users:
            self.check_state(user, descriptor, 0, 1, expected_attempts=2)


class TestResetAttemptsTask(TestIntegrationTask):
    """
    Integration-style tests for resetting problem attempts in a background task.

    Exercises real problems with a minimum of patching.
    """
    userlist = ['u1', 'u2', 'u3', 'u4']

    def setUp(self):
        super(TestResetAttemptsTask, self).setUp()
        self.initialize_course()
        self.create_instructor('instructor')
        for username in self.userlist:
            self.create_student(username)
        self.logout()

    def get_num_attempts(self, username, descriptor):
        """Return the number of attempts stored for `username` on problem `descriptor` in the test course."""
        module = self.get_student_module(username, descriptor)
        state = json.loads(module.state)
        return state['attempts']

    def reset_problem_attempts(self, instructor, location):
        """Submits the current problem for resetting"""
        return submit_reset_problem_attempts_for_all_students(self.create_task_request(instructor),
                                                              location)

    def test_reset_attempts_on_problem(self):
        """Run reset-attempts scenario on option problem"""
        # get descriptor:
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        descriptor = self.module_store.get_item(location)
        num_attempts = 3
        # first store answers for each of the separate users:
        for _ in range(num_attempts):
            for username in self.userlist:
                self.submit_student_answer(username, problem_url_name, [OPTION_1, OPTION_1])

        for username in self.userlist:
            self.assertEqual(self.get_num_attempts(username, descriptor), num_attempts)

        self.reset_problem_attempts('instructor', location)

        for username in self.userlist:
            self.assertEqual(self.get_num_attempts(username, descriptor), 0)

    def test_reset_failure(self):
        """Simulate a failure in resetting attempts on a problem"""
        problem_url_name = 'H1P1'
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        self.define_option_problem(problem_url_name)
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])

        expected_message = "bad things happened"
        with patch('courseware.models.StudentModule.save') as mock_save:
            mock_save.side_effect = ZeroDivisionError(expected_message)
            instructor_task = self.reset_problem_attempts('instructor', location)
        self._assert_task_failure(instructor_task.id, 'reset_problem_attempts', problem_url_name, expected_message)

    def test_reset_non_problem(self):
        """confirm that a non-problem can still be successfully reset"""
        location = self.problem_section.location
        instructor_task = self.reset_problem_attempts('instructor', location)
        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        self.assertEqual(instructor_task.task_state, SUCCESS)


class TestDeleteProblemTask(TestIntegrationTask):
    """
    Integration-style tests for deleting problem state in a background task.

    Exercises real problems with a minimum of patching.
    """
    userlist = ['u1', 'u2', 'u3', 'u4']

    def setUp(self):
        super(TestDeleteProblemTask, self).setUp()

        self.initialize_course()
        self.create_instructor('instructor')
        for username in self.userlist:
            self.create_student(username)
        self.logout()

    def delete_problem_state(self, instructor, location):
        """Submits the current problem for deletion"""
        return submit_delete_problem_state_for_all_students(self.create_task_request(instructor), location)

    def test_delete_problem_state(self):
        """Run delete-state scenario on option problem"""
        # get descriptor:
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        descriptor = self.module_store.get_item(location)
        # first store answers for each of the separate users:
        for username in self.userlist:
            self.submit_student_answer(username, problem_url_name, [OPTION_1, OPTION_1])
        # confirm that state exists:
        for username in self.userlist:
            self.assertIsNotNone(self.get_student_module(username, descriptor))
        # run delete task:
        self.delete_problem_state('instructor', location)
        # confirm that no state can be found:
        for username in self.userlist:
            with self.assertRaises(StudentModule.DoesNotExist):
                self.get_student_module(username, descriptor)

    def test_delete_failure(self):
        """Simulate a failure in deleting state of a problem"""
        problem_url_name = 'H1P1'
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        self.define_option_problem(problem_url_name)
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])

        expected_message = "bad things happened"
        with patch('courseware.models.StudentModule.delete') as mock_delete:
            mock_delete.side_effect = ZeroDivisionError(expected_message)
            instructor_task = self.delete_problem_state('instructor', location)
        self._assert_task_failure(instructor_task.id, 'delete_problem_state', problem_url_name, expected_message)

    def test_delete_non_problem(self):
        """confirm that a non-problem can still be successfully deleted"""
        location = self.problem_section.location
        instructor_task = self.delete_problem_state('instructor', location)
        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        self.assertEqual(instructor_task.task_state, SUCCESS)


class TestGradeReportConditionalContent(TestReportMixin, TestConditionalContent, TestIntegrationTask):
    """
    Test grade report in cases where there are problems contained within split tests.
    """

    def verify_csv_task_success(self, task_result):
        """
        Verify that all students were successfully graded by
        `upload_grades_csv`.

        Arguments:
            task_result (dict): Return value of `upload_grades_csv`.
        """
        self.assertDictContainsSubset({'attempted': 2, 'succeeded': 2, 'failed': 0}, task_result)

    def verify_grades_in_csv(self, students_grades, ignore_other_columns=False):
        """
        Verify that the grades CSV contains the expected grades data.

        Arguments:
            students_grades (iterable): An iterable of dictionaries,
                where each dict maps a student to another dict
                representing the grades we expect to see for them in
                the CSV.  For example:
                [{student_a: {'grade': 1.0, 'HW': 1.0}}]
        """
        def merge_dicts(*dicts):
            """
            Return the union of dicts

            Arguments:
                dicts: tuple of dicts
            """
            return dict([item for d in dicts for item in d.items()])

        def user_partition_group(user):
            """Return a dict with a single key whose value is the student's group in the partition."""
            group_config_hdr_tpl = 'Experiment Group ({})'
            return {
                group_config_hdr_tpl.format(self.partition.name): self.partition.scheme.get_group_for_user(
                    self.course.id, user, self.partition, track_function=None
                ).name
            }

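        # Each expected CSV row merges the student's identity columns, the
        # expected grade columns, and the 'Experiment Group (...)' column built
        # above.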
        self.verify_rows_in_csv(
            [
                merge_dicts(
                    {'Student ID': str(student.id), 'Username': student.username, 'Email': student.email},
                    grades,
                    user_partition_group(student)
                )
                for student_grades in students_grades for student, grades in student_grades.iteritems()
            ],
            ignore_other_columns=ignore_other_columns,
        )

    def test_both_groups_problems(self):
        """
        Verify that grade export works when each user partition
        receives (different) problems.  Each user's grade on their
        particular problem should show up in the grade report.
        """
        problem_a_url = 'problem_a_url'
        problem_b_url = 'problem_b_url'
        self.define_option_problem(problem_a_url, parent=self.vertical_a)
        self.define_option_problem(problem_b_url, parent=self.vertical_b)
        # student A will get 100%, student B will get 50% because
        # OPTION_1 is the correct option, and OPTION_2 is the
        # incorrect option
        self.submit_student_answer(self.student_a.username, problem_a_url, [OPTION_1, OPTION_1])
        self.submit_student_answer(self.student_b.username, problem_b_url, [OPTION_1, OPTION_2])

        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
            result = upload_grades_csv(None, None, self.course.id, None, 'graded')
            self.verify_csv_task_success(result)
            self.verify_grades_in_csv(
                [
                    {
                        self.student_a: {
                            u'Grade': '1.0',
                            u'Homework': '1.0',
                        }
                    },
                    {
                        self.student_b: {
                            u'Grade': '0.5',
                            u'Homework': '0.5',
                        }
                    },
                ],
                ignore_other_columns=True,
            )

    def test_one_group_problem(self):
        """
        Verify that grade export works when only the Group A user
        partition receives a problem.  We expect to see a column for
        the homework where student_a's entry includes their grade, and
        student b's entry shows a 0.
        """
        problem_a_url = 'problem_a_url'
        self.define_option_problem(problem_a_url, parent=self.vertical_a)

        self.submit_student_answer(self.student_a.username, problem_a_url, [OPTION_1, OPTION_1])

        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
            result = upload_grades_csv(None, None, self.course.id, None, 'graded')
            self.verify_csv_task_success(result)
            self.verify_grades_in_csv(
                [
                    {
                        self.student_a: {
                            u'Grade': '1.0',
                            u'Homework': '1.0',
                        },
                    },
                    {
                        self.student_b: {
                            u'Grade': '0.0',
                            u'Homework': u'Not Available',
                        }
                    },
                ],
                ignore_other_columns=True
            )