"""
This module contains celery task functions for handling the management of subtasks.
"""
from time import time
import json
from uuid import uuid4
import psutil
from contextlib import contextmanager
import logging

from celery.states import SUCCESS, READY_STATES, RETRY
import dogstats_wrapper as dog_stats_api

from django.db import transaction, DatabaseError
from django.core.cache import cache

from instructor_task.models import InstructorTask, PROGRESS, QUEUING

TASK_LOG = logging.getLogger('edx.celery.task')

# Lock expiration should be long enough to allow a subtask to complete.
SUBTASK_LOCK_EXPIRE = 60 * 10  # Lock expires in 10 minutes
# Number of times to retry if a subtask update encounters a lock on the InstructorTask.
# (These are recursive retries, so don't make this number too large.)
MAX_DATABASE_LOCK_RETRIES = 5


class DuplicateTaskException(Exception):
    """Exception indicating that a task already exists or has already completed."""
    pass


def _get_number_of_subtasks(total_num_items, items_per_task):
    """
    Determines the number of subtasks that would be generated by _generate_items_for_subtask.

    This needs to be calculated before the query is executed so that the list of all subtasks can be
    stored in the InstructorTask before any subtasks are started.

    The number of subtask_id values returned by this should match the number of chunks returned
    by the _generate_items_for_subtask generator.
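
    For example (hypothetical numbers): with total_num_items=250 and items_per_task=100,
    divmod(250, 100) == (2, 50), and the nonzero remainder adds one final chunk, giving
    3 subtasks.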
    """
    num_subtasks, remainder = divmod(total_num_items, items_per_task)
    if remainder:
        num_subtasks += 1
    return num_subtasks


@contextmanager
def track_memory_usage(metric, course_id):
    """
    Context manager to track how much memory (in bytes) a given process uses.
    Metrics will look like: 'course_email.subtask_generation.memory.rss'
    or 'course_email.subtask_generation.memory.vms'.
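
    Usage sketch (generate_subtasks() stands in for any hypothetical workload):

        with track_memory_usage('course_email.subtask_generation.memory', course_id):
            generate_subtasks()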
    """
    memory_types = ['rss', 'vms']
    process = psutil.Process()
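    # Note: get_memory_info() is the psutil 1.x API; psutil 2.0+ renames it to memory_info().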
    baseline_memory_info = process.get_memory_info()
    baseline_usages = [getattr(baseline_memory_info, memory_type) for memory_type in memory_types]
    yield
    for memory_type, baseline_usage in zip(memory_types, baseline_usages):
        total_memory_info = process.get_memory_info()
        total_usage = getattr(total_memory_info, memory_type)
        memory_used = total_usage - baseline_usage
        dog_stats_api.increment(
            metric + "." + memory_type,
            memory_used,
            tags=["course_id:{}".format(course_id)],
        )


def _generate_items_for_subtask(
    item_querysets,  # pylint: disable=bad-continuation
    item_fields,
    total_num_items,
    items_per_task,
    total_num_subtasks,
    course_id,
):
    """
    Generates a chunk of "items" that should be passed into a subtask.

    Arguments:
        `item_querysets` : a list of query sets, each of which defines the "items" that should be passed to subtasks.
        `item_fields` : the fields that should be included in the dict that is returned.
            These are in addition to the 'pk' field.
        `total_num_items` : the result of summing the count of each queryset in `item_querysets`.
        `items_per_task` : maximum size of chunks to break each query chunk into for use by a subtask.
        `total_num_subtasks` : number of subtasks to be generated, as computed by _get_number_of_subtasks().
        `course_id` : course_id of the course. Only needed for the track_memory_usage context manager.

    Returns:  yields a list of dicts, where each dict contains the fields in `item_fields`, plus the 'pk' field.
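
    For example (hypothetical values): with item_fields=['email'], each yielded chunk looks
    like [{'pk': 1, 'email': 'a@example.com'}, {'pk': 2, 'email': 'b@example.com'}, ...].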

    Warning:  if the algorithm here changes, the _get_number_of_subtasks() function should similarly be changed.
    """
    num_items_queued = 0
    all_item_fields = list(item_fields)
    all_item_fields.append('pk')
    num_subtasks = 0

    items_for_task = []

    with track_memory_usage('course_email.subtask_generation.memory', course_id):
        for queryset in item_querysets:
            for item in queryset.values(*all_item_fields).iterator():
                if len(items_for_task) == items_per_task and num_subtasks < total_num_subtasks - 1:
                    yield items_for_task
                    num_items_queued += items_per_task
                    items_for_task = []
                    num_subtasks += 1
                items_for_task.append(item)

        # yield remainder items for task, if any
        if items_for_task:
            yield items_for_task
            num_items_queued += len(items_for_task)

    # Note, depending on what kind of DB is used, it's possible for the queryset
    # we iterate over to change in the course of the query. Therefore it's
    # possible that there are more (or fewer) items queued than were initially
    # calculated. It also means it's possible that the last task contains
    # more items than items_per_task allows. We expect this to be a small enough
    # number as to be negligible.
    if num_items_queued != total_num_items:
        TASK_LOG.info("Number of items generated by chunking %s not equal to original total %s", num_items_queued, total_num_items)


class SubtaskStatus(object):
    """
    Class for tracking the status of a subtask.

    SubtaskStatus values are:

      'task_id' : id of subtask.  This is used to pass task information across retries.
      'attempted' : number of attempts -- should equal succeeded plus failed
      'succeeded' : number that succeeded in processing
      'skipped' : number that were not processed.
      'failed' : number that failed during processing
      'retried_nomax' : number of times the subtask has been retried for conditions that
          should not have a maximum count applied
      'retried_withmax' : number of times the subtask has been retried for conditions that
          should have a maximum count applied
      'state' : celery state of the subtask (e.g. QUEUING, PROGRESS, RETRY, FAILURE, SUCCESS)

    Object is not JSON-serializable, so to_dict and from_dict methods are provided so that
    it can be passed as a serializable argument to tasks (and be reconstituted within such tasks).

    In future, we may want to include specific error information
    indicating the reason for failure.
    Also, we should count up "not attempted" separately from attempted/failed.
    """

    def __init__(self, task_id, attempted=None, succeeded=0, failed=0, skipped=0, retried_nomax=0, retried_withmax=0, state=None):
        """Construct a SubtaskStatus object."""
        self.task_id = task_id
        if attempted is not None:
            self.attempted = attempted
        else:
            self.attempted = succeeded + failed
        self.succeeded = succeeded
        self.failed = failed
        self.skipped = skipped
        self.retried_nomax = retried_nomax
        self.retried_withmax = retried_withmax
        self.state = state if state is not None else QUEUING

    @classmethod
    def from_dict(cls, d):
        """Construct a SubtaskStatus object from a dict representation."""
        options = dict(d)
        task_id = options['task_id']
        del options['task_id']
        return SubtaskStatus.create(task_id, **options)

    @classmethod
    def create(cls, task_id, **options):
        """Construct a SubtaskStatus object."""
        return cls(task_id, **options)

    def to_dict(self):
        """
        Output a dict representation of a SubtaskStatus object.

        Use for creating a JSON-serializable representation for use by tasks.
        """
        return self.__dict__

    def increment(self, succeeded=0, failed=0, skipped=0, retried_nomax=0, retried_withmax=0, state=None):
        """
        Update the result of a subtask with additional results.

        Kwarg arguments are incremented to the existing values.
        The exception is for `state`, which if specified is used to override the existing value.
        """
        self.attempted += (succeeded + failed)
        self.succeeded += succeeded
        self.failed += failed
        self.skipped += skipped
        self.retried_nomax += retried_nomax
        self.retried_withmax += retried_withmax
        if state is not None:
            self.state = state

    def get_retry_count(self):
        """Returns the number of retries of any kind."""
        return self.retried_nomax + self.retried_withmax

    def __repr__(self):
        """Return print representation of a SubtaskStatus object."""
        return 'SubtaskStatus<%r>' % (self.to_dict(),)

    def __unicode__(self):
        """Return unicode version of a SubtaskStatus object representation."""
        return unicode(repr(self))
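
# Illustrative round trip (hypothetical task id), showing how a SubtaskStatus is
# passed as a JSON-serializable task argument and reconstituted:
#
#     status = SubtaskStatus.create('hypothetical-task-id')
#     status.increment(succeeded=5, failed=1, state=SUCCESS)
#     as_dict = status.to_dict()                   # JSON-serializable dict
#     status = SubtaskStatus.from_dict(as_dict)    # rebuilt inside the subtask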


def initialize_subtask_info(entry, action_name, total_num, subtask_id_list):
    """
    Store initial subtask information to InstructorTask object.

    The InstructorTask's "task_output" field is initialized.  This is a JSON-serialized dict.
    Counters for 'attempted', 'succeeded', 'failed', 'skipped' keys are initialized to zero,
    as is the 'duration_ms' value.  A 'start_time' is stored for later duration calculations,
    and the total number of "things to do" is set, so the user can be told how much needs to be
    done overall.  The `action_name` is also stored, to help with constructing more readable
    task_progress messages.

    The InstructorTask's "subtasks" field is also initialized.  This is also a JSON-serialized dict.
    Keys include 'total', 'succeeded', 'retried', 'failed', which are counters for the number of
    subtasks.  'Total' is set here to the total number, while the other three are initialized to zero.
    Once the counters for 'succeeded' and 'failed' match the 'total', the subtasks are done and
    the InstructorTask's "status" will be changed to SUCCESS.

    The "subtasks" field also contains a 'status' key, that contains a dict that stores status
    information for each subtask.  The value for each subtask (keyed by its task_id)
    is its subtask status, as defined by SubtaskStatus.to_dict().

    This information needs to be set up in the InstructorTask before any of the subtasks start
    running.  If not, there is a chance that the subtasks could complete before the parent task
    is done creating subtasks.  Doing so also simplifies the save() here, as it avoids the need
    for locking.

    Monitoring code should assume that if an InstructorTask has subtask information, it should
    rely on the status stored in the InstructorTask object, rather than the status stored in the
    corresponding AsyncResult.
    """
    task_progress = {
        'action_name': action_name,
        'attempted': 0,
        'failed': 0,
        'skipped': 0,
        'succeeded': 0,
        'total': total_num,
        'duration_ms': int(0),
        'start_time': time()
    }
    entry.task_output = InstructorTask.create_output_for_success(task_progress)
    entry.task_state = PROGRESS

    # Write out the subtasks information.
    num_subtasks = len(subtask_id_list)
    # Note that it may not be necessary to store the initial value with all those zeroes!
    # Write out as a dict, so it will go more smoothly into json.
    subtask_status = {subtask_id: (SubtaskStatus.create(subtask_id)).to_dict() for subtask_id in subtask_id_list}
    subtask_dict = {
        'total': num_subtasks,
        'succeeded': 0,
        'failed': 0,
        'status': subtask_status
    }
    entry.subtasks = json.dumps(subtask_dict)

    # and save the entry immediately, before any subtasks actually start work:
    entry.save_now()
    return task_progress
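
# For a hypothetical run with two subtasks, entry.subtasks is initialized to the
# JSON form of:
#
#     {'total': 2, 'succeeded': 0, 'failed': 0,
#      'status': {'<subtask_id_1>': SubtaskStatus.create('<subtask_id_1>').to_dict(),
#                 '<subtask_id_2>': SubtaskStatus.create('<subtask_id_2>').to_dict()}}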


# pylint: disable=bad-continuation
def queue_subtasks_for_query(
    entry,
    action_name,
    create_subtask_fcn,
    item_querysets,
    item_fields,
    items_per_task,
    total_num_items,
):
    """
    Generates and queues subtasks to each execute a chunk of "items" generated by a queryset.

    Arguments:
        `entry` : the InstructorTask object for which subtasks are being queued.
        `action_name` : a past-tense verb that can be used for constructing readable status messages.
        `create_subtask_fcn` : a function of two arguments that constructs the desired kind of subtask object.
            Arguments are the list of items to be processed by this subtask, and a SubtaskStatus
            object reflecting initial status (and containing the subtask's id).
        `item_querysets` : a list of query sets that define the "items" that should be passed to subtasks.
        `item_fields` : the fields that should be included in the dict that is returned.
            These are in addition to the 'pk' field.
        `items_per_task` : maximum size of chunks to break each query chunk into for use by a subtask.
        `total_num_items` : total number of items that will be put into subtasks.

    Returns:  the task progress as stored in the InstructorTask object.

    """
    task_id = entry.task_id

    # Calculate the number of tasks that will be created, and create a list of ids for each task.
    total_num_subtasks = _get_number_of_subtasks(total_num_items, items_per_task)
    subtask_id_list = [str(uuid4()) for _ in range(total_num_subtasks)]

    # Update the InstructorTask with information about the subtasks we've defined.
    TASK_LOG.info(
        "Task %s: updating InstructorTask %s with subtask info for %s subtasks to process %s items.",
        task_id,
        entry.id,
        total_num_subtasks,
        total_num_items,
319
    )  # pylint: disable=no-member
    progress = initialize_subtask_info(entry, action_name, total_num_items, subtask_id_list)

    # Construct a generator that will return the items to use for each subtask.
    # Pass in the desired fields to fetch for each item.
    item_list_generator = _generate_items_for_subtask(
        item_querysets,
        item_fields,
        total_num_items,
        items_per_task,
        total_num_subtasks,
        entry.course_id,
    )

    # Now create the subtasks, and start them running.
    TASK_LOG.info(
        "Task %s: creating %s subtasks to process %s items.",
        task_id,
        total_num_subtasks,
        total_num_items,
    )
    num_subtasks = 0
    for item_list in item_list_generator:
        subtask_id = subtask_id_list[num_subtasks]
        num_subtasks += 1
        subtask_status = SubtaskStatus.create(subtask_id)
        new_subtask = create_subtask_fcn(item_list, subtask_status)
        new_subtask.apply_async()

    # Subtasks have been queued so no exceptions should be raised after this point.

    # Return the task progress as stored in the InstructorTask object.
    return progress
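
# Usage sketch (send_course_email and recipient_qset are hypothetical; create_subtask_fcn
# must return something that can be queued via apply_async, e.g. a celery subtask signature):
#
#     def create_email_subtask(item_list, subtask_status):
#         return send_course_email.subtask(
#             (entry.id, item_list, subtask_status.to_dict()),
#             task_id=subtask_status.task_id,
#         )
#
#     progress = queue_subtasks_for_query(
#         entry, 'emailed', create_email_subtask,
#         [recipient_qset], ['email'],
#         items_per_task=100, total_num_items=recipient_qset.count(),
#     )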


def _acquire_subtask_lock(task_id):
    """
    Mark the specified task_id as being in progress.

    This is used to make sure that the same task is not worked on by more than one worker
    at the same time.  This can occur when tasks are requeued by Celery in response to
    loss of connection to the task broker.  Most of the time, such duplicate tasks are
    run sequentially, but they can overlap in processing as well.

    Returns true if the task_id was not already locked; false if it was.
    """
    # cache.add fails if the key already exists
    key = "subtask-{}".format(task_id)
    succeeded = cache.add(key, 'true', SUBTASK_LOCK_EXPIRE)
    if not succeeded:
        TASK_LOG.warning("task_id '%s': already locked.  Contains value '%s'", task_id, cache.get(key))
    return succeeded


def _release_subtask_lock(task_id):
    """
    Unmark the specified task_id as being no longer in progress.

    This is most important to permit a task to be retried.
    """
    # According to Celery task cookbook, "Memcache delete is very slow, but we have
    # to use it to take advantage of using add() for atomic locking."
    key = "subtask-{}".format(task_id)
    cache.delete(key)
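
# These two helpers pair up across a subtask's lifecycle: the lock is acquired in
# check_subtask_is_valid() and released in update_subtask_status(), e.g.:
#
#     check_subtask_is_valid(entry_id, task_id, status)    # acquires the lock
#     ...                                                  # do the subtask's work
#     update_subtask_status(entry_id, task_id, status)     # releases the lock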


def check_subtask_is_valid(entry_id, current_task_id, new_subtask_status):
    """
    Confirms that the current subtask is known to the InstructorTask and hasn't already been completed.

    Problems can occur when the parent task has been run twice, and results in duplicate
    subtasks being created for the same InstructorTask entry.  This can happen when Celery
    loses its connection to its broker, and any current tasks get requeued.

    If a parent task gets requeued, then the same InstructorTask may have a different set of
    subtasks defined (to do the same thing), so the subtasks from the first queuing would not
    be known to the InstructorTask.  We raise an exception in this case.

    If a subtask gets requeued, then the first time the subtask runs it should run to completion.
    However, we want to prevent it from running again, so we check here to see what the existing
    subtask's status is.  If it is complete, we raise an exception.  We also take a lock on the task,
    so that we can detect if another worker has started work but has not yet completed that work.
    In that case the other worker is allowed to finish, and this invocation raises an exception.

    Raises a DuplicateTaskException exception if it's not a task that should be run.

    If this check succeeds, the caller must eventually call update_subtask_status() to release
    the lock on the task.
    """
    # Confirm that the InstructorTask actually defines subtasks.
    entry = InstructorTask.objects.get(pk=entry_id)
    if len(entry.subtasks) == 0:
        format_str = "Unexpected task_id '{}': unable to find subtasks of instructor task '{}': rejecting task {}"
        msg = format_str.format(current_task_id, entry, new_subtask_status)
        TASK_LOG.warning(msg)
        dog_stats_api.increment('instructor_task.subtask.duplicate.nosubtasks', tags=[entry.course_id])
        raise DuplicateTaskException(msg)

    # Confirm that the InstructorTask knows about this particular subtask.
    subtask_dict = json.loads(entry.subtasks)
    subtask_status_info = subtask_dict['status']
    if current_task_id not in subtask_status_info:
        format_str = "Unexpected task_id '{}': unable to find status for subtask of instructor task '{}': rejecting task {}"
        msg = format_str.format(current_task_id, entry, new_subtask_status)
        TASK_LOG.warning(msg)
        dog_stats_api.increment('instructor_task.subtask.duplicate.unknown', tags=[entry.course_id])
        raise DuplicateTaskException(msg)

    # Confirm that the InstructorTask doesn't think that this subtask has already been
    # performed successfully.
    subtask_status = SubtaskStatus.from_dict(subtask_status_info[current_task_id])
    subtask_state = subtask_status.state
    if subtask_state in READY_STATES:
        format_str = "Unexpected task_id '{}': already completed - status {} for subtask of instructor task '{}': rejecting task {}"
        msg = format_str.format(current_task_id, subtask_status, entry, new_subtask_status)
        TASK_LOG.warning(msg)
        dog_stats_api.increment('instructor_task.subtask.duplicate.completed', tags=[entry.course_id])
        raise DuplicateTaskException(msg)

    # Confirm that the InstructorTask doesn't think that this subtask is already being
    # retried by another task.
    if subtask_state == RETRY:
        # Check to see if the input number of retries is less than the recorded number.
        # If so, then this is an earlier version of the task, and a duplicate.
        new_retry_count = new_subtask_status.get_retry_count()
        current_retry_count = subtask_status.get_retry_count()
        if new_retry_count < current_retry_count:
            format_str = "Unexpected task_id '{}': already retried - status {} for subtask of instructor task '{}': rejecting task {}"
            msg = format_str.format(current_task_id, subtask_status, entry, new_subtask_status)
            TASK_LOG.warning(msg)
            dog_stats_api.increment('instructor_task.subtask.duplicate.retried', tags=[entry.course_id])
            raise DuplicateTaskException(msg)

    # Now we are ready to start working on this.  Try to lock it.
    # If it fails, then it means that another worker is already in the
    # middle of working on this.
    if not _acquire_subtask_lock(current_task_id):
        format_str = "Unexpected task_id '{}': already being executed - for subtask of instructor task '{}'"
        msg = format_str.format(current_task_id, entry)
        TASK_LOG.warning(msg)
        dog_stats_api.increment('instructor_task.subtask.duplicate.locked', tags=[entry.course_id])
        raise DuplicateTaskException(msg)


def update_subtask_status(entry_id, current_task_id, new_subtask_status, retry_count=0):
    """
    Update the status of the subtask in the parent InstructorTask object tracking its progress.

    Because select_for_update is used to lock the InstructorTask object while it is being updated,
    multiple subtasks updating at the same time may time out while waiting for the lock.
    The actual update operation is wrapped in a try/except that permits the update to be
    retried if the transaction times out.

    The subtask lock acquired in the call to check_subtask_is_valid() is released here, only when
    all retry attempts have concluded.
    """
    try:
        _update_subtask_status(entry_id, current_task_id, new_subtask_status)
    except DatabaseError:
        # If we fail, try again recursively.
        retry_count += 1
        if retry_count < MAX_DATABASE_LOCK_RETRIES:
            TASK_LOG.info("Retrying to update status for subtask %s of instructor task %d with status %s:  retry %d",
                          current_task_id, entry_id, new_subtask_status, retry_count)
            dog_stats_api.increment('instructor_task.subtask.retry_after_failed_update')
            update_subtask_status(entry_id, current_task_id, new_subtask_status, retry_count)
        else:
            TASK_LOG.info("Failed to update status after %d retries for subtask %s of instructor task %d with status %s",
                          retry_count, current_task_id, entry_id, new_subtask_status)
            dog_stats_api.increment('instructor_task.subtask.failed_after_update_retries')
            raise
    finally:
        # Only release the lock on the subtask when we're done trying to update it.
        # Note that this will be called each time a recursive call to update_subtask_status()
        # returns.  Fortunately, it's okay to release a lock that has already been released.
        _release_subtask_lock(current_task_id)
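
# With MAX_DATABASE_LOCK_RETRIES = 5, a persistently locked InstructorTask row is
# attempted five times in total (the initial call plus four recursive retries)
# before the DatabaseError is re-raised to the caller.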


@transaction.commit_manually
def _update_subtask_status(entry_id, current_task_id, new_subtask_status):
    """
    Update the status of the subtask in the parent InstructorTask object tracking its progress.

    Uses select_for_update to lock the InstructorTask object while it is being updated.
    The operation is surrounded by a try/except/else that permits the manual transaction to be
    committed on completion, or rolled back on error.

    The InstructorTask's "task_output" field is updated.  This is a JSON-serialized dict.
    Accumulates values for 'attempted', 'succeeded', 'failed', 'skipped' from `new_subtask_status`
    into the corresponding values in the InstructorTask's task_output.  Also updates the 'duration_ms'
    value with the current interval since the original InstructorTask started.  Note that this
    value is only approximate, since the subtask may be running on a different server than the
    original task, so is subject to clock skew.

    The InstructorTask's "subtasks" field is also updated.  This is also a JSON-serialized dict.
    Keys include 'total', 'succeeded', 'retried', 'failed', which are counters for the number of
    subtasks.  'Total' is expected to have been set at the time the subtasks were created.
    The other three counters are incremented depending on the value of `status`.  Once the counters
    for 'succeeded' and 'failed' match the 'total', the subtasks are done and the InstructorTask's
    "status" is changed to SUCCESS.

    The "subtasks" field also contains a 'status' key, that contains a dict that stores status
    information for each subtask.  At the moment, the value for each subtask (keyed by its task_id)
    is the value of the SubtaskStatus.to_dict(), but could be expanded in future to store information
    about failure messages, progress made, etc.
    """
    TASK_LOG.info("Preparing to update status for subtask %s for instructor task %d with status %s",
                  current_task_id, entry_id, new_subtask_status)

    try:
        entry = InstructorTask.objects.select_for_update().get(pk=entry_id)
        subtask_dict = json.loads(entry.subtasks)
        subtask_status_info = subtask_dict['status']
        if current_task_id not in subtask_status_info:
            # unexpected error -- raise an exception
            format_str = "Unexpected task_id '{}': unable to update status for subtask of instructor task '{}'"
            msg = format_str.format(current_task_id, entry_id)
            TASK_LOG.warning(msg)
            raise ValueError(msg)

        # Update status:
        subtask_status_info[current_task_id] = new_subtask_status.to_dict()

        # Update the parent task progress.
        # Set the estimate of duration, but only if it
        # increases.  Clock skew between time() returned by different machines
        # may result in non-monotonic values for duration.
        task_progress = json.loads(entry.task_output)
        start_time = task_progress['start_time']
        prev_duration = task_progress['duration_ms']
        new_duration = int((time() - start_time) * 1000)
        task_progress['duration_ms'] = max(prev_duration, new_duration)

        # Update counts only when subtask is done.
        # In future, we can make this more responsive by updating status
        # between retries, by comparing counts that change from previous
        # retry.
        new_state = new_subtask_status.state
        if new_state in READY_STATES:
            for statname in ['attempted', 'succeeded', 'failed', 'skipped']:
                task_progress[statname] += getattr(new_subtask_status, statname)

        # Figure out if we're actually done (i.e. this is the last task to complete).
        # This is easier if we just maintain a counter, rather than scanning the
        # entire subtask status dict.
        if new_state == SUCCESS:
            subtask_dict['succeeded'] += 1
        elif new_state in READY_STATES:
            subtask_dict['failed'] += 1
        num_remaining = subtask_dict['total'] - subtask_dict['succeeded'] - subtask_dict['failed']

        # If we're done with the last task, update the parent status to indicate that.
        # At present, we mark the task as having succeeded.  In future, we should see
        # if there was a catastrophic failure that occurred, and figure out how to
        # report that here.
        if num_remaining <= 0:
            entry.task_state = SUCCESS
        entry.subtasks = json.dumps(subtask_dict)
        entry.task_output = InstructorTask.create_output_for_success(task_progress)

        TASK_LOG.debug("about to save....")
        entry.save()
        TASK_LOG.info("Task output updated to %s for subtask %s of instructor task %d",
                      entry.task_output, current_task_id, entry_id)
    except Exception:
        TASK_LOG.exception("Unexpected error while updating InstructorTask.")
        transaction.rollback()
        dog_stats_api.increment('instructor_task.subtask.update_exception')
        raise
    else:
        TASK_LOG.debug("about to commit....")
        transaction.commit()