"""
This module contains celery task functions for handling the management of subtasks.
"""
import json
import logging
from contextlib import contextmanager
from time import time
from uuid import uuid4

import psutil
from celery.states import READY_STATES, RETRY, SUCCESS
from django.core.cache import cache
from django.db import DatabaseError, transaction

import dogstats_wrapper as dog_stats_api
from util.db import outer_atomic

from .exceptions import DuplicateTaskException
from .models import PROGRESS, QUEUING, InstructorTask

TASK_LOG = logging.getLogger('edx.celery.task')

# Lock expiration should be long enough to allow a subtask to complete.
SUBTASK_LOCK_EXPIRE = 60 * 10  # Lock expires in 10 minutes
# Number of times to retry if a subtask update encounters a lock on the InstructorTask.
# (These are recursive retries, so don't make this number too large.)
MAX_DATABASE_LOCK_RETRIES = 5


def _get_number_of_subtasks(total_num_items, items_per_task):
    """
    Determines number of subtasks that would be generated by _generate_items_for_subtask.

    This needs to be calculated before the query is executed so that the list of all subtasks can be
    stored in the InstructorTask before any subtasks are started.

    The number of subtask_id values returned by this should match the number of chunks returned
    by the _generate_items_for_subtask generator.
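
    For example, splitting 250 total items with items_per_task=100 yields 3 subtasks
    (chunks of 100, 100, and 50 items).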
    """
    num_subtasks, remainder = divmod(total_num_items, items_per_task)
    if remainder:
        num_subtasks += 1
    return num_subtasks


@contextmanager
def track_memory_usage(metric, course_id):
    """
    Context manager to track how much memory (in bytes) a given process uses.
    Metrics will look like: 'course_email.subtask_generation.memory.rss'
    or 'course_email.subtask_generation.memory.vms'.
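
    Example usage (illustrative; `generate_the_subtasks` is a hypothetical caller body):

        with track_memory_usage('course_email.subtask_generation.memory', course_id):
            generate_the_subtasks()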
    """
    memory_types = ['rss', 'vms']
    process = psutil.Process()
    baseline_memory_info = process.memory_info()
    baseline_usages = [getattr(baseline_memory_info, memory_type) for memory_type in memory_types]
    yield
    for memory_type, baseline_usage in zip(memory_types, baseline_usages):
        total_memory_info = process.memory_info()
        total_usage = getattr(total_memory_info, memory_type)
        memory_used = total_usage - baseline_usage
        dog_stats_api.increment(
            metric + "." + memory_type,
            memory_used,
            tags=["course_id:{}".format(course_id)],
        )


def _generate_items_for_subtask(
    item_querysets,  # pylint: disable=bad-continuation
    item_fields,
    total_num_items,
    items_per_task,
    total_num_subtasks,
    course_id,
):
    """
    Generates a chunk of "items" that should be passed into a subtask.

    Arguments:
        `item_querysets` : a list of query sets, each of which defines the "items" that should be passed to subtasks.
        `item_fields` : the fields that should be included in the dict that is returned.
            These are in addition to the 'pk' field.
        `total_num_items` : the result of summing the count of each queryset in `item_querysets`.
        `items_per_task` : maximum number of items to include in the chunk passed to each subtask.
        `total_num_subtasks` : number of subtasks to be generated, as computed by _get_number_of_subtasks().
        `course_id` : course_id of the course. Only needed for the track_memory_usage context manager.

    Returns:  yields a list of dicts, where each dict contains the fields in `item_fields`, plus the 'pk' field.
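
    For example, with item_fields=['email'], each yielded chunk would look like this
    (values illustrative):

        [{'pk': 1, 'email': 'user1@example.com'}, {'pk': 2, 'email': 'user2@example.com'}, ...]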

    Warning:  if the algorithm here changes, the _get_number_of_subtasks() method should similarly be changed.
    """
    num_items_queued = 0
    all_item_fields = list(item_fields)
    all_item_fields.append('pk')
    num_subtasks = 0

    items_for_task = []

    with track_memory_usage('course_email.subtask_generation.memory', course_id):
        for queryset in item_querysets:
            for item in queryset.values(*all_item_fields).iterator():
                if len(items_for_task) == items_per_task and num_subtasks < total_num_subtasks - 1:
                    yield items_for_task
                    num_items_queued += items_per_task
                    items_for_task = []
                    num_subtasks += 1
                items_for_task.append(item)

        # yield remainder items for task, if any
        if items_for_task:
            yield items_for_task
            num_items_queued += len(items_for_task)

    # Note, depending on what kind of DB is used, it's possible for the queryset
    # we iterate over to change in the course of the query. Therefore it's
    # possible that there are more (or fewer) items queued than were initially
    # calculated. It also means it's possible that the last task contains
    # more items than items_per_task allows. We expect this to be a small enough
    # number as to be negligible.
    if num_items_queued != total_num_items:
        TASK_LOG.info(
            "Number of items generated by chunking %s not equal to original total %s",
            num_items_queued,
            total_num_items,
        )


class SubtaskStatus(object):
    """
    Object for tracking the status of a subtask.

    SubtaskStatus values are:

      'task_id' : id of subtask.  This is used to pass task information across retries.
      'attempted' : number of attempts -- should equal succeeded plus failed
      'succeeded' : number that succeeded in processing
      'skipped' : number that were not processed.
      'failed' : number that failed during processing
      'retried_nomax' : number of times the subtask has been retried for conditions that
          should not have a maximum count applied
      'retried_withmax' : number of times the subtask has been retried for conditions that
          should have a maximum count applied
      'state' : celery state of the subtask (e.g. QUEUING, PROGRESS, RETRY, FAILURE, SUCCESS)

    Object is not JSON-serializable, so to_dict and from_dict methods are provided so that
    it can be passed as a serializable argument to tasks (and be reconstituted within such tasks).

    In future, we may want to include specific error information
    indicating the reason for failure.
    Also, we should count up "not attempted" separately from attempted/failed.
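
    Example (illustrative):

        subtask_status = SubtaskStatus.create(subtask_id)
        subtask_status.increment(succeeded=10, failed=2, state=SUCCESS)
        status_dict = subtask_status.to_dict()    # JSON-serializable
        subtask_status = SubtaskStatus.from_dict(status_dict)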
    """

    def __init__(self, task_id, attempted=None, succeeded=0, failed=0, skipped=0, retried_nomax=0, retried_withmax=0, state=None):
        """Construct a SubtaskStatus object."""
        self.task_id = task_id
        if attempted is not None:
            self.attempted = attempted
        else:
            self.attempted = succeeded + failed
        self.succeeded = succeeded
        self.failed = failed
        self.skipped = skipped
        self.retried_nomax = retried_nomax
        self.retried_withmax = retried_withmax
        self.state = state if state is not None else QUEUING

    @classmethod
    def from_dict(cls, d):
        """Construct a SubtaskStatus object from a dict representation."""
        options = dict(d)
        task_id = options.pop('task_id')
        return cls.create(task_id, **options)

    @classmethod
    def create(cls, task_id, **options):
        """Construct a SubtaskStatus object."""
        return cls(task_id, **options)

    def to_dict(self):
        """
        Output a dict representation of a SubtaskStatus object.

        Use for creating a JSON-serializable representation for use by tasks.
        """
        return self.__dict__

    def increment(self, succeeded=0, failed=0, skipped=0, retried_nomax=0, retried_withmax=0, state=None):
        """
        Update the result of a subtask with additional results.

        Kwarg arguments are incremented to the existing values.
        The exception is for `state`, which if specified is used to override the existing value.
        """
        self.attempted += (succeeded + failed)
        self.succeeded += succeeded
        self.failed += failed
        self.skipped += skipped
        self.retried_nomax += retried_nomax
        self.retried_withmax += retried_withmax
        if state is not None:
            self.state = state

    def get_retry_count(self):
        """Returns the number of retries of any kind."""
        return self.retried_nomax + self.retried_withmax

    def __repr__(self):
        """Return print representation of a SubtaskStatus object."""
        return 'SubtaskStatus<%r>' % (self.to_dict(),)

    def __unicode__(self):
        """Return unicode version of a SubtaskStatus object representation."""
        return unicode(repr(self))


def initialize_subtask_info(entry, action_name, total_num, subtask_id_list):
    """
    Store initial subtask information to InstructorTask object.

    The InstructorTask's "task_output" field is initialized.  This is a JSON-serialized dict.
    Counters for 'attempted', 'succeeded', 'failed', 'skipped' keys are initialized to zero,
    as is the 'duration_ms' value.  A 'start_time' is stored for later duration calculations,
    and the total number of "things to do" is set, so the user can be told how much needs to be
    done overall.  The `action_name` is also stored, to help with constructing more readable
    task_progress messages.

    The InstructorTask's "subtasks" field is also initialized.  This is also a JSON-serialized dict.
    Keys include 'total', 'succeeded', 'retried', 'failed', which are counters for the number of
    subtasks.  'Total' is set here to the total number, while the other three are initialized to zero.
    Once the counters for 'succeeded' and 'failed' match the 'total', the subtasks are done and
    the InstructorTask's "status" will be changed to SUCCESS.

    The "subtasks" field also contains a 'status' key, which contains a dict that stores status
    information for each subtask.  The value for each subtask (keyed by its task_id)
    is its subtask status, as defined by SubtaskStatus.to_dict().

    This information needs to be set up in the InstructorTask before any of the subtasks start
    running.  If not, there is a chance that the subtasks could complete before the parent task
    is done creating subtasks.  Doing so also simplifies the save() here, as it avoids the need
    for locking.

    Monitoring code should assume that if an InstructorTask has subtask information, it should
    rely on the status stored in the InstructorTask object, rather than the status stored in the
    corresponding AsyncResult.
    """
    task_progress = {
        'action_name': action_name,
        'attempted': 0,
        'failed': 0,
        'skipped': 0,
        'succeeded': 0,
        'total': total_num,
        'duration_ms': 0,
        'start_time': time()
    }
    entry.task_output = InstructorTask.create_output_for_success(task_progress)
    entry.task_state = PROGRESS

    # Write out the subtasks information.
    num_subtasks = len(subtask_id_list)
    # Note that it may not be necessary to store the initial value with all those zeroes!
    # Write out as a dict, so it will go more smoothly into json.
    subtask_status = {subtask_id: (SubtaskStatus.create(subtask_id)).to_dict() for subtask_id in subtask_id_list}
    subtask_dict = {
        'total': num_subtasks,
        'succeeded': 0,
        'failed': 0,
        'status': subtask_status
    }
    entry.subtasks = json.dumps(subtask_dict)

    # and save the entry immediately, before any subtasks actually start work:
    entry.save_now()
    return task_progress


# pylint: disable=bad-continuation
def queue_subtasks_for_query(
    entry,
    action_name,
    create_subtask_fcn,
    item_querysets,
    item_fields,
    items_per_task,
    total_num_items,
):
285 286 287 288 289 290 291 292 293
    """
    Generates and queues subtasks to each execute a chunk of "items" generated by a queryset.

    Arguments:
        `entry` : the InstructorTask object for which subtasks are being queued.
        `action_name` : a past-tense verb that can be used for constructing readable status messages.
        `create_subtask_fcn` : a function of two arguments that constructs the desired kind of subtask object.
            Arguments are the list of items to be processed by this subtask, and a SubtaskStatus
            object reflecting initial status (and containing the subtask's id).
        `item_querysets` : a list of query sets that define the "items" that should be passed to subtasks.
        `item_fields` : the fields that should be included in the dict that is returned.
            These are in addition to the 'pk' field.
        `items_per_task` : maximum size of chunks to break each query chunk into for use by a subtask.
        `total_num_items` : total number of items that will be put into subtasks

    Returns:  the task progress as stored in the InstructorTask object.
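
    Example (hypothetical caller; `send_items_task` stands in for a celery task defined by the caller):

        def create_subtask_fcn(item_list, subtask_status):
            return send_items_task.subtask((entry.id, item_list, subtask_status.to_dict()))

        progress = queue_subtasks_for_query(
            entry, 'emailed', create_subtask_fcn, [queryset], ['email'], 100, queryset.count(),
        )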

    """
    task_id = entry.task_id

    # Calculate the number of tasks that will be created, and create a list of ids for each task.
    total_num_subtasks = _get_number_of_subtasks(total_num_items, items_per_task)
    subtask_id_list = [str(uuid4()) for _ in range(total_num_subtasks)]

    # Update the InstructorTask with information about the subtasks we've defined.
    TASK_LOG.info(
        "Task %s: updating InstructorTask %s with subtask info for %s subtasks to process %s items.",
        task_id,
        entry.id,
        total_num_subtasks,
        total_num_items,
    )
    # Make sure this is committed to database before handing off subtasks to celery.
    with outer_atomic():
        progress = initialize_subtask_info(entry, action_name, total_num_items, subtask_id_list)

    # Construct a generator that will return the items to use for each subtask.
    # Pass in the desired fields to fetch for each item.
    item_list_generator = _generate_items_for_subtask(
        item_querysets,
        item_fields,
        total_num_items,
        items_per_task,
        total_num_subtasks,
        entry.course_id,
    )

    # Now create the subtasks, and start them running.
    TASK_LOG.info(
        "Task %s: creating %s subtasks to process %s items.",
        task_id,
        total_num_subtasks,
        total_num_items,
    )
    num_subtasks = 0
    for item_list in item_list_generator:
        subtask_id = subtask_id_list[num_subtasks]
        num_subtasks += 1
        subtask_status = SubtaskStatus.create(subtask_id)
        new_subtask = create_subtask_fcn(item_list, subtask_status)
        new_subtask.apply_async()

    # Subtasks have been queued, so no exceptions should be raised after this point.

    # Return the task progress as stored in the InstructorTask object.
    return progress


def _acquire_subtask_lock(task_id):
    """
    Mark the specified task_id as being in progress.

    This is used to make sure that the same task is not worked on by more than one worker
    at the same time.  This can occur when tasks are requeued by Celery in response to
    loss of connection to the task broker.  Most of the time, such duplicate tasks are
    run sequentially, but they can overlap in processing as well.

    Returns true if the task_id was not already locked; false if it was.
    """
    # cache.add fails if the key already exists
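    # (cache.add is atomic: it stores the value only if the key is not already
    # present, so at most one worker can hold the lock for a given task_id.)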
    key = "subtask-{}".format(task_id)
    succeeded = cache.add(key, 'true', SUBTASK_LOCK_EXPIRE)
    if not succeeded:
        TASK_LOG.warning("task_id '%s': already locked.  Contains value '%s'", task_id, cache.get(key))
    return succeeded


def _release_subtask_lock(task_id):
    """
    Unmark the specified task_id as being no longer in progress.

    This is most important to permit a task to be retried.
    """
    # According to Celery task cookbook, "Memcache delete is very slow, but we have
    # to use it to take advantage of using add() for atomic locking."
    key = "subtask-{}".format(task_id)
    cache.delete(key)


def check_subtask_is_valid(entry_id, current_task_id, new_subtask_status):
    """
    Confirms that the current subtask is known to the InstructorTask and hasn't already been completed.

    Problems can occur when the parent task has been run twice, resulting in duplicate
    subtasks being created for the same InstructorTask entry.  This can happen when Celery
    loses its connection to its broker, and any current tasks get requeued.

    If a parent task gets requeued, then the same InstructorTask may have a different set of
    subtasks defined (to do the same thing), so the subtasks from the first queuing would not
    be known to the InstructorTask.  We raise an exception in this case.

    If a subtask gets requeued, then the first time the subtask runs it should run fine to completion.
    However, we want to prevent it from running again, so we check here to see what the existing
    subtask's status is.  If it is complete, we raise an exception.  We also take a lock on the task,
    so that we can detect when another worker has started work but has not yet completed it;
    in that case, the other worker is allowed to finish, and this call raises an exception.

    Raises a DuplicateTaskException if this is not a task that should be run.

    If this check succeeds, update_subtask_status() must later be called to release the lock on
    the task.
    """
    # Confirm that the InstructorTask actually defines subtasks.
    entry = InstructorTask.objects.get(pk=entry_id)
    if len(entry.subtasks) == 0:
        format_str = "Unexpected task_id '{}': unable to find subtasks of instructor task '{}': rejecting task {}"
        msg = format_str.format(current_task_id, entry, new_subtask_status)
        TASK_LOG.warning(msg)
        dog_stats_api.increment('instructor_task.subtask.duplicate.nosubtasks', tags=[entry.course_id])
        raise DuplicateTaskException(msg)

    # Confirm that the InstructorTask knows about this particular subtask.
    subtask_dict = json.loads(entry.subtasks)
    subtask_status_info = subtask_dict['status']
    if current_task_id not in subtask_status_info:
        format_str = "Unexpected task_id '{}': unable to find status for subtask of instructor task '{}': rejecting task {}"
        msg = format_str.format(current_task_id, entry, new_subtask_status)
        TASK_LOG.warning(msg)
        dog_stats_api.increment('instructor_task.subtask.duplicate.unknown', tags=[entry.course_id])
        raise DuplicateTaskException(msg)

    # Confirm that the InstructorTask doesn't think that this subtask has already been
    # performed successfully.
    subtask_status = SubtaskStatus.from_dict(subtask_status_info[current_task_id])
    subtask_state = subtask_status.state
    if subtask_state in READY_STATES:
        format_str = "Unexpected task_id '{}': already completed - status {} for subtask of instructor task '{}': rejecting task {}"
        msg = format_str.format(current_task_id, subtask_status, entry, new_subtask_status)
        TASK_LOG.warning(msg)
        dog_stats_api.increment('instructor_task.subtask.duplicate.completed', tags=[entry.course_id])
        raise DuplicateTaskException(msg)

    # Confirm that the InstructorTask doesn't think that this subtask is already being
    # retried by another task.
    if subtask_state == RETRY:
        # Check to see if the input number of retries is less than the recorded number.
        # If so, then this is an earlier version of the task, and a duplicate.
        new_retry_count = new_subtask_status.get_retry_count()
        current_retry_count = subtask_status.get_retry_count()
        if new_retry_count < current_retry_count:
            format_str = "Unexpected task_id '{}': already retried - status {} for subtask of instructor task '{}': rejecting task {}"
            msg = format_str.format(current_task_id, subtask_status, entry, new_subtask_status)
            TASK_LOG.warning(msg)
            dog_stats_api.increment('instructor_task.subtask.duplicate.retried', tags=[entry.course_id])
            raise DuplicateTaskException(msg)

    # Now we are ready to start working on this.  Try to lock it.
    # If it fails, then it means that another worker is already in the
    # middle of working on this.
    if not _acquire_subtask_lock(current_task_id):
        format_str = "Unexpected task_id '{}': already being executed - for subtask of instructor task '{}'"
        msg = format_str.format(current_task_id, entry)
        TASK_LOG.warning(msg)
        dog_stats_api.increment('instructor_task.subtask.duplicate.locked', tags=[entry.course_id])
        raise DuplicateTaskException(msg)


def update_subtask_status(entry_id, current_task_id, new_subtask_status, retry_count=0):
    """
    Update the status of the subtask in the parent InstructorTask object tracking its progress.

    Because select_for_update is used to lock the InstructorTask object while it is being updated,
    multiple subtasks updating at the same time may time out while waiting for the lock.
    The actual update operation is surrounded by a try/except/else that permits the update to be
    retried if the transaction times out.

    The subtask lock acquired in the call to check_subtask_is_valid() is released here, only when
    the attempting of retries has concluded.
    """
    try:
        _update_subtask_status(entry_id, current_task_id, new_subtask_status)
    except DatabaseError:
        # If we fail, try again recursively.
        retry_count += 1
        if retry_count < MAX_DATABASE_LOCK_RETRIES:
            TASK_LOG.info("Retrying update of status for subtask %s of instructor task %d with status %s: retry %d",
                          current_task_id, entry_id, new_subtask_status, retry_count)
            dog_stats_api.increment('instructor_task.subtask.retry_after_failed_update')
            update_subtask_status(entry_id, current_task_id, new_subtask_status, retry_count)
        else:
            TASK_LOG.info("Failed to update status after %d retries for subtask %s of instructor task %d with status %s",
                          retry_count, current_task_id, entry_id, new_subtask_status)
            dog_stats_api.increment('instructor_task.subtask.failed_after_update_retries')
            raise
    finally:
        # Only release the lock on the subtask when we're done trying to update it.
        # Note that this will be called each time a recursive call to update_subtask_status()
        # returns.  Fortunately, it's okay to release a lock that has already been released.
        _release_subtask_lock(current_task_id)


@transaction.atomic
def _update_subtask_status(entry_id, current_task_id, new_subtask_status):
    """
    Update the status of the subtask in the parent InstructorTask object tracking its progress.

    Uses select_for_update to lock the InstructorTask object while it is being updated.
    The operation runs inside a transaction (via the transaction.atomic decorator), so the
    update is committed on completion, or rolled back on error.

    The InstructorTask's "task_output" field is updated.  This is a JSON-serialized dict.
    Accumulates values for 'attempted', 'succeeded', 'failed', 'skipped' from `new_subtask_status`
    into the corresponding values in the InstructorTask's task_output.  Also updates the 'duration_ms'
    value with the current interval since the original InstructorTask started.  Note that this
    value is only approximate, since the subtask may be running on a different server than the
    original task, so it is subject to clock skew.

    The InstructorTask's "subtasks" field is also updated.  This is also a JSON-serialized dict.
    Keys include 'total', 'succeeded', 'retried', 'failed', which are counters for the number of
    subtasks.  'Total' is expected to have been set at the time the subtasks were created.
    The other three counters are incremented depending on the value of `status`.  Once the counters
    for 'succeeded' and 'failed' match the 'total', the subtasks are done and the InstructorTask's
    "status" is changed to SUCCESS.

    The "subtasks" field also contains a 'status' key, which contains a dict that stores status
    information for each subtask.  At the moment, the value for each subtask (keyed by its task_id)
    is the value of the SubtaskStatus.to_dict(), but could be expanded in future to store information
    about failure messages, progress made, etc.
    """
    TASK_LOG.info("Preparing to update status for subtask %s for instructor task %d with status %s",
                  current_task_id, entry_id, new_subtask_status)

    try:
        entry = InstructorTask.objects.select_for_update().get(pk=entry_id)
        subtask_dict = json.loads(entry.subtasks)
        subtask_status_info = subtask_dict['status']
        if current_task_id not in subtask_status_info:
            # unexpected error -- raise an exception
            format_str = "Unexpected task_id '{}': unable to update status for subtask of instructor task '{}'"
            msg = format_str.format(current_task_id, entry_id)
            TASK_LOG.warning(msg)
            raise ValueError(msg)

        # Update status:
        subtask_status_info[current_task_id] = new_subtask_status.to_dict()

        # Update the parent task progress.
        # Set the estimate of duration, but only if it
        # increases.  Clock skew between time() returned by different machines
        # may result in non-monotonic values for duration.
        task_progress = json.loads(entry.task_output)
        start_time = task_progress['start_time']
        prev_duration = task_progress['duration_ms']
        new_duration = int((time() - start_time) * 1000)
        task_progress['duration_ms'] = max(prev_duration, new_duration)

        # Update counts only when subtask is done.
        # In future, we can make this more responsive by updating status
        # between retries, by comparing counts that change from previous
        # retry.
        new_state = new_subtask_status.state
        if new_state in READY_STATES:
            for statname in ['attempted', 'succeeded', 'failed', 'skipped']:
                task_progress[statname] += getattr(new_subtask_status, statname)

        # Figure out if we're actually done (i.e. this is the last task to complete).
        # This is easier if we just maintain a counter, rather than scanning the
        # entire subtask_status_info dict.
        if new_state == SUCCESS:
            subtask_dict['succeeded'] += 1
        elif new_state in READY_STATES:
            subtask_dict['failed'] += 1
        num_remaining = subtask_dict['total'] - subtask_dict['succeeded'] - subtask_dict['failed']

        # If we're done with the last task, update the parent status to indicate that.
        # At present, we mark the task as having succeeded.  In future, we should see
        # if there was a catastrophic failure that occurred, and figure out how to
        # report that here.
        if num_remaining <= 0:
            entry.task_state = SUCCESS
        entry.subtasks = json.dumps(subtask_dict)
        entry.task_output = InstructorTask.create_output_for_success(task_progress)

        TASK_LOG.debug("About to save updated InstructorTask %s.", entry_id)
        entry.save()
        TASK_LOG.info("Task output updated to %s for subtask %s of instructor task %d",
                      entry.task_output, current_task_id, entry_id)
    except Exception:
        TASK_LOG.exception("Unexpected error while updating InstructorTask.")
        dog_stats_api.increment('instructor_task.subtask.update_exception')
        raise