Commit 7b81dcc3 by Calen Pennington

Make split_mongo assert block identity uniqueness only over (block_type, block_id) tuples [LMS-11364]
parent df6dc219
......@@ -198,9 +198,7 @@ class TemplateTests(unittest.TestCase):
second_problem = persistent_factories.ItemFactory.create(
display_name='problem 2',
parent_location=BlockUsageLocator.make_relative(
test_course.location.version_agnostic(), block_type='problem', block_id=sub.location.block_id
),
parent_location=sub.location.version_agnostic(),
user_id='testbot', category='problem',
data="<problem></problem>"
)
......@@ -208,6 +206,7 @@ class TemplateTests(unittest.TestCase):
# The draft course root has 2 revisions: the published revision, and then the subsequent
# changes to the draft revision
version_history = self.split_store.get_block_generations(test_course.location)
self.assertIsNotNone(version_history)
self.assertEqual(version_history.locator.version_guid, test_course.location.version_guid)
self.assertEqual(len(version_history.children), 1)
self.assertEqual(version_history.children[0].children, [])
......
# Disable PyContract contract checking when running as a webserver
import contracts
contracts.disable_all()
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cms.envs.aws")
......
"""
General utilities
"""
import urllib
from collections import namedtuple
from contracts import contract, check
from opaque_keys.edx.locator import BlockUsageLocator
def encode_key_for_mongo(fieldname):
"""
Field names in mongo cannot contain periods or dollar signs, so encode them.
:param fieldname: an atomic field name. Note: don't pass structured paths, as this will flatten them.
"""
for char in [".", "$"]:
fieldname = fieldname.replace(char, '%{:02x}'.format(ord(char)))
return fieldname
class BlockKey(namedtuple('BlockKey', 'type id')):
__slots__ = ()
@contract(type="string[>0]")
def __new__(cls, type, id):
return super(BlockKey, cls).__new__(cls, type, id)
@classmethod
@contract(usage_key=BlockUsageLocator)
def from_usage_key(cls, usage_key):
return cls(usage_key.block_type, usage_key.block_id)
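The rest of the change leans on a namedtuple property worth calling out: a BlockKey hashes and compares exactly like its underlying (type, id) tuple, so it can serve directly as a structure-dict key and be rebuilt from the raw two-element lists mongo hands back. A minimal sketch (simplified class without the contract decorator; data illustrative):

from collections import namedtuple

class BlockKey(namedtuple('BlockKey', 'type id')):
    __slots__ = ()

blocks = {BlockKey('problem', 'problem1'): {'definition': 'def-1'}}

# a raw [block_type, block_id] pair from mongo unpacks straight into a key
raw_child = ['problem', 'problem1']
assert BlockKey(*raw_child) in blocks

# equality and hashing match the plain-tuple form
assert BlockKey('problem', 'problem1') == ('problem', 'problem1')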
def decode_key_from_mongo(fieldname):
"""
The inverse of encode_key_for_mongo
:param fieldname: a field name with periods and dollar signs percent-escaped
"""
return urllib.unquote(fieldname)
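For reference, the escaping is plain percent-encoding of the two characters mongo forbids in field names, which is why urllib.unquote is a sufficient inverse (note it will also decode any other percent escapes, so only names produced by encode_key_for_mongo should pass through it). A quick illustrative round trip (Python 2):

>>> encode_key_for_mongo('grading.policy$v1')
'grading%2epolicy%24v1'
>>> decode_key_from_mongo('grading%2epolicy%24v1')
'grading.policy$v1'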
import sys
import logging
from contracts import contract, new_contract
from xblock.runtime import KvsFieldData
from xblock.fields import ScopeIds
from opaque_keys.edx.locator import BlockUsageLocator, LocalId, CourseLocator, DefinitionLocator
from xmodule.mako_module import MakoDescriptorSystem
from xmodule.error_module import ErrorDescriptor
from xmodule.errortracker import exc_info_to_str
from xmodule.modulestore.split_mongo import encode_key_for_mongo
from ..exceptions import ItemNotFoundError
from .split_mongo_kvs import SplitMongoKVS
from fs.osfs import OSFS
from .definition_lazy_loader import DefinitionLazyLoader
from xmodule.modulestore.edit_info import EditInfoRuntimeMixin
from xmodule.modulestore.split_mongo import BlockKey
log = logging.getLogger(__name__)
new_contract('BlockUsageLocator', BlockUsageLocator)
new_contract('BlockKey', BlockKey)
class CachingDescriptorSystem(MakoDescriptorSystem, EditInfoRuntimeMixin):
"""
......@@ -58,30 +62,31 @@ class CachingDescriptorSystem(MakoDescriptorSystem, EditInfoRuntimeMixin):
# Compute inheritance
modulestore.inherit_settings(
course_entry['structure'].get('blocks', {}),
encode_key_for_mongo(course_entry['structure'].get('root')),
course_entry['structure'].get('root'),
course_entry.setdefault('inherited_settings', {}),
)
self.default_class = default_class
self.local_modules = {}
@contract(usage_key="BlockUsageLocator | BlockKey")
def _load_item(self, usage_key, course_entry_override=None, **kwargs):
# usage_key is either a UsageKey or just the block_id. if a usage_key,
# usage_key is either a UsageKey or just the block_key. if a usage_key,
if isinstance(usage_key, BlockUsageLocator):
# trust the passed in key to know the caller's expectations of which fields are filled in.
# particularly useful for strip_keys so may go away when we're version aware
course_key = usage_key.course_key
if isinstance(usage_key.block_id, LocalId):
try:
return self.local_modules[usage_key]
except KeyError:
raise ItemNotFoundError
else:
block_id = usage_key.block_id
block_key = BlockKey.from_usage_key(usage_key)
else:
block_id = usage_key
block_key = usage_key
if isinstance(usage_key, BlockUsageLocator):
# trust the passed in key to know the caller's expectations of which fields are filled in.
# particularly useful for strip_keys so may go away when we're version aware
course_key = usage_key.course_key
else:
course_info = course_entry_override or self.course_entry
course_key = CourseLocator(
version_guid=course_info['structure']['_id'],
......@@ -90,27 +95,29 @@ class CachingDescriptorSystem(MakoDescriptorSystem, EditInfoRuntimeMixin):
run=course_info.get('run'),
branch=course_info.get('branch'),
)
json_data = self.get_module_data(block_id, course_key)
class_ = self.load_block_type(json_data.get('category'))
json_data = self.get_module_data(block_key, course_key)
class_ = self.load_block_type(json_data.get('block_type'))
# pass None for inherited_settings to signal that it should get the settings from cache
new_item = self.xblock_from_json(class_, course_key, block_id, json_data, None, course_entry_override, **kwargs)
new_item = self.xblock_from_json(class_, course_key, block_key, json_data, None, course_entry_override, **kwargs)
return new_item
def get_module_data(self, block_id, course_key):
@contract(block_key=BlockKey, course_key=CourseLocator)
def get_module_data(self, block_key, course_key):
"""
Get the block from module_data, adding it to module_data if it's not already there but is present in the structure
Raises:
ItemNotFoundError if block is not in the structure
"""
json_data = self.module_data.get(block_id)
json_data = self.module_data.get(block_key)
if json_data is None:
# deeper than initial descendant fetch or doesn't exist
self.modulestore.cache_items(self, [block_id], course_key, lazy=self.lazy)
json_data = self.module_data.get(block_id)
self.modulestore.cache_items(self, [block_key], course_key, lazy=self.lazy)
json_data = self.module_data.get(block_key)
if json_data is None:
raise ItemNotFoundError(block_id)
raise ItemNotFoundError(block_key)
return json_data
......@@ -125,8 +132,9 @@ class CachingDescriptorSystem(MakoDescriptorSystem, EditInfoRuntimeMixin):
# low; thus, the course_entry is most likely correct. If the thread is looking at > 1 named container
# pointing to the same structure, the access is likely to be chunky enough that the last known container
# is the intended one when not given a course_entry_override; thus, the caching of the last branch/course id.
@contract(block_key="BlockKey | None")
def xblock_from_json(
self, class_, course_key, block_id, json_data, inherited_settings, course_entry_override=None, **kwargs
self, class_, course_key, block_key, json_data, inherited_settings, course_entry_override=None, **kwargs
):
if course_entry_override is None:
course_entry_override = self.course_entry
......@@ -138,20 +146,23 @@ class CachingDescriptorSystem(MakoDescriptorSystem, EditInfoRuntimeMixin):
self.course_entry['run'] = course_entry_override['run']
definition_id = json_data.get('definition')
block_type = json_data['category']
if block_id is not None:
# If no usage id is provided, generate an in-memory id
if block_key is None:
block_key = BlockKey(json_data['block_type'], LocalId())
else:
if inherited_settings is None:
# see if there's a value in course_entry
if (block_type, block_id) in self.course_entry['inherited_settings']:
inherited_settings = self.course_entry['inherited_settings'][(block_type, block_id)]
elif (block_type, block_id) not in self.course_entry['inherited_settings']:
self.course_entry['inherited_settings'][(block_type, block_id)] = inherited_settings
if block_key in self.course_entry['inherited_settings']:
inherited_settings = self.course_entry['inherited_settings'][block_key]
elif block_key not in self.course_entry['inherited_settings']:
self.course_entry['inherited_settings'][block_key] = inherited_settings
if definition_id is not None and not json_data.get('definition_loaded', False):
definition_loader = DefinitionLazyLoader(
self.modulestore, block_type, definition_id,
self.modulestore, block_key.type, definition_id,
lambda fields: self.modulestore.convert_references_to_keys(
course_key, self.load_block_type(block_type),
course_key, self.load_block_type(block_key.type),
fields, self.course_entry['structure']['blocks'],
)
)
......@@ -162,14 +173,10 @@ class CachingDescriptorSystem(MakoDescriptorSystem, EditInfoRuntimeMixin):
if definition_id is None:
definition_id = LocalId()
# If no usage id is provided, generate an in-memory id
if block_id is None:
block_id = LocalId()
block_locator = BlockUsageLocator(
course_key,
block_type=block_type,
block_id=block_id,
block_type=block_key.type,
block_id=block_key.id,
)
converted_fields = self.modulestore.convert_references_to_keys(
......@@ -186,7 +193,7 @@ class CachingDescriptorSystem(MakoDescriptorSystem, EditInfoRuntimeMixin):
try:
module = self.construct_xblock_from_class(
class_,
ScopeIds(None, block_type, definition_id, block_locator),
ScopeIds(None, block_key.type, definition_id, block_locator),
field_data,
)
except Exception:
......@@ -197,7 +204,7 @@ class CachingDescriptorSystem(MakoDescriptorSystem, EditInfoRuntimeMixin):
BlockUsageLocator(
CourseLocator(version_guid=course_entry_override['structure']['_id']),
block_type='error',
block_id=block_id
block_id=block_key.id
),
error_msg=exc_info_to_str(sys.exc_info())
)
......@@ -208,7 +215,7 @@ class CachingDescriptorSystem(MakoDescriptorSystem, EditInfoRuntimeMixin):
module.previous_version = edit_info.get('previous_version')
module.update_version = edit_info.get('update_version')
module.source_version = edit_info.get('source_version', None)
module.definition_locator = DefinitionLocator(block_type, definition_id)
module.definition_locator = DefinitionLocator(block_key.type, definition_id)
# decache any pending field settings
module.save()
......@@ -235,7 +242,7 @@ class CachingDescriptorSystem(MakoDescriptorSystem, EditInfoRuntimeMixin):
See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
"""
if not hasattr(xblock, '_subtree_edited_by'):
json_data = self.module_data[xblock.location.block_id]
json_data = self.module_data[BlockKey.from_usage_key(xblock.location)]
if '_subtree_edited_by' not in json_data.setdefault('edit_info', {}):
self._compute_subtree_edited_internal(
xblock.location.block_id, json_data, xblock.location.course_key
......@@ -249,7 +256,7 @@ class CachingDescriptorSystem(MakoDescriptorSystem, EditInfoRuntimeMixin):
See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
"""
if not hasattr(xblock, '_subtree_edited_on'):
json_data = self.module_data[xblock.location.block_id]
json_data = self.module_data[BlockKey.from_usage_key(xblock.location)]
if '_subtree_edited_on' not in json_data.setdefault('edit_info', {}):
self._compute_subtree_edited_internal(
xblock.location.block_id, json_data, xblock.location.course_key
......@@ -284,7 +291,7 @@ class CachingDescriptorSystem(MakoDescriptorSystem, EditInfoRuntimeMixin):
max_by = json_data['edit_info']['edited_by']
for child in json_data.get('fields', {}).get('children', []):
child_data = self.get_module_data(child, course_key)
child_data = self.get_module_data(BlockKey(*child), course_key)
if '_subtree_edited_on' not in json_data.setdefault('edit_info', {}):
self._compute_subtree_edited_internal(child, child_data, course_key)
if child_data['edit_info']['_subtree_edited_on'] > max_date:
......
......@@ -28,6 +28,6 @@ class DefinitionLazyLoader(object):
def as_son(self):
return SON((
('category', self.definition_locator.block_type),
('block_type', self.definition_locator.block_type),
('definition', self.definition_locator.definition_id)
))
......@@ -4,7 +4,68 @@ Segregation of pymongo functions from the data modeling mechanisms for split mod
import re
import pymongo
from bson import son
from contracts import check
from xmodule.exceptions import HeartbeatFailure
from xmodule.modulestore.split_mongo import BlockKey
def structure_from_mongo(structure):
"""
Converts the 'blocks' key from a list [block_data] to a map
{BlockKey: block_data}.
Converts 'root' from [block_type, block_id] to BlockKey.
Converts 'blocks.*.fields.children' from [[block_type, block_id]] to [BlockKey].
N.B. Does not convert any other ReferenceFields (because we don't know which fields they are at this level).
"""
check('seq[2]', structure['root'])
check('list(dict)', structure['blocks'])
for block in structure['blocks']:
if 'children' in block['fields']:
check('list(list[2])', block['fields']['children'])
structure['root'] = BlockKey(*structure['root'])
new_blocks = {}
for block in structure['blocks']:
if 'children' in block['fields']:
block['fields']['children'] = [BlockKey(*child) for child in block['fields']['children']]
new_blocks[BlockKey(block['block_type'], block.pop('block_id'))] = block
structure['blocks'] = new_blocks
return structure
def structure_to_mongo(structure):
"""
Converts the 'blocks' key from a map {BlockKey: block_data} to
a list [block_data], inserting BlockKey.type as 'block_type'
and BlockKey.id as 'block_id'.
Doesn't convert 'root', since namedtuples can be inserted
directly into mongo.
"""
check('BlockKey', structure['root'])
check('dict(BlockKey: dict)', structure['blocks'])
for block in structure['blocks'].itervalues():
if 'children' in block['fields']:
check('list(BlockKey)', block['fields']['children'])
new_structure = dict(structure)
new_structure['blocks'] = []
for block_key, block in structure['blocks'].iteritems():
new_block = dict(block)
new_block.setdefault('block_type', block_key.type)
new_block['block_id'] = block_key.id
new_structure['blocks'].append(new_block)
return new_structure
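To make the two shapes concrete, here is a hedged sketch of a minimal stored structure and the in-memory form structure_from_mongo produces (fields trimmed to the ones these helpers touch; '_id' and other values are illustrative, and the 'BlockKey' pycontracts contract is assumed to be registered, as the modulestore modules do on import):

stored = {
    '_id': 'structure-guid',
    'root': ['course', 'course'],
    'blocks': [
        {'block_type': 'course', 'block_id': 'course',
         'fields': {'children': [['chapter', 'chap1']]}},
        {'block_type': 'chapter', 'block_id': 'chap1', 'fields': {}},
    ],
}
in_memory = structure_from_mongo(stored)
assert in_memory['root'] == BlockKey('course', 'course')
assert in_memory['blocks'][BlockKey('course', 'course')]['fields']['children'] == [BlockKey('chapter', 'chap1')]

# structure_to_mongo flattens the dict back into the list form for storage
stored_again = structure_to_mongo(in_memory)
assert sorted(block['block_id'] for block in stored_again['blocks']) == ['chap1', 'course']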
def definition_from_mongo(definition):
"""
Converts 'fields.children' from a list [[block_type, block_id]] to [BlockKey]
"""
new_def = dict(definition)
if 'children' in new_def.get('fields', {}):
# rebuild each [block_type, block_id] pair as a BlockKey
new_def['fields']['children'] = [BlockKey(*child) for child in new_def['fields']['children']]
return new_def
class MongoConnection(object):
"""
......@@ -55,7 +116,7 @@ class MongoConnection(object):
"""
Get the structure from the persistence mechanism whose id is the given key
"""
return self.structures.find_one({'_id': key})
return structure_from_mongo(self.structures.find_one({'_id': key}))
def find_structures_by_id(self, ids):
"""
......@@ -64,7 +125,7 @@ class MongoConnection(object):
Arguments:
ids (list): A list of structure ids
"""
return self.structures.find({'_id': {'$in': ids}})
return [structure_from_mongo(structure) for structure in self.structures.find({'_id': {'$in': ids}})]
def find_structures_derived_from(self, ids):
"""
......@@ -73,26 +134,32 @@ class MongoConnection(object):
Arguments:
ids (list): A list of structure ids
"""
return self.structures.find({'previous_version': {'$in': ids}})
return [structure_from_mongo(structure) for structure in self.structures.find({'previous_version': {'$in': ids}})]
def find_ancestor_structures(self, original_version, block_id):
def find_ancestor_structures(self, original_version, block_key):
"""
Find all structures that originated from ``original_version`` that contain ``block_id``.
Find all structures that originated from ``original_version`` that contain ``block_key``.
Arguments:
original_version (str or ObjectID): The id of a structure
block_id (str): The id of the block in question
block_key (BlockKey): The id of the block in question
"""
return self.structures.find({
return [structure_from_mongo(structure) for structure in self.structures.find({
'original_version': original_version,
'blocks.{}.edit_info.update_version'.format(block_id): {'$exists': True}
})
'blocks': {
'$elemMatch': {
'block_id': block_key.id,
'block_type': block_key.type,
'edit_info.update_version': {'$exists': True},
}
}
})]
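Why $elemMatch rather than two dotted conditions: with blocks now stored as an array, separate 'blocks.block_id' and 'blocks.block_type' clauses would each be allowed to match a different array element, while $elemMatch requires a single element to satisfy all three clauses at once. An illustrative document that dotted conditions would wrongly match for BlockKey('problem', 'quiz1'):

ancestor = {
    'original_version': 'v0',
    'blocks': [
        # matches block_type='problem' but not block_id='quiz1'
        {'block_type': 'problem', 'block_id': 'intro',
         'edit_info': {'update_version': 'v3'}},
        # matches block_id='quiz1' but not block_type='problem'
        {'block_type': 'html', 'block_id': 'quiz1',
         'edit_info': {'update_version': 'v3'}},
    ],
}
# the $elemMatch query above correctly rejects this document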
def upsert_structure(self, structure):
"""
Update the db record for structure, creating that record if it doesn't already exist
"""
self.structures.update({'_id': structure['_id']}, structure, upsert=True)
self.structures.update({'_id': structure['_id']}, structure_to_mongo(structure), upsert=True)
def get_course_index(self, key, ignore_case=False):
"""
......
......@@ -14,7 +14,7 @@ Representation:
should change the search targets to SplitMongoModuleStore.SEARCH_TARGET dict
* structure:
** '_id': an ObjectId (guid),
** 'root': root_block_id (string of key in 'blocks' for the root of this structure,
** 'root': BlockKey (the block_type and block_id of the root block in the 'blocks' dictionary)
** 'previous_version': the structure from which this one was derived. For published courses, this
points to the previously published version of the structure not the draft published to this.
** 'original_version': the original structure id in the previous_version relation. Is a pseudo object
......@@ -22,10 +22,11 @@ Representation:
** 'edited_by': user_id of the user whose change caused the creation of this structure version,
** 'edited_on': the datetime for the change causing this creation of this structure version,
** 'blocks': dictionary of xblocks in this structure:
*** block_id: dictionary of block settings and children:
**** 'category': the xblock type id
*** BlockKey: dictionary of block settings and children:
**** 'block_type': the xblock type id
**** 'definition': the db id of the record containing the content payload for this xblock
**** 'fields': the Scope.settings and children field values
***** 'children': This is stored as a list of (block_type, block_id) pairs
**** 'edit_info': dictionary:
***** 'edited_on': when was this xblock's fields last changed (will be edited_on value of
update_version structure)
......@@ -40,7 +41,7 @@ Representation:
***** 'source_version': the guid for the structure from which this block was copied/published
* definition: shared content with revision history for xblock content fields
** '_id': definition_id (guid),
** 'category': xblock type id
** 'block_type': xblock type id
** 'fields': scope.content (and possibly other) field values.
** 'edit_info': dictionary:
*** 'edited_by': user_id whose edit caused this version of the definition,
......@@ -49,12 +50,13 @@ Representation:
*** 'original_version': definition_id of the root of the previous version relation on this
definition. Acts as a pseudo-object identifier.
"""
import copy
import threading
import datetime
import logging
from contracts import contract, new_contract
from importlib import import_module
from path import path
import copy
from pytz import UTC
from bson.objectid import ObjectId
......@@ -73,9 +75,8 @@ from xmodule.modulestore import (
from ..exceptions import ItemNotFoundError
from .caching_descriptor_system import CachingDescriptorSystem
from xmodule.modulestore.split_mongo.mongo_connection import MongoConnection
from xmodule.modulestore.split_mongo.mongo_connection import MongoConnection, BlockKey
from xmodule.error_module import ErrorDescriptor
from xmodule.modulestore.split_mongo import encode_key_for_mongo, decode_key_from_mongo
from collections import defaultdict
from types import NoneType
......@@ -104,6 +105,10 @@ log = logging.getLogger(__name__)
EXCLUDE_ALL = '*'
new_contract('BlockUsageLocator', BlockUsageLocator)
new_contract('BlockKey', BlockKey)
class SplitBulkWriteRecord(BulkOpsRecord):
def __init__(self):
super(SplitBulkWriteRecord, self).__init__()
......@@ -397,15 +402,15 @@ class SplitBulkWriteMixin(BulkOperationsMixin):
)
return structures
def find_ancestor_structures(self, original_version, block_id):
def find_ancestor_structures(self, original_version, block_key):
"""
Find all structures that originated from ``original_version`` that contain ``block_id``.
Find all structures that originated from ``original_version`` that contain ``block_key``.
Any structure found in the cache will be preferred to a structure with the same id from the database.
Arguments:
original_version (str or ObjectID): The id of a structure
block_id (str): The id of the block in question
block_key (BlockKey): The id of the block in question
"""
found_structure_ids = set()
structures = []
......@@ -418,10 +423,10 @@ class SplitBulkWriteMixin(BulkOperationsMixin):
if structure['original_version'] != original_version:
continue
if block_id not in structure.get('blocks', {}):
if block_key not in structure.get('blocks', {}):
continue
if 'update_version' not in structure['blocks'][block_id].get('edit_info', {}):
if 'update_version' not in structure['blocks'][block_key].get('edit_info', {}):
continue
structures.append(structure)
......@@ -429,7 +434,7 @@ class SplitBulkWriteMixin(BulkOperationsMixin):
structures.extend(
structure
for structure in self.db_connection.find_ancestor_structures(original_version, block_id)
for structure in self.db_connection.find_ancestor_structures(original_version, block_key)
if structure['_id'] not in found_structure_ids
)
return structures
......@@ -507,7 +512,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
Arguments:
system: a CachingDescriptorSystem
base_block_ids: list of block_ids to fetch
base_block_ids: list of BlockKeys to fetch
course_key: the destination course providing the context
depth: how deep below these to prefetch
lazy: whether to fetch definitions or use placeholders
......@@ -534,7 +539,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
for block in new_module_data.itervalues():
if block['definition'] in definitions:
converted_fields = self.convert_references_to_keys(
course_key, system.load_block_type(block['category']),
course_key, system.load_block_type(block['block_type']),
definitions[block['definition']].get('fields'),
system.course_entry['structure']['blocks'],
)
......@@ -544,7 +549,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
system.module_data.update(new_module_data)
return system.module_data
def _load_items(self, course_entry, block_ids, depth=0, lazy=True, **kwargs):
def _load_items(self, course_entry, block_keys, depth=0, lazy=True, **kwargs):
'''
Load & cache the given blocks from the course. Prefetch down to the
given depth. Load the definitions into each block if lazy is False;
......@@ -561,8 +566,8 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
run=course_entry.get('run'),
branch=course_entry.get('branch'),
)
self.cache_items(runtime, block_ids, course_key, depth, lazy)
return [runtime.load_item(block_id, course_entry, **kwargs) for block_id in block_ids]
self.cache_items(runtime, block_keys, course_key, depth, lazy)
return [runtime.load_item(block_key, course_entry, **kwargs) for block_key in block_keys]
def _get_cache(self, course_version_guid):
"""
......@@ -747,7 +752,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
# this error only occurs if the course does not exist
return False
return self._get_block_from_structure(course_structure, usage_key.block_id) is not None
return self._get_block_from_structure(course_structure, BlockKey.from_usage_key(usage_key)) is not None
def get_item(self, usage_key, depth=0, **kwargs):
"""
......@@ -763,7 +768,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
raise ItemNotFoundError(usage_key)
course = self._lookup_course(usage_key.course_key)
items = self._load_items(course, [usage_key.block_id], depth, lazy=True, **kwargs)
items = self._load_items(course, [BlockKey.from_usage_key(usage_key)], depth, lazy=True, **kwargs)
if len(items) == 0:
raise ItemNotFoundError(usage_key)
elif len(items) > 1:
......@@ -814,12 +819,17 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
settings = {}
if 'name' in qualifiers:
# odd case where we don't search just confirm
block_id = qualifiers.pop('name')
block = course['structure']['blocks'].get(block_id)
if _block_matches_all(block):
return self._load_items(course, [block_id], lazy=True, **kwargs)
else:
return []
block_name = qualifiers.pop('name')
block_ids = []
for block_id, block in course['structure']['blocks'].iteritems():
if block_name == block_id.id and _block_matches_all(block):
block_ids.append(block_id)
return self._load_items(course, block_ids, lazy=True, **kwargs)
if 'category' in qualifiers:
qualifiers['block_type'] = qualifiers.pop('category')
# don't expect caller to know that children are in fields
if 'children' in qualifiers:
settings['children'] = qualifiers.pop('children')
......@@ -841,13 +851,13 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
:param locator: BlockUsageLocator restricting search scope
'''
course = self._lookup_course(locator.course_key)
parent_id = self._get_parent_from_structure(locator.block_id, course['structure'])
parent_id = self._get_parent_from_structure(BlockKey.from_usage_key(locator), course['structure'])
if parent_id is None:
return None
return BlockUsageLocator.make_relative(
locator,
block_type=course['structure']['blocks'][parent_id].get('category'),
block_id=decode_key_from_mongo(parent_id),
block_type=parent_id.type,
block_id=parent_id.id,
)
def get_orphans(self, course_key, **kwargs):
......@@ -856,17 +866,15 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
"""
detached_categories = [name for name, __ in XBlock.load_tagged_classes("detached")]
course = self._lookup_course(course_key)
items = {decode_key_from_mongo(block_id) for block_id in course['structure']['blocks'].keys()}
items = set(course['structure']['blocks'].keys())
items.remove(course['structure']['root'])
blocks = course['structure']['blocks']
for block_id, block_data in blocks.iteritems():
items.difference_update(block_data.get('fields', {}).get('children', []))
if block_data['category'] in detached_categories:
items.discard(decode_key_from_mongo(block_id))
items.difference_update(BlockKey(*child) for child in block_data.get('fields', {}).get('children', []))
if block_data['block_type'] in detached_categories:
items.discard(block_id)
return [
BlockUsageLocator(
course_key=course_key, block_type=blocks[block_id]['category'], block_id=block_id
)
course_key.make_usage_key(block_type=block_id.type, block_id=block_id.id)
for block_id in items
]
......@@ -967,16 +975,16 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
'''
# course_agnostic means we don't care if the head and version don't align, trust the version
course_struct = self._lookup_course(block_locator.course_key.course_agnostic())['structure']
block_id = block_locator.block_id
block_key = BlockKey.from_usage_key(block_locator)
all_versions_with_block = self.find_ancestor_structures(
original_version=course_struct['original_version'],
block_id=block_id
block_key=block_key
)
# find (all) root versions and build map {previous: {successors}..}
possible_roots = []
result = {}
for version in all_versions_with_block:
block_payload = self._get_block_from_structure(version, block_id)
block_payload = self._get_block_from_structure(version, block_key)
if version['_id'] == block_payload['edit_info']['update_version']:
if block_payload['edit_info'].get('previous_version') is None:
# this was when this block was created
......@@ -988,7 +996,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
# more than one possible_root means usage was added and deleted > 1x.
if len(possible_roots) > 1:
# find the history segment including block_locator's version
element_to_find = self._get_block_from_structure(course_struct, block_id)['edit_info']['update_version']
element_to_find = self._get_block_from_structure(course_struct, block_key)['edit_info']['update_version']
if element_to_find in possible_roots:
possible_roots = [element_to_find]
for possibility in possible_roots:
......@@ -1026,7 +1034,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
new_id = ObjectId()
document = {
'_id': new_id,
"category": category,
"block_type": category,
"fields": new_def_data,
"edit_info": {
"edited_by": user_id,
......@@ -1061,7 +1069,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
if old_definition is None:
raise ItemNotFoundError(definition_locator)
new_def_data = self._serialize_fields(old_definition['category'], new_def_data)
new_def_data = self._serialize_fields(old_definition['block_type'], new_def_data)
if needs_saved():
# new id to create new version
old_definition['_id'] = ObjectId()
......@@ -1072,11 +1080,11 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
old_definition['edit_info']['previous_version'] = definition_locator.definition_id
old_definition['schema_version'] = self.SCHEMA_VERSION
self.db_connection.insert_definition(old_definition)
return DefinitionLocator(old_definition['category'], old_definition['_id']), True
return DefinitionLocator(old_definition['block_type'], old_definition['_id']), True
else:
return definition_locator, False
def _generate_block_id(self, course_blocks, category):
def _generate_block_key(self, course_blocks, category):
"""
Generate a somewhat readable block key, unique within this course, from the category
:param course_blocks: the current dict of blocks, keyed by BlockKey.
......@@ -1087,11 +1095,12 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
# {category: last_serial...}
# A potential confusion is if the name incorporates the parent's name, then if the child
# moves, its id won't change and will be confusing
# NOTE2: this assumes category will never contain a $ nor a period.
serial = 1
while category + str(serial) in course_blocks:
while True:
potential_key = BlockKey(category, "{}{}".format(category, serial))
if potential_key not in course_blocks:
return potential_key
serial += 1
return category + str(serial)
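So with problem1 and problem2 already taken, the next generated key is BlockKey('problem', 'problem3'). A quick illustrative check, where store stands in for a SplitMongoModuleStore instance:

course_blocks = {
    BlockKey('problem', 'problem1'): {},
    BlockKey('problem', 'problem2'): {},
}
assert store._generate_block_key(course_blocks, 'problem') == BlockKey('problem', 'problem3')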
def create_item(
self, user_id, course_key, block_type, block_id=None,
......@@ -1169,17 +1178,16 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
# generate usage id
if block_id is not None:
if encode_key_for_mongo(block_id) in new_structure['blocks']:
block_key = BlockKey(block_type, block_id)
if block_key in new_structure['blocks']:
raise DuplicateItemError(block_id, self, 'structures')
else:
new_block_id = block_id
else:
new_block_id = self._generate_block_id(new_structure['blocks'], block_type)
block_key = self._generate_block_key(new_structure['blocks'], block_type)
block_fields = partitioned_fields.get(Scope.settings, {})
if Scope.children in partitioned_fields:
block_fields.update(partitioned_fields[Scope.children])
self._update_block_in_structure(new_structure, new_block_id, self._new_block(
self._update_block_in_structure(new_structure, block_key, self._new_block(
user_id,
block_type,
block_fields,
......@@ -1198,13 +1206,13 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
item_loc = BlockUsageLocator(
course_key.version_agnostic(),
block_type=block_type,
block_id=new_block_id,
block_id=block_key.id,
)
else:
item_loc = BlockUsageLocator(
CourseLocator(version_guid=new_id),
block_type=block_type,
block_id=new_block_id,
block_id=block_key.id,
)
# reconstruct the new_item from the cache
......@@ -1235,12 +1243,12 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
new_structure = self._lookup_course(xblock.location.course_key)['structure']
# add new block as child and update parent's version
encoded_block_id = encode_key_for_mongo(parent_usage_key.block_id)
if encoded_block_id not in new_structure['blocks']:
parent_block_key = BlockKey.from_usage_key(parent_usage_key)
if parent_block_key not in new_structure['blocks']:
raise ItemNotFoundError(parent_usage_key)
parent = new_structure['blocks'][encoded_block_id]
parent['fields'].setdefault('children', []).append(xblock.location.block_id)
parent = new_structure['blocks'][parent_block_key]
parent['fields'].setdefault('children', []).append(BlockKey.from_usage_key(xblock.location))
if parent['edit_info']['update_version'] != new_structure['_id']:
# if the parent hadn't been previously changed in this bulk transaction, indicate that it's
# part of the bulk transaction
......@@ -1344,7 +1352,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
definition_id = ObjectId()
definition_entry = {
'_id': definition_id,
'category': root_category,
'block_type': root_category,
'fields': definition_fields,
'edit_info': {
'edited_by': user_id,
......@@ -1358,8 +1366,10 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
draft_structure = self._new_structure(
user_id,
root_block_id or SplitMongoModuleStore.DEFAULT_ROOT_BLOCK_ID,
root_category,
BlockKey(
root_category,
root_block_id or SplitMongoModuleStore.DEFAULT_ROOT_BLOCK_ID,
),
block_fields,
definition_id
)
......@@ -1376,8 +1386,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
draft_structure = self._lookup_course(draft_version)['structure']
draft_structure = self.version_structure(locator, draft_structure, user_id)
new_id = draft_structure['_id']
encoded_block_id = encode_key_for_mongo(draft_structure['root'])
root_block = draft_structure['blocks'][encoded_block_id]
root_block = draft_structure['blocks'][draft_structure['root']]
if block_fields is not None:
root_block['fields'].update(self._serialize_fields(root_category, block_fields))
if definition_fields is not None:
......@@ -1440,45 +1449,45 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
"""
partitioned_fields = self.partition_xblock_fields_by_scope(descriptor)
return self._update_item_from_fields(
user_id, descriptor.location.course_key, descriptor.location.block_type, descriptor.location.block_id,
user_id, descriptor.location.course_key, BlockKey.from_usage_key(descriptor.location),
partitioned_fields, descriptor.definition_locator, allow_not_found, force, **kwargs
) or descriptor
def _update_item_from_fields(
self, user_id, course_key, block_type, block_id, partitioned_fields,
self, user_id, course_key, block_key, partitioned_fields,
definition_locator, allow_not_found, force, **kwargs
):
"""
Broke out guts of update_item for short-circuited internal use only
"""
with self.bulk_operations(course_key):
if allow_not_found and isinstance(block_id, (LocalId, NoneType)):
if allow_not_found and isinstance(block_key.id, (LocalId, NoneType)):
fields = {}
for subfields in partitioned_fields.itervalues():
fields.update(subfields)
return self.create_item(
user_id, course_key, block_type, fields=fields, force=force
user_id, course_key, block_key.type, fields=fields, force=force
)
original_structure = self._lookup_course(course_key)['structure']
index_entry = self._get_index_if_valid(course_key, force)
original_entry = self._get_block_from_structure(original_structure, block_id)
original_entry = self._get_block_from_structure(original_structure, block_key)
if original_entry is None:
if allow_not_found:
fields = {}
for subfields in partitioned_fields.itervalues():
fields.update(subfields)
return self.create_item(
user_id, course_key, block_type, block_id=block_id, fields=fields, force=force,
user_id, course_key, block_key.type, block_id=block_key.id, fields=fields, force=force,
)
else:
raise ItemNotFoundError(course_key.make_usage_key(block_type, block_id))
raise ItemNotFoundError(course_key.make_usage_key(block_key.type, block_key.id))
is_updated = False
definition_fields = partitioned_fields[Scope.content]
if definition_locator is None:
definition_locator = DefinitionLocator(original_entry['category'], original_entry['definition'])
definition_locator = DefinitionLocator(original_entry['block_type'], original_entry['definition'])
if definition_fields:
definition_locator, is_updated = self.update_definition_from_data(
definition_locator, definition_fields, user_id
......@@ -1486,13 +1495,13 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
# check metadata
settings = partitioned_fields[Scope.settings]
settings = self._serialize_fields(block_type, settings)
settings = self._serialize_fields(block_key.type, settings)
if not is_updated:
is_updated = self._compare_settings(settings, original_entry['fields'])
# check children
if partitioned_fields.get(Scope.children, {}): # purposely not 'is not None'
serialized_children = [child.block_id for child in partitioned_fields[Scope.children]['children']]
serialized_children = [BlockKey.from_usage_key(child) for child in partitioned_fields[Scope.children]['children']]
is_updated = is_updated or original_entry['fields'].get('children', []) != serialized_children
if is_updated:
settings['children'] = serialized_children
......@@ -1500,7 +1509,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
# if updated, rev the structure
if is_updated:
new_structure = self.version_structure(course_key, original_structure, user_id)
block_data = self._get_block_from_structure(new_structure, block_id)
block_data = self._get_block_from_structure(new_structure, block_key)
block_data["definition"] = definition_locator.definition_id
block_data["fields"] = settings
......@@ -1524,7 +1533,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
course_key = CourseLocator(version_guid=new_id)
# fetch and return the new item--fetching is unnecessary but a good qc step
new_locator = course_key.make_usage_key(block_type, block_id)
new_locator = course_key.make_usage_key(block_key.type, block_key.id)
return self.get_item(new_locator, **kwargs)
else:
return None
......@@ -1550,7 +1559,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
xblock_class = runtime.load_block_type(block_type)
json_data = {
'category': block_type,
'block_type': block_type,
'fields': {},
}
if definition_id is not None:
......@@ -1566,7 +1575,12 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
inherited_settings[field_name] = fields[field_name]
new_block = runtime.xblock_from_json(
xblock_class, course_key, block_id, json_data, inherited_settings, **kwargs
xblock_class,
course_key,
BlockKey(block_type, block_id) if block_id else None,
json_data,
inherited_settings,
**kwargs
)
for field_name, value in (fields or {}).iteritems():
setattr(new_block, field_name, value)
......@@ -1634,13 +1648,14 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
is_updated = True
block_id = getattr(xblock.scope_ids.usage_id.block_id, 'block_id', None)
if block_id is None:
block_id = self._generate_block_id(structure_blocks, xblock.category)
encoded_block_id = encode_key_for_mongo(block_id)
new_usage_id = xblock.scope_ids.usage_id.replace(block_id=block_id)
block_key = self._generate_block_key(structure_blocks, xblock.scope_ids.block_type)
else:
block_key = BlockKey(xblock.scope_ids.block_type, block_id)
new_usage_id = xblock.scope_ids.usage_id.replace(block_id=block_key.id)
xblock.scope_ids = xblock.scope_ids._replace(usage_id=new_usage_id) # pylint: disable=protected-access
else:
is_new = False
encoded_block_id = encode_key_for_mongo(xblock.location.block_id)
block_key = BlockKey(xblock.scope_ids.block_type, xblock.scope_ids.usage_id.block_id)
children = []
if xblock.has_children:
......@@ -1648,15 +1663,15 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
if isinstance(child.block_id, LocalId):
child_block = xblock.system.get_block(child)
is_updated = self._persist_subdag(child_block, user_id, structure_blocks, new_id) or is_updated
children.append(child_block.location.block_id)
children.append(BlockKey.from_usage_key(child_block.location))
else:
children.append(child.block_id)
is_updated = is_updated or structure_blocks[encoded_block_id]['fields']['children'] != children
children.append(BlockKey.from_usage_key(child))
is_updated = is_updated or structure_blocks[block_key]['fields']['children'] != children
block_fields = partitioned_fields[Scope.settings]
block_fields = self._serialize_fields(xblock.category, block_fields)
if not is_new and not is_updated:
is_updated = self._compare_settings(block_fields, structure_blocks[encoded_block_id]['fields'])
is_updated = self._compare_settings(block_fields, structure_blocks[block_key]['fields'])
if children:
block_fields['children'] = children
......@@ -1671,12 +1686,12 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
raw=True
)
else:
block_info = structure_blocks[encoded_block_id]
block_info = structure_blocks[block_key]
block_info['fields'] = block_fields
block_info['definition'] = xblock.definition_locator.definition_id
self.version_block(block_info, user_id, new_id)
structure_blocks[encoded_block_id] = block_info
structure_blocks[block_key] = block_info
return is_updated
......@@ -1739,13 +1754,13 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
raise ItemNotFoundError(destination_course)
if destination_course.branch not in index_entry['versions']:
# must be copying the dag root if there's no current dag
root_block_id = source_structure['root']
if not any(root_block_id == subtree.block_id for subtree in subtree_list):
raise ItemNotFoundError(u'Must publish course root {}'.format(root_block_id))
root_source = source_structure['blocks'][root_block_id]
root_block_key = source_structure['root']
if not any(root_block_key == BlockKey.from_usage_key(subtree) for subtree in subtree_list):
raise ItemNotFoundError(u'Must publish course root {}'.format(root_block_key))
root_source = source_structure['blocks'][root_block_key]
# create branch
destination_structure = self._new_structure(
user_id, root_block_id, root_category=root_source['category'],
user_id, root_block_key,
# leave off the fields b/c the children must be filtered
definition_id=root_source['definition'],
)
......@@ -1754,14 +1769,14 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
destination_structure = self.version_structure(destination_course, destination_structure, user_id)
if blacklist != EXCLUDE_ALL:
blacklist = [shunned.block_id for shunned in blacklist or []]
blacklist = [BlockKey.from_usage_key(shunned) for shunned in blacklist or []]
# iterate over subtree list filtering out blacklist.
orphans = set()
destination_blocks = destination_structure['blocks']
for subtree_root in subtree_list:
if subtree_root.block_id != source_structure['root']:
if BlockKey.from_usage_key(subtree_root) != source_structure['root']:
# find the parents and put root in the right sequence
parent = self._get_parent_from_structure(subtree_root.block_id, source_structure)
parent = self._get_parent_from_structure(BlockKey.from_usage_key(subtree_root), source_structure)
if parent is not None: # may be a detached category xblock
if not parent in destination_blocks:
raise ItemNotFoundError(parent)
......@@ -1769,14 +1784,17 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
self._sync_children(
source_structure['blocks'][parent],
destination_blocks[parent],
subtree_root.block_id
BlockKey.from_usage_key(subtree_root)
)
)
# update/create the subtree and its children in destination (skipping blacklist)
orphans.update(
self._copy_subdag(
user_id, destination_structure['_id'],
subtree_root.block_id, source_structure['blocks'], destination_blocks, blacklist
BlockKey.from_usage_key(subtree_root),
source_structure['blocks'],
destination_blocks,
blacklist
)
)
# remove any remaining orphans
......@@ -1809,24 +1827,25 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
with self.bulk_operations(usage_locator.course_key):
original_structure = self._lookup_course(usage_locator.course_key)['structure']
if original_structure['root'] == usage_locator.block_id:
block_key = BlockKey.from_usage_key(usage_locator)
if original_structure['root'] == block_key:
raise ValueError("Cannot delete the root of a course")
if encode_key_for_mongo(usage_locator.block_id) not in original_structure['blocks']:
if block_key not in original_structure['blocks']:
raise ValueError("Cannot delete a block that does not exist")
index_entry = self._get_index_if_valid(usage_locator.course_key, force)
new_structure = self.version_structure(usage_locator.course_key, original_structure, user_id)
new_blocks = new_structure['blocks']
new_id = new_structure['_id']
encoded_block_id = self._get_parent_from_structure(usage_locator.block_id, original_structure)
if encoded_block_id:
parent_block = new_blocks[encoded_block_id]
parent_block['fields']['children'].remove(usage_locator.block_id)
parent_block_key = self._get_parent_from_structure(block_key, original_structure)
if parent_block_key:
parent_block = new_blocks[parent_block_key]
parent_block['fields']['children'].remove(block_key)
parent_block['edit_info']['edited_on'] = datetime.datetime.now(UTC)
parent_block['edit_info']['edited_by'] = user_id
parent_block['edit_info']['previous_version'] = parent_block['edit_info']['update_version']
parent_block['edit_info']['update_version'] = new_id
self._remove_subtree(usage_locator.block_id, new_blocks)
self._remove_subtree(BlockKey.from_usage_key(usage_locator), new_blocks)
# update index if appropriate and structures
self.update_structure(usage_locator.course_key, new_structure)
......@@ -1840,14 +1859,14 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
return result
def _remove_subtree(self, block_id, blocks):
@contract(block_key=BlockKey, blocks='dict(BlockKey: dict)')
def _remove_subtree(self, block_key, blocks):
"""
Remove the subtree rooted at block_id
Remove the subtree rooted at block_key
"""
encoded_block_id = encode_key_for_mongo(block_id)
for child in blocks[encoded_block_id]['fields'].get('children', []):
self._remove_subtree(child, blocks)
del blocks[encoded_block_id]
for child in blocks[block_key]['fields'].get('children', []):
self._remove_subtree(BlockKey(*child), blocks)
del blocks[block_key]
def delete_course(self, course_key, user_id):
"""
......@@ -1868,25 +1887,30 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
# in case the course is later restored.
# super(SplitMongoModuleStore, self).delete_course(course_key, user_id)
def inherit_settings(self, block_map, block_id, inherited_settings_map, inheriting_settings=None):
@contract(block_map="dict(BlockKey: dict)", block_key=BlockKey)
def inherit_settings(
self, block_map, block_key, inherited_settings_map, inheriting_settings=None, inherited_from=None
):
"""
Updates block_json with any inheritable setting set by an ancestor and recurses to children.
"""
encoded_key = encode_key_for_mongo(block_id)
if encoded_key not in block_map:
if block_key not in block_map:
return
block_json = block_map[encoded_key]
block_json = block_map[block_key]
if inheriting_settings is None:
inheriting_settings = {}
if inherited_from is None:
inherited_from = []
# the currently passed down values take precedence over any previously cached ones
# NOTE: this should show the values which all fields would have if inherited: i.e.,
# not set to the locally defined value but to value set by nearest ancestor who sets it
inherited_settings_map.setdefault((block_json['category'], block_id), {}).update(inheriting_settings)
inherited_settings_map.setdefault(block_key, {}).update(inheriting_settings)
# update the inheriting w/ what should pass to children
inheriting_settings = inherited_settings_map[(block_json['category'], block_id)].copy()
inheriting_settings = inherited_settings_map[block_key].copy()
block_fields = block_json['fields']
for field_name in inheritance.InheritanceMixin.fields:
if field_name in block_fields:
......@@ -1894,8 +1918,15 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
for child in block_fields.get('children', []):
try:
child = encode_key_for_mongo(child)
self.inherit_settings(block_map, child, inherited_settings_map, inheriting_settings)
if child in inherited_from:
raise Exception(u'Infinite loop detected when inheriting to {}, having already inherited from {}'.format(child, inherited_from))
self.inherit_settings(
block_map,
BlockKey(*child),
inherited_settings_map,
inheriting_settings,
inherited_from + [child]
)
except KeyError:
# here's where we need logic for looking up in other structures when we allow cross pointers
# but it's also getting this during course creation if creating top down w/ children set or
......@@ -1909,12 +1940,11 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
(0 => this usage only, 1 => this usage and its children, etc...)
A depth of None returns all descendants
"""
encoded_block_id = encode_key_for_mongo(block_id)
if encoded_block_id not in block_map:
if block_id not in block_map:
return descendent_map
if block_id not in descendent_map:
descendent_map[block_id] = block_map[encoded_block_id]
descendent_map[block_id] = block_map[block_id]
if depth is None or depth > 0:
depth = depth - 1 if depth is not None else None
......@@ -1945,7 +1975,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
if 'fields' in block and 'children' in block['fields']:
block['fields']["children"] = [
block_id for block_id in block['fields']["children"]
if encode_key_for_mongo(block_id) in original_structure['blocks']
if block_id in original_structure['blocks']
]
self.update_structure(course_locator, original_structure)
......@@ -1955,9 +1985,10 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
and converting them.
:param jsonfields: the serialized copy of the xblock's fields
"""
def robust_usage_key(block_id):
@contract(block_key="BlockUsageLocator | seq[2]")
def robust_usage_key(block_key):
"""
create a course_key relative usage key for the block_id. If the block_id is in blocks,
create a course_key relative usage key for the block_key. If the block_key is in blocks,
use its correct category; otherwise, use 'unknown'.
The purpose for this is that some operations add pointers as they build up the
structure without worrying about order of creation. Because the category of the
......@@ -1965,14 +1996,17 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
out a dependency graph algorithm for those functions which may prereference blocks.
"""
# if this was taken from cache, then its fields are already converted
if isinstance(block_id, BlockUsageLocator):
return block_id.map_into_course(course_key)
if isinstance(block_key, BlockUsageLocator):
return block_key.map_into_course(course_key)
elif not isinstance(block_key, BlockKey):
block_key = BlockKey(*block_key)
try:
return course_key.make_usage_key(
blocks[encode_key_for_mongo(block_id)]['category'], block_id
block_key.type, block_key.id
)
except KeyError:
return course_key.make_usage_key('unknown', block_id)
return course_key.make_usage_key('unknown', block_key.id)
xblock_class = self.mixologist.mix(xblock_class)
# Make a shallow copy, so that we aren't manipulating a cached field dictionary
......@@ -1988,7 +2022,6 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
output_fields[field_name] = [robust_usage_key(ele) for ele in value]
elif isinstance(field, ReferenceValueDict):
for key, subvalue in value.iteritems():
assert isinstance(subvalue, basestring)
value[key] = robust_usage_key(subvalue)
return output_fields
......@@ -2081,14 +2114,14 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
for field_name, value in fields.iteritems():
if value is not None:
if isinstance(xblock_class.fields[field_name], Reference):
fields[field_name] = value.block_id
fields[field_name] = BlockKey.from_usage_key(value)
elif isinstance(xblock_class.fields[field_name], ReferenceList):
fields[field_name] = [
ele.block_id for ele in value
BlockKey.from_usage_key(ele) for ele in value
]
elif isinstance(xblock_class.fields[field_name], ReferenceValueDict):
for key, subvalue in value.iteritems():
value[key] = subvalue.block_id
value[key] = BlockKey.from_usage_key(subvalue)
# should this recurse down dicts and lists just in case they contain datetime?
elif not isinstance(value, datetime.datetime): # don't convert datetimes!
fields[field_name] = xblock_class.fields[field_name].to_json(value)
......@@ -2097,33 +2130,31 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
if 'location' in fields:
log.warn('attempt to persist location')
del fields['location']
if 'category' in fields:
if 'block_type' in fields:
log.warn('attempt to persist category')
del fields['category']
del fields['block_type']
return fields
def _new_structure(self, user_id, root_block_id,
root_category=None, block_fields=None, definition_id=None):
def _new_structure(self, user_id, root_block_key, block_fields=None, definition_id=None):
"""
Internal function: create a structure element with no previous version. Must provide the root
BlockKey but not necessarily the info needed to create its block (for the use case of
publishing). If creating the root block here, also provide block_fields and definition_id.
"""
new_id = ObjectId()
if root_category is not None:
encoded_root = encode_key_for_mongo(root_block_id)
if root_block_key is not None:
if block_fields is None:
block_fields = {}
blocks = {
encoded_root: self._new_block(
user_id, root_category, block_fields, definition_id, new_id
root_block_key: self._new_block(
user_id, root_block_key.type, block_fields, definition_id, new_id
)
}
else:
blocks = {}
return {
'_id': new_id,
'root': root_block_id,
'root': root_block_key,
'previous_version': None,
'original_version': new_id,
'edited_by': user_id,
......@@ -2132,15 +2163,15 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
'schema_version': self.SCHEMA_VERSION,
}
def _get_parent_from_structure(self, block_id, structure):
@contract(block_key=BlockKey)
def _get_parent_from_structure(self, block_key, structure):
"""
Given a structure, find block_id's parent in that structure. Note returns
Given a structure, find block_key's parent in that structure. Returns
the parent's BlockKey, or None if block_key has no parent.
"""
for parent_id, value in structure['blocks'].iteritems():
for child_id in value['fields'].get('children', []):
if block_id == child_id:
return parent_id
for parent_block_key, value in structure['blocks'].iteritems():
if block_key in value['fields'].get('children', []):
return parent_block_key
return None
def _sync_children(self, source_parent, destination_parent, new_child):
......@@ -2149,31 +2180,31 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
Return the removed ones as orphans (a set).
"""
destination_reordered = []
destination_children = destination_parent['fields']['children']
destination_children = set(destination_parent['fields']['children'])
source_children = source_parent['fields']['children']
orphans = set()
for child in destination_children:
try:
source_children.index(child)
except ValueError:
orphans.add(child)
orphans = destination_children - set(source_children)
for child in source_children:
if child == new_child or child in destination_children:
destination_reordered.append(child)
destination_parent['fields']['children'] = destination_reordered
return orphans
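A worked example of the rewritten helper, with store again standing in for a modulestore instance: children missing from the source become orphans, and the destination is rebuilt in the source's order with the new child spliced in:

source_parent = {'fields': {'children': [
    BlockKey('html', 'c'), BlockKey('problem', 'd'), BlockKey('html', 'a'),
]}}
destination_parent = {'fields': {'children': [
    BlockKey('html', 'a'), BlockKey('html', 'b'), BlockKey('html', 'c'),
]}}

orphans = store._sync_children(source_parent, destination_parent, new_child=BlockKey('problem', 'd'))
assert orphans == set([BlockKey('html', 'b')])
assert destination_parent['fields']['children'] == [
    BlockKey('html', 'c'), BlockKey('problem', 'd'), BlockKey('html', 'a'),
]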
def _copy_subdag(self, user_id, destination_version, block_id, source_blocks, destination_blocks, blacklist):
@contract(
block_key=BlockKey,
source_blocks="dict(BlockKey: *)",
destination_blocks="dict(BlockKey: *)",
blacklist="list(BlockKey) | str",
)
def _copy_subdag(self, user_id, destination_version, block_key, source_blocks, destination_blocks, blacklist):
"""
Update destination_blocks for the sub-dag rooted at block_id to be like the one in
Update destination_blocks for the sub-dag rooted at block_key to be like the one in
source_blocks excluding blacklist.
Return any newly discovered orphans (as a set)
"""
orphans = set()
encoded_block_id = encode_key_for_mongo(block_id)
destination_block = destination_blocks.get(encoded_block_id)
new_block = source_blocks[encoded_block_id]
destination_block = destination_blocks.get(block_key)
new_block = source_blocks[block_key]
if destination_block:
# reorder children to correspond to whatever order holds for source.
# remove any which source no longer claims (put into orphans)
......@@ -2186,7 +2217,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
index = source_children.index(child)
destination_reordered[index] = child
except ValueError:
orphans.add(child)
orphans.add(BlockKey(*child))
if blacklist != EXCLUDE_ALL:
for index, child in enumerate(source_children):
if child not in blacklist:
......@@ -2202,7 +2233,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
destination_block['edit_info']['edited_on'] = datetime.datetime.now(UTC)
else:
destination_block = self._new_block(
user_id, new_block['category'],
user_id, new_block['block_type'],
self._filter_blacklist(copy.copy(new_block['fields']), blacklist),
new_block['definition'],
destination_version,
......@@ -2217,12 +2248,13 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
if child not in blacklist:
orphans.update(
self._copy_subdag(
user_id, destination_version, child, source_blocks, destination_blocks, blacklist
user_id, destination_version, BlockKey(*child), source_blocks, destination_blocks, blacklist
)
)
destination_blocks[encoded_block_id] = destination_block
destination_blocks[block_key] = destination_block
return orphans
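The rehydration pattern used throughout this hunk: children read back from a structure arrive as plain sequences, and BlockKey(*child) restores the typed key without changing equality. A sketch (child shown as a tuple for illustration):
from xmodule.modulestore.split_mongo import BlockKey
child = ('problem', 'problem1')    # as held in a structure document's children
key = BlockKey(*child)             # BlockKey(type='problem', id='problem1')
assert key == child                # namedtuple/tuple equality holds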
@contract(blacklist='list(BlockKey) | str')
def _filter_blacklist(self, fields, blacklist):
"""
Filter out blacklist from the children field in fields. Will construct a new list for children;
......@@ -2231,18 +2263,18 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
if blacklist == EXCLUDE_ALL:
fields['children'] = []
else:
fields['children'] = [child for child in fields.get('children', []) if child not in blacklist]
fields['children'] = [child for child in fields.get('children', []) if BlockKey(*child) not in blacklist]
return fields
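_filter_blacklist in miniature, assuming an illustrative children list and a one-entry blacklist:
from xmodule.modulestore.split_mongo import BlockKey
fields = {'children': [('chapter', 'chapter1'), ('chapter', 'chapter2')]}
blacklist = [BlockKey('chapter', 'chapter2')]
# Rehydrate each child into a BlockKey so membership in the blacklist is typed.
fields['children'] = [c for c in fields['children'] if BlockKey(*c) not in blacklist]
assert fields['children'] == [('chapter', 'chapter1')]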
@contract(orphan=BlockKey)
def _delete_if_true_orphan(self, orphan, structure):
"""
Delete the orphan and any of its descendants which no longer have parents.
"""
if self._get_parent_from_structure(orphan, structure) is None:
encoded_block_id = encode_key_for_mongo(orphan)
for child in structure['blocks'][encoded_block_id]['fields'].get('children', []):
self._delete_if_true_orphan(child, structure)
del structure['blocks'][encoded_block_id]
for child in structure['blocks'][orphan]['fields'].get('children', []):
self._delete_if_true_orphan(BlockKey(*child), structure)
del structure['blocks'][orphan]
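A sketch of the guard above: a block is only removed once no block in the structure still lists it as a child (keys illustrative):
from xmodule.modulestore.split_mongo import BlockKey
structure = {'blocks': {
    BlockKey('course', 'head'): {'fields': {'children': [('chapter', 'kept')]}},
    BlockKey('chapter', 'kept'): {'fields': {}},
    BlockKey('chapter', 'detached'): {'fields': {}},
}}
# 'kept' still has a parent, so _delete_if_true_orphan would leave it alone;
# 'detached' has no parent, so it would be deleted from structure['blocks'].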
def _new_block(self, user_id, category, block_fields, definition_id, new_id, raw=False):
"""
......@@ -2256,7 +2288,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
if not raw:
block_fields = self._serialize_fields(category, block_fields)
return {
'category': category,
'block_type': category,
'definition': definition_id,
'fields': block_fields,
'edit_info': {
......@@ -2267,19 +2299,21 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
}
}
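The per-block document shape after the rename, with illustrative values; 'category' becomes 'block_type' to match the identity tuple (edit_info trimmed for brevity):
block_doc = {
    'block_type': 'problem',               # renamed from 'category'
    'definition': 'definition-object-id',  # placeholder for a mongo ObjectId
    'fields': {'display_name': 'Sample'},
    'edit_info': {'edited_by': 'user_id', 'edited_on': None},
}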
def _get_block_from_structure(self, structure, block_id):
@contract(block_key=BlockKey)
def _get_block_from_structure(self, structure, block_key):
"""
Retrieve the block for the given BlockKey from the structure, or None if absent.
"""
return structure['blocks'].get(encode_key_for_mongo(block_id))
return structure['blocks'].get(block_key)
def _update_block_in_structure(self, structure, block_id, content):
@contract(block_key=BlockKey)
def _update_block_in_structure(self, structure, block_key, content):
"""
Store the given content in the structure under the given BlockKey.
"""
structure['blocks'][encode_key_for_mongo(block_id)] = content
structure['blocks'][block_key] = content
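With BlockKey as the dict key, both helpers reduce to plain dict operations; a runnable sketch with illustrative values:
from xmodule.modulestore.split_mongo import BlockKey
key = BlockKey('problem', 'problem1')
structure = {'blocks': {}}
structure['blocks'][key] = {'block_type': 'problem'}              # _update_block_in_structure
assert structure['blocks'].get(key) == {'block_type': 'problem'}  # _get_block_from_structure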
def find_courses_by_search_target(self, field_name, field_value):
"""
......
......@@ -10,6 +10,7 @@ from xmodule.modulestore.draft_and_published import (
ModuleStoreDraftAndPublished, DIRECT_ONLY_CATEGORIES, UnsupportedRevisionError
)
from opaque_keys.edx.locator import CourseLocator
from xmodule.modulestore.split_mongo import BlockKey
class DraftVersioningModuleStore(ModuleStoreDraftAndPublished, SplitMongoModuleStore):
......@@ -241,15 +242,15 @@ class DraftVersioningModuleStore(ModuleStoreDraftAndPublished, SplitMongoModuleS
def get_course(branch_name):
return self._lookup_course(xblock.location.course_key.for_branch(branch_name))['structure']
def get_block(course_structure, block_id):
return self._get_block_from_structure(course_structure, block_id)
def get_block(course_structure, block_key):
return self._get_block_from_structure(course_structure, block_key)
draft_course = get_course(ModuleStoreEnum.BranchName.draft)
published_course = get_course(ModuleStoreEnum.BranchName.published)
def has_changes_subtree(block_id):
draft_block = get_block(draft_course, block_id)
published_block = get_block(published_course, block_id)
def has_changes_subtree(block_key):
draft_block = get_block(draft_course, block_key)
published_block = get_block(published_course, block_key)
if not published_block:
return True
......@@ -265,7 +266,7 @@ class DraftVersioningModuleStore(ModuleStoreDraftAndPublished, SplitMongoModuleS
return False
return has_changes_subtree(xblock.location.block_id)
return has_changes_subtree(BlockKey.from_usage_key(xblock.location))
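BlockKey.from_usage_key is the bridge from locators to structure keys; a minimal sketch of the round trip, using a hypothetical course key:
from opaque_keys.edx.locator import CourseLocator
from xmodule.modulestore.split_mongo import BlockKey
usage = CourseLocator(org='testx', course='demo', run='run').make_usage_key('chapter', 'chapter1')
assert BlockKey.from_usage_key(usage) == BlockKey('chapter', 'chapter1')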
def publish(self, location, user_id, blacklist=None, **kwargs):
"""
......@@ -316,7 +317,10 @@ class DraftVersioningModuleStore(ModuleStoreDraftAndPublished, SplitMongoModuleS
published_course_structure = self._lookup_course(
location.course_key.for_branch(ModuleStoreEnum.BranchName.published)
)['structure']
published_block = self._get_block_from_structure(published_course_structure, location.block_id)
published_block = self._get_block_from_structure(
published_course_structure,
BlockKey.from_usage_key(location)
)
if published_block is None:
raise InvalidVersionError(location)
......@@ -325,7 +329,7 @@ class DraftVersioningModuleStore(ModuleStoreDraftAndPublished, SplitMongoModuleS
new_structure = self.version_structure(draft_course_key, draft_course_structure, user_id)
# remove the block and its descendants from the new structure
self._remove_subtree(location.block_id, new_structure['blocks'])
self._remove_subtree(BlockKey.from_usage_key(location), new_structure['blocks'])
# copy over the block and its descendants from the published branch
def copy_from_published(root_block_id):
......@@ -341,7 +345,7 @@ class DraftVersioningModuleStore(ModuleStoreDraftAndPublished, SplitMongoModuleS
for child_block_id in block.setdefault('fields', {}).get('children', []):
copy_from_published(child_block_id)
copy_from_published(location.block_id)
copy_from_published(BlockKey.from_usage_key(location))
# update course structure and index
self.update_structure(draft_course_key, new_structure)
......@@ -389,7 +393,7 @@ class DraftVersioningModuleStore(ModuleStoreDraftAndPublished, SplitMongoModuleS
def _get_head(self, xblock, branch):
course_structure = self._lookup_course(xblock.location.course_key.for_branch(branch))['structure']
return self._get_block_from_structure(course_structure, xblock.location.block_id)
return self._get_block_from_structure(course_structure, BlockKey.from_usage_key(xblock.location))
def _get_version(self, block):
"""
......@@ -429,7 +433,7 @@ class DraftVersioningModuleStore(ModuleStoreDraftAndPublished, SplitMongoModuleS
partitioned_fields = self.partition_fields_by_scope(block_type, fields)
course_key = self._map_revision_to_branch(course_key) # cast to branch_setting
return self._update_item_from_fields(
user_id, course_key, block_type, block_id, partitioned_fields, None, allow_not_found=True, force=True
user_id, course_key, BlockKey(block_type, block_id), partitioned_fields, None, allow_not_found=True, force=True
) or self.get_item(new_usage_key)
def compute_published_info_internal(self, xblock):
......
......@@ -6,6 +6,7 @@ import random
import re
import unittest
import uuid
from contracts import contract
from importlib import import_module
from path import path
......@@ -22,6 +23,7 @@ from xmodule.x_module import XModuleMixin
from xmodule.fields import Date, Timedelta
from xmodule.modulestore.split_mongo.split import SplitMongoModuleStore
from xmodule.modulestore.tests.test_modulestore import check_has_course_method
from xmodule.modulestore.split_mongo import BlockKey
from xmodule.modulestore.tests.mongo_connection import MONGO_PORT_NUM, MONGO_HOST
from xmodule.modulestore.edit_info import EditInfoMixin
......@@ -129,7 +131,7 @@ class SplitModuleTest(unittest.TestCase):
"revisions": [{
"user_id": "testassist@edx.org",
"update": {
"head12345": {
("course", "head12345"): {
"end": _date_field.from_json("2013-04-13T04:30"),
"tabs": [
{
......@@ -198,7 +200,7 @@ class SplitModuleTest(unittest.TestCase):
},
{"user_id": "testassist@edx.org",
"update":
{"head12345": {
{("course", "head12345"): {
"end": _date_field.from_json("2013-06-13T04:30"),
"grading_policy": {
"GRADER": [
......@@ -243,6 +245,7 @@ class SplitModuleTest(unittest.TestCase):
{
"id": "chapter1",
"parent": "head12345",
"parent_type": "course",
"category": "chapter",
"fields": {
"display_name": "Hercules"
......@@ -251,6 +254,7 @@ class SplitModuleTest(unittest.TestCase):
{
"id": "chapter2",
"parent": "head12345",
"parent_type": "course",
"category": "chapter",
"fields": {
"display_name": "Hera heckles Hercules"
......@@ -259,6 +263,7 @@ class SplitModuleTest(unittest.TestCase):
{
"id": "chapter3",
"parent": "head12345",
"parent_type": "course",
"category": "chapter",
"fields": {
"display_name": "Hera cuckolds Zeus"
......@@ -267,6 +272,7 @@ class SplitModuleTest(unittest.TestCase):
{
"id": "problem1",
"parent": "chapter3",
"parent_type": "chapter",
"category": "problem",
"fields": {
"display_name": "Problem 3.1",
......@@ -276,6 +282,7 @@ class SplitModuleTest(unittest.TestCase):
{
"id": "problem3_2",
"parent": "chapter3",
"parent_type": "chapter",
"category": "problem",
"fields": {
"display_name": "Problem 3.2"
......@@ -350,7 +357,7 @@ class SplitModuleTest(unittest.TestCase):
"revisions": [{
"user_id": "test@edx.org",
"update": {
"head23456": {
("course", "head23456"): {
"display_name": "The most wonderful course",
"grading_policy": {
"GRADER": [
......@@ -466,13 +473,13 @@ class SplitModuleTest(unittest.TestCase):
root_block_id=course_spec['root_block_id']
)
for revision in course_spec.get('revisions', []):
for block_id, fields in revision.get('update', {}).iteritems():
for (block_type, block_id), fields in revision.get('update', {}).iteritems():
# cheat since course is most frequent
if course.location.block_id == block_id:
block = course
else:
# the update key now carries the block_type, so build a fully-typed locator
block_usage = BlockUsageLocator.make_relative(course.location, '', block_id)
block_usage = BlockUsageLocator.make_relative(course.location, block_type, block_id)
block = split_store.get_item(block_usage)
for key, value in fields.iteritems():
setattr(block, key, value)
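The fixture's update dicts are now keyed by (block_type, block_id) pairs, which the loop above unpacks directly; in miniature, with an illustrative revision:
revision = {'update': {('course', 'head12345'): {'end': '2013-04-13T04:30'}}}
for (block_type, block_id), fields in revision['update'].items():
    assert (block_type, block_id) == ('course', 'head12345')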
......@@ -484,7 +491,7 @@ class SplitModuleTest(unittest.TestCase):
elif spec['parent'] == course.location.block_id:
parent = course
else:
block_usage = BlockUsageLocator.make_relative(course.location, '', spec['parent'])
block_usage = BlockUsageLocator.make_relative(course.location, spec['parent_type'], spec['parent'])
parent = split_store.get_item(block_usage)
block_id = LocalId(spec['id'])
child = split_store.create_xblock(
......@@ -680,10 +687,10 @@ class SplitModuleCourseTests(SplitModuleTest):
locator = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT)
course = modulestore().get_course(locator)
block_map = modulestore().cache_items(
course.system, [child.block_id for child in course.children], course.id, depth=3
course.system, [BlockKey.from_usage_key(child) for child in course.children], course.id, depth=3
)
self.assertIn('chapter1', block_map)
self.assertIn('problem3_2', block_map)
self.assertIn(BlockKey('chapter', 'chapter1'), block_map)
self.assertIn(BlockKey('problem', 'problem3_2'), block_map)
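The block cache now keys on BlockKey, so a bare block_id string no longer matches; illustrative:
from xmodule.modulestore.split_mongo import BlockKey
block_map = {BlockKey('chapter', 'chapter1'): object()}
assert BlockKey('chapter', 'chapter1') in block_map
assert 'chapter1' not in block_map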
def test_course_successors(self):
"""
......@@ -903,10 +910,6 @@ class SplitModuleItemTests(SplitModuleTest):
)
self.assertEqual(len(matches), 2)
matches = modulestore().get_items(locator, qualifiers={'children': 'chapter2'})
self.assertEqual(len(matches), 1)
self.assertEqual(matches[0].location.block_id, 'head12345')
def test_get_parents(self):
'''
get_parent_location(locator): BlockUsageLocator
......@@ -920,8 +923,9 @@ class SplitModuleItemTests(SplitModuleTest):
self.assertEqual(parent.block_id, 'head12345')
self.assertEqual(parent.org, "testx")
self.assertEqual(parent.course, "GreekHero")
locator = locator.course_key.make_usage_key('Chapter', 'chapter2')
locator = locator.course_key.make_usage_key('chapter', 'chapter2')
parent = modulestore().get_parent_location(locator)
self.assertIsNotNone(parent)
self.assertEqual(parent.block_id, 'head12345')
locator = locator.course_key.make_usage_key('garbage', 'nosuchblock')
parent = modulestore().get_parent_location(locator)
......@@ -1243,7 +1247,7 @@ class TestItemCrud(SplitModuleTest):
self.assertNotEqual(updated_problem.location.version_guid, pre_version_guid)
self.assertEqual(version_agnostic(updated_problem.children), version_agnostic(block.children))
self.assertNotIn(moved_child, version_agnostic(updated_problem.children))
locator = locator.course_key.make_usage_key('Chapter', "chapter1")
locator = locator.course_key.make_usage_key('chapter', "chapter1")
other_block = modulestore().get_item(locator)
other_block.children.append(moved_child)
other_updated = modulestore().update_item(other_block, self.user_id)
......@@ -1524,9 +1528,9 @@ class TestCourseCreation(SplitModuleTest):
new_course.location.as_object_id(new_course.location.version_guid)
)
self.assertIsNotNone(db_structure, "Didn't find course")
self.assertNotIn('course', db_structure['blocks'])
self.assertIn('top', db_structure['blocks'])
self.assertEqual(db_structure['blocks']['top']['category'], 'chapter')
self.assertNotIn(BlockKey('course', 'course'), db_structure['blocks'])
self.assertIn(BlockKey('chapter', 'top'), db_structure['blocks'])
self.assertEqual(db_structure['blocks'][BlockKey('chapter', 'top')]['block_type'], 'chapter')
def test_create_id_dupe(self):
"""
......@@ -1636,23 +1640,27 @@ class TestPublish(SplitModuleTest):
chapter2 = source_course.make_usage_key('chapter', 'chapter2')
chapter3 = source_course.make_usage_key('chapter', 'chapter3')
modulestore().copy(self.user_id, source_course, dest_course, [head], [chapter2, chapter3])
expected = [head.block_id, chapter1.block_id]
self._check_course(
source_course, dest_course, expected, [chapter2.block_id, chapter3.block_id, "problem1", "problem3_2"]
)
expected = [BlockKey.from_usage_key(head), BlockKey.from_usage_key(chapter1)]
unexpected = [
BlockKey.from_usage_key(chapter2),
BlockKey.from_usage_key(chapter3),
BlockKey("problem", "problem1"),
BlockKey("problem", "problem3_2")
]
self._check_course(source_course, dest_course, expected, unexpected)
# add a child under chapter1
new_module = modulestore().create_child(
self.user_id, chapter1, "sequential",
fields={'display_name': 'new sequential'},
)
# remove chapter1 from expected because its published version no longer matches the changed source
expected.remove(chapter1.block_id)
expected.remove(BlockKey.from_usage_key(chapter1))
# check that it's not in published course
with self.assertRaises(ItemNotFoundError):
modulestore().get_item(new_module.location.map_into_course(dest_course))
# publish it
modulestore().copy(self.user_id, source_course, dest_course, [new_module.location], None)
expected.append(new_module.location.block_id)
expected.append(BlockKey.from_usage_key(new_module.location))
# check that it is in the published course and that its parent is the chapter
pub_module = modulestore().get_item(new_module.location.map_into_course(dest_course))
self.assertEqual(
......@@ -1664,12 +1672,10 @@ class TestPublish(SplitModuleTest):
)
# publish it
modulestore().copy(self.user_id, source_course, dest_course, [new_module.location], None)
expected.append(new_module.location.block_id)
expected.append(BlockKey.from_usage_key(new_module.location))
# check that it is in the published course (no error means it worked)
pub_module = modulestore().get_item(new_module.location.map_into_course(dest_course))
self._check_course(
source_course, dest_course, expected, [chapter2.block_id, chapter3.block_id, "problem1", "problem3_2"]
)
self._check_course(source_course, dest_course, expected, unexpected)
def test_exceptions(self):
"""
......@@ -1702,8 +1708,14 @@ class TestPublish(SplitModuleTest):
chapter2 = source_course.make_usage_key('chapter', 'chapter2')
problem1 = source_course.make_usage_key('problem', 'problem1')
modulestore().copy(self.user_id, source_course, dest_course, [head], [chapter2])
expected = ["head12345", "chapter1", "chapter3", "problem1", "problem3_2"]
self._check_course(source_course, dest_course, expected, ["chapter2"])
expected = [
BlockKey("course", "head12345"),
BlockKey("chapter", "chapter1"),
BlockKey("chapter", "chapter3"),
BlockKey("problem", "problem1"),
BlockKey("problem", "problem3_2"),
]
self._check_course(source_course, dest_course, expected, [BlockKey("chapter", "chapter2")])
# now move problem1 and delete problem3_2
chapter1 = modulestore().get_item(source_course.make_usage_key("chapter", "chapter1"))
chapter3 = modulestore().get_item(source_course.make_usage_key("chapter", "chapter3"))
......@@ -1711,9 +1723,15 @@ class TestPublish(SplitModuleTest):
chapter3.children.remove(problem1.map_into_course(chapter3.location.course_key))
modulestore().delete_item(source_course.make_usage_key("problem", "problem3_2"), self.user_id)
modulestore().copy(self.user_id, source_course, dest_course, [head], [chapter2])
expected = ["head12345", "chapter1", "chapter3", "problem1"]
self._check_course(source_course, dest_course, expected, ["chapter2", "problem3_2"])
expected = [
BlockKey("course", "head12345"),
BlockKey("chapter", "chapter1"),
BlockKey("chapter", "chapter3"),
BlockKey("problem", "problem1")
]
self._check_course(source_course, dest_course, expected, [BlockKey("chapter", "chapter2"), BlockKey("problem", "problem3_2")])
@contract(expected_blocks="list(BlockKey)", unexpected_blocks="list(BlockKey)")
def _check_course(self, source_course_loc, dest_course_loc, expected_blocks, unexpected_blocks):
"""
Check that the course has the expected blocks and does not have the unexpected blocks
......@@ -1721,9 +1739,8 @@ class TestPublish(SplitModuleTest):
history_info = modulestore().get_course_history_info(dest_course_loc)
self.assertEqual(history_info['edited_by'], self.user_id)
for expected in expected_blocks:
# since block_type has no impact on identity, we can just provide an empty string
source = modulestore().get_item(source_course_loc.make_usage_key("", expected))
pub_copy = modulestore().get_item(dest_course_loc.make_usage_key("", expected))
source = modulestore().get_item(source_course_loc.make_usage_key(expected.type, expected.id))
pub_copy = modulestore().get_item(dest_course_loc.make_usage_key(expected.type, expected.id))
# everything except previous_version & children should be the same
self.assertEqual(source.category, pub_copy.category)
self.assertEqual(
......@@ -1743,21 +1760,28 @@ class TestPublish(SplitModuleTest):
self.assertEqual(field.read_from(source), field.read_from(pub_copy))
for unexp in unexpected_blocks:
with self.assertRaises(ItemNotFoundError):
modulestore().get_item(dest_course_loc.make_usage_key("", unexp))
modulestore().get_item(dest_course_loc.make_usage_key(unexp.type, unexp.id))
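Rebuilding usage keys from typed BlockKeys replaces the old empty-string block_type placeholder; a sketch with a hypothetical course key:
from opaque_keys.edx.locator import CourseLocator
from xmodule.modulestore.split_mongo import BlockKey
expected = BlockKey('chapter', 'chapter1')
usage = CourseLocator(org='testx', course='demo', run='run').make_usage_key(expected.type, expected.id)
assert (usage.block_type, usage.block_id) == expected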
@contract(
source_children="list(BlockUsageLocator)",
dest_children="list(BlockUsageLocator)",
unexpected="list(BlockKey)"
)
def _compare_children(self, source_children, dest_children, unexpected):
"""
Ensure dest_children == source_children minus unexpected
"""
dest_cursor = 0
for child in source_children:
child = child.version_agnostic()
if child.block_id in unexpected:
self.assertNotIn(child.block_id, [dest.block_id for dest in dest_children])
else:
self.assertEqual(child.block_id, dest_children[dest_cursor].block_id)
dest_cursor += 1
self.assertEqual(dest_cursor, len(dest_children))
source_block_keys = [
src_key
for src_key
in (BlockKey.from_usage_key(src) for src in source_children)
if src_key not in unexpected
]
dest_block_keys = [BlockKey.from_usage_key(dest) for dest in dest_children]
for unexp in unexpected:
self.assertNotIn(unexp, dest_block_keys)
self.assertEqual(source_block_keys, dest_block_keys)
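The rewritten comparison in miniature: filtering `unexpected` out of the ordered source keys must reproduce the destination ordering exactly (values illustrative):
from xmodule.modulestore.split_mongo import BlockKey
source = [BlockKey('problem', 'a'), BlockKey('problem', 'b'), BlockKey('problem', 'c')]
unexpected = [BlockKey('problem', 'b')]
dest = [BlockKey('problem', 'a'), BlockKey('problem', 'c')]
assert [k for k in source if k not in unexpected] == dest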
class TestSchema(SplitModuleTest):
......
......@@ -187,7 +187,7 @@ class BulkAssertionManager(object):
def assertEqual(self, expected, actual, description=None):
if description is None:
description = "{!r} does not equal {!r}".format(expected, actual)
description = u"{!r} does not equal {!r}".format(expected, actual)
if expected != actual:
self._equal_expected.append((description, expected))
self._equal_actual.append((description, actual))
......@@ -291,8 +291,11 @@ class CourseComparisonTest(BulkAssertionTest):
with self.bulk_assertions():
self.assertEqual(len(expected_items), len(actual_items))
def map_key(usage_key):
return (usage_key.block_type, usage_key.block_id)
actual_item_map = {
item.location.block_id: item
map_key(item.location): item
for item in actual_items
}
......@@ -302,12 +305,12 @@ class CourseComparisonTest(BulkAssertionTest):
# modulestore actuals come from here; assume old mongo first and, if that fails, assume split
if expected_item.location.category == 'course':
actual_item_location = actual_item_location.replace(name=actual_item_location.run)
actual_item = actual_item_map.get(actual_item_location.block_id)
actual_item = actual_item_map.get(map_key(actual_item_location))
# must be split
if actual_item is None and expected_item.location.category == 'course':
actual_item_location = actual_item_location.replace(name='course')
actual_item = actual_item_map.get(actual_item_location.block_id)
self.assertIsNotNone(actual_item, u'cannot find {} in {}'.format(actual_item_location, actual_item_map))
actual_item = actual_item_map.get(map_key(actual_item_location))
self.assertIsNotNone(actual_item, u'cannot find {} in {}'.format(map_key(actual_item_location), actual_item_map))
# compare fields
self.assertEqual(expected_item.fields, actual_item.fields)
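map_key mirrors BlockKey's identity tuple without importing it into this helper; illustrative usage with a hypothetical course key:
from opaque_keys.edx.locator import CourseLocator

def map_key(usage_key):
    return (usage_key.block_type, usage_key.block_id)

loc = CourseLocator(org='testx', course='demo', run='run').make_usage_key('html', 'intro')
assert map_key(loc) == ('html', 'intro')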
......
<html url_name="Duplicate_URL_Name">
This is test html.
</html>
\ No newline at end of file
</html>
<vertical display_name="Duplicate URL Name">
<html url_name="Duplicate_URL_Name"/>
</vertical>
\ No newline at end of file
</vertical>
# Disable PyContract contract checking when running as a webserver
import contracts
contracts.disable_all()
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "lms.envs.aws")
......
......@@ -127,6 +127,7 @@ rednose==0.3
selenium==2.42.1
splinter==0.5.4
testtools==0.9.34
PyContracts==1.6.4
# Used for Segment.io analytics
analytics-python==0.4.4
......