Commit c58e6881 by Don Mitchell, committed by Calen Pennington

Cache modules in split bulk ops
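
Add a per-bulk-operation block cache to the split modulestore: _load_item now
checks get_cached_block before instantiating an XBlock and stores new instances
via cache_block, while structure-level writes evict stale entries with
decache_block.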

parent 18822bdb
@@ -240,7 +240,7 @@ class CombinedOpenEndedFields(object):
         help=_("The number of times the student can try to answer this problem."),
         default=1,
         scope=Scope.settings,
-        values={"min": 1 }
+        values={"min": 1}
     )
     accept_file_upload = Boolean(
         display_name=_("Allow File Uploads"),
...
@@ -76,6 +76,11 @@ class CachingDescriptorSystem(MakoDescriptorSystem, EditInfoRuntimeMixin):
     @contract(usage_key="BlockUsageLocator | BlockKey", course_entry_override="CourseEnvelope | None")
     def _load_item(self, usage_key, course_entry_override=None, **kwargs):
+        """
+        Instantiate the xblock, fetching it either from the cache or from the structure.
+
+        :param course_entry_override: the course_info with the course_key to use (defaults to cached)
+        """
         # usage_key is either a UsageKey or just the block_key. if a usage_key,
         if isinstance(usage_key, BlockUsageLocator):
@@ -90,21 +95,25 @@ class CachingDescriptorSystem(MakoDescriptorSystem, EditInfoRuntimeMixin):
                 raise ItemNotFoundError
             else:
                 block_key = BlockKey.from_usage_key(usage_key)
+                version_guid = self.course_entry.course_key.version_guid
         else:
             block_key = usage_key

             course_info = course_entry_override or self.course_entry
             course_key = course_info.course_key
+            version_guid = course_key.version_guid

-        if course_entry_override:
-            structure_id = course_entry_override.structure.get('_id')
-        else:
-            structure_id = self.course_entry.structure.get('_id')
+        # look in cache
+        cached_module = self.modulestore.get_cached_block(course_key, version_guid, block_key)
+        if cached_module:
+            return cached_module

         json_data = self.get_module_data(block_key, course_key)
         class_ = self.load_block_type(json_data.get('block_type'))
-        return self.xblock_from_json(class_, course_key, block_key, json_data, course_entry_override, **kwargs)
+        block = self.xblock_from_json(class_, course_key, block_key, json_data, course_entry_override, **kwargs)
+        self.modulestore.cache_block(course_key, version_guid, block_key, block)
+        return block

     @contract(block_key=BlockKey, course_key=CourseLocator)
     def get_module_data(self, block_key, course_key):
...
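The _load_item change above is a classic cache-aside read path: consult the
bulk-op cache first, and only on a miss build the XBlock from its JSON and then
cache it. A minimal sketch of that flow under that reading, with dict-backed
stand-ins; none of these helper names are edx-platform APIs:

```python
# Hypothetical stand-ins illustrating the cache-aside flow in _load_item.
_cache = {}

def expensive_instantiate(block_key):
    """Stub for xblock_from_json: pretend this parses JSON and builds an XBlock."""
    return "block:%s" % (block_key,)

def load_item(version_guid, block_key):
    cached = _cache.get((version_guid, block_key))   # get_cached_block
    if cached is not None:
        return cached                                # hit: skip instantiation
    block = expensive_instantiate(block_key)         # miss: do the work once
    _cache[(version_guid, block_key)] = block        # cache_block
    return block

assert load_item("v1", "chapter1") is load_item("v1", "chapter1")  # second call hits the cache
```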
@@ -117,6 +117,8 @@ class SplitBulkWriteRecord(BulkOpsRecord):
         self.index = None
         self.structures = {}
         self.structures_in_db = set()
+        # dict(version_guid, dict(BlockKey, module))
+        self.modules = defaultdict(dict)
         self.definitions = {}
         self.definitions_in_db = set()
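The cache is keyed first by structure version and then by block, so blocks
loaded against different versions of the same course never collide, and
defaultdict(dict) means the first write for a new version needs no setup. A
small sketch of that two-level shape, with placeholder values rather than real
ids:

```python
from collections import defaultdict

# version_guid -> {block_key: block}
modules = defaultdict(dict)

version_guid = "518ca23d477bab6a8e8c9a55"  # placeholder, not a real structure id
block_key = ("html", "intro")              # stands in for the BlockKey namedtuple

modules[version_guid][block_key] = "<XBlock instance>"       # cache_block
assert modules[version_guid].get(block_key) is not None      # get_cached_block: hit
assert modules["some-other-version"].get(block_key) is None  # different version: miss
```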
@@ -309,6 +311,38 @@ class SplitBulkWriteMixin(BulkOperationsMixin):
         else:
             self.db_connection.insert_structure(structure)

+    def get_cached_block(self, course_key, version_guid, block_id):
+        """
+        If there's an active bulk operation, see if it has cached this module and, if so, return it.
+        Don't do any extra work to fetch the ones which are not cached; make the caller do the work and cache them.
+        """
+        bulk_write_record = self._get_bulk_ops_record(course_key)
+        if bulk_write_record.active:
+            return bulk_write_record.modules[version_guid].get(block_id, None)
+        else:
+            return None
+
+    def cache_block(self, course_key, version_guid, block_key, block):
+        """
+        The counterpart to :meth:`get_cached_block`, which caches a block.
+        Returns nothing.
+        """
+        bulk_write_record = self._get_bulk_ops_record(course_key)
+        if bulk_write_record.active:
+            bulk_write_record.modules[version_guid][block_key] = block
+
+    def decache_block(self, course_key, version_guid, block_key):
+        """
+        Write operations which don't write from blocks must remove the target blocks from the cache.
+        Returns nothing.
+        """
+        bulk_write_record = self._get_bulk_ops_record(course_key)
+        if bulk_write_record.active:
+            try:
+                del bulk_write_record.modules[version_guid][block_key]
+            except KeyError:
+                pass
+
     def get_definition(self, course_key, definition_guid):
         """
         Retrieve a single definition by id, respecting the active bulk operation
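All three helpers are no-ops unless a bulk operation is active, so reads
outside bulk_operations hit the database exactly as before. A hedged sketch of
that gating, with a simplified stand-in for the bulk-ops record (names
abbreviated from the diff):

```python
from collections import defaultdict

class FakeBulkOpsRecord(object):
    """Simplified stand-in for SplitBulkWriteRecord."""
    def __init__(self, active):
        self.active = active
        self.modules = defaultdict(dict)

def get_cached_block(record, version_guid, block_key):
    # Only consult the cache while a bulk operation is active.
    return record.modules[version_guid].get(block_key) if record.active else None

def cache_block(record, version_guid, block_key, block):
    if record.active:
        record.modules[version_guid][block_key] = block

inactive = FakeBulkOpsRecord(active=False)
cache_block(inactive, "v1", "seq1", "a block")
assert get_cached_block(inactive, "v1", "seq1") is None  # no-op outside a bulk op

active = FakeBulkOpsRecord(active=True)
cache_block(active, "v1", "seq1", "a block")
assert get_cached_block(active, "v1", "seq1") == "a block"
```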
@@ -637,8 +671,9 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
     @contract(course_entry=CourseEnvelope, block_keys="list(BlockKey)", depth="int | None")
     def _load_items(self, course_entry, block_keys, depth=0, lazy=True, **kwargs):
         '''
-        Load & cache the given blocks from the course. Prefetch down to the
-        given depth. Load the definitions into each block if lazy is False;
+        Load & cache the given blocks from the course. May return the blocks in any order.
+
+        Load the definitions into each block if lazy is False;
         otherwise, use the lazy definition placeholder.
         '''
         runtime = self._get_cache(course_entry.structure['_id'])
@@ -646,6 +681,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
             runtime = self.create_runtime(course_entry, lazy)
             self._add_cache(course_entry.structure['_id'], runtime)
             self.cache_items(runtime, block_keys, course_entry.course_key, depth, lazy)
+
         return [runtime.load_item(block_key, course_entry, **kwargs) for block_key in block_keys]

     def _get_cache(self, course_version_guid):
@@ -1364,6 +1400,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
             # if the parent hadn't been previously changed in this bulk transaction, indicate that it's
             # part of the bulk transaction
             self.version_block(parent, user_id, new_structure['_id'])
+            self.decache_block(parent_usage_key.course_key, new_structure['_id'], block_id)

         # db update
         self.update_structure(parent_usage_key.course_key, new_structure)
@@ -1957,6 +1994,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
         parent_block['edit_info']['edited_by'] = user_id
         parent_block['edit_info']['previous_version'] = parent_block['edit_info']['update_version']
         parent_block['edit_info']['update_version'] = new_id
+        self.decache_block(usage_locator.course_key, new_id, parent_block_key)

         self._remove_subtree(BlockKey.from_usage_key(usage_locator), new_blocks)
...
@@ -1229,7 +1229,6 @@ class CombinedOpenEndedV1Descriptor():
         return {'task_xml': parse_task('task'), 'prompt': parse('prompt'), 'rubric': parse('rubric')}

     def definition_to_xml(self, resource_fs):
         '''Return an xml element representing this definition.'''
-
         elt = etree.Element('combinedopenended')
...