Commit 237b02b1 by Kevin Falcone

Merge pull request #2587 from edx/max/idempotent-mongo_3_0

Max/idempotent mongo 3 0
parents 2aec6db3 e650a267
# Manages a mongo cluster.
# To set up a new mongo cluster, make sure you've configured MONGO_RS_CONFIG
# as used by mongo_replica_set in the mongo_3_0 role.
#
# If you are initializing a cluster, your command might look like:
# ansible-playbook mongo_3_0.yml -i 10.1.1.1,10.2.2.2,10.3.3.3 -e@/path/to/edx.yml -e@/path/to/ed.yml
# If you just want to deploy an updated replica set config, you can run
# ansible-playbook mongo_3_0.yml -i any-cluster-ip -e@/path/to/edx.yml -e@/path/to/ed.yml --tags configure_replica_set
#
# ADDING A NEW CLUSTER MEMBER
# If you are adding a member to a cluster, you must be sure that the new machine is not first in your inventory
# ansible-playbook mongo_3_0.yml -i 10.1.1.1,10.2.2.2,new-machine-ip -e@/path/to/edx.yml -e@/path/to/ed.yml
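#
# For reference, a minimal -e@ vars file for this playbook might look like the
# following (hostnames and credentials are illustrative examples, not defaults):
# MONGO_CLUSTERED: true
# MONGO_ADMIN_USER: admin
# MONGO_ADMIN_PASSWORD: changeme
# MONGO_RS_CONFIG:
#   members:
#     - host: 10.1.1.1
#     - host: 10.2.2.2
#     - host: 10.3.3.3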
- name: Deploy MongoDB
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 3
serial: "{{ serial_count }}"
roles:
- aws
- mongo_3_0
......
#!/usr/bin/env python
DOCUMENTATION = """
---
module: mongodb_replica_set
short_description: Modify replica set config.
description:
- Modify replica set config, including modifying/adding/removing members from a replica set,
changing replica set options, and initiating the replica set if necessary.
Uses replSetReconfig and replSetInitiate.
version_added: "1.9"
author:
- Max Rothman
- Feanil Patel
options:
rs_host:
description:
- The hostname or ip of a server already in the mongo cluster.
required: false
default: 'localhost'
rs_port:
description:
- The port to connect to mongo on.
required: false
default: 27017
username:
description:
- The username of the mongo user to connect as.
required: false
password:
description:
- The password to use when authenticating.
required: false
auth_database:
description:
- The database to authenticate against.
required: false
force:
description: Whether to pass the "force" option to replSetReconfig.
For more details, see `<https://docs.mongodb.org/manual/reference/command/replSetReconfig/>`
required: false
default: false
rs_config:
description: A `replica set configuration document <https://docs.mongodb.org/manual/reference/replica-configuration/>`.
This structure can be a valid document, but this module can also manage some details for you:
- members can have separate ``host`` and ``port`` properties. ``port`` defaults to 27017.
To override this, provide a ``host`` like ``somehost:27017``.
- ``_id`` is automatically managed if not provided
- members' ``_id`` are automatically managed
- ``version`` is automatically incremented
required: true
"""
EXAMPLES = '''
- name: Basic example
mongodb_replica_set:
username: root
password: password
rs_config:
members:
- host: some.host
- host: other.host
port: 27018
hidden: true
- name: Fully specify a whole document
mongodb_replica_set:
username: admin
password: password
rs_config:
_id: myReplicaSetName
version: 5
members:
- _id: 1
host: some.host:27017
- _id: 2
host: other.host:27017
hidden: true
'''
# Magic import
from ansible.module_utils.basic import *
try:
from pymongo import MongoClient
from pymongo.errors import OperationFailure
from bson import json_util
except ImportError:
pymongo_found = False
else:
pymongo_found = True
import json, copy
from urllib import quote_plus
########### Mongo API calls ###########
def get_replset():
# Not using `replSetGetConfig` because it's not supported in MongoDB 2.x.
try:
rs_config = client.local.system.replset.find_one()
except OperationFailure as e:
return None
return rs_config
def initialize_replset(rs_config):
try:
client.admin.command("replSetInitiate", rs_config)
except OperationFailure as e:
module.fail_json(msg="Failed to initiate replSet: {}".format(e.message))
def reconfig_replset(rs_config):
try:
client.admin.command("replSetReconfig", rs_config, force=module.params['force'])
except OperationFailure as e:
module.fail_json(msg="Failed to reconfigure replSet: {}".format(e.message))
def get_rs_config_id():
try:
return client.admin.command('getCmdLineOpts')['parsed']['replication']['replSetName']
except (OperationFailure, KeyError) as e:
module.fail_json(msg=("Unable to get replSet name. "
"Was mongod started with --replSet, "
"or was replication.replSetName set in the config file? Error: ") + e.message)
########### Helper functions ###########
def set_member_ids(members, old_members=None):
'''
Set the _id property of members who don't already have one.
Prefer the _id of the "matching" member from `old_members`.
'''
#Add a little padding to ensure we don't run out of IDs
available_ids = set(range(len(members)*2))
available_ids -= {m['_id'] for m in members if '_id' in m}
if old_members is not None:
available_ids -= {m['_id'] for m in old_members}
available_ids = list(sorted(available_ids, reverse=True))
for member in members:
if '_id' not in member:
if old_members is not None:
match = get_matching_member(member, old_members)
member['_id'] = match['_id'] if match is not None else available_ids.pop()
else:
member['_id'] = available_ids.pop()
def get_matching_member(member, members):
'''Return the rs_member from `members` that "matches" `member` (currently matched on host)'''
match = [m for m in members if m['host'] == member['host']]
return match[0] if len(match) > 0 else None
def members_match(new, old):
"Compare 2 lists of members, discounting their `_id`s and matching on hostname"
if len(new) != len(old):
return False
for old_member in old:
new_member = get_matching_member(old_member, new).copy()
#Don't compare on _id
new_member.pop('_id', None)
old_member = old_member.copy()
old_member.pop('_id', None)
if old_member != new_member:
return False
return True
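# Illustrative example (hypothetical hosts): _id values are ignored, the
# remaining member fields must match exactly.
#   members_match([{'host': 'foo.bar:27017'}],
#                 [{'_id': 0, 'host': 'foo.bar:27017'}])                  # True
#   members_match([{'host': 'foo.bar:27017', 'hidden': True}],
#                 [{'_id': 0, 'host': 'foo.bar:27017'}])                  # False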
def fix_host_port(rs_config):
'''Fix host, port to host:port'''
if 'members' in rs_config:
if not isinstance(rs_config['members'], list):
module.fail_json(msg='rs_config.members must be a list')
for member in rs_config['members']:
if ':' not in member['host']:
member['host'] = '{}:{}'.format(member['host'], member.get('port', 27017))
if 'port' in member:
del member['port']
def check_config_subset(old_config, new_config):
'''
Compares the old config (what we pass in to Mongo) to the new config (returned from Mongo).
It is assumed that old_config will be a subset of new_config because Mongo tracks many more
details about the replica set and the members in a replica set that we don't track in our
secure repo.
'''
for k in old_config:
if k == 'members':
matches = is_member_subset(old_config['members'],new_config['members'])
if not matches: return False
else:
if old_config[k] != new_config[k]: return False
return True
def is_member_subset(old_members,new_members):
'''
Compares the member list of a replica set configuration as specified (old_members)
to what Mongo has returned (new_members). If it finds anything in old_members that
does not match new_members, it will return False. new_members is allowed to contain
extra information that is not reflected in old_members because we do not necessarily
track all of mongo's internal data in the config.
'''
for member in old_members:
for k in member:
if member[k] != new_members[member['_id']][k]: return False
return True
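# Illustrative example of the subset relationship these helpers check
# (hypothetical values): the config we pass in omits details Mongo fills in,
# and the comparison still succeeds.
#   passed_in = {'_id': 'rs0', 'version': 2,
#                'members': [{'_id': 0, 'host': 'foo.bar:27017'}]}
#   reported  = {'_id': 'rs0', 'version': 2,
#                'settings': {'heartbeatTimeoutSecs': 10},
#                'members': [{'_id': 0, 'host': 'foo.bar:27017', 'votes': 1}]}
#   check_config_subset(passed_in, reported)  # -> True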
def update_replset(rs_config):
changed = False
old_rs_config = get_replset()
fix_host_port(rs_config) #fix host, port to host:port
#Decide whether we need to initialize
if old_rs_config is None:
changed = True
if '_id' not in rs_config:
rs_config['_id'] = get_rs_config_id() #Errors if no replSet specified to mongod
set_member_ids(rs_config['members']) #Noop if all _ids are set
#Don't set the version, it'll auto-set
initialize_replset(rs_config)
else:
old_rs_config_scalars = {k:v for k,v in old_rs_config.items() if not isinstance(v, (list, dict))}
rs_config_scalars = {k:v for k,v in rs_config.items() if not isinstance(v, (list, dict))}
if '_id' not in rs_config_scalars and '_id' in old_rs_config_scalars:
# _id is going to be managed, don't compare on it
del old_rs_config_scalars['_id']
if 'version' not in rs_config and 'version' in old_rs_config_scalars:
# version is going to be managed, don't compare on it
del old_rs_config_scalars['version']
# Special comparison to test whether 2 rs_configs are "equivalent"
# We can't simply use == because of special logic in `members_match()`
# 1. Compare the scalars (i.e. non-collections)
# 2. Compare the "settings" dict
# 3. Compare the members dicts using `members_match()`
# Since the only nested structures in the rs_config spec are "members" and "settings",
# if all of the above 3 match, the structures are equivalent.
if rs_config_scalars != old_rs_config_scalars \
or rs_config.get('settings') != old_rs_config.get('settings') \
or not members_match(rs_config['members'], old_rs_config['members']):
changed=True
if '_id' not in rs_config:
rs_config['_id'] = old_rs_config['_id']
if 'version' not in rs_config:
#Using manual increment to prevent race condition
rs_config['version'] = old_rs_config['version'] + 1
set_member_ids(rs_config['members'], old_rs_config['members']) #Noop if all _ids are set
reconfig_replset(rs_config)
#Validate it worked
if changed:
changed_rs_config = get_replset()
if not check_config_subset(rs_config, changed_rs_config):
module.fail_json(msg="Failed to validate that the replica set was changed", new_config=changed_rs_config, config=rs_config)
module.exit_json(changed=changed, config=rs_config, new_config=changed_rs_config)
######### Client making stuff #########
def get_mongo_uri(host, port, username, password, auth_database):
mongo_uri = 'mongodb://'
if username and password:
mongo_uri += "{}:{}@".format(*map(quote_plus, [username,password]))
mongo_uri += "{}:{}".format(quote_plus(host), port)
if auth_database:
mongo_uri += "/{}".format(quote_plus(auth_database))
return mongo_uri
def primary_client(some_host, some_port, username, password, auth_database):
'''
Given a member of a replica set, find out who the primary is
and provide a client that is connected to the primary for running
commands.
Because this function attempts to find the primary of your replica set,
it can fail and throw PyMongo exceptions. You should handle these and
fall back to get_client.
'''
client = get_client(some_host, some_port, username, password, auth_database)
# This can fail (throws OperationFailure), in which case code will need to
# fall back to using get_client since there either is no primary, or we can't
# know it for some reason.
status = client.admin.command("replSetGetStatus")
# Find out who the primary is.
rs_primary = filter(lambda member: member['stateStr']=='PRIMARY', status['members'])[0]
primary_host, primary_port = rs_primary['name'].split(':')
# Connect to the primary if this is not the primary.
if primary_host != some_host or int(primary_port) != some_port:
client.close()
new_uri = get_mongo_uri(primary_host, primary_port, username, password, auth_database)
client = MongoClient(new_uri)
return client
def get_client(some_host, some_port, username, password, auth_database):
'''
Connects to the given host. Does not have any of the logic of primary_client,
so is safer to use when handling an uninitialized replica set or some other
mongo instance that requires special logic.
This function connects to Mongo, and as such can throw any of the PyMongo
exceptions.
'''
mongo_uri = get_mongo_uri(some_host, some_port, username, password, auth_database)
client = MongoClient(mongo_uri)
return client
################ Main ################
def validate_args():
arg_spec = dict(
username = dict(required=False, type='str'),
password = dict(required=False, type='str'),
auth_database = dict(required=False, type='str'),
rs_host = dict(required=False, type='str', default="localhost"),
rs_port = dict(required=False, type='int', default=27017),
rs_config = dict(required=True, type='dict'),
force = dict(required=False, type='bool', default=False),
)
module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=False)
username = module.params.get('username')
password = module.params.get('password')
if (username and not password) or (password and not username):
module.fail_json(msg="Must provide both username and password or neither.")
return module
if __name__ == '__main__':
module = validate_args()
if not pymongo_found:
module.fail_json(msg="The python pymongo module is not installed.")
username = module.params.get('username')
password = module.params.get('password')
auth_database = module.params.get('auth_database')
rs_host = module.params['rs_host']
rs_port = module.params['rs_port']
try:
client = primary_client(rs_host, rs_port, username, password, auth_database)
except OperationFailure:
client = get_client(rs_host, rs_port, username, password, auth_database)
update_replset(module.params['rs_config'])
#!/usr/bin/env python
DOCUMENTATION = """
---
module: mongo_rs_member
short_description: Modify replica set config for a member.
description:
- Modify/Add/Remove members from a replica set. Member management as done by rs.reconfig().
version_added: "1.9"
author: Feanil Patel
options:
rs_host:
description:
- The hostname or ip of a server already in the mongo cluster.
required: false
default: 'localhost'
rs_port:
description:
- The port to connect to mongo on.
required: false
default: 27017
host:
description:
- The hostname of the member we want to modify.
required: false
default: 'localhost'
port:
description:
- The port of the member we want to modify.
required: false
default: 27017
username:
description:
- The username of the mongo user to connect as.
required: false
password:
description:
- The password to use when authenticating.
required: false
auth_database:
description:
- The database to authenticate against.
required: false
priority:
description:
- The priority of the member in the replica set. Ignored if
`hidden` is set to true.
required: false
hidden:
description:
- Whether or not the member is hidden.
required: false
state:
description:
- Whether or not the member exists in the replica set. The member
will be added or removed to reach this state.
choices:
- present
- absent
default: present
required: false
"""
EXAMPLES = '''
- name: Get status for the stage cluster
mongo_rs_member:
rs_host: some.mongo
rs_port: 27017
host: localhost
port: 27017
username: root
password: password
register: mongo_config
'''
# Magic import
from ansible.module_utils.basic import *
try:
from pymongo import MongoClient
from pymongo.errors import OperationFailure
from bson import json_util
except ImportError:
pymongo_found = False
else:
pymongo_found = True
import json
from urllib import quote_plus
def get_mongo_uri(host, port, username, password, auth_database):
mongo_uri = 'mongodb://'
if username and password:
mongo_uri += "{}:{}@".format(*map(quote_plus, [username,password]))
mongo_uri += "{}:{}".format(quote_plus(host),port)
if auth_database:
mongo_uri += "/{}".format(quote_plus(auth_database))
return mongo_uri
def get_replset(module, client):
# Get the current config using `replSetGetConfig`
rs_config = client.admin.command("replSetGetConfig")
if 'config' not in rs_config:
module.fail_json(msg="Failed to get replset config from {}".format(primary_host), response=rs_config)
rs_config = rs_config['config']
return rs_config
def reconfig_replset(module, client, rs_config):
# Push the updated config; callers are expected to bump rs_config['version'] first.
try:
client.admin.command("replSetReconfig", rs_config)
except OperationFailure as e:
module.fail_json(msg="Failed to reconfigure replSet: {}".format(e.message))
def primary_client(module, some_host, some_port, username, password, auth_database):
"""
Given a member of a replica set, find out who the primary is
and provide a client that is connected to the primary for running
commands.
"""
mongo_uri = get_mongo_uri(some_host, some_port, username, password, auth_database)
client = MongoClient(mongo_uri)
try:
status = client.admin.command("replSetGetStatus")
except OperationFailure as e:
module.fail_json(msg="Failed to get replica set status from host({}): {}".format(some_host, e.message))
# Find out who the primary is.
rs_primary = filter(lambda member: member['stateStr']=='PRIMARY', status['members'])[0]
primary_host, primary_port = rs_primary['name'].split(':')
# Connect to the primary if this is not the primary.
if primary_host != some_host or int(primary_port) != some_port:
client.close()
new_uri = get_mongo_uri(primary_host, primary_port, username, password, auth_database)
client = MongoClient(new_uri)
return client
def remove_member(module, client, rs_config):
host = module.params.get('host')
port = module.params.get('port')
existing_member_names = [ member['host'] for member in rs_config['members'] ]
dead_member_name = "{}:{}".format(host,port)
if dead_member_name in existing_member_names:
# Member is in config and needs to be removed.
new_member_list = filter(lambda member: member['host'] != dead_member_name, rs_config['members'])
rs_config['members'] = new_member_list
rs_config['version'] += 1
reconfig_replset(module, client, rs_config)
# Get status again.
status = client.admin.command("replSetGetConfig")['config']
# Validate against the freshly fetched config that the member is gone.
existing_member_names = [ member['host'] for member in status['members'] ]
if dead_member_name not in existing_member_names:
module.exit_json(changed=True, config=rs_config)
else:
module.fail_json(msg="Failed to remove member from the replica set.", config=rs_config)
else:
# Member is not in the list.
module.exit_json(
changed=False,
msg="Member({}) was not in the replica set.".format(dead_member_name),
)
def upsert_member(module, client, rs_config):
rs_host = module.params.get('rs_host')
rs_port = module.params.get('rs_port')
host = module.params.get('host')
port = module.params.get('port')
username = module.params.get('username')
password = module.params.get('password')
priority = module.params.get('priority')
hidden = module.params.get('hidden')
state = module.params.get('state')
changed = False
# Ignore priority when member should be hidden.
# Hidden members have to be set to priority 0.
if hidden:
priority = 0
existing_member_names = [ member['host'] for member in rs_config['members'] ]
new_member_name = "{}:{}".format(host,port)
# See if member is already in the replica set
if new_member_name in existing_member_names:
# Make sure its config is the same. Grab a pointer to the current settings
# inside of the rs_config.
current_settings = filter(lambda member: member['host'] == new_member_name, rs_config['members'])[0]
need_to_update = False
# If the priority param is set and different from upstream.
if (priority is not None and current_settings['priority'] != priority):
current_settings['priority'] = priority
need_to_update = True
if current_settings['hidden'] != hidden:
current_settings['hidden'] = hidden
need_to_update = True
if need_to_update:
rs_config['version'] += 1
# This is a bit yucky since rs_config is updated because we update the dictionary
# that it references.
reconfig_replset(module, client, rs_config)
changed = True
else:
# Member exists and no settings need to be updated
module.exit_json(changed=False, config=rs_config)
else:
# New Member doesn't exist and we need to add it.
# First we build the config we need.
new_member_id = max([ member['_id'] for member in rs_config['members']]) + 1
new_member_config = { 'host': new_member_name , '_id': new_member_id }
if priority is not None:
new_member_config['priority'] = priority
if hidden is not None:
new_member_config['hidden'] = hidden
# Update the config.
rs_config['members'].append(new_member_config)
rs_config['version'] += 1
reconfig_replset(module, client, rs_config)
changed = True
# Get status again.
status = client.admin.command("replSetGetConfig")['config']
# Validate against the freshly fetched config that the member is present.
existing_member_names = [ member['host'] for member in status['members'] ]
if new_member_name in existing_member_names:
module.exit_json(changed=changed, config=rs_config)
else:
module.fail_json(msg="Failed to validate that the member we were modifying is in the replica set.", config=rs_config)
def main():
arg_spec = dict(
rs_host=dict(required=False, type='str', default="localhost"),
rs_port=dict(required=False, type='int', default=27017),
host=dict(required=False, type='str', default="localhost"),
port=dict(required=False, type='int', default=27017),
username=dict(required=False, type='str'),
password=dict(required=False, type='str'),
auth_database=dict(required=False, type='str'),
priority=dict(required=False, type='float'),
hidden=dict(required=False, type='bool', default=False),
state=dict(required=False, type="str", default="present"),
)
module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=False)
if not pymongo_found:
module.fail_json(msg="The python pymongo module is not installed.")
rs_host = module.params.get('rs_host')
rs_port = module.params.get('rs_port')
username = module.params.get('username')
password = module.params.get('password')
auth_database = module.params.get('auth_database')
state = module.params.get('state')
if (username and not password) or (password and not username):
module.fail_json(msg="Must provide both username and password or neither.")
client = primary_client(module, rs_host, rs_port, username, password, auth_database)
rs_config = get_replset(module, client)
if state == 'absent':
remove_member(module, client, rs_config)
elif state == 'present':
upsert_member(module, client, rs_config)
else:
module.fail_json(msg="Don't know about state: {}".format(state))
if __name__ == '__main__':
main()
@@ -6,8 +6,13 @@ module: mongodb_rs_status
short_description: Get the status of a replica set of a mongo cluster.
description:
- Get the status of the replica set of a mongo cluster. Provide the same info as rs.status() or replSetGetStatus.
Returns a status dictionary key containing the replica set JSON document from Mongo, or no status key if there
was no status found. This usually indicates that either Mongo was configured to run without replica sets or
that the replica set has not been initiated yet.
version_added: "1.9"
author: Feanil Patel
author:
- Feanil Patel
- Kevin Falcone
options:
host:
description:
@@ -35,17 +40,26 @@ options:
EXAMPLES = '''
- name: Get status for the stage cluster
mongo_status:
mongodb_rs_status:
host: localhost:27017
username: root
password: password
register: mongo_status
# Note that you're testing for the presence of the status member of the dictionary, not the contents of it
- debug: msg="I don't have a replica set available"
when: mongo_status.status is not defined
- debug: var=mongo_status.status
'''
# Magic import
from ansible.module_utils.basic import *
try:
from pymongo import MongoClient
from pymongo.errors import OperationFailure
from bson import json_util
except ImportError:
pymongo_found = False
@@ -89,7 +103,22 @@ def main():
mongo_uri += '/{}'.format(quote_plus(auth_database))
client = MongoClient(mongo_uri)
# This checks to see if you have a replSetName configured
# This generally means that /etc/mongod.conf has been changed
# from the default to use a replica set and mongo has been
# restarted to use it.
try:
repl_set = client.admin.command('getCmdLineOpts')['parsed']['replication']['replSetName']
except (OperationFailure, KeyError):
module.exit_json(changed=False)
# If mongo was started with a repl_set, it is safe to run replSetGetStatus
if repl_set:
status = client.admin.command("replSetGetStatus")
else:
module.exit_json(changed=False)
# This converts the bson into a python dictionary that ansible's standard
# jsonify function can process and output without throwing errors on bson
......
@@ -7,8 +7,6 @@ mongo_version: 3.0.8
mongo_port: "27017"
mongo_extra_conf: ''
mongo_key_file: '/etc/mongodb_key'
mongo_repl_set: "{{ MONGO_REPL_SET }}"
mongo_cluster_members: []
pymongo_version: 2.8.1
mongo_data_dir: "{{ COMMON_DATA_DIR }}/mongo"
@@ -45,9 +43,13 @@ MONGO_CLUSTERED: !!null
MONGO_BIND_IP: 127.0.0.1
MONGO_REPL_SET: "rs0"
# Cluster member configuration
# Fed directly into mongodb_replica_set module
MONGO_RS_CONFIG:
members: []
# Storage engine options in 3.0: "mmapv1" or "wiredTiger"
MONGO_STORAGE_ENGINE: "mmapv1"
##
# WiredTiger takes a number of optional configuration settings
# which can be defined as a yaml structure in your secure configuration.
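# A hypothetical secure-config override might look like (values are examples,
# not recommendations):
# MONGO_STORAGE_ENGINE_OPTIONS:
#   wiredTiger:
#     engineConfig:
#       cacheSizeGB: 4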
@@ -56,31 +58,8 @@ MONGO_STORAGE_ENGINE_OPTIONS: !!null
mongo_logpath: "{{ mongo_log_dir }}/mongodb.log"
mongo_dbpath: "{{ mongo_data_dir }}/mongodb"
# If the system is running out of an Amazon Web Services
# cloudformation stack, this group name can be used to pull out
# the name of the stack the mongo server resides in.
mongo_aws_stack_name: "tag_aws_cloudformation_stack-name_"
# In environments that do not require durability (devstack / Jenkins)
# you can disable the journal to reduce disk usage
mongo_enable_journal: true
# We can do regular backups of MongoDB to S3.
MONGO_S3_BACKUP: false
# backup cron time:
MONGO_S3_BACKUP_HOUR: "*/12"
MONGO_S3_BACKUP_DAY: "*"
# override with a secondary node that will perform backups
MONGO_S3_BACKUP_NODE: "undefined"
# back up data into a specific S3 bucket
MONGO_S3_BACKUP_BUCKET: "undefined"
# temporary directory mongodump will use to store data
MONGO_S3_BACKUP_TEMPDIR: "{{ mongo_data_dir }}"
MONGO_S3_NOTIFY_EMAIL: "dummy@example.com"
mongo_s3_logfile: "{{ COMMON_LOG_DIR }}/mongo/s3-mongo-backup.log"
MONGO_S3_S3CMD_CONFIG: "{{ COMMON_DATA_DIR }}/mongo-s3-backup.s3cfg"
MONGO_S3_BACKUP_AWS_ACCESS_KEY: !!null
MONGO_S3_BACKUP_AWS_SECRET_KEY: !!null
MONGO_LOG_SERVERSTATUS: true
MONGO_HEARTBEAT_TIMEOUT_SECS: 10
---
- name: check to see that MongoDB 2.4 is not installed
stat: path=/etc/init.d/mongodb
register: mongodb_needs_upgrade
- name: verify 2.4 not installed
fail: msg="MongoDB 2.4 is currently installed and cannot be safely upgraded in a clustered configuration. Please read http://docs.mongodb.org/manual/release-notes/2.6-upgrade/#upgrade-considerations and upgrade to 2.6."
when: mongodb_needs_upgrade.stat.exists and MONGO_CLUSTERED
- name: check to see if MongoDB is already installed
stat: path=/etc/init.d/mongod
register: mongodb_already_installed
when: MONGO_CLUSTERED
#- name: verify MongoDB not installed (clustered upgrades)
# fail: msg="MongoDB is currently installed and cannot be safely upgraded in a clustered configuration. FIXME"
# when: mongodb_already_installed.stat.exists and MONGO_CLUSTERED
- name: remove mongo 2.4 if present
apt: >
pkg=mongodb-10gen
state=absent purge=yes
force=yes
when: mongodb_needs_upgrade.stat.exists and not MONGO_CLUSTERED
- name: remove old init script for hugepages
file: >
path=/etc/init.d/disable-transparent-hugepages
state=absent
tags:
- "hugepages"
- "install"
- "install:system-requirements"
- name: disable transparent huge pages on startup (http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/)
copy: >
src=disable-transparent-hugepages.conf
dest=/etc/init/disable-transparent-hugepages.conf
owner=root
group=root
mode=0755
copy:
src: disable-transparent-hugepages.conf
dest: /etc/init/disable-transparent-hugepages.conf
owner: root
group: root
mode: 0755
tags:
- "hugepages"
- "install"
- "install:system-requirements"
- name: disable transparent huge pages
service: >
name=disable-transparent-hugepages
enabled=yes
state=started
service:
name: disable-transparent-hugepages
enabled: yes
state: started
tags:
- "hugepages"
- "install"
- "install:system-requirements"
- name: install python pymongo for mongo_user ansible module
pip: >
name=pymongo state=present
version={{ pymongo_version }} extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
pip:
name: pymongo
state: present
version: "{{ pymongo_version }}"
extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}"
tags:
- "install"
- "install:system-requirements"
- name: add the mongodb signing key
apt_key: >
id={{ MONGODB_APT_KEY }}
keyserver={{ MONGODB_APT_KEYSERVER }}
state=present
apt_key:
id: "{{ MONGODB_APT_KEY }}"
keyserver: "{{ MONGODB_APT_KEYSERVER }}"
state: present
tags:
- "install"
- "install:system-requirements"
- name: add the mongodb repo to the sources list
apt_repository: >
repo='{{ MONGODB_REPO }}'
state=present
apt_repository:
repo: "{{ MONGODB_REPO }}"
state: present
tags:
- "install"
- "install:system-requirements"
- name: install mongo server and recommends
apt: >
apt:
pkg={{','.join(mongodb_debian_pkgs)}}
state=present install_recommends=yes
force=yes update_cache=yes
pkg: "{{ item }}"
state: present
install_recommends: yes
force: yes
update_cache: yes
with_items: mongodb_debian_pkgs
tags:
- install
- install:system-requirements
- install:app-requirements
- mongo_packages
- name: create mongo dirs
file: >
path="{{ item }}" state=directory
owner="{{ mongo_user }}"
group="{{ mongo_user }}"
file:
path: "{{ item }}"
state: directory
owner: "{{ mongo_user }}"
group: "{{ mongo_user }}"
with_items:
- "{{ mongo_data_dir }}"
- "{{ mongo_dbpath }}"
- "{{ mongo_log_dir }}"
- "{{ mongo_journal_dir }}"
tags:
- "install"
- "install:configuration"
- name: add serverStatus logging script
template:
src="log-mongo-serverStatus.sh.j2"
dest="{{ COMMON_BIN_DIR }}/log-mongo-serverStatus.sh"
owner="{{ mongo_user }}"
group="{{ mongo_user }}"
mode=0700
src: "log-mongo-serverStatus.sh.j2"
dest: "{{ COMMON_BIN_DIR }}/log-mongo-serverStatus.sh"
owner: "{{ mongo_user }}"
group: "{{ mongo_user }}"
mode: 0700
when: MONGO_LOG_SERVERSTATUS
tags:
- "install"
- "install:configuration"
- name: add serverStatus logging script to cron
cron:
name: "mongostat logging job"
name: mongostat logging job
job: /edx/bin/log-mongo-serverStatus.sh >> {{ mongo_log_dir }}/serverStatus.log 2>&1
become: yes
when: MONGO_LOG_SERVERSTATUS
tags:
- "install"
- "install:configuration"
- name: stop mongod service
service: name=mongod state=stopped
# This will error when run on a new replica set, so we ignore_errors
# and connect anonymously next.
- name: determine if there is a replica set already
mongodb_rs_status:
host: "{{ ansible_default_ipv4['address'] }}"
username: "{{ MONGO_ADMIN_USER }}"
password: "{{ MONGO_ADMIN_PASSWORD }}"
run_once: true
register: authed_replica_set_already_configured
when: MONGO_CLUSTERED
ignore_errors: true
tags:
- "install"
- "install:configuration"
- name: Try checking the replica set with no user/pass in case this is a new box
mongodb_rs_status:
host: "{{ ansible_default_ipv4['address'] }}"
run_once: true
register: unauthed_replica_set_already_configured
when: MONGO_CLUSTERED and authed_replica_set_already_configured.failed is defined
ignore_errors: true
tags:
- "install"
- "install:configuration"
# We use these in the templates but also to control a whole bunch of logic
- name: set facts that default to not initializing a replica set
set_fact:
initialize_replica_set: false
skip_replica_set: false
tags:
- "install"
- "install:configuration"
- "update_mongod_conf"
# If either auth or unauthed access comes back with a replica set, we
# do not want to initialize one. Since initialization requires a bunch
# of extra templating and restarting, it's not something we want to do on
# existing boxes.
- name: track if you have a replica set
set_fact:
initialize_replica_set: true
skip_replica_set: true
when: MONGO_CLUSTERED
and authed_replica_set_already_configured.status is not defined
and unauthed_replica_set_already_configured.status is not defined
tags:
- "install"
- "install:configuration"
- name: move mongodb to {{ mongo_data_dir }}
command: >
mv /var/lib/mongodb {{ mongo_data_dir}}/.
creates={{ mongo_data_dir }}/mongodb
- name: warn about unconfigured replica sets
debug: msg="You do not appear to have a Replica Set configured, deploying one for you"
when: MONGO_CLUSTERED and initialize_replica_set
tags:
- "install"
- "install:configuration"
- name: copy mongodb key file
copy: >
content="{{ MONGO_CLUSTER_KEY }}"
dest={{ mongo_key_file }}
mode=0600
owner=mongodb
group=mongodb
copy:
content: "{{ MONGO_CLUSTER_KEY }}"
dest: "{{ mongo_key_file }}"
mode: 0600
owner: mongodb
group: mongodb
when: MONGO_CLUSTERED
notify: restart mongo
tags:
- "install"
- "install:configuration"
- "mongodb_key"
# If skip_replica_set is true, this template will not contain a replica set stanza
# because of the fact above.
- name: copy configuration template
template: src=mongodb-standalone.conf.j2 dest=/etc/mongod.conf backup=yes
template:
src: mongod.conf.j2
dest: /etc/mongod.conf
backup: yes
notify: restart mongo
register: update_mongod_conf
tags:
- "install"
- "install:configuration"
- "update_mongod_conf"
- name: install logrotate configuration
template: src=mongo_logrotate.j2 dest=/etc/logrotate.d/hourly/mongo
template:
src: mongo_logrotate.j2
dest: /etc/logrotate.d/hourly/mongo
tags:
- "install"
- "install:configuration"
- "logrotate"
- name: start mongo service
service: name=mongod state=started
- name: restart mongo service if we changed our configuration
service:
name: mongod
state: restarted
when: update_mongod_conf.changed
tags:
- "install"
- "install:configuration"
- name: wait for mongo server to start
wait_for: port=27017 delay=2
- name: drop super user script
template: src="create_root.js.j2" dest="/tmp/create_root.js"
wait_for:
port: 27017
delay: 2
tags:
- "install"
- "install:configuration"
- name: create super user with js
shell: >
/usr/bin/mongo admin /tmp/create_root.js
# We only try passwordless superuser creation when
# we're initializing the replica set and need to use
# the localhost exemption to create a user who will be
# able to initialize the replica set.
# We can only create the users on one machine, the one
# where we will initialize the replica set. If we
# create users on multiple hosts, then they will fail
# to come into the replica set.
- name: create super user
mongodb_user:
name: "{{ MONGO_ADMIN_USER }}"
password: "{{ MONGO_ADMIN_PASSWORD }}"
database: admin
roles: root
when: initialize_replica_set
run_once: true
tags:
- "manage"
- "manage:db"
- name: delete super user script
file: path=/tmp/create_root.js state=absent
- name: create super user
mongodb_user:
name: "{{ MONGO_ADMIN_USER }}"
password: "{{ MONGO_ADMIN_PASSWORD }}"
login_user: "{{ MONGO_ADMIN_USER }}"
login_password: "{{ MONGO_ADMIN_PASSWORD }}"
database: admin
roles: root
run_once: true
when: not initialize_replica_set
tags:
- "manage"
- "manage:db"
# Now that the localhost exemption has been used to create the superuser, we need
# to add replica set to our configuration. This will never happen if we detected
# a replica set in the 'determine if there is a replica set already' task.
- name: Unset our skip initializing replica set fact so that mongod.conf gets a replica set
set_fact:
skip_replica_set: false
when: MONGO_CLUSTERED and initialize_replica_set
tags:
- "install"
- "install:configuration"
- name: copy clustered configuration template
template: src=mongodb-clustered.conf.j2 dest=/etc/mongod.conf backup=yes
when: MONGO_CLUSTERED
- name: re-copy configuration template with replica set enabled
template:
src: mongod.conf.j2
dest: /etc/mongod.conf
backup: yes
when: MONGO_CLUSTERED and initialize_replica_set
tags:
- "install"
- "install:configuration"
- name: restart mongo service
service: name=mongod state=restarted
when: MONGO_CLUSTERED
service:
name: mongod
state: restarted
when: MONGO_CLUSTERED and initialize_replica_set
tags:
- "install"
- "install:configuration"
- name: wait for mongo server to start
wait_for: port=27017 delay=2
when: MONGO_CLUSTERED
- name: Create the file to initialize the mongod replica set
template: src=repset_init.js.j2 dest=/tmp/repset_init.js
when: MONGO_CLUSTERED and MONGO_PRIMARY == ansible_default_ipv4["address"]
- name: Initialize the replication set
shell: >
/usr/bin/mongo /tmp/repset_init.js
when: MONGO_CLUSTERED and MONGO_PRIMARY == ansible_default_ipv4["address"]
- name: delete repset script
file: path=/tmp/repset_init.js state=absent
when: MONGO_CLUSTERED and MONGO_PRIMARY == ansible_default_ipv4["address"]
- name: Create the file to add hosts to the mongod replica set
template: src=repset_add_secondaries.js.j2 dest=/tmp/repset_add_secondaries.js
when: MONGO_CLUSTERED and MONGO_PRIMARY == ansible_default_ipv4["address"]
- name: Initialize the replication set
shell: >
/usr/bin/mongo /tmp/repset_add_secondaries.js
when: MONGO_CLUSTERED and MONGO_PRIMARY == ansible_default_ipv4["address"]
- name: delete repset script
file: path=/tmp/repset_add_secondaries.js state=absent
when: MONGO_CLUSTERED and MONGO_PRIMARY == ansible_default_ipv4["address"]
wait_for:
port: 27017
delay: 2
when: MONGO_CLUSTERED and initialize_replica_set
tags:
- "install"
- "install:configuration"
- name: ensure all members are in replica set
mongodb_rs_member:
rs_host: "{{ MONGO_PRIMARY }}"
rs_port: 27017
- name: configure replica set
mongodb_replica_set:
username: "{{ MONGO_ADMIN_USER }}"
password: "{{ MONGO_ADMIN_PASSWORD }}"
rs_config: "{{ MONGO_RS_CONFIG }}"
run_once: true
register: replset_status
when: MONGO_CLUSTERED
tags:
- configure_replica_set
tags:
- "manage"
- "manage:db"
- "configure_replica_set"
# During initial replica set configuration, it can take a few seconds to vote
# a primary and for all members to reflect that status. During that window,
# user creation or other writes can fail. The best wait/check seems to be repeatedly
# checking the replica set status until we see a PRIMARY in the results.
- name: Wait for the replica set to update and (if needed) elect a primary
mongodb_rs_status:
host: "{{ ansible_default_ipv4['address'] }}"
port: 27017
username: "{{ MONGO_ADMIN_USER }}"
password: "{{ MONGO_ADMIN_PASSWORD }}"
state: "{{ item.state }}"
hidden: "{{ item.hidden }}"
with_items: mongo_cluster_members
when: item.name == ansible_default_ipv4["address"]
- name: create a mongodb user
mongodb_user: >
database={{ item.database }}
login_user={{ MONGO_ADMIN_USER }}
login_password={{ MONGO_ADMIN_PASSWORD }}
name={{ item.user }}
password="{{ item.password }}"
roles={{ item.roles }}
state=present
register: status
until: status.status is defined and 'PRIMARY' in status.status.members|map(attribute='stateStr')|list
retries: 5
delay: 2
run_once: true
when: MONGO_CLUSTERED
tags:
- configure_replica_set
tags:
- "manage"
- "manage:db"
- name: create mongodb users in a replica set
mongodb_user:
database: "{{ item.database }}"
login_database: 'admin'
login_user: "{{ MONGO_ADMIN_USER }}"
login_password: "{{ MONGO_ADMIN_PASSWORD }}"
name: "{{ item.user }}"
password: "{{ item.password }}"
roles: "{{ item.roles }}"
state: present
replica_set: "{{ MONGO_REPL_SET }}"
with_items: MONGO_USERS
when: not MONGO_CLUSTERED
- name: create a mongodb user
mongodb_user: >
database={{ item.database }}
login_user={{ MONGO_ADMIN_USER }}
login_password={{ MONGO_ADMIN_PASSWORD }}
name={{ item.user }}
password="{{ item.password }}"
roles={{ item.roles }}
state=present
replica_set={{ mongo_repl_set }}
run_once: true
when: MONGO_CLUSTERED
tags:
- "manage"
- "manage:db"
- name: create mongodb users in a standalone configuration
mongodb_user:
database: "{{ item.database }}"
login_user: "{{ MONGO_ADMIN_USER }}"
login_password: "{{ MONGO_ADMIN_PASSWORD }}"
name: "{{ item.user }}"
password: "{{ item.password }}"
roles: "{{ item.roles }}"
state: present
with_items: MONGO_USERS
when: MONGO_CLUSTERED and MONGO_PRIMARY == ansible_default_ipv4["address"]
when: not MONGO_CLUSTERED
tags:
- "manage"
- "manage:db"
{% set lb = '{' %}
{% set rb = '}' %}
#!/bin/bash
#
exec > >(tee "{{ mongo_s3_logfile }}")
exec 2>&1
shopt -s extglob
usage() {
cat<<EO
A script that will run a mongodump of all databases, tar/gz them
and upload to an s3 bucket, will send mail to
{{ MONGO_S3_NOTIFY_EMAIL }} on failures.
Usage: $PROG
-v add verbosity (set -x)
-n echo what will be done
-h this
EO
}
while getopts "vhn" opt; do
case $opt in
v)
set -x
shift
;;
h)
usage
exit 0
;;
n)
noop="echo Would have run: "
shift
;;
esac
done
if [[ "{{ MONGO_S3_BACKUP }}" != "true" ]]; then
# only run if explicitly enabled
exit
fi
MYNODENAME=$(echo "db.isMaster()" | mongo -u "{{ COMMON_MONGO_READ_ONLY_USER }}" -p"{{ COMMON_MONGO_READ_ONLY_PASS }}" "{{ EDXAPP_MONGO_DB_NAME }}" | grep \"me\" | cut -f 2 -d ':' | sed -e 's/ //' -e 's/,//' -e 's/"//');
if [[ "$MYNODENAME" != "{{ MONGO_S3_BACKUP_NODE }}" ]]; then
# only run on specified node
exit
fi
ISSECONDARY=$(echo "db.isMaster()" | mongo -u "{{ COMMON_MONGO_READ_ONLY_USER }}" -p"{{ COMMON_MONGO_READ_ONLY_PASS }}" "{{ EDXAPP_MONGO_DB_NAME }}" | grep secondary | cut -f 2 -d ':' | sed -e 's/ //' -e 's/,//' -e 's/"//')
if [[ "$ISSECONDARY" != "true" ]]; then
# backups should be run on secondary server
exit;
fi
MONGOOUTDIR=$(mktemp -d -p {{ MONGO_S3_BACKUP_TEMPDIR }})
DATESTAMP=$(date +'%Y-%m-%d-%H%M')
$noop mongodump --host {{ EDXAPP_MONGO_HOSTS[0] }} -u "{{ COMMON_MONGO_READ_ONLY_USER }}" -p"{{ COMMON_MONGO_READ_ONLY_PASS }}" -o $MONGOOUTDIR
cd $MONGOOUTDIR
$noop tar zcf {{ MONGO_S3_BACKUP_TEMPDIR }}/{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-$DATESTAMP.tar.gz .
cd {{ MONGO_S3_BACKUP_TEMPDIR }}
$noop s3cmd -c {{ MONGO_S3_S3CMD_CONFIG }} sync {{ MONGO_S3_BACKUP_TEMPDIR }}/{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-$DATESTAMP.tar.gz "s3://{{ MONGO_S3_BACKUP_BUCKET }}/mongo/"
rm -rf $MONGOOUTDIR {{ MONGO_S3_BACKUP_TEMPDIR }}/{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-$DATESTAMP.tar.gz
[default]
access_key = {{ MONGO_S3_BACKUP_AWS_ACCESS_KEY }}
secret_key = {{ MONGO_S3_BACKUP_AWS_SECRET_KEY }}
bucket_location = US
@@ -29,9 +29,9 @@ systemLog:
{% endif %}
logRotate: {{ mongo_logrotate }}
{% if MONGO_CLUSTERED %}
{% if MONGO_CLUSTERED and not skip_replica_set %}
replication:
replSetName: {{ mongo_repl_set }}
replSetName: {{ MONGO_REPL_SET }}
security:
keyFile: {{ mongo_key_file }}
@@ -39,9 +39,8 @@ security:
{% endif %}
net:
{% if MONGO_CLUSTERED is not defined %}
{## Bind to all ips(default) if in clustered mode,
# otherwise only to the specified local ip.
: #}
{# Bind to all ips(default) if in clustered mode,
otherwise only to the specified local ip. #}
bindIp: {{ MONGO_BIND_IP }}
{% endif %}
port: {{ mongo_port }}
......
# Do not edit this file directly, it was generated by ansible
# mongodb.conf
storage:
# Where to store the data.
dbPath: {{ mongo_dbpath }}
# Storage Engine
engine: {{ MONGO_STORAGE_ENGINE }}
# Enable journaling, http://www.mongodb.org/display/DOCS/Journaling
journal:
{% if mongo_enable_journal %}
enabled: true
{% else %}
enabled: false
{% endif %}
{% if MONGO_STORAGE_ENGINE_OPTIONS %}
{{ MONGO_STORAGE_ENGINE_OPTIONS | to_nice_yaml }}
{% endif %}
systemLog:
#where to log
destination: file
path: "{{ mongo_logpath }}"
{% if mongo_logappend %}
logAppend: true
{% else %}
logAppend: false
{% endif %}
logRotate: {{ mongo_logrotate }}
net:
{% if MONGO_CLUSTERED is not defined %}
{## Bind to all ips(default) if in clustered mode,
# otherwise only to the specified local ip.
#}
bindIp: {{ MONGO_BIND_IP }}
{% endif %}
port: {{ mongo_port }}
{{ mongo_extra_conf }}
conn = new Mongo();
db = conn.getDB("admin");
db.auth( '{{ MONGO_ADMIN_USER }}', '{{ MONGO_ADMIN_PASSWORD }}');
{# Generate a list of hosts if no cluster members are given. Otherwise use the
hosts provided in the variable.
#}
{%- if mongo_cluster_members|length == 0 -%}
{%- set hosts = [] -%}
{%- set all_mongo_hosts = [] -%}
{%- do all_mongo_hosts.extend(groups.tag_role_mongo) -%}
{%- do all_mongo_hosts.extend(groups.tag_group_mongo) -%}
{%- for name in group_names -%}
{%- if name.startswith(mongo_aws_stack_name) -%}
{%- for host in all_mongo_hosts -%}
{%- if host in groups[name] -%}
{% do hosts.append("ip-" + host.replace('.','-') + ":" + mongo_port) %}
{%- endif -%}
{%- endfor -%}
{%- endif -%}
{%- endfor -%}
{%- else -%}
{%- set hosts = mongo_cluster_members|map(attribute="name") -%}
{%- endif -%}
// Check that the cluster is ok
if(!rs.status().ok) { throw 'Mongo Cluster Not Ok';}
// Check that the cluster has the right number of members
// and add them if we are the master
if(rs.isMaster().ismaster) {
if(rs.status().members.length!={{ hosts|length }}) {
{% for host in hosts %}
{%- if host != ansible_default_ipv4["address"] -%}
rs.add({_id: {{ loop.index }}, host: '{{ host }}'});
{%- endif -%}
{% endfor %}
sleep(30000);
// Check status and member count, throw an exception if not ok
if(!rs.status().ok) { throw 'Mongo Cluster Not Ok';}
if(rs.status().members.length!={{ hosts|length }}) {
throw 'Could not add all members to cluster'
}
}
}
conn = new Mongo();
db = conn.getDB("admin");
db.auth( '{{ MONGO_ADMIN_USER }}', '{{ MONGO_ADMIN_PASSWORD }}');
{%- if MONGO_PRIMARY == ansible_default_ipv4["address"] -%}
{# Generate a list of hosts if no cluster members are given. Otherwise use the
hosts provided in the variable.
#}
{%- if mongo_cluster_members|length == 0 -%}
{%- set hosts = [] -%}
{%- set all_mongo_hosts = [] -%}
{%- do all_mongo_hosts.extend(groups.tag_role_mongo) -%}
{%- do all_mongo_hosts.extend(groups.tag_group_mongo) -%}
{%- for name in group_names -%}
{%- if name.startswith(mongo_aws_stack_name) -%}
{%- for host in all_mongo_hosts -%}
{%- if host in groups[name] -%}
{% do hosts.append("ip-" + host.replace('.','-') + ":" + mongo_port) %}
{%- endif -%}
{%- endfor -%}
{%- endif -%}
{%- endfor -%}
{%- else -%}
{%- set hosts = mongo_cluster_members|map(attribute="name") -%}
{%- endif -%}
config = {_id: '{{ mongo_repl_set }}', members: [{% for host in hosts %}
{%- if host == ansible_default_ipv4["address"] -%}
{_id: {{ loop.index }}, host: '{{ host }}'}
{%- endif -%}
{% endfor %}
],
settings: { heartbeatTimeoutSecs: {{ MONGO_HEARTBEAT_TIMEOUT_SECS }} }};
rs.initiate(config)
sleep(30000)
rs.slaveOk()
printjson(rs.status())
// Check that the cluster is ok
if(!rs.status().ok) { throw 'Mongo Cluster Not Ok';}
{%- endif -%}
# Tests for mongodb_replica_set ansible module
#
# How to run these tests:
# 1. move this file to playbooks/library
# 2. rename mongodb_replica_set to mongodb_replica_set.py
# 3. python test_mongodb_replica_set.py
import mongodb_replica_set as mrs
import unittest, mock
from urllib import quote_plus
from copy import deepcopy
class TestNoPatchingMongodbReplicaSet(unittest.TestCase):
def test_host_port_transformation(self):
unfixed = {
'members': [
{'host': 'foo.bar'},
{'host': 'bar.baz', 'port': 1234},
{'host': 'baz.bing:54321'}
]}
fixed = {
'members': [
{'host': 'foo.bar:27017'},
{'host': 'bar.baz:1234'},
{'host': 'baz.bing:54321'}
]}
mrs.fix_host_port(unfixed)
self.assertEqual(fixed, unfixed)
fixed_2 = deepcopy(fixed)
mrs.fix_host_port(fixed_2)
self.assertEqual(fixed, fixed_2)
def test_member_id_managed(self):
new = [
{'host': 'foo.bar', '_id': 1},
{'host': 'bar.baz'},
{'host': 'baz.bing'}
]
old = [
{'host': 'baz.bing', '_id': 0}
]
fixed = deepcopy(new)
mrs.set_member_ids(fixed, old)
#test that each id is unique
unique_ids = {m['_id'] for m in fixed}
self.assertEqual(len(unique_ids), len(new))
#test that it "prefers" the "matching" one in old_members
self.assertEqual(fixed[0]['_id'], new[0]['_id'])
self.assertEqual(fixed[2]['_id'], old[0]['_id'])
self.assertIn('_id', fixed[1])
def test_mongo_uri_escaped(self):
host = username = password = auth_database = ':!@#$%/'
port = 1234
uri = mrs.get_mongo_uri(host=host, port=port, username=username, password=password, auth_database=auth_database)
self.assertEqual(uri, "mongodb://{un}:{pw}@{host}:{port}/{db}".format(
un=quote_plus(username), pw=quote_plus(password),
host=quote_plus(host), port=port, db=quote_plus(auth_database),
))
rs_id = 'a replset id'
members = [
{'host': 'foo.bar:1234'},
{'host': 'bar.baz:4321'},
]
old_rs_config = {
'version': 1,
'_id': rs_id,
'members': [
{'_id': 0, 'host': 'foo.bar:1234',},
{'_id': 1, 'host': 'bar.baz:4321',},
]
}
new_rs_config = {
'version': 2,
'_id': rs_id,
'members': [
{'_id': 0, 'host': 'foo.bar:1234',},
{'_id': 1, 'host': 'bar.baz:4321',},
{'_id': 2, 'host': 'baz.bing:27017',},
]
}
rs_config = {
'members': [
{'host': 'foo.bar', 'port': 1234,},
{'host': 'bar.baz', 'port': 4321,},
{'host': 'baz.bing', 'port': 27017,},
]
}
def init_replset_mock(f):
get_replset_initialize_mock = mock.patch.object(mrs, 'get_replset',
side_effect=(None, deepcopy(new_rs_config)))
initialize_replset_mock = mock.patch.object(mrs, 'initialize_replset')
return get_replset_initialize_mock(initialize_replset_mock(f))
def update_replset_mock(f):
get_replset_update_mock = mock.patch.object(mrs, 'get_replset',
side_effect=(deepcopy(old_rs_config), deepcopy(new_rs_config)))
reconfig_replset_mock = mock.patch.object(mrs, 'reconfig_replset')
return get_replset_update_mock(reconfig_replset_mock(f))
@mock.patch.object(mrs, 'get_rs_config_id', return_value=rs_id)
@mock.patch.object(mrs, 'client', create=True)
@mock.patch.object(mrs, 'module', create=True)
class TestPatchingMongodbReplicaSet(unittest.TestCase):
@update_replset_mock
def test_version_managed(self, _1, _2, module, *args):
# Version is incremented automatically on update
mrs.update_replset(deepcopy(rs_config))
new_version = module.exit_json.call_args[1]['config']['version']
self.assertEqual(old_rs_config['version'], new_version - 1)
@init_replset_mock
def test_doc_id_managed_on_initialize(self, _1, _2, module, *args):
#old_rs_config provided by init_replset_mock via mrs.get_replset().
#That returns None on the first call, so it falls through to get_rs_config_id(),
#which is also mocked.
mrs.update_replset(deepcopy(rs_config))
new_id = module.exit_json.call_args[1]['config']['_id']
self.assertEqual(rs_id, new_id)
@update_replset_mock
def test_doc_id_managed_on_update(self, _1, _2, module, *args):
#old_rs_config provided by update_replset_mock via mrs.get_replset()
mrs.update_replset(deepcopy(rs_config))
new_id = module.exit_json.call_args[1]['config']['_id']
self.assertEqual(rs_id, new_id)
@init_replset_mock
def test_initialize_if_necessary(self, initialize_replset, _2, module, *args):
mrs.update_replset(deepcopy(rs_config))
self.assertTrue(initialize_replset.called)
#self.assertFalse(reconfig_replset.called)
@update_replset_mock
def test_reconfig_if_necessary(self, reconfig_replset, _2, module, *args):
mrs.update_replset(deepcopy(rs_config))
self.assertTrue(reconfig_replset.called)
#self.assertFalse(initialize_replset.called)
@update_replset_mock
def test_not_changed_when_docs_match(self, _1, _2, module, *args):
rs_config = {'members': members} #This way the docs "match", but aren't identical
mrs.update_replset(deepcopy(rs_config))
changed = module.exit_json.call_args[1]['changed']
self.assertFalse(changed)
@update_replset_mock
def test_ignores_magic_given_full_doc(self, _1, _2, module, _3, get_rs_config_id, *args):
mrs.update_replset(deepcopy(new_rs_config))
new_doc = module.exit_json.call_args[1]['config']
self.assertEqual(new_doc, new_rs_config)
self.assertFalse(get_rs_config_id.called)
if __name__ == '__main__':
unittest.main()