Commit 015b8bce by Max Rothman Committed by Kevin Falcone

OPS-967: Make mongo_3_0 role idempotent

Remove check for mongo 2.4 since this is the mongo_3_0 role
This was probably carried over from the mongo 2 role. I see no reason
why mongo 2.x and mongo 3 couldn't coexist on the same machine.

Remove old hugepages init script check
This was probably added while we were still iterating on our mongo 3
deployment, but it should no longer be necessary.

Clean up ansible syntax

Don't move the old mongo data dirs
This actually skips for edX because we provision machines that already
have {{mongo_data_dir}} mounted on an external disk.  However, for
non-edX use, this could fail if you turn on WiredTiger since it will
move mmapv1 files into the mongo_data_dir and then mongo will fail to
start because it has been told to use WiredTiger.

Don't make this a serial play
We usually run this on 3 machines, so serial: 3 was equivalent to
ansible's default of "run everything in parallel" but this causes
problems if you ever run it on 4 like we do for prod envs.
In addition, this prevents run_once from working properly, and there are
a number of things we only want to do on one machine (like creating a
superuser).
parent 54b820fc
......@@ -2,9 +2,6 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 3
serial: "{{ serial_count }}"
roles:
- aws
- mongo_3_0
......
......@@ -7,8 +7,6 @@ mongo_version: 3.0.8
mongo_port: "27017"
mongo_extra_conf: ''
mongo_key_file: '/etc/mongodb_key'
mongo_repl_set: "{{ MONGO_REPL_SET }}"
mongo_cluster_members: []
pymongo_version: 2.8.1
mongo_data_dir: "{{ COMMON_DATA_DIR }}/mongo"
......@@ -45,9 +43,13 @@ MONGO_CLUSTERED: !!null
MONGO_BIND_IP: 127.0.0.1
MONGO_REPL_SET: "rs0"
# Cluster member configuration
# Fed directly into mongodb_replica_set module
MONGO_RS_CONFIG:
members: []
# Storage engine options in 3.0: "mmapv1" or "wiredTiger"
MONGO_STORAGE_ENGINE: "mmapv1"
##
# WiredTiger takes a number of optional configuration settings
# which can be defined as a yaml structure in your secure configuration.
......@@ -56,31 +58,8 @@ MONGO_STORAGE_ENGINE_OPTIONS: !!null
mongo_logpath: "{{ mongo_log_dir }}/mongodb.log"
mongo_dbpath: "{{ mongo_data_dir }}/mongodb"
# If the system is running out of an Amazon Web Services
# cloudformation stack, this group name can used to pull out
# the name of the stack the mongo server resides in.
mongo_aws_stack_name: "tag_aws_cloudformation_stack-name_"
# In environments that do not require durability (devstack / Jenkins)
# you can disable the journal to reduce disk usage
mongo_enable_journal: true
# We can do regular backups of MongoDB to S3.
MONGO_S3_BACKUP: false
# backup cron time:
MONGO_S3_BACKUP_HOUR: "*/12"
MONGO_S3_BACKUP_DAY: "*"
# override with a secondary node that will perform backups
MONGO_S3_BACKUP_NODE: "undefined"
# back up data into a specific S3 bucket
MONGO_S3_BACKUP_BUCKET: "undefined"
# temporary directory mongodump will use to store data
MONGO_S3_BACKUP_TEMPDIR: "{{ mongo_data_dir }}"
MONGO_S3_NOTIFY_EMAIL: "dummy@example.com"
mongo_s3_logfile: "{{ COMMON_LOG_DIR }}/mongo/s3-mongo-backup.log"
MONGO_S3_S3CMD_CONFIG: "{{ COMMON_DATA_DIR }}/mongo-s3-backup.s3cfg"
MONGO_S3_BACKUP_AWS_ACCESS_KEY: !!null
MONGO_S3_BACKUP_AWS_SECRET_KEY: !!null
MONGO_LOG_SERVERSTATUS: true
MONGO_HEARTBEAT_TIMEOUT_SECS: 10
---
- name: check to see that MongoDB 2.4 is not installed
stat: path=/etc/init.d/mongodb
register: mongodb_needs_upgrade
- name: verify 2.4 not installed
fail: msg="MongoDB 2.4 is currently installed and cannot be safely upgraded in a clustered configuration. Please read http://docs.mongodb.org/manual/release-notes/2.6-upgrade/#upgrade-considerations and upgrade to 2.6."
when: mongodb_needs_upgrade.stat.exists and MONGO_CLUSTERED
- name: check to see if MongoDB is already installed
stat: path=/etc/init.d/mongod
register: mongodb_already_installed
when: MONGO_CLUSTERED
# - name: check to see if MongoDB is already installed
# stat:
# path: /etc/init.d/mongod
# register: mongodb_already_installed
# when: MONGO_CLUSTERED
#- name: verify MongoDB not installed (clustered upgrades)
# fail: msg="MongoDB is currently installed and cannot be safely upgraded in a clustered configuration. FIXME"
# when: mongodb_already_installed.stat.exists and MONGO_CLUSTERED
- name: remove mongo 2.4 if present
apt: >
pkg=mongodb-10gen
state=absent purge=yes
force=yes
when: mongodb_needs_upgrade.stat.exists and not MONGO_CLUSTERED
- name: remove old init script for hugepages
file: >
path=/etc/init.d/disable-transparent-hugepages
state=absent
tags:
- "hugepages"
- "install"
- "install:system-requirements"
# - name: verify MongoDB not installed (clustered upgrades)
# fail: msg="MongoDB is currently installed and cannot be safely upgraded in a clustered configuration. FIXME"
# when: mongodb_already_installed.stat.exists and MONGO_CLUSTERED
- name: disable transparent huge pages on startup (http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/)
copy: >
src=disable-transparent-hugepages.conf
dest=/etc/init/disable-transparent-hugepages.conf
owner=root
group=root
mode=0755
copy:
src: disable-transparent-hugepages.conf
dest: /etc/init/disable-transparent-hugepages.conf
owner: root
group: root
mode: 0755
tags:
- "hugepages"
- "install"
- "install:system-requirements"
- name: disable transparent huge pages
service: >
name=disable-transparent-hugepages
enabled=yes
state=started
service:
name: disable-transparent-hugepages
enabled: yes
state: started
tags:
- "hugepages"
- "install"
- "install:system-requirements"
- name: install python pymongo for mongo_user ansible module
pip: >
name=pymongo state=present
version={{ pymongo_version }} extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
pip:
name: pymongo
state: present
version: "{{ pymongo_version }}"
extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}"
- name: add the mongodb signing key
apt_key: >
id={{ MONGODB_APT_KEY }}
keyserver={{ MONGODB_APT_KEYSERVER }}
state=present
apt_key:
id: "{{ MONGODB_APT_KEY }}"
keyserver: "{{ MONGODB_APT_KEYSERVER }}"
state: present
- name: add the mongodb repo to the sources list
apt_repository: >
repo='{{ MONGODB_REPO }}'
state=present
apt_repository:
repo: "{{ MONGODB_REPO }}"
state: present
#Will this break if it actually upgrades?
- name: install mongo server and recommends
apt: >
apt:
pkg={{','.join(mongodb_debian_pkgs)}}
state=present install_recommends=yes
force=yes update_cache=yes
apt:
pkg: "{{ item }}"
state: present
install_recommends: yes
force: yes
update_cache: yes
with_items: mongodb_debian_pkgs
tags:
- install
- install:system-requirements
- mongo_packages
- name: create mongo dirs
file: >
path="{{ item }}" state=directory
owner="{{ mongo_user }}"
group="{{ mongo_user }}"
file:
path: "{{ item }}"
state: directory
owner: "{{ mongo_user }}"
group: "{{ mongo_user }}"
with_items:
- "{{ mongo_data_dir }}"
- "{{ mongo_dbpath }}"
......@@ -94,138 +77,105 @@
- name: add serverStatus logging script
template:
src="log-mongo-serverStatus.sh.j2"
dest="{{ COMMON_BIN_DIR }}/log-mongo-serverStatus.sh"
owner="{{ mongo_user }}"
group="{{ mongo_user }}"
mode=0700
src: "log-mongo-serverStatus.sh.j2"
dest: "{{ COMMON_BIN_DIR }}/log-mongo-serverStatus.sh"
owner: "{{ mongo_user }}"
group: "{{ mongo_user }}"
mode: 0700
when: MONGO_LOG_SERVERSTATUS
- name: add serverStatus logging script to cron
cron:
name: "mongostat logging job"
name: mongostat logging job
job: /edx/bin/log-mongo-serverStatus.sh >> {{ mongo_log_dir }}/serverStatus.log 2>&1
become: yes
when: MONGO_LOG_SERVERSTATUS
#### DANGER WILL ROBINSON ####
- name: stop mongod service
service: name=mongod state=stopped
- name: move mongodb to {{ mongo_data_dir }}
command: >
mv /var/lib/mongodb {{ mongo_data_dir}}/.
creates={{ mongo_data_dir }}/mongodb
service:
name: mongod
state: stopped
- name: copy mongodb key file
copy: >
content="{{ MONGO_CLUSTER_KEY }}"
dest={{ mongo_key_file }}
mode=0600
owner=mongodb
group=mongodb
copy:
content: "{{ MONGO_CLUSTER_KEY }}"
dest: "{{ mongo_key_file }}"
mode: 0600
owner: mongodb
group: mongodb
when: MONGO_CLUSTERED
- name: copy configuration template
template: src=mongodb-standalone.conf.j2 dest=/etc/mongod.conf backup=yes
template:
src: mongod.conf.j2
dest: /etc/mongod.conf
backup: yes
notify: restart mongo
- name: install logrotate configuration
template: src=mongo_logrotate.j2 dest=/etc/logrotate.d/hourly/mongo
template:
src: mongo_logrotate.j2
dest: /etc/logrotate.d/hourly/mongo
tags:
- "install"
- "install:configuration"
- "logrotate"
- name: start mongo service
service: name=mongod state=started
service:
name: mongod
state: started
- name: wait for mongo server to start
wait_for: port=27017 delay=2
- name: drop super user script
template: src="create_root.js.j2" dest="/tmp/create_root.js"
- name: create super user with js
shell: >
/usr/bin/mongo admin /tmp/create_root.js
- name: delete super user script
file: path=/tmp/create_root.js state=absent
- name: copy custered configuration template
template: src=mongodb-clustered.conf.j2 dest=/etc/mongod.conf backup=yes
when: MONGO_CLUSTERED
wait_for:
port: 27017
delay: 2
######### END DANGER #########
- name: restart mongo service
service: name=mongod state=restarted
- name: configure replica set
mongodb_replica_set:
username: "{{ MONGO_ADMIN_USER }}"
password: "{{ MONGO_ADMIN_PASSWORD }}"
rs_config: "{{ MONGO_RS_CONFIG }}"
run_once: true
register: replset_status
when: MONGO_CLUSTERED
- name: wait for mongo server to start
wait_for: port=27017 delay=2
- name: Choose primary
set_fact:
mongo_rs_primary: >
{{
(replset_status.config.members
| selectattr('stateStr', 'equalto', 'PRIMARY')
| attr('name') | replace(':'+string(mongo_port), '')
)
}}
when: MONGO_CLUSTERED
- name: Create the file to initialize the mongod replica set
template: src=repset_init.js.j2 dest=/tmp/repset_init.js
when: MONGO_CLUSTERED and MONGO_PRIMARY == ansible_default_ipv4["address"]
- name: Initialize the replication set
shell: >
/usr/bin/mongo /tmp/repset_init.js
when: MONGO_CLUSTERED and MONGO_PRIMARY == ansible_default_ipv4["address"]
- name: delete repset script
file: path=/tmp/repset_init.js state=absent
when: MONGO_CLUSTERED and MONGO_PRIMARY == ansible_default_ipv4["address"]
- name: Create the file to add hosts to the mongod replica set
template: src=repset_add_secondaries.js.j2 dest=/tmp/repset_add_secondaries.js
when: MONGO_CLUSTERED and MONGO_PRIMARY == ansible_default_ipv4["address"]
- name: Initialize the replication set
shell: >
/usr/bin/mongo /tmp/repset_add_secondaries.js
when: MONGO_CLUSTERED and MONGO_PRIMARY == ansible_default_ipv4["address"]
- name: delete repset script
file: path=/tmp/repset_add_secondaries.js state=absent
when: MONGO_CLUSTERED and MONGO_PRIMARY == ansible_default_ipv4["address"]
- name: ensure all members are in replica set
mongodb_rs_member:
rs_host: "{{ MONGO_PRIMARY }}"
rs_port: 27017
host: "{{ ansible_default_ipv4['address'] }}"
port: 27017
username: "{{ MONGO_ADMIN_USER }}"
password: "{{ MONGO_ADMIN_PASSWORD }}"
state: "{{ item.state }}"
hidden: "{{ item.hidden }}"
with_items: mongo_cluster_members
when: item.name == ansible_default_ipv4["address"]
- name: create a mongodb user
mongodb_user: >
database={{ item.database }}
login_user={{ MONGO_ADMIN_USER }}
login_password={{ MONGO_ADMIN_PASSWORD }}
name={{ item.user }}
password="{{ item.password }}"
roles={{ item.roles }}
state=present
with_items: MONGO_USERS
- name: Set fake primary host
set_fact:
mongo_rs_primary: localhost
when: not MONGO_CLUSTERED
- name: create a mongodb user
mongodb_user: >
database={{ item.database }}
login_user={{ MONGO_ADMIN_USER }}
login_password={{ MONGO_ADMIN_PASSWORD }}
name={{ item.user }}
password="{{ item.password }}"
roles={{ item.roles }}
state=present
replica_set={{ mongo_repl_set }}
- name: create super user
mongodb_user:
name: "{{ MONGO_ADMIN_USER }}"
password: "{{ MONGO_ADMIN_PASSWORD }}"
database: admin
roles: root
run_once: true
delegate_to: "{{ mongo_rs_primary }}"
- name: create mongodb users
mongodb_user:
database: "{{ item.database }}"
login_user: "{{ MONGO_ADMIN_USER }}"
login_password: "{{ MONGO_ADMIN_PASSWORD }}"
name: "{{ item.user }}"
password: "{{ item.password }}"
roles: "{{ item.roles }}"
state: present
with_items: MONGO_USERS
when: MONGO_CLUSTERED and MONGO_PRIMARY == ansible_default_ipv4["address"]
run_once: true
delegate_to: "{{ mongo_rs_primary }}"
{% set lb = '{' %}
{% set rb = '}' %}
#!/bin/bash
#
exec > >(tee "{{ mongo_s3_logfile }}")
exec 2>&1
shopt -s extglob
usage() {
cat<<EO
A script that will run a mongodump of all databases, tar/gz them
and upload to an s3 bucket, will send mail to
{{ MONGO_S3_NOTIFY_EMAIL }} on failures.
Usage: $PROG
-v add verbosity (set -x)
-n echo what will be done
-h this
EO
}
while getopts "vhn" opt; do
case $opt in
v)
set -x
shift
;;
h)
usage
exit 0
;;
n)
noop="echo Would have run: "
shift
;;
esac
done
if [[ "{{ MONGO_S3_BACKUP }}" != "true" ]]; then
# only run if explicitly enabled
exit
fi
MYNODENAME=$(echo "db.isMaster()" | mongo -u "{{ COMMON_MONGO_READ_ONLY_USER }}" -p"{{ COMMON_MONGO_READ_ONLY_PASS }}" "{{ EDXAPP_MONGO_DB_NAME }}" | grep \"me\" | cut -f 2 -d ':' | sed -e 's/ //' -e 's/,//' -e 's/"//');
if [[ "$MYNODENAME" != "{{ MONGO_S3_BACKUP_NODE }}" ]]; then
# only run on specified node
exit
fi
ISSECONDARY=$(echo "db.isMaster()" | mongo -u "{{ COMMON_MONGO_READ_ONLY_USER }}" -p"{{ COMMON_MONGO_READ_ONLY_PASS }}" "{{ EDXAPP_MONGO_DB_NAME }}" | grep secondary | cut -f 2 -d ':' | sed -e 's/ //' -e 's/,//' -e 's/"//')
if [[ "$ISSECONDARY" != "true" ]]; then
# backups should be run on secondary server
exit;
fi
MONGOOUTDIR=$(mktemp -d -p {{ MONGO_S3_BACKUP_TEMPDIR }})
DATESTAMP=$(date +'%Y-%m-%d-%H%M')
$noop mongodump --host {{ EDXAPP_MONGO_HOSTS[0] }} -u "{{ COMMON_MONGO_READ_ONLY_USER }}" -p"{{ COMMON_MONGO_READ_ONLY_PASS }}" -o $MONGOOUTDIR
cd $MONGOOUTDIR
$noop tar zcf {{ MONGO_S3_BACKUP_TEMPDIR }}/{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-$DATESTAMP.tar.gz .
cd {{ MONGO_S3_BACKUP_TEMPDIR }}
$noop s3cmd -c {{ MONGO_S3_S3CMD_CONFIG }} sync {{ MONGO_S3_BACKUP_TEMPDIR }}/{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-$DATESTAMP.tar.gz "s3://{{ MONGO_S3_BACKUP_BUCKET }}/mongo/"
rm -rf $MONGOOUTDIR {{ MONGO_S3_BACKUP_TEMPDIR }}/{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-$DATESTAMP.tar.gz
[default]
access_key = {{ MONGO_S3_BACKUP_AWS_ACCESS_KEY }}
secret_key = {{ MONGO_S3_BACKUP_AWS_SECRET_KEY }}
bucket_location = US
......@@ -31,7 +31,7 @@ systemLog:
{% if MONGO_CLUSTERED %}
replication:
replSetName: {{ mongo_repl_set }}
replSetName: {{ MONGO_REPL_SET }}
security:
keyFile: {{ mongo_key_file }}
......@@ -39,9 +39,8 @@ security:
{% endif %}
net:
{% if MONGO_CLUSTERED is not defined %}
{## Bind to all ips(default) if in clustered mode,
# otherwise only to the specified local ip.
: #}
{# Bind to all ips(default) if in clustered mode,
otherwise only to the specified local ip. #}
bindIp: {{ MONGO_BIND_IP }}
{% endif %}
port: {{ mongo_port }}
......
# Do not edit this file directly, it was generated by ansible
# mongodb.conf
storage:
# Where to store the data.
dbPath: {{ mongo_dbpath }}
# Storage Engine
engine: {{ MONGO_STORAGE_ENGINE }}
# Enable journaling, http://www.mongodb.org/display/DOCS/Journaling
journal:
{% if mongo_enable_journal %}
enabled: true
{% else %}
enabled: false
{% endif %}
{% if MONGO_STORAGE_ENGINE_OPTIONS %}
{{ MONGO_STORAGE_ENGINE_OPTIONS | to_nice_yaml }}
{% endif %}
systemLog:
#where to log
destination: file
path: "{{ mongo_logpath }}"
{% if mongo_logappend %}
logAppend: true
{% else %}
logAppend: false
{% endif %}
logRotate: {{ mongo_logrotate }}
net:
{% if MONGO_CLUSTERED is not defined %}
{## Bind to all ips(default) if in clustered mode,
# otherwise only to the specified local ip.
#}
bindIp: {{ MONGO_BIND_IP }}
{% endif %}
port: {{ mongo_port }}
{{ mongo_extra_conf }}
conn = new Mongo();
db = conn.getDB("admin");
db.auth( '{{ MONGO_ADMIN_USER }}', '{{ MONGO_ADMIN_PASSWORD }}');
{# Generate a list of hosts if no cluster members are give. Otherwise use the
hosts provided in the variable.
#}
{%- if mongo_cluster_members|length == 0 -%}
{%- set hosts = [] -%}
{%- set all_mongo_hosts = [] -%}
{%- do all_mongo_hosts.extend(groups.tag_role_mongo) -%}
{%- do all_mongo_hosts.extend(groups.tag_group_mongo) -%}
{%- for name in group_names -%}
{%- if name.startswith(mongo_aws_stack_name) -%}
{%- for host in all_mongo_hosts -%}
{%- if host in groups[name] -%}
{% do hosts.append("ip-" + host.replace('.','-') + ":" + mongo_port) %}
{%- endif -%}
{%- endfor -%}
{%- endif -%}
{%- endfor -%}
{%- else -%}
{%- set hosts = mongo_cluster_members|map(attribute="name") -%}
{%- endif -%}
// Check that the cluster is ok
if(!rs.status().ok) { throw 'Mongo Cluster Not Ok';}
// Check that the cluster has the right number of members
// and add them if we are the master
if(rs.isMaster().ismaster) {
if(rs.status().members.length!={{ hosts|length }}) {
{% for host in hosts %}
{%- if host != ansible_default_ipv4["address"] -%}
rs.add({_id: {{ loop.index }}, host: '{{ host }}'});
{%- endif -%}
{% endfor %}
sleep(30000);
// Check status and member account, throw exception if not
if(!rs.status().ok) { throw 'Mongo Cluster Not Ok';}
if(rs.status().members.length!={{ hosts|length }}) {
throw 'Could not add all members to cluster'
}
}
}
conn = new Mongo();
db = conn.getDB("admin");
db.auth( '{{ MONGO_ADMIN_USER }}', '{{ MONGO_ADMIN_PASSWORD }}');
{%- if MONGO_PRIMARY == ansible_default_ipv4["address"] -%}
{# Generate a list of hosts if no cluster members are give. Otherwise use the
hosts provided in the variable.
#}
{%- if mongo_cluster_members|length == 0 -%}
{%- set hosts = [] -%}
{%- set all_mongo_hosts = [] -%}
{%- do all_mongo_hosts.extend(groups.tag_role_mongo) -%}
{%- do all_mongo_hosts.extend(groups.tag_group_mongo) -%}
{%- for name in group_names -%}
{%- if name.startswith(mongo_aws_stack_name) -%}
{%- for host in all_mongo_hosts -%}
{%- if host in groups[name] -%}
{% do hosts.append("ip-" + host.replace('.','-') + ":" + mongo_port) %}
{%- endif -%}
{%- endfor -%}
{%- endif -%}
{%- endfor -%}
{%- else -%}
{%- set hosts = mongo_cluster_members|map(attribute="name") -%}
{%- endif -%}
config = {_id: '{{ mongo_repl_set }}', members: [{% for host in hosts %}
{%- if host == ansible_default_ipv4["address"] -%}
{_id: {{ loop.index }}, host: '{{ host }}'}
{%- endif -%}
{% endfor %}
],
settings: { heartbeatTimeoutSecs: {{ MONGO_HEARTBEAT_TIMEOUT_SECS }} }};
rs.initiate(config)
sleep(30000)
rs.slaveOk()
printjson(rs.status())
// Check that the cluster is ok
if(!rs.status().ok) { throw 'Mongo Cluster Not Ok';}
{%- endif -%}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment