Commit d89fd6cf by Joseph Mulloy, committed by GitHub

Merge pull request #4048 from edx/jdmulloy/ops2205/mongo_3.2

Mongo 3.2 role
parents 4bc39297 0e26a010
- Role: mongo_3_2
- Added role for mongo 3.2, not yet in use.
- Removed MONGO_CLUSTERED variable. In this role mongo replication is always configured, even if there is only one node.
- Role: edxapp
- Added creation of enterprise_worker user to provisioning. This user is used by the edx-enterprise package when making API requests to Open edX IDAs.
......
FROM edxops/xenial-common:latest
MAINTAINER edxops
ADD . /edx/app/edx_ansible/edx_ansible
COPY docker/build/mongo/ansible_overrides.yml /
WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays
RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook mongo.yml \
-i '127.0.0.1,' -c local \
-t 'install' \
-e@/ansible_overrides.yml
WORKDIR /edx/app
EXPOSE 27017
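# Not part of this change, just a sketch of how this image might be built and run locally,
# assuming the build is done from the repository root and this file lives at
# docker/build/mongo/Dockerfile (tag and ports are illustrative):
#   docker build -f docker/build/mongo/Dockerfile -t edxops/mongo:latest .
#   docker run -d -p 27017:27017 edxops/mongo:latest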
- name: Deploy MongoDB 3.2
hosts: all
become: True
gather_facts: True
roles:
- common_vars
- docker
- mongo_3_2
# Manages a mongo cluster.
# To set up a new mongo cluster, make sure you've configured MONGO_RS_CONFIG
# as used by mongo_replica_set in the mongo_3_2 role.
#
# If you are initializing a cluster, your command might look like:
# ansible-playbook mongo_3_2.yml -i 203.0.113.11,203.0.113.12,203.0.113.13 -e@/path/to/edx.yml -e@/path/to/ed.yml
# If you just want to deploy an updated replica set config, you can run
# ansible-playbook mongo_3_2.yml -i any-cluster-ip -e@/path/to/edx.yml -e@/path/to/ed.yml --tags configure_replica_set
#
# ADDING A NEW CLUSTER MEMBER
# If you are adding a member to a cluster, you must be sure that the new machine is not first in your inventory
# ansible-playbook mongo_3_2.yml -i 203.0.113.11,203.0.113.12,new-machine-ip -e@/path/to/edx.yml -e@/path/to/ed.yml
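#
# As a sketch (hosts are placeholders), the MONGO_RS_CONFIG you pass in via -e@ might look like:
#   MONGO_RS_CONFIG:
#     _id: '{{ MONGO_REPL_SET }}'
#     members:
#       - host: '203.0.113.11'
#       - host: '203.0.113.12'
#       - host: '203.0.113.13'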
- name: Deploy MongoDB
hosts: all
become: True
gather_facts: True
roles:
- aws
- mongo_3_2
- munin_node
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
- role: newrelic_infrastructure
when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE
// Add super user
conn = new Mongo();
db = conn.getDB("admin");
db.createUser(
{
"user": "{{ MONGO_ADMIN_USER }}",
"pwd": "{{ MONGO_ADMIN_PASSWORD }}",
"roles": ["root"]
}
);
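// A sketch of how a script like this is typically run, relying on MongoDB's localhost
// exception before auth is enforced (path and port are assumptions, not set by this file):
//   mongo --port 27017 /path/to/create_admin.js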
mongo_logappend: true
# This way, when mongod receives a SIGUSR1, it'll close and reopen its log file handle
mongo_logrotate: reopen
mongo_version: 3.2.16
mongo_port: "27017"
mongo_extra_conf: ''
mongo_key_file: '/etc/mongodb_key'
pymongo_version: 3.2.2
mongo_data_dir: "{{ COMMON_DATA_DIR }}/mongo"
mongo_log_dir: "{{ COMMON_LOG_DIR }}/mongo"
mongo_journal_dir: "{{ COMMON_DATA_DIR }}/mongo/mongodb/journal"
mongo_user: mongodb
MONGODB_REPO: "deb http://repo.mongodb.org/apt/ubuntu {{ ansible_distribution_release }}/mongodb-org/3.2 multiverse"
MONGODB_APT_KEY: "7F0CEB10"
MONGODB_APT_KEYSERVER: "keyserver.ubuntu.com"
mongodb_debian_pkgs:
- "mongodb-org={{ mongo_version }}"
- "mongodb-org-server={{ mongo_version }}"
- "mongodb-org-shell={{ mongo_version }}"
- "mongodb-org-mongos={{ mongo_version }}"
- "mongodb-org-tools={{ mongo_version }}"
# Vars Meant to be overridden
MONGO_ADMIN_USER: 'admin'
MONGO_ADMIN_PASSWORD: 'password'
MONGO_USERS:
- user: cs_comments_service
password: password
database: cs_comments_service
roles: readWrite
- user: edxapp
password: password
database: edxapp
roles: readWrite
# This default setting is appropriate for a single-machine installation.
# This will need to be overridden for setups where mongo is on its own server
# and/or you are configuring mongo replication. If the override value is
# 0.0.0.0, mongo will listen on all IPs. The value may also be set to a
# specific IP.
MONGO_BIND_IP: 127.0.0.1
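# For example, a mongo server that application hosts reach over the network would
# typically override this in its secure config (value illustrative, not a role default):
# MONGO_BIND_IP: 0.0.0.0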
MONGO_REPL_SET: "rs0"
MONGO_AUTH: true
# Cluster member configuration
# Fed directly into mongodb_replica_set module
MONGO_RS_CONFIG:
members: []
# Storage engine options in 3.2: "mmapv1" or "wiredTiger"
# 3.2 and 3.4 default to wiredTiger
MONGO_STORAGE_ENGINE: "wiredTiger"
# List of dictionaries as described in the mount_ebs role's default
# for the volumes.
# Useful if you want to store your mongo data and/or journal on separate
# disks from the root volume. By default, they will end up in mongo_data_dir
# on the root disk.
MONGO_VOLUMES: []
# WiredTiger takes a number of optional configuration settings
# which can be defined as a yaml structure in your secure configuration.
MONGO_STORAGE_ENGINE_OPTIONS: !!null
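# A sketch of one possible shape: the mongod.conf template in this role renders this block
# within the storage configuration, so it can mirror mongod's own storage.* options
# (values are illustrative only):
# MONGO_STORAGE_ENGINE_OPTIONS:
#   wiredTiger:
#     engineConfig:
#       cacheSizeGB: 4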
mongo_logpath: "{{ mongo_log_dir }}/mongodb.log"
mongo_dbpath: "{{ mongo_data_dir }}/mongodb"
# In environments that do not require durability (devstack / Jenkins)
# you can disable the journal to reduce disk usage
mongo_enable_journal: true
MONGO_LOG_SERVERSTATUS: true
[Unit]
Description="Disable Transparent Hugepage before MongoDB boots"
Before=mongod.service
[Service]
Type=oneshot
ExecStart=/bin/bash -c 'echo never > /sys/kernel/mm/transparent_hugepage/enabled'
ExecStart=/bin/bash -c 'echo never > /sys/kernel/mm/transparent_hugepage/defrag'
[Install]
RequiredBy=mongod.service
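# A manual check (not part of this unit): after boot, both THP state files should show
# "[never]" selected:
#   cat /sys/kernel/mm/transparent_hugepage/enabled /sys/kernel/mm/transparent_hugepage/defrag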
---
dependencies:
- common
- role: mount_ebs
volumes: "{{ MONGO_VOLUMES }}"
---
- name: Add disable transparent huge pages systemd service (http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/)
copy:
src: etc/systemd/system/disable-transparent-hugepages.service
dest: "/etc/systemd/system/disable-transparent-hugepages.service"
owner: root
group: root
mode: 0644
tags:
- "hugepages"
- "install"
- "install:configuration"
- name: Enable/start disable transparent huge pages service (http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/)
service:
name: disable-transparent-hugepages
enabled: yes
state: started
tags:
- "hugepages"
- "manage"
- "manage:start"
- name: install python pymongo for mongo_user ansible module
pip:
name: pymongo
state: present
version: "{{ pymongo_version }}"
extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}"
tags:
- "install"
- "install:app-requirements"
- name: add the mongodb signing key
apt_key:
id: "{{ MONGODB_APT_KEY }}"
keyserver: "{{ MONGODB_APT_KEYSERVER }}"
state: present
tags:
- "install"
- "install:app-requirements"
- name: add the mongodb repo to the sources list
apt_repository:
repo: "{{ MONGODB_REPO }}"
state: present
tags:
- "install"
- "install:app-requirements"
- "mongo_packages"
- name: install mongo server and recommends
apt:
pkg: "{{ item }}"
state: present
install_recommends: yes
force: yes
update_cache: yes
with_items: "{{ mongodb_debian_pkgs }}"
tags:
- "install"
- "install:app-requirements"
- "mongo_packages"
- name: create mongo dirs
file:
path: "{{ item }}"
state: directory
owner: "{{ mongo_user }}"
group: "{{ mongo_user }}"
with_items:
- "{{ mongo_data_dir }}"
- "{{ mongo_dbpath }}"
- "{{ mongo_log_dir }}"
- "{{ mongo_journal_dir }}"
tags:
- "install"
- "install:app-configuration"
- name: add serverStatus logging script
template:
src: "log-mongo-serverStatus.sh.j2"
dest: "{{ COMMON_BIN_DIR }}/log-mongo-serverStatus.sh"
owner: "{{ mongo_user }}"
group: "{{ mongo_user }}"
mode: 0700
when: MONGO_LOG_SERVERSTATUS
tags:
- "install"
- "install:app-configuration"
- name: add serverStatus logging script to cron
cron:
name: mongostat logging job
minute: "*/3"
job: /edx/bin/log-mongo-serverStatus.sh >> {{ mongo_log_dir }}/serverStatus.log 2>&1
become: yes
when: MONGO_LOG_SERVERSTATUS
tags:
- "install"
- "install:app-configuration"
# This will error when run on a new replica set, so we ignore_errors
# and connect anonymously next.
- name: determine if there is a replica set already
mongodb_rs_status:
host: "{{ ansible_default_ipv4['address'] }}"
username: "{{ MONGO_ADMIN_USER }}"
password: "{{ MONGO_ADMIN_PASSWORD }}"
run_once: true
register: authed_replica_set_already_configured
ignore_errors: true
tags:
- "manage"
- "manage:db-replication"
- name: Try checking the replica set with no user/pass in case this is a new box
mongodb_rs_status:
host: "{{ ansible_default_ipv4['address'] }}"
run_once: true
register: unauthed_replica_set_already_configured
when: authed_replica_set_already_configured.failed is defined
ignore_errors: true
tags:
- "manage"
- "manage:db-replication"
# We use these in the templates but also to control a whole bunch of logic
- name: set facts that default to not initializing a replica set
set_fact:
initialize_replica_set: false
skip_replica_set: false
tags:
- "install"
- "install:app-configuration"
- "update_mongod_conf"
# If either the authed or the unauthed check comes back with a replica set, we
# do not want to initialize one. Since initialization requires a bunch
# of extra templating and restarting, it's not something we want to do on
# existing boxes.
- name: track if you have a replica set
set_fact:
initialize_replica_set: true
skip_replica_set: true
when: authed_replica_set_already_configured.status is not defined
and unauthed_replica_set_already_configured.status is not defined
tags:
- "manage"
- "manage:db-replication"
- name: warn about unconfigured replica sets
debug: msg="You do not appear to have a Replica Set configured, deploying one for you"
when: initialize_replica_set
tags:
- "manage"
- "manage:db-replication"
- name: copy mongodb key file
copy:
content: "{{ MONGO_CLUSTER_KEY }}"
dest: "{{ mongo_key_file }}"
mode: 0600
owner: mongodb
group: mongodb
register: update_mongod_key
tags:
- "manage"
- "manage:db-replication"
- "mongodb_key"
# If skip_replica_set is true, this template will not contain a replica set stanza
# because of the fact above.
- name: copy configuration template
template:
src: mongod.conf.j2
dest: /etc/mongod.conf
backup: yes
register: update_mongod_conf
tags:
- "manage"
- "manage:db-replication"
- "update_mongod_conf"
- name: install logrotate configuration
template:
src: mongo_logrotate.j2
dest: /etc/logrotate.d/hourly/mongo
tags:
- "install"
- "install:app-configuration"
- "logrotate"
- name: restart mongo service if we changed our configuration
service:
name: mongod
state: restarted
when: update_mongod_conf.changed or update_mongod_key.changed
tags:
- "manage"
- "manage:start"
- name: wait for mongo server to start
wait_for:
port: 27017
delay: 2
tags:
- "manage"
- "manage:start"
# We only try passwordless superuser creation when
# we're initializing the replica set and need to use
# the localhost exemption to create a user who will be
# able to initialize the replica set.
# We can only create the users on one machine, the one
# where we will initialize the replica set. If we
# create users on multiple hosts, then they will fail
# to come into the replica set.
- name: create super user
mongodb_user:
name: "{{ MONGO_ADMIN_USER }}"
password: "{{ MONGO_ADMIN_PASSWORD }}"
database: admin
roles: root
when: initialize_replica_set
run_once: true
tags:
- "manage"
- "manage:db"
# Now that the localhost exemption has been used to create the superuser, we need
# to add the replica set to our configuration. This will never happen if we detected
# a replica set in the 'determine if there is a replica set already' task.
- name: Unset our skip initializing replica set fact so that mongod.conf gets a replica set
set_fact:
skip_replica_set: false
when: initialize_replica_set
tags:
- "install"
- "install:app-configuration"
- "manage"
- "manage:db-replication"
- name: re-copy configuration template with replica set enabled
template:
src: mongod.conf.j2
dest: /etc/mongod.conf
backup: yes
when: initialize_replica_set
tags:
- "manage"
- "manage:db-replication"
- name: restart mongo service
service:
name: mongod
state: restarted
when: initialize_replica_set
tags:
- "manage"
- "manage:start"
- "manage:db-replication"
- name: wait for mongo server to start
wait_for:
port: 27017
delay: 2
when: initialize_replica_set
tags:
- "manage"
- "manage:start"
- "manage:db-replication"
- name: configure replica set
mongodb_replica_set:
username: "{{ MONGO_ADMIN_USER }}"
password: "{{ MONGO_ADMIN_PASSWORD }}"
rs_config: "{{ MONGO_RS_CONFIG }}"
run_once: true
register: replset_status
tags:
- "manage"
- "manage:db"
- "manage:db-replication"
# During initial replica set configuration, it can take a few seconds to vote
# a primary and for all members to reflect that status. During that window,
# user creation or other writes can fail. The best wait/check seems to be repeatedly
# checking the replica set status until we see a PRIMARY in the results.
- name: Wait for the replica set to update and (if needed) elect a primary
mongodb_rs_status:
host: "{{ ansible_default_ipv4['address'] }}"
username: "{{ MONGO_ADMIN_USER }}"
password: "{{ MONGO_ADMIN_PASSWORD }}"
register: status
until: status.status is defined and 'PRIMARY' in status.status.members|map(attribute='stateStr')|list
retries: 5
delay: 2
run_once: true
tags:
- "manage"
- "manage:db"
- "manage:db-replication"
- name: create mongodb users in a replica set
mongodb_user:
database: "{{ item.database }}"
login_database: 'admin'
login_user: "{{ MONGO_ADMIN_USER }}"
login_password: "{{ MONGO_ADMIN_PASSWORD }}"
name: "{{ item.user }}"
password: "{{ item.password }}"
roles: "{{ item.roles }}"
state: present
replica_set: "{{ MONGO_REPL_SET }}"
with_items: "{{ MONGO_USERS }}"
run_once: true
tags:
- "manage"
- "manage:db"
- "manage:db-replication"
#!/usr/bin/env bash
# Using JSON.stringify forces output of normal JSON, as opposed to Mongo's weird non-compliant extended JSON
/usr/bin/mongo -u {{ MONGO_ADMIN_USER }} --authenticationDatabase admin -p '{{ MONGO_ADMIN_PASSWORD }}' --quiet <<< 'JSON.stringify(db.serverStatus())'
{{ mongo_log_dir }}/serverStatus.log {
create
compress
copytruncate
delaycompress
dateext
dateformat -%Y%m%d-%s
missingok
notifempty
daily
rotate 90
size 1M
}
{{ mongo_log_dir }}/mongodb.log {
create
compress
copytruncate
delaycompress
dateext
dateformat -%Y%m%d-%s
missingok
notifempty
daily
rotate 90
size 1M
postrotate
/usr/bin/killall -USR1 mongod
endscript
}
# {{ ansible_managed }}
# mongodb.conf
storage:
# Where to store the data.
dbPath: {{ mongo_dbpath }}
# Storage Engine
engine: {{ MONGO_STORAGE_ENGINE }}
# Enable journaling, http://www.mongodb.org/display/DOCS/Journaling
journal:
{% if mongo_enable_journal %}
enabled: true
{% else %}
enabled: false
{% endif %}
{% if MONGO_STORAGE_ENGINE_OPTIONS %}
{{ MONGO_STORAGE_ENGINE_OPTIONS | to_nice_yaml }}
{% endif %}
systemLog:
# where to log
destination: file
path: "{{ mongo_logpath }}"
{% if mongo_logappend %}
logAppend: true
{% else %}
logAppend: false
{% endif %}
logRotate: {{ mongo_logrotate }}
{% if not skip_replica_set %}
replication:
replSetName: {{ MONGO_REPL_SET }}
security:
authorization: {{ MONGO_AUTH | ternary("enabled", "disabled") }}
keyFile: {{ mongo_key_file }}
{% endif %}
net:
bindIp: {{ MONGO_BIND_IP }}
port: {{ mongo_port }}
{{ mongo_extra_conf }}
......@@ -13,3 +13,7 @@
volumes: []
UNMOUNT_DISKS: false
# WARNING! FORCE_REFORMAT_DISKS will cause your volumes to always be reformatted
# even if all the volume's attributes already match what you've defined in volumes[]
# Enable this flag at your own risk with an abundance of caution
FORCE_REFORMAT_DISKS: false
......@@ -26,6 +26,16 @@
when: "UNMOUNT_DISKS and (ansible_mounts | selectattr('device', 'equalto', item.device) | first | default({'fstype': None})).fstype != item.fstype"
with_items: "{{ volumes }}"
# If there are disks we want to unmount but can't because UNMOUNT_DISKS is false,
# that is an error condition, since it will cause the format step to fail
- name: Check that we don't want to unmount disks to change fstype when UNMOUNT_DISKS is false
fail: msg="Found disks mounted with the wrong filesystem type, but can't unmount them. This role will need to be re-run with -e 'UNMOUNT_DISKS=True' if you believe that is safe."
when:
"not UNMOUNT_DISKS and
volumes | selectattr('device', 'equalto', item.device) | list | length != 0 and
(volumes | selectattr('device', 'equalto', item.device) | first).fstype != item.fstype"
with_items: "{{ ansible_mounts }}"
# Noop & reports "ok" if fstype is correct
# Errors if fstype is wrong and disk is mounted (hence above task)
- name: Create filesystem
......@@ -33,7 +43,7 @@
dev: "{{ item.device }}"
fstype: "{{ item.fstype }}"
# Necessary because AWS gives some ephemeral disks the wrong fstype by default
force: true
force: "{{ FORCE_REFORMAT_DISKS }}"
with_items: "{{ volumes }}"
# This can fail if one volume is mounted on a child directory as another volume
......@@ -57,7 +67,7 @@
# If there are disks we want to unmount but can't because UNMOUNT_DISKS is false,
# that is an error condition, since it can easily allow us to double-mount a disk.
- name: Check that we don't want to unmount disks when UNMOUNT_DISKS is false
- name: Check that we don't want to unmount disks to change mountpoint when UNMOUNT_DISKS is false
fail: msg="Found disks mounted in the wrong place, but can't unmount them. This role will need to be re-run with -e 'UNMOUNT_DISKS=True' if you believe that is safe."
when:
not UNMOUNT_DISKS and
......
# Example ansible commands
# Three node replica set
# ansible-playbook -i '203.0.113.12,203.0.113.20,203.0.113.68' -u ubuntu edx-east/mongo_3_2.yml -e@sample_vars/test-mongo.yml
# Single node
# ansible-playbook -i '203.0.113.12' -u ubuntu edx-east/mongo_3_2.yml -e@sample_vars/test-mongo.yml
# Passwords and replication keys in this file are examples and must be changed.
# You must change any variable with the string "CHANGEME" in it
MONGO_HEARTBEAT_TIMEOUT_SECS: 3
EDXAPP_MONGO_HOSTS: "{{ MONGO_RS_CONFIG.members|map(attribute='host')|list }}"
MONGO_VOLUMES:
- device: /dev/xvdb
mount: /edx/var/mongo
options: "defaults,noatime"
fstype: ext4
- device: /dev/xvdc
mount: /edx/var/mongo/mongodb/journal
options: "defaults,noatime"
fstype: ext4
##### edx-secure/ansible/vars/stage-edx.yml #####
MONGO_ADMIN_USER: 'admin'
MONGO_ADMIN_PASSWORD: 'CHANGEME_794jtB7zLIvDjHGu2gD6wKUU'
MONGO_MONITOR_USER: 'cloud-manager'
MONGO_MONITOR_PASSWORD: 'CHANGEME_7DJ9FTWHJx4TCSPxSmx1k3DD'
MONGO_BACKUP_USER: 'backup'
MONGO_BACKUP_PASSWORD: 'CHANGEME_XbJA3LouKV5QDv2NQixnOrQj'
MONGO_REPL_SET: 'test-repl-set'
MONGO_RS_CONFIG:
_id: '{{ MONGO_REPL_SET }}'
members:
# Must use private IPs here; the mongo role assumes internal IPs when checking whether a node is in this list
- host: '203.0.113.12'
- host: '203.0.113.20'
- host: '203.0.113.68'
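# With the members above, the EDXAPP_MONGO_HOSTS expression earlier in this file evaluates
# to ['203.0.113.12', '203.0.113.20', '203.0.113.68'].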
MONGO_CLUSTER_KEY: |
CHANGEME/CHANGE/ME/CHANGE/ME9YeSrVDYxont1rDh2nBAEGB30PhwG9ghtPY
c1QUc2etVfMnE9vbUhLimU/Xb4j4yLRDurOTi8eYoE8eAvAquLalcz7URMuw8Qt3
fIyFa3wSXyE04rpsoBrpG53HwwFrN3pra3x4YPs8g77v50V56gfwaStNJ3KPpa5w
RukdFXnCUPRyONSJEYwjPzI2WucnAZqlDYre6qjxL+6hCjZ4vS/RPgfoHGTUQ62W
9k2TiWar/c1nL6rZvGhGJHFmZalyL9pJ4SAaYoFPhCmcHusyzjlM8p27AsyJwDyr
kSI/JPBLMLDoiLUAPHGz1jrGM+iOgTilmfPVy+0UVc9Bf2H4Vs1zKJpUM2RNAPJ7
S9DzB6q8WtRothbEtwnppWojceid202uLEYCpqhCcH6LR0lTcyJiXCRyHAtue813
5Djv1m3Z8p2z6B+3ab7CDq+WV9OrBI7+eynnwYGgp4eIHQNNSb1/x/8TeiVMQYyJ
ONj4PbgVwsdhL+RUuVqCzjK0F4B4FOSSKXbu07L4F/PALqVugH/YebAUAJVo027r
ca669FSrQ8q6Jgx3M1mCoZkp23CVt3B28+EwpyABh6cwxIrTIvxU6cvxX8M2piz+
63nKUKoStNhmRA0EGfbY9WRmk1RNlC2jVJAvvJUnNXnouNF2DGV4pRNGlb7yfS+n
S+3ZZpUDpTLx36CWGPJ1ZpwuZ0p5JPbCSW6gpFZqGFZsQERg6L8Q9FkwESnbfw+V
oDiVJlClJA2AFXMnAt9q1dhM7OVBj12x9YI5yf1Lw0vVLb7JDmWI7IGaibyxtjFi
jO4bAEl4RZu3364nFH/nVf6kV2S29pAREMqxbcR5O75OuHFN9cqG7BhYClg+5mWg
mGKLLgpXsJxd6bMGjxH1uc30E2qbU1mkrW29Ocl5DFuXevK2dxVj71ZiYESIUg87
KRdC8S3Mljym9ruu4nDC3Sk4xLLuUGp/yD2O0B0dZTfYOJdt
COMMON_MONGO_READ_ONLY_USER: 'read_only'
COMMON_MONGO_READ_ONLY_PASS: "CHANGEME correct horse battery staple"
EDXAPP_MONGO_PASSWORD: 'CHANGEME_H8uoZEZJun9BeR5u8mMyA4yh'
EDXAPP_MONGO_USER: 'edxapp003'
FORUM_MONGO_USER: "comments001"
FORUM_MONGO_PASSWORD: "CHANGEME_j5fhX0pOwEL1S5WUFZkbZAyZ"
login_host: "{{ EDXAPP_MONGO_HOSTS[1] }}"
repl_set: "{{ EDXAPP_MONGO_REPLICA_SET }}"
MONGO_USERS:
- user: "{{ EDXAPP_MONGO_USER }}"
password: "{{ EDXAPP_MONGO_PASSWORD }}"
database: "{{ EDXAPP_MONGO_DB_NAME }}"
roles: readWrite
- user: "{{ COMMON_MONGO_READ_ONLY_USER }}"
password: "{{ COMMON_MONGO_READ_ONLY_PASS }}"
database: "{{ EDXAPP_MONGO_DB_NAME }}"
roles:
- { db: "{{ EDXAPP_MONGO_DB_NAME }}", role: "read" }
- { db: "admin", role: "clusterMonitor" }
- user: "{{ MONGO_MONITOR_USER }}"
password: "{{ MONGO_MONITOR_PASSWORD }}"
database: "admin"
roles: clusterMonitor
- user: "{{ MONGO_BACKUP_USER }}"
password: "{{ MONGO_BACKUP_PASSWORD }}"
database: "admin"
roles: backup
EDXAPP_MONGO_DB_NAME: 'test-mongo-db'
EDXAPP_MONGO_PORT: 27017
EDXAPP_MONGO_REPLICA_SET: '{{ MONGO_REPL_SET }}'
......@@ -8,7 +8,6 @@
- "cluster1"
- "cluster2"
- "cluster3"
MONGO_CLUSTERED: yes
MONGO_CLUSTER_KEY: 'password'
ELASTICSEARCH_CLUSTERED: yes
MARIADB_CLUSTERED: yes
......
......@@ -20,7 +20,7 @@ pathlib2==2.1.0
boto3==1.4.4
# Needed for the mongo_* modules (playbooks/library/mongo_*)
pymongo==3.1
pymongo==3.2.2
# Needed for the mysql_db module
MySQL-python==1.2.5
......@@ -49,9 +49,11 @@ To modify configuration file:
6. Wait for Travis CI to run the builds.
7. Upon completion, examine the Travis CI logs to find where your Dockerfile
was built (search for "docker build -t"). Find the amount of time the build
took by comparing the output of the date command before the build command
starts and the date command after the build command completes.
was built (search for "docker build -t"). Your Dockerfile should be built
by one of the build jobs with "MAKE_TARGET=docker.test.shard". Find the
amount of time the build took by comparing the output of the date command
before the build command starts and the date command after the build
command completes.
8. Round build time to a whole number, and add it to the
configuration/util/parsefiles_config.yml file.
......
......@@ -28,3 +28,4 @@ weights:
- ecomworker: 4
- notes: 2
- notifier: 2
- mongo: 1