Commit e2e2ee93 by Jason Bau

Merge tag 'release-gugelhupf' into edx-west/merge-gugelhupf

Gugelhupf Release 2014-02-11

Conflicts:
	playbooks/ansible.cfg
	playbooks/edx-west/README.md
	playbooks/edx-west/cme-prod-app.yml
	playbooks/edx-west/prod-app.yml
	playbooks/edx-west/stage-app.yml
	playbooks/roles/datadog/tasks/main.yml
	playbooks/roles/edxapp/defaults/main.yml
	playbooks/roles/edxapp/handlers/main.yml
	playbooks/roles/edxapp/tasks/deploy.yml
	playbooks/roles/edxapp/tasks/main.yml
	playbooks/roles/edxapp/tasks/python_sandbox_env.yml
	playbooks/roles/edxapp/tasks/service_variant_config.yml
	playbooks/roles/edxapp/templates/lms-preview.conf.j2
	playbooks/roles/gh_users/tasks/main.yml
	playbooks/roles/nginx/tasks/main.yml
	playbooks/roles/nginx/templates/basic-auth.j2
	playbooks/roles/nginx/templates/edx_logrotate_nginx_access.j2
	playbooks/roles/notifier/tasks/deploy.yml
	playbooks/roles/notifier/tasks/main.yml
	playbooks/roles/xqueue/defaults/main.yml
	playbooks/roles/xqueue/templates/xqueue_consumer.conf.j2
parents 2c4dabbd 7bc2c211
@@ -5,7 +5,7 @@
 \#*\#
 *~
 .#*
 vagrant/devstack/cs_comments_service
-vagrant/devstack/edx-platform
-vagrant/release/*/devstack/cs_comments_service
-vagrant/release/*/devstack/edx-platform
+vagrant/*/devstack/edx-platform
+vagrant/*/devstack/cs_comments_service
+vagrant/*/devstack/ora
# Additional Tasks
import cache
import clean
import ec2
import audit
import git
import hosts
import locks
import os
import ssh
import status
import migrate_check
import yaml
from dogapi import dog_stats_api, dog_http_api
from timestamps import TSWrapper
# Global tasks
import logging
from fabric.api import env, task, runs_once
from output import squelch
from datetime import datetime
import sys
import time
from fabric.api import execute, local, task, runs_once
from fabric.utils import fastprint
from fabric.colors import blue
from ssh_tunnel import setup_tunnel
# These imports are to give aliases for these tasks
from hosts import by_tags as tag
from hosts import by_tags as tags
from hosts import exemplar_from_tags as exemplar
from git import default_deploy as deploy
env.linewise = True
env.noop = False
env.use_ssh_config = True
FORMAT = '[ %(asctime)s ] : %(message)s'
logging.basicConfig(format=FORMAT, level=logging.WARNING)
# add timestamps to output
sys.stdout = TSWrapper(sys.stdout)
sys.stderr = TSWrapper(sys.stderr)
path = os.path.abspath(__file__)
with open(os.path.join(
os.path.dirname(path), '../package_data.yaml')) as f:
package_data = yaml.load(f)
dog_stats_api.start(api_key=package_data['datadog_api'], statsd=True)
dog_http_api.api_key = package_data['datadog_api']
@task
def noop():
"""
Disable modification of servers
"""
env.noop = True
dog_stats_api.stop()
@task
def quiet():
"""
Disables verbose output
"""
squelch()
@runs_once
@task()
def log(fname=None):
"""
Writes a logfile of the run to disk
"""
if not fname:
d = datetime.now()
fname = d.strftime('/var/tmp/fab-%Y%m%d-%H%M%S-{0}.log'.format(
os.getpid()))
env.logfile = fname
sys.stdout.log_to_file(fname)
sys.stderr.log_to_file(fname)
import time
from fabric.api import execute, local, task, runs_once
from fabric.utils import fastprint
from fabric.colors import blue
from ssh_tunnel import setup_tunnel
# These imports are to give aliases for these tasks
from hosts import by_name as name
from hosts import by_tags as tag
from hosts import by_tags as tags
from hosts import exemplar_from_tags as exemplar
from git import default_deploy as deploy
import logging
from fabric.api import serial, task, parallel, env, execute, runs_once, settings, sudo
from fabfile.safety import noopable
from multiprocessing import Manager
from timestamps import no_ts
from packages import PackageInfo
import tempfile
from output import notify
@task
@parallel
def collect_installed_packages(results):
"""
Collect all installed packages for the selected hosts and store them in env
"""
print env.host
pkg_info = PackageInfo()
results[env.host] = pkg_info.installed_packages()
@task
@serial
def display_installed_packages(installed_packages):
"""
Print all installed packages collected by collect_installed_packages
"""
# FIXME: env.hosts loses the port information here, not sure why
with no_ts():
for pkg in installed_packages['{0}:22'.format(env.host)]:
notify("{pkg.name} = {pkg.revision}".format(pkg=pkg))
@task(default=True)
@runs_once
def installed_packages(from_links=False):
"""
List all of the installed packages on the selected hosts
"""
installed_packages = Manager().dict()
execute(collect_installed_packages, installed_packages)
execute(display_installed_packages, installed_packages)
@task
def audit_user(user, audit_output=None):
"""
Logs on to the provided hosts and runs id for the supplied user with sudo.
Output is logged to the provided file argument, or to a default path built
with python's gettempdir() function and the following file name format:
/tmp/audit-user-{user}.csv
The contents of this file are
host,user,command output
Note that if the file already exists, output will be appended to the
existing file.
"""
logging.info("Auditing {host}.".format(host=env.host_string))
if not audit_output:
audit_output = tempfile.gettempdir() + "/audit-user-{user}.csv".format(
user=user)
with settings(warn_only=True):
with open(audit_output, 'a') as audit:
output = noopable(sudo)("id {user}".format(user=user))
audit.write("{host},{user},{output}\n".format(
host=env.host_string,
user=user,
output=output
)
)
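# Example usage (illustrative only; the exact task path depends on how the
# fabfile modules are namespaced, and the user name is a placeholder):
#
#   fab tag:environment=prod,role=app audit.audit_user:user=alice
#
# appends one "host,user,output" row per selected host to the default CSV.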
@task
def remove_user(user):
"""
Logs on to provided hosts and runs userdel for the supplied user with sudo.
The user's home directory is preserved.
"""
logging.info("Removing {user} user from {host}.".format(
user=user,host=env.host_string))
with settings(warn_only=True):
output = noopable(sudo)("userdel {user}".format(user=user))
logging.info("Output of userdel command on host {host} was {out}".format(
host=env.host_string,out=output
)
)
from fabric.api import task, runs_once, env, serial, puts, settings
from fabric.utils import fastprint
from fabric.colors import blue, red, white
from output import notify
from packages import PackageDescriptor
from output import unsquelched
from hosts import exemplar
from ssh_tunnel import setup_tunnel
from packages import PackageInfo
@task
@runs_once
def from_exemplar(**tags):
"""
Cache the set of packages installed on one host from the specified tags.
"""
host_string = setup_tunnel([exemplar(**tags)])[0]
with settings(host_string=host_string):
installed_packages()
@task
@runs_once
def limit_prefix(*prefix_list):
"""
Limits cached packages to those that
match one or more prefix strings
"""
env.package_descriptors = filter(
lambda pkg: any(pkg.name.startswith(prefix)
for prefix in prefix_list), env.package_descriptors)
@task(default=True)
@runs_once
def installed_packages(prefix=None):
"""
Cache the set of packages installed on the selected host.
"""
pkg_info = PackageInfo()
env.package_descriptors = [
package for package in pkg_info.installed_packages()
if prefix is None or package.name.startswith(prefix)
]
@task
@runs_once
def from_strings(**pkg_revs):
"""
Cache packages based on strings that can be either checked with confirm
or deployed with deploy.
Each named argument specifies a package by name, and the revision of
the package to deploy
"""
packages = []
for pkg_name, pkg_rev in pkg_revs.items():
packages.append(PackageDescriptor(pkg_name, pkg_rev))
env.package_descriptors = packages
notify(env.package_descriptors)
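# Example usage (illustrative; assumes the module is exposed as "cache" per the
# top-level imports, and the revisions shown are placeholders):
#
#   fab tag:environment=stage,role=app cache.from_strings:edx-platform=0123abcd,xqueue=4567ef89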
@task
@runs_once
def from_stdin(prefix=None):
"""
Cache a list of packages from stdin.
Package names must start with prefix, if specified (any that don't
will be skipped). Package names and revisions should be separated
by = signs, and should be one per line.
"""
if prefix:
prefix_msg = white('pkg_name', bold=True) + white(
' must start with ') + blue(prefix)
else:
prefix_msg = ''
fastprint('\n')
fastprint('\n'.join([
white('Please enter pkg_name=pkg_rev, one per line\n', bold=True),
white('pkg_rev', bold=True) + white(' is a git revision hash'),
prefix_msg,
white('Complete your selections by entering a blank line.'),
]))
fastprint('\n\n')
packages = {}
while True:
line = raw_input("> ")
if not line:
break
if '=' not in line:
fastprint(red("Expected = in '{line}'. Skipping...".format(
line=line)) + white('\n'))
continue
pkg_name, _, pkg_rev = line.partition('=')
pkg_name = pkg_name.strip()
pkg_rev = pkg_rev.strip()
if prefix and not pkg_name.startswith(prefix):
fastprint(red("'{0}' does not start with '{1}'".format(
pkg_name, prefix)) + white('\n'))
continue
packages[pkg_name] = pkg_rev
from_strings(**packages)
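# Example interactive input for from_stdin (revisions are placeholders); a
# blank line ends the selection:
#
#   > edx-platform=35f1ae26
#   > xqueue=9c0b2a47
#   >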
@task
@serial
@runs_once
def prompt(*pkg_names):
packages = {}
with unsquelched():
puts("Please supply git revisions to "
"deploy for the following packages:")
for pkg in pkg_names:
packages[pkg] = raw_input("{pkg} = ".format(pkg=pkg)).strip()
from_strings(**packages)
from output import notify
from fabric.api import abort
from fabric.colors import blue, cyan, green, red, white
from fabric.utils import fastprint
def choose(msg, options):
choices = range(len(options))
fastprint(white(msg, bold=True) + white("\n"))
for i, target in enumerate(options):
fastprint("{0}. {1}\n".format(i, target))
fastprint("x. Cancel\n")
user_input = raw_input("> ")
if user_input == 'x':
abort("Cancelled")
try:
choice = int(user_input)
except:
fastprint(red("Choice must be an integer"))
return None
if choice not in choices:
fastprint(red("Choice must be one of {0}".format(choices)))
return None
return options[choice]
def multi_choose_with_input(msg, options):
"""
Options:
msg - header message for the chooser
options - dictionary of options to select
User selects one of the keys in the dictionary,
a new value is read from stdin
"""
selections = options.keys()
user_input = None
while True:
fastprint('\n{0}{1}'.format(white(msg, bold=True), white("\n")))
# The extra white("\n") prints are to reset
# the color for the timestamp line prefix
fastprint(white("\n"))
for i, item in enumerate(selections):
fastprint(" {0}. {1} : {2}".format(white(i, bold=True),
cyan(item), cyan(options[item], bold=True)) + white("\n"))
fastprint(blue(" a. Select all") + white("\n"))
fastprint(blue(" c. Continue") + white("\n"))
fastprint(blue(" x. Cancel") + white("\n"))
fastprint(white("\n"))
user_input = raw_input("> ")
try:
if user_input == 'c':
break
elif user_input == 'x':
return None
elif int(user_input) in range(len(selections)):
name = selections[int(user_input)]
fastprint(green('Enter new msg for ') +
cyan(name))
options[name] = raw_input(white(": "))
except:
notify("Invalid selection ->" + user_input + "<-")
return options
def multi_choose(msg, options):
fastprint(white(msg, bold=True) + white("\n"))
selected = [" " for option in options]
user_input = None
while True:
# The extra white("\n") prints are to reset
# the color for the timestamp line prefix
fastprint(white("\n"))
for i, target in enumerate(options):
fastprint(green(selected[i]))
fastprint(cyan(" {0}. {1}".format(i, target)) + white("\n"))
fastprint(blue(" a. Select all") + white("\n"))
fastprint(blue(" c. Deploy selections") + white("\n"))
fastprint(blue(" x. Cancel") + white("\n"))
fastprint(white("\n"))
user_input = raw_input("> ")
try:
if user_input == 'c':
break
elif user_input == 'a':
selected = ['*' for i in range(len(selected))]
elif user_input == 'x':
return None
elif int(user_input) in range(len(options)):
if selected[int(user_input)] == " ":
selected[int(user_input)] = "*"
else:
selected[int(user_input)] = " "
except:
notify("Invalid selection ->" + user_input + "<-")
pkgs = [options[s] for s in range(len(selected)) if selected[s] == '*']
return pkgs
from fabric.api import sudo, task, parallel
from safety import noopable
from modifiers import rolling
@task
@parallel
def apt_get_clean():
""" Runs apt-get clean on a remote server """
noopable(sudo)('apt-get clean')
@task
@rolling
def mako_template_cache():
noopable(sudo)('service gunicorn stop')
noopable(sudo)('rm -rf /tmp/tmp*mako')
noopable(sudo)('service gunicorn start')
import boto
from fabric.api import run, task, parallel, env
env.instance_ids = {}
def instance_id():
if env.host_string not in env.instance_ids:
env.instance_ids[env.host_string] = run('wget -q -O - http://169.254.169.254/latest/meta-data/instance-id')
return env.instance_ids[env.host_string]
import boto
from fabric.decorators import serial
from ssh_tunnel import setup_tunnel, DOMAIN  # DOMAIN is used by _fleet() below
import socket
from fabric.api import env, task, abort
from fabric.colors import red
import logging
def hosts_by_tag(tag, value):
"""
Return a list of all hosts that have the specified value for the specified
tag
"""
return hosts_by_tags(**{tag: value})
def hosts_by_tags(**tags):
"""
Return a list of all hosts that have the specified value for the specified
tags.
Tag values are allowed to include wildcards
If no variant tag is specified, this command will ignore all hosts
that have a variant specified.
"""
if 'env' in tags:
tags['environment'] = tags['env']
del(tags['env'])
ec2 = boto.connect_ec2()
hosts = []
for res in ec2.get_all_instances(filters={'tag:' + tag: value
for tag, value in tags.iteritems()
if value != '*'}):
for inst in res.instances:
if inst.state == "running":
if (inst.public_dns_name):
hosts.append(inst.public_dns_name)
else:
hosts.append(inst.private_dns_name)
print hosts
return hosts
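# Example usage (illustrative): target all running app servers in the prod
# environment. 'env' is accepted as shorthand for the 'environment' tag and
# tag values may contain wildcards:
#
#   fab tag:env=prod,role=app* status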
def _fleet():
ec2 = boto.connect_ec2()
hosts = []
for res in ec2.get_all_instances():
for inst in res.instances:
if inst.state == "running":
try:
instance_name = inst.tags['Name']
except KeyError:
# skip instances that have no Name tag; otherwise instance_name
# would be undefined below
logging.warning("Instance with id {id} and {dns} has no assigned Name.".format(id=inst.id, dns=inst.public_dns_name))
continue
host_to_add = instance_name + "." + DOMAIN
# fallback to the public hostname if the m.edx.org
# name doesn't exist
try:
socket.gethostbyname(host_to_add.replace(':22',''))
except socket.error:
if inst.public_dns_name:
host_to_add = inst.public_dns_name
if host_to_add:
hosts.append(host_to_add)
return hosts
def exemplar(**tags):
"""
Return the hostname of one host from the specified set
of tags, or None if there is no such host
"""
hosts = hosts_by_tags(**tags)
if hosts:
return hosts[0]
else:
return None
@task(alias='exemplar')
def exemplar_from_tags(**tags):
env.hosts.append(exemplar(**tags))
@task(aliases=['tag', 'tags'])
def by_tags(**tags):
"""
Add all running hosts that match the tag names provided
as keyword arguments.
"""
env.hosts.extend(hosts_by_tags(**tags))
env.hosts.sort()
env.hosts = setup_tunnel(env.hosts)
@task(aliases=['fleet'])
def fleet():
"""
Return a list of all hosts available and running via the default AWS
credentials.
Your ability to operate on these hosts will depend upon the ssh credentials
that you are using to drive fab. There is likely to be a mismatch between
what hosts you can see via IAM managed AWS credentials and which hosts
you can actually connect to even if you are using highly privileged
AWS pems.
"""
hosts = _fleet()
env.hosts.extend(hosts)
env.hosts.sort()
env.hosts = setup_tunnel(env.hosts)
import os
import socket
import time
from output import notify
from safety import noopable
from fabric.api import task, run, env, settings, sudo, abort
from fabric.api import runs_once, execute, serial, hide
MAX_SLEEP_TIME = 10
LOCK_FILE = '/opt/deploy/.lock'
@task
@runs_once
def wait_for_all_locks():
execute('locks.wait_for_lock', hosts=sorted(env.hosts))
@task
@runs_once
def remove_all_locks():
execute('locks.remove_lock', hosts=sorted(env.hosts, reverse=True))
@task
@serial
def remove_lock():
noopable(sudo)("test ! -f {0} || rm {0}".format(LOCK_FILE))
@task
@serial
def wait_for_lock():
if hasattr(env, 'deploy_user'):
lock_user = env.deploy_user
else:
lock_user = env.user
LOCK_ID = 'u:{user} h:{host} pid:{pid}'.format(user=lock_user,
host=socket.gethostname(),
pid=str(os.getpid()))
sleep_time = 0.1
timeout = 120
start_time = time.time()
with settings(warn_only=True):
while True:
wait_time = time.time() - start_time
# break if the lockfile is removed or if it belongs to this pid
# if it exists lock_status will have the file's contents
with hide('running', 'stdout', 'stderr', 'warnings'):
lock_status = run("test ! -f {lfile} || "
"(cat {lfile} && "
'grep -q "{lid}" {lfile})'.format(
lfile=LOCK_FILE,
lid=LOCK_ID))
if lock_status.succeeded:
noopable(sudo)('echo "{0}" > {1}'.format(
LOCK_ID, LOCK_FILE))
notify("Took lock")
break
elif wait_time >= timeout:
abort("Timeout expired, giving up")
lock_create_time = run("stat -c %Y {0}".format(LOCK_FILE))
delta = time.time() - float(lock_create_time)
(dhour, dsec) = divmod(delta, 3600)
notify("""
!! Deploy lockfile already exists ({lockfile}) !!
Waiting: {wait}s
Lockfile info: [ {owner} ]
Lock created: {dhour}h{dmin}m ago
""".format(
lockfile=LOCK_FILE,
wait=int(timeout - wait_time),
owner=lock_status,
dhour=int(dhour),
dmin=int(dsec / 60),
))
time.sleep(sleep_time)
sleep_time *= 2
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
from fabric.api import task, parallel, put, sudo
from safety import noopable
from .modifiers import rolling
from StringIO import StringIO
import json
__all__ = ['on', 'off','maintain_service','unmaintain_service']
services = ['lms','cms','lms-xml','lms-preview']
def set_maintenance(value):
noopable(put)(StringIO(json.dumps({'maintenance': value})), '/etc/facter/facts.d/mitx_maintenance.json', use_sudo=True)
@task
@parallel
def on():
"""
Enable maintenance mode
"""
set_maintenance(True)
puppet.checkin('maintenance')
@task
@parallel
def off():
"""
Disable maintenance mode
"""
set_maintenance(False)
puppet.checkin('maintenance')
@task
@rolling
def maintain_service(service):
"""
Puts a specified edxapp service into maintenance mode by replacing
its nginx sites-enabled link with a link to the maintenance vhost.
"""
if service not in services:
raise Exception("Provided service not in the service inventory. "
"Acceptable values are {services}".format(
services=services
))
noopable(sudo)("rm -f /etc/nginx/sites-enabled/{service}".format(
service=service))
noopable(sudo)("ln -s /etc/nginx/sites-available/{service}-maintenance"
" /etc/nginx/sites-enabled/{service}-maintenance".format(
service=service))
noopable(sudo)("service nginx reload")
@task
@rolling
def unmaintain_service(service):
"""
Removes a specified edxapp service from maintenance mode by replacing
the appropriate link in /etc/nginx/sites-enabled.
"""
if service not in services:
raise Exception("Provided service not in the service inventory. "
"Acceptable values are {services}".format(
services=services
))
noopable(sudo)("rm -f /etc/nginx/sites-enabled/{service}-maintenance".format(
service=service))
noopable(sudo)("ln -s /etc/nginx/sites-available/{service}"
" /etc/nginx/sites-enabled/{service}".format(
service=service))
noopable(sudo)("service nginx reload")
import boto
from .ec2 import instance_id
def instance_tags_for_current_host():
"""
Returns the datadog style tags for the active host
"""
return instance_tags([instance_id()])
def instance_tags(instance_ids):
"""
Returns datadog style tags for the specified instances
"""
ec2 = boto.connect_ec2()
tags = set()
for res in ec2.get_all_instances(instance_ids):
for instance in res.instances:
ec2_tags = instance.tags
tags.add('instance_id:' + instance.id)
if 'group' in ec2_tags:
tags.add('fab-group:' + ec2_tags['group'])
if 'environment' in ec2_tags:
tags.add('fab-environment:' + ec2_tags['environment'])
if 'variant' in ec2_tags:
tags.add('fab-variant:' + ec2_tags['variant'])
return list(tags)
from fabric.api import task, sudo, runs_once, prefix, hide, abort
from fabric.contrib import console
from fabric.colors import white, green
from .safety import noopable
@task()
@runs_once
def migrate_check(auto_migrate=False):
"""
Checks to see whether migrations need to be run;
if they do, it will prompt to run them before
continuing.
Looks for " - Migrating" in the output of
the dry run.
"""
migration_cmd = "/opt/edx/bin/django-admin.py migrate --noinput " \
"--settings=lms.envs.aws --pythonpath=/opt/wwc/edx-platform"
with prefix("export SERVICE_VARIANT=lms"):
with hide('running', 'stdout', 'stderr', 'warnings'):
dryrun_out = sudo(migration_cmd + " --db-dry-run", user="www-data")
migrate = False
for chunk in dryrun_out.split('Running migrations for '):
if 'Migrating' in chunk:
print "!!! Found Migration !!!\n" + chunk
migrate = True
if migrate:
if auto_migrate or console.confirm(
green(migration_cmd) + white('\n') +
white('Run migrations? ', bold=True), default=True):
noopable(sudo)(migration_cmd, user='www-data')
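# Example usage (illustrative; depending on namespacing the task may be
# addressed as migrate_check.migrate_check): check one exemplar app server
# and apply migrations automatically if any are found:
#
#   fab exemplar:environment=prod,role=app migrate_check.migrate_check:auto_migrate=True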
import boto
import time
from collections import namedtuple
from fabric.api import task, execute, serial
from functools import wraps, partial
from safety import noopable
from output import notify
from dogapi import dog_stats_api
from .metrics import instance_tags
from .ec2 import instance_id
MAX_SLEEP_TIME = 1
LockedElb = namedtuple('LockedElb', 'name elb lock')
def await_elb_instance_state(lb, instance_id, awaited_state):
sleep_time = 0.1
start_time = time.time()
while True:
state = lb.get_instance_health([instance_id])[0].state
if state == awaited_state:
notify("Load Balancer {lb} is in awaited state {awaited_state}, proceeding.".format(
lb=lb.dns_name,
awaited_state=awaited_state
))
break
else:
notify("Checking again in {0} seconds. Elapsed time: {1}".format(sleep_time, time.time() - start_time))
time.sleep(sleep_time)
sleep_time *= 2
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
def rolling(func):
@task
@serial
@wraps(func)
def wrapper(*args, **kwargs):
elb = boto.connect_elb()
elbs = elb.get_all_load_balancers()
execute('locks.wait_for_all_locks')
inst_id = instance_id()
tags = ['task:' + func.__name__] + instance_tags([inst_id])
active_lbs = sorted(
lb
for lb in elbs
if inst_id in [info.id for info in lb.instances]
)
timer = partial(dog_stats_api.timer, tags=tags)
# Remove this node from the LB
for lb in active_lbs:
notify("Removing {id} from {lb}".format(id=inst_id, lb=lb))
with timer('rolling.deregister_instance'):
noopable(lb.deregister_instances)([inst_id])
noopable(await_elb_instance_state)(lb, inst_id, "OutOfService")
# Execute the operation
func(*args, **kwargs)
# Add this node back to the LBs
for lb in active_lbs:
notify("Adding {id} to {lb}".format(id=inst_id, lb=lb))
with timer('rolling.register_instance'):
noopable(lb.register_instances)([inst_id])
with timer('rolling.wait_for_start'):
# Wait for the node to come online in the LBs
for lb in active_lbs:
noopable(await_elb_instance_state)(lb, inst_id, "InService")
return wrapper
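# Illustrative sketch of how @rolling is meant to be used (the task name and
# command below are hypothetical; see maintenance.mako_template_cache in this
# repo for a real example). The wrapped task runs serially per host, and each
# host is deregistered from its ELBs first and re-registered afterwards:
#
#   @task
#   @rolling
#   def restart_gunicorn():
#       noopable(sudo)('service gunicorn restart')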
import sys
from contextlib import contextmanager
from fabric.api import puts
class SquelchingStream(object):
def __init__(self, stream):
self.__dict__['stream'] = stream
self.__dict__['squelched'] = False
self.__dict__['needs_line_ending'] = False
def write(self, string):
if self.squelched:
self.stream.write('.')
self.stream.flush()
self.needs_line_ending = True
else:
if self.needs_line_ending:
self.needs_line_ending = False
self.stream.write('\n')
self.stream.write(string)
def __getattr__(self, attr):
return getattr(self.stream, attr)
def __setattr__(self, attr, val):
if attr in self.__dict__:
return object.__setattr__(self, attr, val)
return setattr(self.stream, attr, val)
sys.stdout = SquelchingStream(sys.stdout)
sys.stderr = SquelchingStream(sys.stderr)
def squelch():
sys.stdout.squelched = sys.stderr.squelched = True
def unsquelch():
sys.stdout.squelched = sys.stderr.squelched = False
@contextmanager
def unsquelched(stream=sys.stdout):
old_state = stream.squelched
stream.squelched = False
yield
stream.squelched = old_state
def notify(msg, show_prefix=None, end='\n', flush=False):
with unsquelched():
puts(msg, show_prefix, end, flush)
import os
from fabric.api import run, settings, hide, sudo
from collections import defaultdict
import yaml
import re
MIN_REVISION_LENGTH = 7
class PackageInfo:
def __init__(self):
path = os.path.abspath(__file__)
with open(os.path.join(
os.path.dirname(path), '../package_data.yaml')) as f:
package_data = yaml.load(f)
# exhaustive list of MITx repos
self.repo_dirs = package_data['repo_dirs']
self.cmd_list = {
'pre': package_data['pre_checkout_regex'],
'post': package_data['post_checkout_regex']}
self.service_repos = package_data['service_repos']
def repo_from_name(self, name):
repos = []
for repo_root in self.repo_dirs:
if os.path.basename(repo_root) == name:
repos.append(self.repo_dirs[repo_root])
if len(repos) > 1:
raise Exception('Multiple repos found for name')
elif len(repos) == 0:
raise Exception('Repo not found for name')
else:
return repos[0].split('/')[1]
def org_from_name(self, name):
repos = []
for repo_root in self.repo_dirs:
if os.path.basename(repo_root) == name:
repos.append(self.repo_dirs[repo_root])
if len(repos) > 1:
raise Exception('Multiple repos found for name')
elif len(repos) == 0:
raise Exception('Repo not found for name')
else:
return repos[0].split('/')[0]
def pre_post_actions(self, pkgs):
"""
Returns a dictionary containing a list of
commands that need to be executed
pre and post checkout for one or more package names.
return({
'pre': [ 'cmd1', 'cmd2', ... ],
'post': [ 'cmd1', 'cmd2', ... ]
})
"""
cmds = defaultdict(list)
for stage in ['pre', 'post']:
for regex, cmd_templates in self.cmd_list[stage]:
for pkg in pkgs:
match = re.match(regex, pkg)
if match is None:
continue
cmds[stage].extend(
cmd.format(*match.groups(), **match.groupdict())
for cmd in cmd_templates
if cmd not in cmds[stage]
)
return(cmds)
def installed_packages(self):
"""
Returns the list of PackageDescriptors for the packages
installed on the system.
This is determined by looking at every package directory
we know about and checking its revision.
"""
with settings(hide('running'), warn_only=True):
revisions = sudo(
"""
for path in {0}; do
if [[ -d "$path/.git" ]]; then
echo $path $(cd $path && git rev-parse HEAD 2>/dev/null)
fi
done
""".format(' '.join(self.repo_dirs))).split('\n')
packages = [revline.strip().split(' ') for revline in revisions
if ' ' in revline.strip()]
return [PackageDescriptor(os.path.basename(path), revision)
for path, revision in packages]
class PackageDescriptor(object):
def __init__(self, name, revision):
if revision != 'absent' and len(revision) < MIN_REVISION_LENGTH:
raise Exception("Must use at least {0} characters "
"in revision to pseudo-guarantee uniqueness".format(
MIN_REVISION_LENGTH))
self.name = name
# Find the repo_root by name
# This assumes that basename(repo_root) is unique
# for all repo_roots. If this is not true an exception
# will be raised
pkg_info = PackageInfo()
repo_roots = []
for repo_dir in pkg_info.repo_dirs.keys():
if os.path.basename(repo_dir) == name:
repo_roots.append(repo_dir)
if len(repo_roots) != 1:
raise Exception("Unable to look up directory for repo")
self.repo_root = repo_roots[0]
self.repo_name = pkg_info.repo_dirs[self.repo_root].split('/')[1]
self.repo_org = pkg_info.repo_dirs[self.repo_root].split('/')[0]
self.revision = revision
from fabric.api import env
from output import notify
def noopable(fun):
if env.noop:
def noop(*args, **kwargs):
notify("Would have called: {fun}({args}, {kwargs})".format(
fun=fun.__name__,
args=", ".join(repr(a) for a in args),
kwargs=", ".join("=".join([key, repr(val)]) for key, val in kwargs.items()),
))
return noop
else:
return fun
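# Example (illustrative): running any noopable-wrapped task after the "noop"
# task only reports what would have been executed, e.g.
#
#   fab noop tag:environment=prod,role=app clean.apt_get_clean
#
# prints something like "Would have called: sudo('apt-get clean', )" for each
# host instead of running the command.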
from fabric.api import task, env, abort
from fabric.colors import red
import os
import re
@task(default=True)
def ssh(user=None):
if user is None:
user = env.user
if len(env.hosts) != 1:
abort(red('Please specify one host for ssh'))
for host in env.hosts:
host = re.sub(':(\d+)', r' -p\1 ', host)
os.system('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -l {0} {1}'.format(user, host))
from fabric.api import abort, env, fastprint
from fabric.colors import green, red, white
import subprocess
import shlex
import atexit
import time
import boto
import re
import socket
DOMAIN = 'm.edx.org:22'
class SSHTunnel:
port = 9000 # default starting port
tunnels = {}
def __init__(self, host, phost, user, lport=None):
if lport is not None:
SSHTunnel.port = lport
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
try:
s.connect(('localhost', SSHTunnel.port))
s.shutdown(2)
# connection was successful so try a new port
SSHTunnel.port += 1
except:
self.lport = SSHTunnel.port
break
phost = re.sub(':(\d+)', r' -p\1 ', phost)
identities = ''
if env.key_filename:
# could be a list or a string
if isinstance(env.key_filename, basestring):
lst = [env.key_filename]
else:
lst = env.key_filename
identities = ' '.join('-i {f} '.format(f=f) for f in lst)
cmd = 'ssh -o UserKnownHostsFile=/dev/null ' \
'{ids}' \
'-o StrictHostKeyChecking=no -vAN -L {lport}:{host} ' \
'{user}@{phost}'.format(ids=identities, lport=self.lport,
host=host, user=user, phost=phost)
self.p = subprocess.Popen(shlex.split(cmd),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
start_time = time.time()
atexit.register(self.p.kill)
while not 'Entering interactive session' in self.p.stderr.readline():
if time.time() > start_time + 10:
abort(red("Unable to create ssh tunnel - `{0}`".format(cmd)))
def local(self):
return 'localhost:{lport}'.format(lport=self.lport)
def setup_tunnel(all_hosts, check_tag=True,
proxy_name=None, user=None, lport=None):
"""
Given all_hosts, check whether any of them sit behind proxy hosts (when
check_tag is True) and return a modified list of hosts, with
localhost:port entries substituted for the tunneled hosts.
"""
if user is None:
user = env.user
ec2 = boto.connect_ec2()
# the proxy hosts
proxies = {}
if check_tag:
for res in ec2.get_all_instances(filters={'tag-key': 'proxy'}):
for inst in res.instances:
host = ".".join([inst.tags['Name'], DOMAIN])
proxy = ".".join([inst.tags['proxy'], DOMAIN])
proxies.update({host: proxy})
else:
if not proxy_name:
raise Exception("Must specify a proxy_host")
proxies = {host: proxy_name for host in all_hosts}
# local tunneling ip:port
tunnels = {}
for host in all_hosts:
if host in proxies and host not in SSHTunnel.tunnels:
t = SSHTunnel(host=host, phost=proxies[host],
user=user, lport=lport)
tunnels[host] = t.local()
fastprint(green('created {0} for {1} via {2}'.format(tunnels[host],
host, proxies[host])) + white('\n'))
SSHTunnel.tunnels.update(tunnels)
return([SSHTunnel.tunnels[host] if host in SSHTunnel.tunnels else host
for host in all_hosts])
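# Example usage (illustrative; the hostname is a placeholder): resolve a
# proxied host to a local tunnel endpoint before running commands against it.
#
#   host_strings = setup_tunnel(['app-1.m.edx.org:22'])
#   # -> e.g. ['localhost:9000'] if the host sits behind a proxy-tagged
#   #    instance; otherwise the original host string is returned unchanged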
from fabric.api import task, sudo, abort, parallel, runs_once, execute
from fabric.api import settings, hide
from fabric.operations import put
from fabric.utils import fastprint
from safety import noopable
from fabric.colors import blue, red
from fabric.contrib import console
from output import unsquelched
from timestamps import no_ts
from choose import multi_choose_with_input
import json
import tempfile
status_file = '/opt/wwc/status_message.json'
@task(default=True)
@runs_once
def status():
"""
Drops {0}, a json formatted file that contains a
status message that will be displayed to all users on the
courseware for a single course, or for all courses
if 'global' is set.
Message(s) are entered or removed interactively on the console.
Example usage:
$ fab groups:prod_edx status
""".format(status_file)
with hide('running', 'stdout', 'stderr', 'warnings'):
env_json = sudo("cat /opt/wwc/lms-xml.env.json")
course_listings = json.loads(env_json)['COURSE_LISTINGS']
course_ids = [course_id for course_list in course_listings.itervalues()
for course_id in course_list]
course_ids = ['global'] + course_ids
with no_ts():
course_status = None
with settings(warn_only=True):
cur_status = noopable(sudo)('cat {0}'.format(status_file))
try:
course_status = json.loads(cur_status)
# add empty entries for courses not in the list
empty_entries = set(course_ids) - set(course_status.keys())
course_status.update({entry: '' for entry in list(empty_entries)})
except ValueError:
fastprint(red("Not a valid json file, overwritting\n"))
if course_status is None:
course_status = {course: '' for course in course_ids}
new_status = multi_choose_with_input(
'Set the status message, blank to disable:',
course_status)
if new_status is not None:
# remove empty entries
new_status = {entry: new_status[entry]
for entry in new_status if len(new_status[entry]) > 1}
with unsquelched():
if not console.confirm(
'Setting new status message:\n{0}'.format(
blue(str(new_status), bold=True)),
default=False):
abort('Operation cancelled by user')
with tempfile.NamedTemporaryFile(delete=True) as f:
f.write(json.dumps(new_status))
f.flush()
execute(update_status, f.name)
else:
abort('Operation cancelled by user')
@task
@runs_once
def remove():
"""
Removes {0}, a status banner that is displayed to all
users on the front page.
""".format(status_file)
with unsquelched():
if not console.confirm(
blue('Remove /opt/wwc/status_message.html?', bold=True)):
abort('Operation cancelled by user')
execute(remove_status)
@task
@parallel
def remove_status():
noopable(sudo)('rm -f {0}'.format(status_file))
@task
@parallel
def update_status(fjson):
print status_file
noopable(put)(fjson, status_file, use_sudo=True)
from datetime import datetime
from contextlib import contextmanager
import sys
@contextmanager
def no_ts():
sys.stdout.ts = False
yield
sys.stdout.ts = True
class TSWrapper(object):
def __init__(self, stream):
self.o = stream
self.files = []
self.files.append(self.o)
self.newline = True
self.ts = True
def write(self, s):
d = datetime.now()
if self.ts:
buf = ""
lines = s.splitlines(True)
for line in lines:
if self.newline:
buf += d.strftime('[ %Y%m%d %H:%M:%S ] : {0}'.format(line))
else:
buf += str(line)
if line[-1] == '\n':
self.newline = True
else:
self.newline = False
else:
buf = s
for fh in self.files:
fh.write(buf)
fh.flush()
def log_to_file(self, fn):
fp = open(fn, 'a')
self.files.append(fp)
def __getattr__(self, attr):
return getattr(self.o, attr)
@@ -5,18 +5,5 @@
 [defaults]
 jinja2_extensions=jinja2.ext.do
-hash_behaviour=merge
 host_key_checking = False
+roles_path=../../../ansible-roles
-# These are environment-specific defaults
-forks=10
-transport=ssh
-hostfile=./ec2.py
-extra_vars='key=deployment region=us-west-1'
-user=ubuntu
-[ssh_connection]
-# example from https://github.com/ansible/ansible/blob/devel/examples/ansible.cfg
-ssh_args= -o ControlMaster=auto -o ControlPersist=60s -o ControlPath=/tmp/ansible-ssh-%h-%p-%r
-scp_if_ssh=True
# Copyright 2013 John Jarvis <john@jarv.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import time
import json
try:
import boto.sqs
from boto.exception import NoAuthHandlerFound
except ImportError:
print "Boto is required for the sqs_notify callback plugin"
raise
class CallbackModule(object):
"""
This Ansible callback plugin sends task events
to SQS.
The following vars must be set in the environment:
ANSIBLE_ENABLE_SQS - enables the callback module
SQS_REGION - AWS region to connect to
SQS_MSG_PREFIX - Additional data that will be put
on the queue (optional)
The following events are put on the queue
- FAILURE events
- OK events
- TASK events
- START events
"""
def __init__(self):
self.start_time = time.time()
if 'ANSIBLE_ENABLE_SQS' in os.environ:
self.enable_sqs = True
if not 'SQS_REGION' in os.environ:
print 'ANSIBLE_ENABLE_SQS enabled but SQS_REGION ' \
'not defined in environment'
sys.exit(1)
self.region = os.environ['SQS_REGION']
try:
self.sqs = boto.sqs.connect_to_region(self.region)
except NoAuthHandlerFound:
print 'ANSIBLE_ENABLE_SQS enabled but cannot connect ' \
'to AWS due to invalid credentials'
sys.exit(1)
if not 'SQS_NAME' in os.environ:
print 'ANSIBLE_ENABLE_SQS enabled but SQS_NAME not ' \
'defined in environment'
sys.exit(1)
self.name = os.environ['SQS_NAME']
self.queue = self.sqs.create_queue(self.name)
if 'SQS_MSG_PREFIX' in os.environ:
self.prefix = os.environ['SQS_MSG_PREFIX']
else:
self.prefix = ''
self.last_seen_ts = {}
else:
self.enable_sqs = False
def runner_on_failed(self, host, res, ignore_errors=False):
if self.enable_sqs:
if not ignore_errors:
self._send_queue_message(res, 'FAILURE')
def runner_on_ok(self, host, res):
if self.enable_sqs:
# don't send the setup results
if res['invocation']['module_name'] != "setup":
self._send_queue_message(res, 'OK')
def playbook_on_task_start(self, name, is_conditional):
if self.enable_sqs:
self._send_queue_message(name, 'TASK')
def playbook_on_play_start(self, pattern):
if self.enable_sqs:
self._send_queue_message(pattern, 'START')
def playbook_on_stats(self, stats):
if self.enable_sqs:
d = {}
delta = time.time() - self.start_time
d['delta'] = delta
for s in ['changed', 'failures', 'ok', 'processed', 'skipped']:
d[s] = getattr(stats, s)
self._send_queue_message(d, 'STATS')
def _send_queue_message(self, msg, msg_type):
if self.enable_sqs:
from_start = time.time() - self.start_time
payload = {msg_type: msg}
payload['TS'] = from_start
payload['PREFIX'] = self.prefix
# update the last seen timestamp for
# the message type
self.last_seen_ts[msg_type] = time.time()
if msg_type in ['OK', 'FAILURE']:
# report the delta between the OK/FAILURE and
# last TASK
if 'TASK' in self.last_seen_ts:
from_task = \
self.last_seen_ts[msg_type] - self.last_seen_ts['TASK']
payload['delta'] = from_task
for output in ['stderr', 'stdout']:
if output in payload[msg_type]:
# only keep the last 1000 characters
# of stderr and stdout
if len(payload[msg_type][output]) > 1000:
payload[msg_type][output] = "(clipping) ... " \
+ payload[msg_type][output][-1000:]
self.sqs.send_message(self.queue, json.dumps(payload))
---
# This playbook demonstrates how to use the ansible cloudformation module to launch an AWS CloudFormation stack.
#
# This module requires that the boto python library is installed, and that you have your AWS credentials
# in $HOME/.boto
# The thought here is to bring up a bare infrastructure with CloudFormation, but use ansible to configure it.
# I generally do this in 2 different playbook runs so as to allow the ec2.py inventory to be updated.
# This module also uses "complex arguments", which were introduced in ansible 1.1, allowing you to specify the
# CloudFormation template parameters.
# This example launches a 3 node AutoScale group, with a security group, and an InstanceProfile with root permissions.
# If a stack does not exist, it will be created. If it does exist and the template file has changed, the stack will be updated.
# If the parameters are different, the stack will also be updated.
# CloudFormation stacks can take a while to provision; if you are curious about their status, use the AWS
# web console or one of the CloudFormation CLIs.
# Example update -- try first launching the stack with 3 as the ClusterSize. After it is launched, change it to 4
# and run the playbook again.
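# Example invocation (illustrative; the playbook filename and the variable
# values are placeholders for this sketch):
#
#   ansible-playbook -i localhost, cloudformation.yml \
#     -e "name=edx-sandbox region=us-east-1 key=deployment group=edx-sandbox"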
- name: provision stack
hosts: localhost
connection: local
gather_facts: false
# Launch the cloudformation-example.json template. Register the output.
tasks:
- name: edX configuration
cloudformation: >
stack_name="$name" state=present
region=$region disable_rollback=false
template=../cloudformation_templates/edx-server-multi-instance.json
args:
template_parameters:
KeyName: $key
InstanceType: m1.small
GroupTag: $group
register: stack
- name: show stack outputs
debug: msg="My stack outputs are ${stack.stack_outputs}"
# config file for ansible -- http://ansible.github.com
# nearly all parameters can be overridden in ansible-playbook or with command line flags
# ansible will read ~/.ansible.cfg or /etc/ansible/ansible.cfg, whichever it finds first
[defaults]
jinja2_extensions=jinja2.ext.do
host_key_checking=False
roles_path=../../../ansible-roles
../ansible.cfg
\ No newline at end of file
../callback_plugins
\ No newline at end of file
@@ -2,7 +2,14 @@
 hosts: all
 sudo: True
 gather_facts: True
+vars:
+enable_datadog: True
+enable_splunkforwarder: True
+vars_files:
+- ["{{ secure_vars }}", "dummy.yml"]
 roles:
-- common
-- supervisor
 - certs
+- role: datadog
+when: enable_datadog
+- role: splunkforwarder
+when: enable_splunkforwarder
@@ -2,6 +2,14 @@
 hosts: all
 sudo: True
 gather_facts: True
+vars:
+enable_datadog: True
+enable_splunkforwarder: True
+vars_files:
+- ["{{ secure_vars }}", "dummy.yml"]
 roles:
-- gh_users
 - common
+- role: datadog
+when: enable_datadog
+- role: splunkforwarder
+when: enable_splunkforwarder
# Creates a cname for a sandbox ec2 instance
- name: Creates a CNAME
hosts: all
gather_facts: False
tasks:
- name: Add DNS name
route53:
overwrite: yes
command: create
zone: "{{ dns_zone }}"
type: CNAME
ttl: 300
record: "{{ dns_name }}.{{ dns_zone }}"
value: "{{ sandbox }}"
# This is a utility play to initialize the mysql dbs for the following
# roles:
# - edxapp
# - xqueue
# - ora
# - discern
#
# The mysql root user MUST be passed in as extra vars for
# at least one of the databases.
#
# the environment and deployment must be passed in as COMMON_ENVIRONMENT
# and COMMON_DEPLOYMENT. These two vars should be set in the secret
# var file for the corresponding vpc stack
#
# Example invocation:
#
# Create the databases for edxapp and xqueue:
#
# ansible-playbook -i localhost, create_db_users.yml -e@/path/to/secrets.yml -e "edxapp_db_root_user=root xqueue_db_root_user=root"
#
#
- name: Create all databases on the edX stack
hosts: all
gather_facts: False
vars:
# These should be set to the root user for the
# db, if left 'None' the database will be skipped
edxapp_db_root_user: 'None'
xqueue_db_root_user: 'None'
ora_db_root_user: 'None'
discern_db_root_user: 'None'
vars_prompt:
# passwords use vars_prompt so they aren't in the
# bash history
- name: "edxapp_db_root_pass"
prompt: "Password for edxapp root mysql user (enter to skip)"
default: "None"
private: True
- name: "xqueue_db_root_pass"
prompt: "Password for xqueue root mysql user (enter to skip)"
default: "None"
private: True
- name: "ora_db_root_pass"
prompt: "Password for ora root mysql user (enter to skip)"
default: "None"
private: True
- name: "discern_db_root_pass"
prompt: "Password for discern root mysql user (enter to skip)"
default: "None"
private: True
tasks:
- fail: msg="COMMON_ENVIRONMENT and COMMON_DEPLOYMENT need to be defined to use this play"
when: COMMON_ENVIRONMENT is not defined or COMMON_DEPLOYMENT is not defined
- name: create mysql databases for the edX stack
mysql_db: >
db={{ item[0] }}{{ item[1].db_name }}
state=present
login_host={{ item[1].db_host }}
login_user={{ item[1].db_user }}
login_password={{ item[1].db_pass }}
encoding=utf8
when: item[1].db_user != 'None'
with_nested:
- ['{{ COMMON_ENVIRONMENT }}_{{ COMMON_DEPLOYMENT }}_test_', '']
-
# These defaults are needed, otherwise ansible will throw
# variable undefined errors for when they are not defined
# in secret vars
- db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ edxapp_db_root_pass }}"
- db_name: "{{ XQUEUE_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ XQUEUE_MYSQL_HOST|default('None') }}"
db_user: "{{ xqueue_db_root_user }}"
db_pass: "{{ xqueue_db_root_pass }}"
- db_name: "{{ ORA_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ ORA_MYSQL_HOST|default('None') }}"
db_user: "{{ ora_db_root_user }}"
db_pass: "{{ ora_db_root_pass }}"
- name: assign mysql user permissions for db user
mysql_user:
name: "{{ item.db_user_to_modify }}"
priv: "{{ item.db_name }}.*:SELECT,INSERT,UPDATE,DELETE"
password: "{{ item.db_user_to_modify_pass }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
host: '%'
when: item.db_user != 'None'
with_items:
# These defaults are needed, otherwise ansible will throw
# variable undefined errors for when they are not defined
# in secret vars
- db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user|default('None') }}"
db_pass: "{{ edxapp_db_root_pass|default('None') }}"
db_user_to_modify: "{{ EDXAPP_MYSQL_USER }}"
db_user_to_modify_pass: "{{ EDXAPP_MYSQL_PASSWORD }}"
- db_name: "{{ XQUEUE_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ XQUEUE_MYSQL_HOST|default('None') }}"
db_user: "{{ xqueue_db_root_user|default('None') }}"
db_pass: "{{ xqueue_db_root_pass|default('None') }}"
db_user_to_modify: "{{ XQUEUE_MYSQL_USER }}"
db_user_to_modify_pass: "{{ XQUEUE_MYSQL_PASSWORD }}"
- db_name: "{{ ORA_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ ORA_MYSQL_HOST|default('None') }}"
db_user: "{{ ora_db_root_user|default('None') }}"
db_pass: "{{ ora_db_root_pass|default('None') }}"
db_user_to_modify: "{{ ORA_MYSQL_USER }}"
db_user_to_modify_pass: "{{ ORA_MYSQL_PASSWORD }}"
# The second call to mysql_user needs to have append_privs set to
# yes otherwise it will overwrite the previous run.
# This means that both tasks will report changed on every ansible
# run
- name: assign mysql user permissions for db test user
mysql_user:
append_privs: yes
name: "{{ item.db_user_to_modify }}"
priv: "{{ COMMON_ENVIRONMENT }}_{{ COMMON_DEPLOYMENT }}_test_{{ item.db_name }}.*:ALL"
password: "{{ item.db_user_to_modify_pass }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
host: '%'
when: item.db_user != 'None'
with_items:
# These defaults are needed, otherwise ansible will throw
# variable undefined errors for when they are not defined
# in secret vars
- db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user|default('None') }}"
db_pass: "{{ edxapp_db_root_pass|default('None') }}"
db_user_to_modify: "{{ EDXAPP_MYSQL_USER }}"
db_user_to_modify_pass: "{{ EDXAPP_MYSQL_PASSWORD }}"
- db_name: "{{ XQUEUE_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ XQUEUE_MYSQL_HOST|default('None') }}"
db_user: "{{ xqueue_db_root_user|default('None') }}"
db_pass: "{{ xqueue_db_root_pass|default('None') }}"
db_user_to_modify: "{{ XQUEUE_MYSQL_USER }}"
db_user_to_modify_pass: "{{ XQUEUE_MYSQL_PASSWORD }}"
- db_name: "{{ ORA_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ ORA_MYSQL_HOST|default('None') }}"
db_user: "{{ ora_db_root_user|default('None') }}"
db_pass: "{{ ora_db_root_pass|default('None') }}"
db_user_to_modify: "{{ ORA_MYSQL_USER }}"
db_user_to_modify_pass: "{{ ORA_MYSQL_PASSWORD }}"
# Creates a single user on a server
# Example: ansible-playbook -i "jarv.m.sandbox.edx.org," ./create_user.yml -e "user=jarv"
- name: Create a single user
hosts: all
sudo: True
gather_facts: False
pre_tasks:
- fail: msg="You must pass a user into this play"
when: not user
- set_fact:
gh_users:
- "{{ user }}"
roles:
- gh_users
- name: Deploy demo course
hosts: all
sudo: True
gather_facts: True
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- demo
- role: datadog
when: enable_datadog
- role: splunkforwarder
when: enable_splunkforwarder
- name: Deploy discern
hosts: all
sudo: True
gather_facts: True
roles:
- common
- supervisor
- discern
- name: Deploy edxapp
hosts: all
sudo: True
gather_facts: True
roles:
- common
- supervisor
- edxapp
- name: Deploy forum
hosts: all
sudo: True
gather_facts: True
roles:
- common
- supervisor
- forum
- name: Configure instance(s)
hosts: all
sudo: True
gather_facts: False
vars_files:
- roles/edxapp/defaults/main.yml
- roles/ora/defaults/main.yml
- roles/xqueue/defaults/main.yml
- roles/xserver/defaults/main.yml
roles:
- common
- role: nginx
nginx_sites:
- cms
- lms
- ora
- xqueue
- xserver
nginx_default_sites:
- lms
- name: Deploy xqueue
hosts: all
sudo: True
gather_facts: True
roles:
- common
- supervisor
- role: xqueue
tags: ['xqueue']
- name: Deploy xserver
hosts: all
sudo: True
gather_facts: True
roles:
- common
- supervisor
- role: xserver
tags: ['xserver']
# ansible-playbook -c ssh -vvvv --user=ubuntu -i ec2.py deployer.yml -e "@gh_users.yml" -e "@/path/to/secure/ansible/vars/hotg.yml" -e "@/path/to/configuration-secure/ansible/vars/common/common.yml" --limit="tag_aws_cloudformation_stack-name_<admin_stack_name>"
# You will need to create a gh_users.yml that contains the github names of users that should have login access to the machines.
# Setup user login on the bastion
- name: Configure Bastion
hosts: tag_role_bastion
sudo: True
gather_facts: False
roles:
- gh_users
# Configure an admin instance with jenkins and asgard.
- name: Configure instance(s)
hosts: tag_role_admin
sudo: True
gather_facts: True
roles:
- common
- gh_users
- jenkins_master
- hotg
@@ -2,7 +2,7 @@
 hosts: all
 sudo: True
 gather_facts: True
+vars_files:
+- ["{{ secure_vars }}", "dummy.yml"]
 roles:
-- common
-- supervisor
 - devpi
- name: Deploy discern
hosts: all
sudo: True
gather_facts: True
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- role: nginx
nginx_sites:
- discern
- discern
- role: datadog
when: enable_datadog
- role: splunkforwarder
when: enable_splunkforwarder
---
# dummy var file
# This file is needed as a fall through
# for vars_files
dummy_var: True
- name: Deploy the edx_ansible role
hosts: all
sudo: True
gather_facts: False
roles:
- edx_ansible
@@ -7,7 +7,6 @@
 migrate_db: "yes"
 openid_workaround: True
 roles:
-- common
 - role: nginx
 nginx_sites:
 - cms
@@ -15,14 +14,15 @@
 - ora
 - xqueue
 - xserver
+nginx_default_sites:
+- lms
 - edxlocal
-- supervisor
 - mongo
+- { role: 'edxapp', celery_worker: True }
 - edxapp
 - role: demo
 tags: ['demo']
 - { role: 'rabbitmq', rabbitmq_ip: '127.0.0.1' }
-- { role: 'edxapp', celery_worker: True }
 - oraclejdk
 - elasticsearch
 - forum
@@ -31,3 +31,4 @@
 - ora
 - discern
 - certs
+- edx_ansible
@@ -14,19 +14,19 @@
 - "{{ secure_dir }}/vars/common/common.yml"
 - "{{ secure_dir }}/vars/users.yml"
 roles:
-- common
-- supervisor
 - datadog
 - role: nginx
 nginx_sites:
 - lms
 - cms
 - lms-preview
+nginx_default_sites:
+- lms
 - role: 'edxapp'
 EDXAPP_LMS_NGINX_PORT: 80
 EDXAPP_CMS_NGINX_PORT: 80
 edxapp_lms_env: 'lms.envs.load_test'
-edx_platform_commit: 'sarina/install-datadog'
+edx_platform_version: 'sarina/install-datadog'
 - hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_worker
 sudo: True
 vars_files:
@@ -34,25 +34,24 @@
 - "{{ secure_dir }}/vars/common/common.yml"
 - "{{ secure_dir }}/vars/users.yml"
 roles:
-- common
-- supervisor
 - datadog
 - role: nginx
 nginx_sites:
 - lms
 - cms
 - lms-preview
+nginx_default_sites:
+- lms
 - role: 'edxapp'
 edxapp_lms_env: 'lms.envs.load_test'
 celery_worker: True
-edx_platform_commit: 'sarina/install-datadog'
+edx_platform_version: 'sarina/install-datadog'
 #- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_xserver
 # sudo: True
 # vars_files:
 # - "{{ secure_dir }}/vars/dev/dev2.yml"
 # - "{{ secure_dir }}/vars/users.yml"
 # roles:
-# - common
 # - nginx
 # - xserver
 #- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_rabbitmq
@@ -62,7 +61,6 @@
 # - "{{ secure_dir }}/vars/dev/dev2.yml"
 # - "{{ secure_dir }}/vars/users.yml"
 # roles:
-# - common
 # - rabbitmq
 #- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_xqueue
 # sudo: True
@@ -70,6 +68,5 @@
 # - "{{ secure_dir }}/vars/dev/dev2.yml"
 # - "{{ secure_dir }}/vars/users.yml"
 # roles:
-# - common
 # - nginx
 # - xqueue
@@ -6,17 +6,17 @@
 - "{{ secure_dir }}/vars/common/common.yml"
 - "{{ secure_dir }}/vars/users.yml"
 roles:
-- common
-- supervisor
 - datadog
 - role: nginx
 nginx_sites:
 - lms
 - cms
 - lms-preview
+nginx_default_sites:
+- lms
 - role: 'edxapp'
 edxapp_lms_env: 'lms.envs.load_test'
-edx_platform_commit: 'release'
+edx_platform_version: 'release'
 - splunkforwarder
 - hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_worker
 sudo: True
@@ -25,18 +25,18 @@
 - "{{ secure_dir }}/vars/common/common.yml"
 - "{{ secure_dir }}/vars/users.yml"
 roles:
-- common
-- supervisor
 - datadog
 - role: nginx
 nginx_sites:
 - lms
 - cms
 - lms-preview
+nginx_default_sites:
+- lms
 - role: 'edxapp'
 edxapp_lms_env: 'lms.envs.load_test'
 celery_worker: True
-edx_platform_commit: 'release'
+edx_platform_version: 'release'
 - splunkforwarder
 - hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_xserver
 sudo: True
@@ -44,8 +44,6 @@
 - "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
 - "{{ secure_dir }}/vars/users.yml"
 roles:
-- common
-- supervisor
 - role: nginx
 nginx_sites:
 - xserver
@@ -58,8 +56,6 @@
 - "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
 - "{{ secure_dir }}/vars/users.yml"
 roles:
-- common
-- supervisor
 - rabbitmq
 - splunkforwarder
 - hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_xqueue
@@ -68,8 +64,6 @@
 - "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
 - "{{ secure_dir }}/vars/users.yml"
 roles:
-- common
-- supervisor
 - role: nginx
 nginx_sites:
 - xqueue
@@ -81,6 +75,5 @@
 - "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
 - "{{ secure_dir }}/vars/users.yml"
 roles:
-- common
 - role: 'mongo'
 mongo_clustered: true
@@ -22,6 +22,8 @@
 - xqueue
 - xserver
 - ora
+nginx_default_sites:
+- lms
 - edxlocal
 - mongo
 - edxapp
...
@@ -4,8 +4,6 @@
 sudo: True
 gather_facts: False
 roles:
-- common
-- supervisor
 - role: nginx
 nginx_sites:
 - devpi
...
@@ -6,8 +6,6 @@
 - "{{ secure_dir }}/vars/users.yml"
 gather_facts: True
 roles:
-- common
-- supervisor
 - role: virtualenv
 virtualenv_user: "notifier"
 virtualenv_user_home: "/opt/wwc/notifier"
...@@ -22,8 +20,6 @@ ...@@ -22,8 +20,6 @@
- "{{ secure_dir }}/vars/users.yml" - "{{ secure_dir }}/vars/users.yml"
gather_facts: True gather_facts: True
roles: roles:
- common
- supervisor
- role: virtualenv - role: virtualenv
virtualenv_user: "notifier" virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier" virtualenv_user_home: "/opt/wwc/notifier"
...@@ -38,8 +34,6 @@ ...@@ -38,8 +34,6 @@
- "{{ secure_dir }}/vars/users.yml" - "{{ secure_dir }}/vars/users.yml"
gather_facts: True gather_facts: True
roles: roles:
- common
- supervisor
- role: virtualenv - role: virtualenv
virtualenv_user: "notifier" virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier" virtualenv_user_home: "/opt/wwc/notifier"
...@@ -54,8 +48,6 @@ ...@@ -54,8 +48,6 @@
- "{{ secure_dir }}/vars/users.yml" - "{{ secure_dir }}/vars/users.yml"
gather_facts: True gather_facts: True
roles: roles:
- common
- supervisor
- role: virtualenv - role: virtualenv
virtualenv_user: "notifier" virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier" virtualenv_user_home: "/opt/wwc/notifier"
...@@ -71,8 +63,6 @@ ...@@ -71,8 +63,6 @@
gather_facts: True gather_facts: True
vars: vars:
roles: roles:
- common
- supervisor
- role: virtualenv - role: virtualenv
virtualenv_user: "notifier" virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier" virtualenv_user_home: "/opt/wwc/notifier"
......
...@@ -2,33 +2,91 @@ ...@@ -2,33 +2,91 @@
hosts: localhost hosts: localhost
connection: local connection: local
gather_facts: False gather_facts: False
pre_tasks: vars:
keypair: continuous-integration
instance_type: m1.medium
security_group: sandbox
# ubuntu 12.04
ami: ami-d0f89fb9
region: us-east-1
zone: us-east-1b
instance_tags:
environment: sandbox
github_username: temp
Name: sandbox-temp
source: provisioning-script
owner: temp
root_ebs_size: 50
dns_name: temp
dns_zone: m.sandbox.edx.org
name_tag: sandbox-temp
elb: false
roles: roles:
- role: launch_ec2 - role: launch_ec2
keypair: "{{ keypair }}" keypair: "{{ keypair }}"
instance_type: "{{ instance_type }}" instance_type: "{{ instance_type }}"
security_group: "{{ security_group }}" security_group: "{{ security_group }}"
ami_image: "{{ ami }}" ami: "{{ ami }}"
region: "{{ region }}" region: "{{ region }}"
instance_tags: "{{ instance_tags }}" instance_tags: "{{ instance_tags }}"
root_ebs_size: "{{ root_ebs_size }}" root_ebs_size: "{{ root_ebs_size }}"
dns_name: "{{ dns_name }}" dns_name: "{{ dns_name }}"
dns_zone: "{{ dns_zone }}" dns_zone: "{{ dns_zone }}"
zone: "{{ zone }}"
terminate_instance: true terminate_instance: true
instance_profile_name: sandbox
- name: Configure instance(s) - name: Configure instance(s)
hosts: launched hosts: launched
sudo: True sudo: True
gather_facts: False gather_facts: False
vars:
elb: false
pre_tasks: pre_tasks:
- name: Wait for cloud-init to finish - name: Wait for cloud-init to finish
wait_for: > wait_for: >
path=/var/log/cloud-init.log path=/var/log/cloud-init.log
timeout=15 timeout=15
search_regex="final-message" search_regex="final-message"
vars_files:
- roles/edxapp/defaults/main.yml
- roles/ora/defaults/main.yml
- roles/xqueue/defaults/main.yml
- roles/xserver/defaults/main.yml
- roles/forum/defaults/main.yml
roles: roles:
# rerun common to set the hostname # rerun common to set the hostname, nginx to set basic auth
- common - common
- role: nginx
nginx_sites:
- cms
- lms
- ora
- xqueue
- xserver
- forum
nginx_default_sites:
- lms
# gh_users hash must be passed # gh_users hash must be passed
# in as a -e variable # in as a -e variable
- gh_users - gh_users
post_tasks:
- name: get instance id for elb registration
local_action:
module: ec2_lookup
region: us-east-1
tags:
Name: "{{ name_tag }}"
register: ec2_info
when: elb
sudo: False
- name: register instance into an elb if one was provided
local_action:
module: ec2_elb_local_1.3
region: "{{ region }}"
instance_id: "{{ ec2_info.instance_ids[0] }}"
state: present
ec2_elbs:
- "{{ elb }}"
when: elb
sudo: False
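The two plays above launch a sandbox EC2 instance from the vars block (keypair, instance type, AMI, DNS name, tags) and then configure it, optionally registering it with an ELB. A hypothetical invocation sketch follows; the playbook filename and the extra-vars file path are placeholders, not taken from this commit:

ansible-playbook -i "localhost," <this-playbook>.yml -e @/path/to/extra_vars.yml

Here the extra-vars file would override the defaults shown above (dns_name, instance_tags, and so on) and must supply the gh_users hash that the gh_users role expects. Because elb defaults to false, the ELB lookup and registration post-tasks run only when elb is overridden with the name of an existing load balancer.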
---
# This playbook is to configure
# the official edX sandbox instance
# sandbox.edx.org
- name: Configure instance(s)
hosts: tag_Name_edx-sandbox
sudo: True
gather_facts: True
vars:
migrate_db: "yes"
mysql5_workaround: True
roles:
- common
- supervisor
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- edxlocal
- mongo
- edxapp
- rabbitmq
- oraclejdk
- elasticsearch
- { role: 'edxapp', celery_worker: True }
- role: rbenv
rbenv_user: "{{ forum_user }}"
rbenv_dir: "{{ forum_home }}"
rbenv_ruby_version: "{{ forum_ruby_version }}"
- forum
...@@ -2,91 +2,86 @@ ...@@ -2,91 +2,86 @@
- hosts: first_in_tag_role_mongo - hosts: first_in_tag_role_mongo
sudo: True sudo: True
vars_files: vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml" - "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/users.yml" - "{{ secure_dir }}/vars/common/common.yml"
roles: roles:
- common - gh_users
- role: 'mongo' - role: 'mongo'
mongo_create_users: yes mongo_create_users: yes
#- hosts: tag_role_mongo:!first_in_tag_role_mongo #- hosts: tag_role_mongo:!first_in_tag_role_mongo
# sudo: True # sudo: True
# vars_files: # vars_files:
# - "{{ secure_dir }}/vars/dev/feanilsandbox.yml" # - "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
# - "{{ secure_dir }}/vars/users.yml" # - "{{ secure_dir }}/vars/common/common.yml"
# roles: # roles:
# - common # - gh_users
# - mongo # - mongo
- hosts: first_in_tag_role_edxapp - hosts: first_in_tag_role_edxapp
sudo: True sudo: True
serial: 1 serial: 1
vars_files: vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml" - "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml" - "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles: roles:
- common - gh_users
- datadog - datadog
- supervisor
- role: nginx - role: nginx
nginx_sites: nginx_sites:
- lms - lms
- cms - cms
- lms-preview - lms-preview
nginx_default_sites:
- lms
- role: 'edxapp' - role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test' edxapp_lms_env: 'lms.envs.load_test'
migrate_db: 'yes' migrate_db: '{{ RUN_EDXAPP_MIGRATION }}'
openid_workaround: 'yes' openid_workaround: 'yes'
edx_platform_commit: 'HEAD'
- splunkforwarder - splunkforwarder
- hosts: tag_role_edxapp:!first_in_tag_role_edxapp - hosts: tag_role_edxapp:!first_in_tag_role_edxapp
sudo: True sudo: True
serial: 1 serial: 1
vars_files: vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml" - "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml" - "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles: roles:
- common - gh_users
- datadog - datadog
- supervisor
- role: nginx - role: nginx
nginx_sites: nginx_sites:
- lms - lms
- cms - cms
- lms-preview - lms-preview
nginx_default_sites:
- lms
- role: 'edxapp' - role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test' edxapp_lms_env: 'lms.envs.load_test'
edx_platform_commit: 'HEAD'
- splunkforwarder - splunkforwarder
- hosts: tag_role_worker - hosts: tag_role_worker
sudo: True sudo: True
vars_files: vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml" - "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml" - "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles: roles:
- common - gh_users
- datadog - datadog
- supervisor
- role: nginx - role: nginx
nginx_sites: nginx_sites:
- lms - lms
- cms - cms
- lms-preview - lms-preview
nginx_default_sites:
- lms
- role: 'edxapp' - role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test' edxapp_lms_env: 'lms.envs.load_test'
celery_worker: True celery_worker: True
edx_platform_commit: 'HEAD'
- splunkforwarder - splunkforwarder
- hosts: tag_role_xserver - hosts: tag_role_xserver
sudo: True sudo: True
vars_files: vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml" - "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml" - "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles: roles:
- common - gh_users
- supervisor
- role: nginx - role: nginx
nginx_sites: nginx_sites:
- xserver - xserver
...@@ -96,38 +91,32 @@ ...@@ -96,38 +91,32 @@
serial: 1 serial: 1
sudo: True sudo: True
vars_files: vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml" - "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml" - "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles: roles:
- common - gh_users
- supervisor
- rabbitmq - rabbitmq
- splunkforwarder - splunkforwarder
- hosts: first_in_tag_role_xqueue - hosts: first_in_tag_role_xqueue
sudo: True sudo: True
vars_files: vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml" - "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml" - "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles: roles:
- common - gh_users
- supervisor
- role: nginx - role: nginx
nginx_sites: nginx_sites:
- xqueue - xqueue
- role: xqueue - role: xqueue
migrate_db: 'yes' migrate_db: '{{ RUN_XQUEUE_MIGRATION }}'
- splunkforwarder - splunkforwarder
- hosts: tag_role_xqueue:!first_in_tag_role_xqueue - hosts: tag_role_xqueue:!first_in_tag_role_xqueue
sudo: True sudo: True
vars_files: vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml" - "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml" - "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles: roles:
- common - gh_users
- supervisor
- role: nginx - role: nginx
nginx_sites: nginx_sites:
- xqueue - xqueue
...@@ -136,12 +125,10 @@ ...@@ -136,12 +125,10 @@
- hosts: tag_role_forum - hosts: tag_role_forum
sudo: True sudo: True
vars_files: vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml" - "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml" - "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles: roles:
- common - gh_users
- supervisor
- oraclejdk - oraclejdk
- elasticsearch - elasticsearch
- forum - forum
- name: Deploy edxapp
hosts: all
sudo: True
gather_facts: True
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- role: nginx
nginx_sites:
- lms
- cms
nginx_default_sites:
- lms
- edxapp
- role: datadog
when: enable_datadog
- role: splunkforwarder
when: enable_splunkforwarder
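A note on the play above: the vars_files entry is a list of candidates, and Ansible uses the first file in the list that exists, so a real secure vars file can be passed in at run time while dummy.yml serves as the fallback. A hypothetical invocation sketch (the playbook filename and paths are placeholders, not taken from this commit):

ansible-playbook -i ./ec2.py <edxapp-deploy-playbook>.yml -u ubuntu -e "secure_vars=/path/to/secure/vars/my-deployment.yml"

The datadog and splunkforwarder roles are applied only while their enable_* flags remain true.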
- hosts: tag_play_commoncluster:&tag_environment_stage:&tag_deployment_edx
sudo: True
vars_files:
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/stage/stage-edx.yml"
roles:
- common
- gh_users
- oraclejdk
- elasticsearch
- name: Deploy forum
hosts: all
sudo: True
gather_facts: True
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- role: nginx
nginx_sites:
- forum
- forum
- role: datadog
when: enable_datadog
- role: splunkforwarder
when: enable_splunkforwarder
---
# Deploys gerrit onto a server.
#
# Usage:
# ansible-playbook gerrit_deploy.yml -i gerrit_inventory.ini -e "secure_dir=/path/to/secure/dir"
- name: Install and configure gerrit
hosts: gerrit
sudo: True
gather_facts: True
vars_files:
- "{{ secure_dir }}/vars/gerrit.yml"
pre_tasks:
- name: update apt
apt: update_cache=yes
roles:
- gerrit
...@@ -9,4 +9,6 @@ ...@@ -9,4 +9,6 @@
vars: vars:
COMMON_DATA_DIR: "/mnt" COMMON_DATA_DIR: "/mnt"
roles: roles:
- common
- gh_users
- jenkins_master - jenkins_master
...@@ -6,6 +6,8 @@ ...@@ -6,6 +6,8 @@
hosts: jenkins_worker hosts: jenkins_worker
sudo: True sudo: True
gather_facts: True gather_facts: True
vars:
mongo_enable_journal: False
roles: roles:
- common - common
- edxlocal - edxlocal
......
# ansible-playbook -i ec2.py --limit="tag_group_grader:&tag_environment_stage" legacy_ora.yml -e "COMMON_ENV_TYPE=stage secure_dir=/path/to/secure/dir"
- name: Deploy legacy_ora
hosts: all
sudo: True
gather_facts: True
vars:
ora_app_dir: '/opt/wwc'
ora_user: 'www-data'
serial: 1
roles:
- legacy_ora
...@@ -5,8 +5,6 @@ ...@@ -5,8 +5,6 @@
- "{{ secure_dir }}/vars/users.yml" - "{{ secure_dir }}/vars/users.yml"
- "{{ secure_dir }}/vars/mlapi_prod_users.yml" - "{{ secure_dir }}/vars/mlapi_prod_users.yml"
roles: roles:
- common
- supervisor
- discern - discern
sudo: True sudo: True
- hosts: - hosts:
......
...@@ -5,8 +5,6 @@ ...@@ -5,8 +5,6 @@
- "{{ secure_dir }}/vars/users.yml" - "{{ secure_dir }}/vars/users.yml"
- "{{ secure_dir }}/vars/mlapi_sandbox_users.yml" - "{{ secure_dir }}/vars/mlapi_sandbox_users.yml"
roles: roles:
- common
- supervisor
- discern - discern
sudo: True sudo: True
- hosts: - hosts:
......
...@@ -5,8 +5,6 @@ ...@@ -5,8 +5,6 @@
- "{{ secure_dir }}/vars/users.yml" - "{{ secure_dir }}/vars/users.yml"
- "{{ secure_dir }}/vars/mlapi_stage_users.yml" - "{{ secure_dir }}/vars/mlapi_stage_users.yml"
roles: roles:
- common
- supervisor
- discern - discern
sudo: True sudo: True
- hosts: - hosts:
......
...@@ -2,7 +2,10 @@ ...@@ -2,7 +2,10 @@
hosts: all hosts: all
sudo: True sudo: True
gather_facts: True gather_facts: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles: roles:
- common - role: nginx
- supervisor nginx_sites:
- ora
- ora - ora
...@@ -2,5 +2,7 @@ ...@@ -2,5 +2,7 @@
hosts: all hosts: all
sudo: True sudo: True
gather_facts: False gather_facts: False
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles: roles:
- rabbitmq - rabbitmq
- name: Stop all services
hosts: all
sudo: True
gather_facts: False
roles:
- stop_all_edx_services
- name: Deploy worker
hosts: all
sudo: True
gather_facts: True
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- role: edxapp
celery_worker: True
- role: datadog
when: enable_datadog
- role: splunkforwarder
when: enable_splunkforwarder
- name: Deploy xqueue
hosts: all
sudo: True
gather_facts: True
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- role: nginx
nginx_sites:
- xqueue
- role: xqueue
- role: datadog
when: enable_datadog
- role: splunkforwarder
when: enable_splunkforwarder
- name: Deploy xserver
hosts: all
sudo: True
gather_facts: True
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- role: nginx
nginx_sites:
- xserver
- role: xserver
- role: datadog
when: enable_datadog
- role: splunkforwarder
when: enable_splunkforwarder
# Stanford Ansible Configuration Files Readme
------
This directory has the live playbooks that we use here at Stanford to This directory has the live playbooks that we use here at Stanford to
maintain our instance of OpenEdX at [class.stanford.edu][c]. We check maintain our instance of OpenEdX at [class.stanford.edu][c]. We check
...@@ -22,7 +23,8 @@ Other install docs: ...@@ -22,7 +23,8 @@ Other install docs:
[1]: https://docs.google.com/document/d/1ZDx51Jxa-zffyeKvHmTp_tIskLW9D9NRg9NytPTbnrA/edit#heading=h.iggugvghbcpf [1]: https://docs.google.com/document/d/1ZDx51Jxa-zffyeKvHmTp_tIskLW9D9NRg9NytPTbnrA/edit#heading=h.iggugvghbcpf
## Ansible Commands - Prod Ansible Commands - Prod
-----------------------
Generally we do installs as the "ubuntu" user. You want to make Generally we do installs as the "ubuntu" user. You want to make
sure that the stanford-deploy-20130415 ssh key is in your ssh agent. sure that the stanford-deploy-20130415 ssh key is in your ssh agent.
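For example (a hypothetical sketch; the key path and the prod playbook name are assumptions, not taken from this README):

ssh-add /path/to/stanford-deploy-20130415
ansible-playbook prod-app.yml -e "machine=app1" -u ubuntu -c ssh -i ./ec2.py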
...@@ -46,10 +48,10 @@ Some specifics: ...@@ -46,10 +48,10 @@ Some specifics:
and so forth). and so forth).
## Ansible Commands - Stage Ansible Commands - Stage
-------------------------
Command is: Command is:
ANSIBLE_CONFIG=stage-ansible.cfg ANSIBLE_EC2_INI=stage-ec2.ini ansible-playbook stage-app.yml -e "machine=app1" -u ubuntu -c ssh -i ./ec2.py ANSIBLE_CONFIG=stage-ansible.cfg ANSIBLE_EC2_INI=stage-ec2.ini ansible-playbook stage-app.yml -e "machine=app1" -u ubuntu -c ssh -i ./ec2.py
...@@ -44,7 +44,6 @@ ...@@ -44,7 +44,6 @@
- cms - cms
- phpmyadmin - phpmyadmin
nginx_template_directory: "{{local_dir}}/nginx/templates/carnegie/" nginx_template_directory: "{{local_dir}}/nginx/templates/carnegie/"
nginx_conf: true
- {'role': 'edxapp', 'openid_workaround': true, 'template_subdir': 'carnegie'} - {'role': 'edxapp', 'openid_workaround': true, 'template_subdir': 'carnegie'}
- datadog - datadog
#- splunkforwarder #- splunkforwarder
......
...@@ -48,11 +48,12 @@ ...@@ -48,11 +48,12 @@
- common - common
- supervisor - supervisor
- role: nginx - role: nginx
nginx_conf: true
nginx_sites: nginx_sites:
- lms - lms
- cms - cms
nginx_template_directory: "{{local_dir}}/nginx/templates/cme/" nginx_template_directory: "{{local_dir}}/nginx/templates/cme/"
nginx_default_sites:
- lms
- {'role': 'edxapp', 'openid_workaround': true, 'template_subdir': 'cme'} - {'role': 'edxapp', 'openid_workaround': true, 'template_subdir': 'cme'}
- datadog - datadog
#- splunkforwarder #- splunkforwarder
......
...@@ -21,6 +21,8 @@ ...@@ -21,6 +21,8 @@
- lms - lms
- cms - cms
- lms-preview - lms-preview
nginx_default_sites:
- lms
- edxapp - edxapp
- ruby - ruby
post_tasks: post_tasks:
......
...@@ -38,10 +38,12 @@ ...@@ -38,10 +38,12 @@
- supervisor - supervisor
- role: nginx - role: nginx
nginx_sites: nginx_sites:
- lms - lms
- cms - cms
- verify - verify
nginx_template_directory: "{{local_dir}}/nginx/templates/" nginx_template_directory: "{{local_dir}}/nginx/templates/"
nginx_default_sites:
- lms
tags: nginx tags: nginx
- edxapp - edxapp
- apache - apache
......
- hosts: tag_Name_log10_prod
sudo: True
vars_files:
- "{{ secure_dir }}/vars/users.yml"
vars:
secure_dir: '../../../configuration-secure/ansible'
local_dir: '../../../configuration-secure/ansible/local'
roles:
- common
...@@ -36,10 +36,12 @@ ...@@ -36,10 +36,12 @@
tags: users tags: users
- role: nginx - role: nginx
nginx_sites: nginx_sites:
- lms - lms
- cms - cms
- verify - verify
nginx_template_directory: "{{local_dir}}/nginx/templates/" nginx_template_directory: "{{local_dir}}/nginx/templates/"
nginx_default_sites:
- lms
- role: edxapp - role: edxapp
devstack: false devstack: false
- apache - apache
......
...@@ -19,6 +19,8 @@ ...@@ -19,6 +19,8 @@
- lms - lms
- cms - cms
- lms-preview - lms-preview
nginx_default_sites:
- lms
- edxapp - edxapp
- ansible_debug - ansible_debug
#- apache #- apache
......
- name: Configure stage instance(s)
hosts: notifier_stage
sudo: True
vars_files:
- "{{ secure_dir }}/vars/stage/notifier.yml"
- "{{ secure_dir }}/vars/users.yml"
gather_facts: True
roles:
- common
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
virtualenv_name: "notifier"
- notifier
- name: Configure loadtest instance(s)
hosts: notifier_loadtest
sudo: True
vars_files:
- "{{ secure_dir }}/vars/loadtest/notifier.yml"
- "{{ secure_dir }}/vars/users.yml"
gather_facts: True
roles:
- common
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
virtualenv_name: "notifier"
- notifier
- name: Configure stage edge instance(s)
hosts: notifier_edge_stage
sudo: True
vars_files:
- "{{ secure_dir }}/vars/edge_stage/notifier.yml"
- "{{ secure_dir }}/vars/users.yml"
gather_facts: True
roles:
- common
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
virtualenv_name: "notifier"
- notifier
- name: Configure prod instance(s)
hosts: notifier_prod
sudo: True
vars_files:
- "{{ secure_dir }}/vars/prod/notifier.yml"
- "{{ secure_dir }}/vars/users.yml"
gather_facts: True
roles:
- common
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
virtualenv_name: "notifier"
- notifier
- name: Configure edge prod instance(s)
hosts: notifier_edge_prod
sudo: True
vars_files:
- "{{ secure_dir }}/vars/edge_prod/notifier.yml"
- "{{ secure_dir }}/vars/users.yml"
gather_facts: True
vars:
roles:
- common
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
virtualenv_name: "notifier"
- notifier
- name: Create sandbox instance
hosts: localhost
connection: local
gather_facts: False
vars:
keypair: continuous-integration
instance_type: m1.small
security_group: sandbox
image: ami-d0f89fb9
region: us-east-1
instance_tags: '{"disposable": "true"}'
roles:
- launch_instance
- name: Configure instance(s)
hosts: launched
sudo: True
gather_facts: True
vars:
migrate_db: "yes"
openid_workaround: True
ansible_ssh_private_key_file: /var/lib/jenkins/continuous-integration.pem
vars_files:
- "{{ secure_dir }}/vars/edxapp_ref_users.yml"
- "{{ secure_dir }}/vars/edxapp_sandbox.yml"
- "{{ secure_dir }}/vars/edx_jenkins_tests.yml"
roles:
- common
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- xserver
- xqueue
- edxlocal
- mongo
- edxapp
- xqueue
- xserver
- name: Terminate instances
hosts: localhost
connection: local
tasks:
- name: Terminate instances that were previously launched
local_action:
module: ec2
state: 'absent'
instance_ids: ${ec2.instance_ids}
# This playbook is to configure ---
# the official edX sandbox instance
# sandbox.edx.org # Example sandbox configuration
# # for single server community
# On the machine you want to configure run the following # installs
# command from the configuration/playbooks directory:
# ansible-playbook -c local --limit "localhost:127.0.0.1" /path/to/configuration/playbooks/edx_sandbox.yml -i "localhost,"
#
# To use different default ports for lms-preview, cms and to set the lms_base and lms_preview_base,
# for the following configuration:
# studio listening on port 80 - studio.example.com
# lms listening on port 80 - example.com
# lms-preview listening on port 80 - preview.example.com
#
# ansible-playbook -c local --limit "localhost:127.0.0.1" path/to/configuration/playbooks/edx_sandbox.yml -i "localhost," -e "EDXAPP_CMS_NGINX_PORT=80 EDXAPP_LMS_PREVIEW_NGINX_PORT=80 EDXAPP_LMS_BASE=example.com EDXAPP_PREVIEW_LMS_BASE=preview.example.com"
#
- name: Configure instance(s) - name: Configure instance(s)
hosts: localhost hosts: all
sudo: True sudo: True
gather_facts: True gather_facts: True
vars: vars:
migrate_db: "yes" migrate_db: "yes"
openid_workaround: True openid_workaround: True
EDXAPP_LMS_NGINX_PORT: '80'
edx_platform_version: 'master'
roles: roles:
- common
- supervisor
- role: nginx - role: nginx
nginx_sites: nginx_sites:
- lms
- cms - cms
- lms-preview - lms
- xqueue - forum
- ora - ora
- xqueue
nginx_default_sites:
- lms
- edxlocal - edxlocal
- mongo - mongo
- { role: 'edxapp', celery_worker: True }
- edxapp - edxapp
- demo
- { role: 'rabbitmq', rabbitmq_ip: '127.0.0.1' } - { role: 'rabbitmq', rabbitmq_ip: '127.0.0.1' }
- { role: 'edxapp', celery_worker: True }
- oraclejdk - oraclejdk
- elasticsearch - elasticsearch
- forum - forum
- { role: "xqueue", update_users: True } - { role: "xqueue", update_users: True }
- ora - ora
- discern
- edx_ansible
- hosts: tag_Group_edxapp_ref
sudo: True
vars_files:
- "{{ secure_dir }}/vars/edxapp_ref_vars.yml"
- "{{ secure_dir }}/vars/edxapp_ref_users.yml"
roles:
- common
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- gunicorn
- edxapp
- ruby
- npm
# run this role last
- in_production
# ansible-playbook -v --user=ubuntu edxapp_rolling_example.yml -i ./ec2.py --private-key=/path/to/deployment.pem
- hosts: tag_Group_anothermulti
serial: 2
vars_files:
- "{{ secure_dir }}/vars/edxapp_stage_vars.yml"
- "{{ secure_dir }}/vars/users.yml"
pre_tasks:
- name: Gathering ec2 facts
ec2_facts:
- name: Removing instance from the ELB
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
state: 'absent'
roles:
- common
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- edxapp
- ruby
post_tasks:
- name: Adding instance back to the ELB
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ ec2_elbs }}"
state: 'present'
[jenkins_test]
jenkins-test.sandbox.edx.org
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: ec2_elb
short_description: De-registers or registers instances from EC2 ELB(s)
description:
- This module de-registers or registers an AWS EC2 instance from the ELB(s)
that it belongs to.
- Returns fact "ec2_elbs" which is a list of elbs attached to the instance
if state=absent is passed as an argument.
- Will be marked changed when called only if there are ELBs found to operate on.
version_added: "1.2"
requirements: [ "boto" ]
author: John Jarvis
options:
state:
description:
- register or deregister the instance
required: true
instance_id:
description:
- EC2 Instance ID
required: true
ec2_elbs:
description:
- List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register.
required: false
default: None
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: None
aliases: ['ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: None
aliases: ['ec2_access_key', 'access_key' ]
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
wait:
description:
- Wait for instance registration or deregistration to complete successfully before returning.
required: false
default: yes
choices: [ "yes", "no" ]
"""
EXAMPLES = """
# basic pre_task and post_task example
pre_tasks:
- name: Gathering ec2 facts
ec2_facts:
- name: Instance De-register
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
state: 'absent'
roles:
- myrole
post_tasks:
- name: Instance Register
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
state: 'present'
with_items: ec2_elbs
"""
import time
import sys
import os
AWS_REGIONS = ['ap-northeast-1',
'ap-southeast-1',
'ap-southeast-2',
'eu-west-1',
'sa-east-1',
'us-east-1',
'us-west-1',
'us-west-2']
try:
import boto
import boto.ec2.elb
from boto.regioninfo import RegionInfo
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
class ElbManager:
"""Handles EC2 instance ELB registration and de-registration"""
def __init__(self, module, instance_id=None, ec2_elbs=None,
aws_access_key=None, aws_secret_key=None, region=None):
self.aws_access_key = aws_access_key
self.aws_secret_key = aws_secret_key
self.module = module
self.instance_id = instance_id
self.region = region
self.lbs = self._get_instance_lbs(ec2_elbs)
# if there are no ELBs to operate on
# there will be no changes made
if len(self.lbs) > 0:
self.changed = True
else:
self.changed = False
def deregister(self, wait):
"""De-register the instance from all ELBs and wait for the ELB
to report it out-of-service"""
for lb in self.lbs:
lb.deregister_instances([self.instance_id])
if wait:
self._await_elb_instance_state(lb, 'OutOfService')
def register(self, wait):
"""Register the instance for all ELBs and wait for the ELB
to report the instance in-service"""
for lb in self.lbs:
lb.register_instances([self.instance_id])
if wait:
self._await_elb_instance_state(lb, 'InService')
def exists(self, lbtest):
""" Verify that the named ELB actually exists """
found = False
for lb in self.lbs:
if lb.name == lbtest:
found=True
break
return found
def _await_elb_instance_state(self, lb, awaited_state):
"""Wait for an ELB to change state
lb: load balancer
awaited_state : state to poll for (string)"""
while True:
state = lb.get_instance_health([self.instance_id])[0].state
if state == awaited_state:
break
else:
time.sleep(1)
def _get_instance_lbs(self, ec2_elbs=None):
"""Returns a list of ELBs attached to self.instance_id
ec2_elbs: an optional list of elb names that will be used
for elb lookup instead of returning what elbs
are attached to self.instance_id"""
try:
endpoint="elasticloadbalancing.%s.amazonaws.com" % self.region
connect_region = RegionInfo(name=self.region, endpoint=endpoint)
elb = boto.ec2.elb.ELBConnection(self.aws_access_key, self.aws_secret_key, region=connect_region)
except boto.exception.NoAuthHandlerFound, e:
self.module.fail_json(msg=str(e))
elbs = elb.get_all_load_balancers()
if ec2_elbs:
lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs)
else:
lbs = []
for lb in elbs:
for info in lb.instances:
if self.instance_id == info.id:
lbs.append(lb)
return lbs
def main():
module = AnsibleModule(
argument_spec=dict(
state={'required': True,
'choices': ['present', 'absent']},
instance_id={'required': True},
ec2_elbs={'default': None, 'required': False, 'type':'list'},
aws_secret_key={'default': None, 'aliases': ['ec2_secret_key', 'secret_key'], 'no_log': True},
aws_access_key={'default': None, 'aliases': ['ec2_access_key', 'access_key']},
region={'default': None, 'required': False, 'aliases':['aws_region', 'ec2_region'], 'choices':AWS_REGIONS},
wait={'required': False, 'choices': BOOLEANS, 'default': True}
)
)
aws_secret_key = module.params['aws_secret_key']
aws_access_key = module.params['aws_access_key']
ec2_elbs = module.params['ec2_elbs']
region = module.params['region']
wait = module.params['wait']
if module.params['state'] == 'present' and not ec2_elbs:
module.fail_json(msg="ELBs are required for registration")
if not aws_secret_key:
if 'AWS_SECRET_KEY' in os.environ:
aws_secret_key = os.environ['AWS_SECRET_KEY']
elif 'EC2_SECRET_KEY' in os.environ:
aws_secret_key = os.environ['EC2_SECRET_KEY']
if not aws_access_key:
if 'AWS_ACCESS_KEY' in os.environ:
aws_access_key = os.environ['AWS_ACCESS_KEY']
elif 'EC2_ACCESS_KEY' in os.environ:
aws_access_key = os.environ['EC2_ACCESS_KEY']
if not region:
if 'AWS_REGION' in os.environ:
region = os.environ['AWS_REGION']
elif 'EC2_REGION' in os.environ:
region = os.environ['EC2_REGION']
if not region:
module.fail_json(msg=str("Either region or EC2_REGION environment variable must be set."))
instance_id = module.params['instance_id']
elb_man = ElbManager(module, instance_id, ec2_elbs, aws_access_key,
aws_secret_key, region=region)
for elb in ec2_elbs:
if not elb_man.exists(elb):
msg="ELB %s does not exist" % elb
module.fail_json(msg=msg)
if module.params['state'] == 'present':
elb_man.register(wait)
elif module.params['state'] == 'absent':
elb_man.deregister(wait)
ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]}
ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts)
module.exit_json(**ec2_facts_result)
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
...@@ -121,7 +121,7 @@ options: ...@@ -121,7 +121,7 @@ options:
required: False required: False
default: 1 default: 1
aliases: [] aliases: []
monitor: monitoring:
version_added: "1.1" version_added: "1.1"
description: description:
- enable detailed monitoring (CloudWatch) for instance - enable detailed monitoring (CloudWatch) for instance
...@@ -185,7 +185,7 @@ options: ...@@ -185,7 +185,7 @@ options:
default: 'present' default: 'present'
aliases: [] aliases: []
root_ebs_size: root_ebs_size:
version_added: "1.4" version_added: "1.5"
description: description:
- size of the root volume in gigabytes - size of the root volume in gigabytes
required: false required: false
...@@ -193,7 +193,7 @@ options: ...@@ -193,7 +193,7 @@ options:
aliases: [] aliases: []
requirements: [ "boto" ] requirements: [ "boto" ]
author: Seth Vidal, Tim Gerla, Lester Wade, John Jarvis author: Seth Vidal, Tim Gerla, Lester Wade
''' '''
EXAMPLES = ''' EXAMPLES = '''
...@@ -210,17 +210,6 @@ EXAMPLES = ''' ...@@ -210,17 +210,6 @@ EXAMPLES = '''
group: webserver group: webserver
count: 3 count: 3
# Basic provisioning example with setting the root volume size to 50GB
- local_action:
module: ec2
keypair: mykey
instance_type: c1.medium
image: emi-40603AD1
wait: yes
group: webserver
count: 3
root_ebs_size: 50
# Advanced example with tagging and CloudWatch # Advanced example with tagging and CloudWatch
- local_action: - local_action:
module: ec2 module: ec2
...@@ -231,7 +220,8 @@ EXAMPLES = ''' ...@@ -231,7 +220,8 @@ EXAMPLES = '''
wait: yes wait: yes
wait_timeout: 500 wait_timeout: 500
count: 5 count: 5
instance_tags: '{"db":"postgres"}' monitoring=yes' instance_tags: '{"db":"postgres"}'
monitoring=yes
# Multiple groups example # Multiple groups example
local_action: local_action:
...@@ -243,7 +233,8 @@ local_action: ...@@ -243,7 +233,8 @@ local_action:
wait: yes wait: yes
wait_timeout: 500 wait_timeout: 500
count: 5 count: 5
instance_tags: '{"db":"postgres"}' monitoring=yes' instance_tags: '{"db":"postgres"}'
monitoring=yes
# VPC example # VPC example
- local_action: - local_action:
...@@ -406,6 +397,7 @@ def create_instances(module, ec2): ...@@ -406,6 +397,7 @@ def create_instances(module, ec2):
else: else:
bdm = None bdm = None
# group_id and group_name are exclusive of each other # group_id and group_name are exclusive of each other
if group_id and group_name: if group_id and group_name:
module.fail_json(msg = str("Use only one type of parameter (group_name) or (group_id)")) module.fail_json(msg = str("Use only one type of parameter (group_name) or (group_id)"))
...@@ -416,9 +408,7 @@ def create_instances(module, ec2): ...@@ -416,9 +408,7 @@ def create_instances(module, ec2):
if group_name: if group_name:
grp_details = ec2.get_all_security_groups() grp_details = ec2.get_all_security_groups()
if type(group_name) == list: if type(group_name) == list:
# FIXME: this should be a nice list comprehension group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
# also not py 2.4 compliant
group_id = list(filter(lambda grp: str(grp.id) if str(tmp) in str(grp) else None, grp_details) for tmp in group_name)
elif type(group_name) == str: elif type(group_name) == str:
for grp in grp_details: for grp in grp_details:
if str(group_name) in str(grp): if str(group_name) in str(grp):
...@@ -501,7 +491,7 @@ def create_instances(module, ec2): ...@@ -501,7 +491,7 @@ def create_instances(module, ec2):
if instance_tags: if instance_tags:
try: try:
ec2.create_tags(instids, module.from_json(instance_tags)) ec2.create_tags(instids, instance_tags)
except boto.exception.EC2ResponseError as e: except boto.exception.EC2ResponseError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
...@@ -558,6 +548,10 @@ def terminate_instances(module, ec2, instance_ids): ...@@ -558,6 +548,10 @@ def terminate_instances(module, ec2, instance_ids):
""" """
# Whether to wait for termination to complete before returning
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
changed = False changed = False
instance_dict_array = [] instance_dict_array = []
...@@ -576,8 +570,30 @@ def terminate_instances(module, ec2, instance_ids): ...@@ -576,8 +570,30 @@ def terminate_instances(module, ec2, instance_ids):
module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e)) module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e))
changed = True changed = True
return (changed, instance_dict_array, terminated_instance_ids) # wait here until the instances are 'terminated'
if wait:
num_terminated = 0
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and num_terminated < len(terminated_instance_ids):
response = ec2.get_all_instances( \
instance_ids=terminated_instance_ids, \
filters={'instance-state-name':'terminated'})
try:
num_terminated = len(response.pop().instances)
except Exception, e:
# got a bad response of some sort, possibly due to
# stale/cached data. Wait a second and then try again
time.sleep(1)
continue
if num_terminated < len(terminated_instance_ids):
time.sleep(5)
# waiting took too long
if wait_timeout < time.time() and num_terminated < len(terminated_instance_ids):
module.fail_json(msg = "wait for instance termination timeout on %s" % time.asctime())
return (changed, instance_dict_array, terminated_instance_ids)
def main(): def main():
...@@ -593,16 +609,16 @@ def main(): ...@@ -593,16 +609,16 @@ def main():
image = dict(), image = dict(),
kernel = dict(), kernel = dict(),
count = dict(default='1'), count = dict(default='1'),
monitoring = dict(choices=BOOLEANS, default=False), monitoring = dict(type='bool', default=False),
ramdisk = dict(), ramdisk = dict(),
wait = dict(choices=BOOLEANS, default=False), wait = dict(type='bool', default=False),
wait_timeout = dict(default=300), wait_timeout = dict(default=300),
ec2_url = dict(), ec2_url = dict(),
aws_secret_key = dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True), ec2_secret_key = dict(aliases=['aws_secret_key', 'secret_key'], no_log=True),
aws_access_key = dict(aliases=['ec2_access_key', 'access_key']), ec2_access_key = dict(aliases=['aws_access_key', 'access_key']),
placement_group = dict(), placement_group = dict(),
user_data = dict(), user_data = dict(),
instance_tags = dict(), instance_tags = dict(type='dict'),
vpc_subnet_id = dict(), vpc_subnet_id = dict(),
private_ip = dict(), private_ip = dict(),
instance_profile_name = dict(), instance_profile_name = dict(),
...@@ -612,33 +628,9 @@ def main(): ...@@ -612,33 +628,9 @@ def main():
) )
) )
ec2_url = module.params.get('ec2_url') # def get_ec2_creds(module):
aws_secret_key = module.params.get('aws_secret_key') # return ec2_url, ec2_access_key, ec2_secret_key, region
aws_access_key = module.params.get('aws_access_key') ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
region = module.params.get('region')
# allow eucarc environment variables to be used if ansible vars aren't set
if not ec2_url and 'EC2_URL' in os.environ:
ec2_url = os.environ['EC2_URL']
if not aws_secret_key:
if 'AWS_SECRET_KEY' in os.environ:
aws_secret_key = os.environ['AWS_SECRET_KEY']
elif 'EC2_SECRET_KEY' in os.environ:
aws_secret_key = os.environ['EC2_SECRET_KEY']
if not aws_access_key:
if 'AWS_ACCESS_KEY' in os.environ:
aws_access_key = os.environ['AWS_ACCESS_KEY']
elif 'EC2_ACCESS_KEY' in os.environ:
aws_access_key = os.environ['EC2_ACCESS_KEY']
if not region:
if 'AWS_REGION' in os.environ:
region = os.environ['AWS_REGION']
elif 'EC2_REGION' in os.environ:
region = os.environ['EC2_REGION']
# If we have a region specified, connect to its endpoint. # If we have a region specified, connect to its endpoint.
if region: if region:
...@@ -672,8 +664,8 @@ def main(): ...@@ -672,8 +664,8 @@ def main():
module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array) module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array)
# import module snippets
# this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import *
#<<INCLUDE_ANSIBLE_MODULE_COMMON>> from ansible.module_utils.ec2 import *
main() main()
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vpc_lookup
short_description: returns a list of subnet Ids using tags as criteria
description:
- Returns a list of subnet Ids for a given set of tags that identify one or more VPCs
version_added: "1.5"
options:
region:
description:
- The AWS region to use. Must be specified if ec2_url
is not used. If not specified then the value of the
EC2_REGION environment variable, if any, is used.
required: false
default: null
aliases: [ 'aws_region', 'ec2_region' ]
aws_secret_key:
description:
- AWS secret key. If not set then the value of
the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the
AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
tags:
description:
- tags to lookup
required: false
default: null
type: dict
aliases: []
requirements: [ "boto" ]
author: John Jarvis
'''
EXAMPLES = '''
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Return all instances that match the tag "Name: foo"
- local_action:
module: vpc_lookup
tags:
Name: foo
'''
import sys
AWS_REGIONS = ['ap-northeast-1',
'ap-southeast-1',
'ap-southeast-2',
'eu-west-1',
'sa-east-1',
'us-east-1',
'us-west-1',
'us-west-2']
try:
from boto.vpc import VPCConnection
from boto.vpc import connect_to_region
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def main():
module=AnsibleModule(
argument_spec=dict(
region=dict(choices=AWS_REGIONS),
aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'],
no_log=True),
aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
tags=dict(default=None, type='dict'),
)
)
tags = module.params.get('tags')
aws_secret_key = module.params.get('aws_secret_key')
aws_access_key = module.params.get('aws_access_key')
region = module.params.get('region')
# If we have a region specified, connect to its endpoint.
if region:
try:
vpc = connect_to_region(region, aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
subnet_ids = []
for subnet in vpc.get_all_subnets(filters={'tag:' + tag: value
for tag, value in tags.iteritems()}):
subnet_ids.append(subnet.id)
vpc_ids = []
for vpc in vpc.get_all_vpcs(filters={'tag:' + tag: value
for tag, value in tags.iteritems()}):
vpc_ids.append(vpc.id)
module.exit_json(changed=False, subnet_ids=subnet_ids, vpc_ids=vpc_ids)
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
...@@ -87,4 +87,4 @@ as_redhat_pkgs: ...@@ -87,4 +87,4 @@ as_redhat_pkgs:
# Installed via pip to get the IAM role feature. # Installed via pip to get the IAM role feature.
# #
as_pip_pkgs: as_pip_pkgs:
- git+https://github.com/s3tools/s3cmd.git#egg=s3cmd - git+https://github.com/s3tools/s3cmd.git#egg=s3cmd
\ No newline at end of file
...@@ -15,10 +15,8 @@ ...@@ -15,10 +15,8 @@
# #
# #
- name: analytics-server | stop the analytics service - name: stop the analytics service
service: name=analytics state=stopped service: name=analytics state=stopped
tags: deploy
- name: analytics-server | start the analytics service - name: start the analytics service
service: name=analytics state=started service: name=analytics state=started
tags: deploy
--- ---
dependencies: dependencies:
- { - {
role: automated, role: automated,
...@@ -8,4 +7,4 @@ dependencies: ...@@ -8,4 +7,4 @@ dependencies:
automated_sudoers_template: 'roles/analytics-server/templates/etc/sudoers.d/99-automator-analytics-server.j2' automated_sudoers_template: 'roles/analytics-server/templates/etc/sudoers.d/99-automator-analytics-server.j2'
} }
\ No newline at end of file
# #
# TODO: Needed while this repo is private # TODO: Needed while this repo is private
# #
- name: analytics-server | upload ssh script - name: upload ssh script
template: template:
src=tmp/{{ as_role_name }}.git_ssh.sh.j2 dest={{ as_git_ssh }} src=tmp/{{ as_role_name }}.git_ssh.sh.j2 dest={{ as_git_ssh }}
force=yes owner=root group=adm mode=750 force=yes owner=root group=adm mode=750
tags: tags:
- analytics-server - analytics-server
- deploy
- install - install
- update - update
# #
# TODO: Needed while this repo is private # TODO: Needed while this repo is private
# #
- name: analytics-server | install read-only ssh key required for checkout - name: install read-only ssh key required for checkout
copy: copy:
src={{ as_git_identity_path }} dest={{ as_git_identity_dest }} src={{ as_git_identity_path }} dest={{ as_git_identity_dest }}
force=yes owner=ubuntu group=adm mode=0600 force=yes owner=ubuntu group=adm mode=0600
tags: tags:
- analytics-server - analytics-server
- deploy
- install - install
- update - update
- name: analytics-server | checkout code - name: checkout code
git: git:
dest={{ as_code_dir }} repo={{ as_source_repo }} dest={{ as_code_dir }} repo={{ as_source_repo }}
version={{ as_version }} force=true version={{ as_version }} force=true
environment: environment:
GIT_SSH: $as_git_ssh GIT_SSH: $as_git_ssh
notify: analytics-server | restart the analytics service notify: restart the analytics service
notify: analytics-server | start the analytics service notify: start the analytics service
tags: tags:
- analytics-server - analytics-server
- deploy
- install - install
- update - update
# #
# TODO: Needed while this repo is private # TODO: Needed while this repo is private
# #
- name: analytics-server | update src permissions - name: update src permissions
file: file:
path={{ as_code_dir }} state=directory owner={{ as_user }} path={{ as_code_dir }} state=directory owner={{ as_user }}
group={{ as_web_user }} mode=2750 recurse=yes group={{ as_web_user }} mode=2750 recurse=yes
tags: tags:
- analytics-server - analytics-server
- deploy
- install - install
- update - update
# #
# TODO: Needed while this repo is private # TODO: Needed while this repo is private
# #
- name: analytics-server | remove read-only ssh key for the content repo - name: remove read-only ssh key for the content repo
file: path={{ as_git_identity_dest }} state=absent file: path={{ as_git_identity_dest }} state=absent
tags: tags:
- analytics-server - analytics-server
- deploy
- install - install
- update - update
# #
# TODO: Needed while this repo is private # TODO: Needed while this repo is private
# #
- name: analytics-server | remove ssh script - name: remove ssh script
file: path={{ as_git_ssh }} state=absent file: path={{ as_git_ssh }} state=absent
tags: tags:
- analytics-server - analytics-server
- deploy
- install - install
- update - update
- name: analytics-server | install application requirements - name: install application requirements
pip: pip:
requirements={{ as_requirements_file }} requirements={{ as_requirements_file }}
virtualenv={{ as_venv_dir }} state=present virtualenv={{ as_venv_dir }} state=present
sudo: true sudo: true
sudo_user: "{{ as_user }}" sudo_user: "{{ as_user }}"
notify: analytics-server | start the analytics service notify: start the analytics service
tags: tags:
- analytics-server - analytics-server
- deploy
- install - install
- update - update
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
# #
# #
# Tasks for role analytics-server # Tasks for role analytics-server
# #
# Overview: # Overview:
# #
# Installs the edX analytics-server Django application which provides # Installs the edX analytics-server Django application which provides
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
# common role # common role
# #
# Depends upon the automated role # Depends upon the automated role
# #
# Example play: # Example play:
# #
# - name: Configure analytics instance(s) # - name: Configure analytics instance(s)
...@@ -37,79 +37,79 @@ ...@@ -37,79 +37,79 @@
# - common # - common
# - analytics-server # - analytics-server
# #
- name: analytics-server | install system packages - name: install system packages
apt: pkg={{','.join(as_debian_pkgs)}} state=present apt: pkg={{','.join(as_debian_pkgs)}} state=present
tags: tags:
- analytics-server - analytics-server
- install - install
- update - update
- name: analytics-server | create analytics-server user {{ as_user }} - name: create analytics-server user {{ as_user }}
user: user:
name={{ as_user }} state=present shell=/bin/bash name={{ as_user }} state=present shell=/bin/bash
home={{ as_home }} createhome=yes home={{ as_home }} createhome=yes
tags: tags:
- analytics-server - analytics-server
- install - install
- update - update
- name: analytics-server | setup the analytics-server env - name: setup the analytics-server env
template: template:
src=opt/wwc/analytics-server/{{ as_env }}.j2 src=opt/wwc/analytics-server/{{ as_env }}.j2
dest={{ as_home }}/{{ as_env }} dest={{ as_home }}/{{ as_env }}
owner="{{ as_user }}" group="{{ as_user }}" owner="{{ as_user }}" group="{{ as_user }}"
tags: tags:
- analytics-server - analytics-server
- install - install
- update - update
- name: analytics-server | drop a bash_profile - name: drop a bash_profile
copy: > copy: >
src=../../common/files/bash_profile src=../../common/files/bash_profile
dest={{ as_home }}/.bash_profile dest={{ as_home }}/.bash_profile
owner={{ as_user }} owner={{ as_user }}
group={{ as_user }} group={{ as_user }}
# Awaiting next ansible release. # Awaiting next ansible release.
#- name: analytics-server | ensure .bashrc exists #- name: ensure .bashrc exists
# file: path={{ as_home }}/.bashrc state=touch # file: path={{ as_home }}/.bashrc state=touch
# sudo: true # sudo: true
# sudo_user: "{{ as_user }}" # sudo_user: "{{ as_user }}"
# tags: # tags:
# - analytics-server # - analytics-server
# - install # - install
# - update # - update
- name: analytics-server | ensure .bashrc exists - name: ensure .bashrc exists
shell: touch {{ as_home }}/.bashrc shell: touch {{ as_home }}/.bashrc
sudo: true sudo: true
sudo_user: "{{ as_user }}" sudo_user: "{{ as_user }}"
tags: tags:
- analytics-server - analytics-server
- install - install
- update - update
- name: analytics-server | add source of analytics-server_env to .bashrc - name: add source of analytics-server_env to .bashrc
lineinfile: lineinfile:
dest={{ as_home }}/.bashrc dest={{ as_home }}/.bashrc
regexp='. {{ as_home }}/analytics-server_env' regexp='. {{ as_home }}/analytics-server_env'
line='. {{ as_home }}/analytics_server_env' line='. {{ as_home }}/analytics_server_env'
tags: tags:
- analytics-server - analytics-server
- install - install
- update - update
- name: analytics-server | add source venv to .bashrc - name: add source venv to .bashrc
lineinfile: lineinfile:
dest={{ as_home }}/.bashrc dest={{ as_home }}/.bashrc
regexp='. {{ as_venv_dir }}/bin/activate' regexp='. {{ as_venv_dir }}/bin/activate'
line='. {{ as_venv_dir }}/bin/activate' line='. {{ as_venv_dir }}/bin/activate'
tags: tags:
- analytics-server - analytics-server
- install - install
- update - update
- name: analytics-server | install global python requirements - name: install global python requirements
pip: name={{ item }} pip: name={{ item }}
with_items: as_pip_pkgs with_items: as_pip_pkgs
tags: tags:
...@@ -117,8 +117,8 @@ ...@@ -117,8 +117,8 @@
- install - install
- update - update
- name: analytics-server | create config - name: create config
template: template:
src=opt/wwc/analytics.auth.json.j2 src=opt/wwc/analytics.auth.json.j2
dest=/opt/wwc/analytics.auth.json dest=/opt/wwc/analytics.auth.json
mode=0600 mode=0600
...@@ -127,10 +127,10 @@ ...@@ -127,10 +127,10 @@
- analytics-server - analytics-server
- install - install
- update - update
- name: analytics-server | install service - name: install service
template: template:
src=etc/init/analytics.conf.j2 dest=/etc/init/analytics.conf src=etc/init/analytics.conf.j2 dest=/etc/init/analytics.conf
owner=root group=root owner=root group=root
- include: deploy.yml - include: deploy.yml tags=deploy
\ No newline at end of file
...@@ -87,4 +87,4 @@ analytics_redhat_pkgs: ...@@ -87,4 +87,4 @@ analytics_redhat_pkgs:
# Installed via pip to get the IAM role feature. # Installed via pip to get the IAM role feature.
# #
analytics_pip_pkgs: analytics_pip_pkgs:
- git+https://github.com/s3tools/s3cmd.git#egg=s3cmd - git+https://github.com/s3tools/s3cmd.git#egg=s3cmd
\ No newline at end of file
...@@ -15,10 +15,8 @@ ...@@ -15,10 +15,8 @@
# #
# #
- name: analytics | stop the analytics service - name: stop the analytics service
service: name=analytics state=stopped service: name=analytics state=stopped
tags: deploy
- name: analytics | start the analytics service - name: start the analytics service
service: name=analytics state=started service: name=analytics state=started
tags: deploy
# #
# TODO: Needed while this repo is private # TODO: Needed while this repo is private
# #
- name: analytics | upload ssh script - name: upload ssh script
template: template:
src=tmp/{{ analytics_role_name }}.git_ssh.sh.j2 dest={{ analytics_git_ssh }} src=tmp/{{ analytics_role_name }}.git_ssh.sh.j2 dest={{ analytics_git_ssh }}
force=yes owner=root group=adm mode=750 force=yes owner=root group=adm mode=750
tags: tags:
- analytics - analytics
- deploy
- install - install
- update - update
# #
# TODO: Needed while this repo is private # TODO: Needed while this repo is private
# #
- name: analytics | install read-only ssh key required for checkout - name: install read-only ssh key required for checkout
copy: copy:
src={{ analytics_git_identity_path }} dest={{ analytics_git_identity_dest }} src={{ analytics_git_identity_path }} dest={{ analytics_git_identity_dest }}
force=yes owner=ubuntu group=adm mode=0600 force=yes owner=ubuntu group=adm mode=0600
tags: tags:
- analytics - analytics
- deploy
- install - install
- update - update
- name: analytics | checkout code - name: checkout code
git: git:
dest={{ analytics_code_dir }} repo={{ analytics_source_repo }} dest={{ analytics_code_dir }} repo={{ analytics_source_repo }}
version={{ analytics_version }} force=true version={{ analytics_version }} force=true
environment: environment:
GIT_SSH: $analytics_git_ssh GIT_SSH: $analytics_git_ssh
notify: analytics | restart the analytics service notify: restart the analytics service
notify: analytics | start the analytics service notify: start the analytics service
tags: tags:
- analytics - analytics
- deploy
- install - install
- update - update
# #
# TODO: Needed while this repo is private # TODO: Needed while this repo is private
# #
- name: analytics | update src permissions - name: update src permissions
file: file:
path={{ analytics_code_dir }} state=directory owner={{ analytics_user }} path={{ analytics_code_dir }} state=directory owner={{ analytics_user }}
group={{ analytics_web_user }} mode=2750 recurse=yes group={{ analytics_web_user }} mode=2750 recurse=yes
tags: tags:
- analytics - analytics
- deploy
- install - install
- update - update
# #
# TODO: Needed while this repo is private # TODO: Needed while this repo is private
# #
- name: analytics | remove read-only ssh key for the content repo - name: remove read-only ssh key for the content repo
file: path={{ analytics_git_identity_dest }} state=absent file: path={{ analytics_git_identity_dest }} state=absent
tags: tags:
- analytics - analytics
- deploy
- install - install
- update - update
# #
# TODO: Needed while this repo is private # TODO: Needed while this repo is private
# #
- name: analytics | remove ssh script - name: remove ssh script
file: path={{ analytics_git_ssh }} state=absent file: path={{ analytics_git_ssh }} state=absent
tags: tags:
- analytics - analytics
- deploy
- install - install
- update - update
- name: analytics | install application requirements - name: install application requirements
pip: pip:
requirements={{ analytics_requirements_file }} requirements={{ analytics_requirements_file }}
virtualenv={{ analytics_venv_dir }} state=present virtualenv={{ analytics_venv_dir }} state=present
sudo: true sudo: true
sudo_user: "{{ analytics_user }}" sudo_user: "{{ analytics_user }}"
notify: analytics | start the analytics service notify: start the analytics service
tags: tags:
- analytics - analytics
- deploy
- install - install
- update - update