Commit b56ffe09 by John Jarvis

Merge pull request #729 from edx/jarv/focaccia

Jarv/focaccia
parents 50041fa8 38fc11c9
# Additional Tasks
import cache
import clean
import ec2
import audit
import git
import hosts
import locks
import os
import ssh
import status
import migrate_check
import yaml
from dogapi import dog_stats_api, dog_http_api
from timestamps import TSWrapper
# Global tasks
import logging
from fabric.api import env, task, runs_once
from output import squelch
from datetime import datetime
import sys
import time
from fabric.api import execute, local, task, runs_once
from fabric.utils import fastprint
from fabric.colors import blue
from ssh_tunnel import setup_tunnel
# These imports are to give aliases for these tasks
from hosts import by_tags as tag
from hosts import by_tags as tags
from hosts import exemplar_from_tags as exemplar
from git import default_deploy as deploy
env.linewise = True
env.noop = False
env.use_ssh_config = True
FORMAT = '[ %(asctime)s ] : %(message)s'
logging.basicConfig(format=FORMAT, level=logging.WARNING)
# add timestamps to output
sys.stdout = TSWrapper(sys.stdout)
sys.stderr = TSWrapper(sys.stderr)
path = os.path.abspath(__file__)
with open(os.path.join(
os.path.dirname(path), '../package_data.yaml')) as f:
package_data = yaml.load(f)
dog_stats_api.start(api_key=package_data['datadog_api'], statsd=True)
dog_http_api.api_key = package_data['datadog_api']
@task
def noop():
"""
Disable modification of servers
"""
env.noop = True
dog_stats_api.stop()
@task
def quiet():
"""
Disables verbose output
"""
squelch()
@runs_once
@task()
def log(fname=None):
"""
Writes a logfile of the run to disk
"""
if not fname:
d = datetime.now()
fname = d.strftime('/var/tmp/fab-%Y%m%d-%H%M%S-{0}.log'.format(
os.getpid()))
env.logfile = fname
sys.stdout.log_to_file(fname)
sys.stderr.log_to_file(fname)
import time
from fabric.api import execute, local, task, runs_once
from fabric.utils import fastprint
from fabric.colors import blue
from ssh_tunnel import setup_tunnel
# These imports are to give aliases for these tasks
from hosts import by_name as name
from hosts import by_tags as tag
from hosts import by_tags as tags
from hosts import exemplar_from_tags as exemplar
from git import default_deploy as deploy
import logging
from fabric.api import serial, task, parallel, env, execute, runs_once, settings,sudo
from fabfile.safety import noopable
from multiprocessing import Manager
from timestamps import no_ts
from packages import PackageInfo
import tempfile
from output import notify
@task
@parallel
def collect_installed_packages(results):
"""
Collect all installed packages for the selected hosts and store them in the shared results dict
"""
print env.host
pkg_info = PackageInfo()
results[env.host] = pkg_info.installed_packages()
@task
@serial
def display_installed_packages(installed_packages):
"""
Print all installed packages collected by collect_installed_packages
"""
# FIXME: env.hosts loses the port information here, not sure why
with no_ts():
for pkg in installed_packages['{0}:22'.format(env.host)]:
notify("{pkg.name} = {pkg.revision}".format(pkg=pkg))
@task(default=True)
@runs_once
def installed_packages(from_links=False):
"""
List all of the installed packages on the selected hosts
"""
installed_packages = Manager().dict()
execute(collect_installed_packages, installed_packages)
execute(display_installed_packages, installed_packages)
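# A minimal usage sketch (illustrative; the tag values below are assumptions):
#   fab tag:environment=prod,group=edxapp installed_packages
# collects the installed package revisions on every matching host and then
# prints them one host at a time.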
@task
def audit_user(user, audit_output=None):
"""
Logs on to the provided hosts and runs id for the supplied user with sudo. Output
is logged to the provided file argument or a default using the
python gettempdir() function and the following file name format:
/tmp/audit-user-{user}.csv
The contents of this file are
host,user,command output
Note that if the file already exists, output will be appended to the
existing file.
"""
logging.info("Auditing {host}.".format(host=env.host_string))
if not audit_output:
audit_output = tempfile.gettempdir() + "/audit-user-{user}.csv".format(
user=user)
with settings(warn_only=True):
with open(audit_output, 'a') as audit:
output = noopable(sudo)("id {user}".format(user=user))
audit.write("{host},{user},{output}\n".format(
host=env.host_string,
user=user,
output=output
)
)
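# Example (illustrative; the user name and tag values are assumptions):
#   fab tag:environment=prod audit_user:user=jenkins
# appends one "host,user,command output" line per host to
# /tmp/audit-user-jenkins.csv.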
@task
def remove_user(user):
"""
Logs on to provided hosts and runs userdel for the supplied user with sudo.
The user's home directory is preserved.
"""
logging.info("Removing {user} user from {host}.".format(
user=user,host=env.host_string))
with settings(warn_only=True):
output = noopable(sudo)("userdel {user}".format(user=user))
logging.info("Output of userdel command on host {host} was {out}".format(
host=env.host_string,out=output
)
)
from fabric.api import task, runs_once, env, serial, puts, settings
from fabric.utils import fastprint
from fabric.colors import blue, red, white
from output import notify
from packages import PackageDescriptor
from output import unsquelched
from hosts import exemplar
from ssh_tunnel import setup_tunnel
from packages import PackageInfo
@task
@runs_once
def from_exemplar(**tags):
"""
Cache the set of packages installed on one host matching the specified tags.
"""
host_string = setup_tunnel([exemplar(**tags)])[0]
with settings(host_string=host_string):
installed_packages()
@task
@runs_once
def limit_prefix(*prefix_list):
"""
Limits cached packages to those that
match one or more prefix strings
"""
env.package_descriptors = filter(
lambda pkg: any(pkg.name.startswith(prefix)
for prefix in prefix_list), env.package_descriptors)
@task(default=True)
@runs_once
def installed_packages(prefix=None):
"""
Cache the set of packages installed on the selected host.
"""
pkg_info = PackageInfo()
env.package_descriptors = [
package for package in pkg_info.installed_packages()
if prefix is None or package.name.startswith(prefix)
]
@task
@runs_once
def from_strings(**pkg_revs):
"""
Cache packages based on strings that can then be checked with confirm
or deployed with deploy.
Each named argument specifies a package by name, and the revision of
the package to deploy
"""
packages = []
for pkg_name, pkg_rev in pkg_revs.items():
packages.append(PackageDescriptor(pkg_name, pkg_rev))
env.package_descriptors = packages
notify(env.package_descriptors)
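# Example (illustrative; the package name and revision are assumptions):
#   from_strings(**{'edx-platform': '0123abcd'})
# caches a single PackageDescriptor that confirm/deploy tasks can then act on.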
@task
@runs_once
def from_stdin(prefix=None):
"""
Cache a list of packages from stdin.
Package names must start with prefix, if specified (any that don't
will be skipped). Package names and revisions should be separated
by = signs, and should be one per line.
"""
if prefix:
prefix_msg = white('pkg_name', bold=True) + white(
' must start with ') + blue(prefix)
else:
prefix_msg = ''
fastprint('\n')
fastprint('\n'.join([
white('Please enter pkg_name=pkg_rev, one per line\n', bold=True),
white('pkg_rev', bold=True) + white(' is a git revision hash'),
prefix_msg,
white('Complete your selections by entering a blank line.'),
]))
fastprint('\n\n')
packages = {}
while True:
line = raw_input("> ")
if not line:
break
if '=' not in line:
fastprint(red("Expected = in '{line}'. Skipping...".format(
line=line)) + white('\n'))
continue
pkg_name, _, pkg_rev = line.partition('=')
pkg_name = pkg_name.strip()
pkg_rev = pkg_rev.strip()
if prefix and not pkg_name.startswith(prefix):
fastprint(red("'{0}' does not start with '{1}'".format(
pkg_name, prefix)) + white('\n'))
continue
packages[pkg_name] = pkg_rev
from_strings(**packages)
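# Example interactive session (illustrative; names and revisions are made up):
#   > edx-platform=0123abcd
#   > xqueue=89ef4567
#   >              <- a blank line ends input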
@task
@serial
@runs_once
def prompt(*pkg_names):
packages = {}
with unsquelched():
puts("Please supply git revisions to "
"deploy for the following packages:")
for pkg in pkg_names:
packages[pkg] = raw_input("{pkg} = ".format(pkg=pkg)).strip()
from_strings(**packages)
from output import notify
from fabric.api import abort
from fabric.colors import blue, cyan, green, red, white
from fabric.utils import fastprint
def choose(msg, options):
choices = range(len(options))
fastprint(white(msg, bold=True) + white("\n"))
for i, target in enumerate(options):
fastprint("{0}. {1}\n".format(i, target))
fastprint("x. Cancel\n")
user_input = raw_input("> ")
if user_input == 'x':
abort("Cancelled")
try:
choice = int(user_input)
except:
fastprint(red("Choice must be an integer"))
return None
if choice not in choices:
fastprint(red("Choice must be one of {0}".format(choices)))
return None
return options[choice]
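# A minimal usage sketch (illustrative; the option values are assumptions):
#   target = choose("Pick a deploy target:", ['prod', 'stage'])
#   # returns the selected option, or None on invalid input ('x' aborts)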
def multi_choose_with_input(msg, options):
"""
Options:
msg - header message for the chooser
options - dictionary of options to select
User selects one of the keys in the dictionary,
a new value is read from stdin
"""
selections = options.keys()
user_input = None
while True:
fastprint('\n{0}{1}'.format(white(msg, bold=True), white("\n")))
# The extra white("\n") prints are to reset
# the color for the timestamp line prefix
fastprint(white("\n"))
for i, item in enumerate(selections):
fastprint(" {0}. {1} : {2}".format(white(i, bold=True),
cyan(item), cyan(options[item], bold=True)) + white("\n"))
fastprint(blue(" a. Select all") + white("\n"))
fastprint(blue(" c. Continue") + white("\n"))
fastprint(blue(" x. Cancel") + white("\n"))
fastprint(white("\n"))
user_input = raw_input("> ")
try:
if user_input == 'c':
break
elif user_input == 'x':
return None
elif int(user_input) in range(len(selections)):
name = selections[int(user_input)]
fastprint(green('Enter new msg for ') +
cyan(name))
options[name] = raw_input(white(": "))
except:
notify("Invalid selection ->" + user_input + "<-")
return options
def multi_choose(msg, options):
fastprint(white(msg, bold=True) + white("\n"))
selected = [" " for option in options]
user_input = None
while True:
# The extra white("\n") prints are to reset
# the color for the timestamp line prefix
fastprint(white("\n"))
for i, target in enumerate(options):
fastprint(green(selected[i]))
fastprint(cyan(" {0}. {1}".format(i, target)) + white("\n"))
fastprint(blue(" a. Select all") + white("\n"))
fastprint(blue(" c. Deploy selections") + white("\n"))
fastprint(blue(" x. Cancel") + white("\n"))
fastprint(white("\n"))
user_input = raw_input("> ")
try:
if user_input == 'c':
break
elif user_input == 'a':
selected = ['*' for i in range(len(selected))]
elif user_input == 'x':
return None
elif int(user_input) in range(len(options)):
if selected[int(user_input)] == " ":
selected[int(user_input)] = "*"
else:
selected[int(user_input)] = " "
except:
notify("Invalid selection ->" + user_input + "<-")
pkgs = [options[s] for s in range(len(selected)) if selected[s] == '*']
return pkgs
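# A minimal usage sketch (illustrative; the package names are assumptions):
#   pkgs = multi_choose("Select packages to deploy:", ['edx-platform', 'xqueue'])
#   # returns the starred subset, or None if the user cancels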
from fabric.api import sudo, task, parallel
from safety import noopable
from modifiers import rolling
@task
@parallel
def apt_get_clean():
""" Runs apt-get clean on a remote server """
noopable(sudo)('apt-get clean')
@task
@rolling
def mako_template_cache():
noopable(sudo)('service gunicorn stop')
noopable(sudo)('rm -rf /tmp/tmp*mako')
noopable(sudo)('service gunicorn start')
import boto
from fabric.api import run, task, parallel, env
env.instance_ids = {}
def instance_id():
if env.host_string not in env.instance_ids:
env.instance_ids[env.host_string] = run('wget -q -O - http://169.254.169.254/latest/meta-data/instance-id')
return env.instance_ids[env.host_string]
#!/bin/sh
exec ssh -i "/etc/git-identity" -o "StrictHostKeyChecking no" "$@"
import boto
from fabric.decorators import serial
from ssh_tunnel import setup_tunnel, DOMAIN
import socket
from fabric.api import env, task, abort
from fabric.colors import red
import logging
def hosts_by_tag(tag, value):
"""
Return a list of all hosts that have the specified value for the specified
tag
"""
return hosts_by_tags(**{tag: value})
def hosts_by_tags(**tags):
"""
Return a list of all hosts that have the specified value for the specified
tags.
Tag values are allowed to include wildcards.
If no variant tag is specified, this command will ignore all hosts
that have a variant specified.
"""
if 'env' in tags:
tags['environment'] = tags['env']
del(tags['env'])
ec2 = boto.connect_ec2()
hosts = []
for res in ec2.get_all_instances(filters={'tag:' + tag: value
for tag, value in tags.iteritems()
if value != '*'}):
for inst in res.instances:
if inst.state == "running":
if (inst.public_dns_name):
hosts.append(inst.public_dns_name)
else:
hosts.append(inst.private_dns_name)
print hosts
return hosts
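# Example (illustrative; the tag values are assumptions). Wildcard values are
# dropped from the EC2 filter, so this matches any variant:
#   hosts_by_tags(env='prod', group='edxapp', variant='*')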
def _fleet():
ec2 = boto.connect_ec2()
hosts = []
for res in ec2.get_all_instances():
for inst in res.instances:
if inst.state == "running":
try:
instance_name = inst.tags['Name']
except KeyError:
logging.warning("Instance with id {id} and {dns} has no assigned Name, skipping.".format(id=inst.id, dns=inst.public_dns_name))
continue
host_to_add = instance_name + "." + DOMAIN
# fallback to the public hostname if the m.edx.org
# name doesn't exist
try:
socket.gethostbyname(host_to_add.replace(':22',''))
except socket.error:
if inst.public_dns_name:
host_to_add = inst.public_dns_name
if host_to_add:
hosts.append(host_to_add)
return hosts
def exemplar(**tags):
"""
Return the hostname of one host from the specified set
of tags, or None if there is no such host
"""
hosts = hosts_by_tags(**tags)
if hosts:
return hosts[0]
else:
return None
@task(alias='exemplar')
def exemplar_from_tags(**tags):
env.hosts.append(exemplar(**tags))
@task(aliases=['tag', 'tags'])
def by_tags(**tags):
"""
Add all running hosts that match the tag names provided
as keyword arguments.
"""
env.hosts.extend(hosts_by_tags(**tags))
env.hosts.sort()
env.hosts = setup_tunnel(env.hosts)
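# Example (illustrative; the tag values are assumptions): select hosts by tag
# and chain another task, e.g. the deploy alias defined in the top-level fabfile:
#   fab tag:environment=prod,group=edxapp deploy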
@task(aliases=['fleet'])
def fleet():
"""
Add all running hosts visible via the default AWS credentials to
env.hosts.
Your ability to operate on these hosts will depend upon the ssh credentials
that you are using to drive fab. There is likely to be a mismatch between
what hosts you can see via IAM managed AWS credentials and which hosts
you can actually connect to even if you are using highly privileged
AWS pems.
"""
hosts = _fleet()
env.hosts.extend(hosts)
env.hosts.sort()
env.hosts = setup_tunnel(env.hosts)
import os
import socket
import time
from output import notify
from safety import noopable
from fabric.api import task, run, env, settings, sudo, abort
from fabric.api import runs_once, execute, serial, hide
MAX_SLEEP_TIME = 10
LOCK_FILE = '/opt/deploy/.lock'
@task
@runs_once
def wait_for_all_locks():
execute('locks.wait_for_lock', hosts=sorted(env.hosts))
@task
@runs_once
def remove_all_locks():
execute('locks.remove_lock', hosts=sorted(env.hosts, reverse=True))
@task
@serial
def remove_lock():
noopable(sudo)("test ! -f {0} || rm {0}".format(LOCK_FILE))
@task
@serial
def wait_for_lock():
if hasattr(env, 'deploy_user'):
lock_user = env.deploy_user
else:
lock_user = env.user
LOCK_ID = 'u:{user} h:{host} pid:{pid}'.format(user=lock_user,
host=socket.gethostname(),
pid=str(os.getpid()))
sleep_time = 0.1
timeout = 120
start_time = time.time()
with settings(warn_only=True):
while True:
wait_time = time.time() - start_time
# break if the lockfile is removed or if it belongs to this pid
# if it exists lock_status will have the file's contents
with hide('running', 'stdout', 'stderr', 'warnings'):
lock_status = run("test ! -f {lfile} || "
"(cat {lfile} && "
'grep -q "{lid}" {lfile})'.format(
lfile=LOCK_FILE,
lid=LOCK_ID))
if lock_status.succeeded:
noopable(sudo)('echo "{0}" > {1}'.format(
LOCK_ID, LOCK_FILE))
notify("Took lock")
break
elif wait_time >= timeout:
abort("Timeout expired, giving up")
lock_create_time = run("stat -c %Y {0}".format(LOCK_FILE))
delta = time.time() - float(lock_create_time)
(dhour, dsec) = divmod(delta, 3600)
notify("""
!! Deploy lockfile already exists ({lockfile}) !!
Waiting: {wait}s
Lockfile info: [ {owner} ]
Lock created: {dhour}h{dmin}m ago
""".format(
lockfile=LOCK_FILE,
wait=int(timeout - wait_time),
owner=lock_status,
dhour=int(dhour),
dmin=int(dsec / 60),
))
time.sleep(sleep_time)
sleep_time *= 2
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
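# The lock file written above holds a single line identifying the holder,
# e.g. (illustrative values):
#   u:deployer h:ops-bastion pid:12345
# so a re-run from the same user/host/pid re-acquires its own lock instead of
# waiting on it.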
from fabric.api import task, parallel, put, sudo
from safety import noopable
from .modifiers import rolling
from StringIO import StringIO
import json
__all__ = ['on', 'off', 'maintain_service', 'unmaintain_service']
services = ['lms', 'cms', 'lms-xml', 'lms-preview']
def set_maintenance(value):
noopable(put)(StringIO(json.dumps({'maintenance': value})), '/etc/facter/facts.d/mitx_maintenance.json', use_sudo=True)
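# set_maintenance(True) drops a facter fact file (illustrative contents):
#   /etc/facter/facts.d/mitx_maintenance.json -> {"maintenance": true}
# which the subsequent puppet check-in picks up to switch vhosts.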
@task
@parallel
def on():
"""
Enable maintenance mode
"""
set_maintenance(True)
puppet.checkin('maintenance')
@task
@parallel
def off():
"""
Disable maintenance mode
"""
set_maintenance(False)
puppet.checkin('maintenance')
@task
@rolling
def maintain_service(service):
"""
Puts a specified edxapp service into maintenance mode by replacing
its nginx sites-enabled link with a link to the maintenance vhost.
"""
if service not in services:
raise Exception("Provided service not in the service inventory. "
"Acceptable values are {services}".format(
services=services
))
noopable(sudo)("rm -f /etc/nginx/sites-enabled/{service}".format(
service=service))
noopable(sudo)("ln -s /etc/nginx/sites-available/{service}-maintenance"
" /etc/nginx/sites-enabled/{service}-maintenance".format(
service=service))
noopable(sudo)("service nginx reload")
@task
@rolling
def unmaintain_service(service):
"""
Removes a specified edxapp service from maintenance mode by replacing
the appropriate link in /etc/nginx/sites-enabled.
"""
if service not in services:
raise Exception("Provided service not in the service inventory. "
"Acceptable values are {services}".format(
services=services
))
noopable(sudo)("rm -f /etc/nginx/sites-enabled/{service}-maintenance".format(
service=service))
noopable(sudo)("ln -s /etc/nginx/sites-available/{service}"
" /etc/nginx/sites-enabled/{service}".format(
service=service))
noopable(sudo)("service nginx reload")
import boto
from .ec2 import instance_id
def instance_tags_for_current_host():
"""
Returns the datadog style tags for the active host
"""
return instance_tags([instance_id()])
def instance_tags(instance_ids):
"""
Returns datadog style tags for the specified instances
"""
ec2 = boto.connect_ec2()
tags = set()
for res in ec2.get_all_instances(instance_ids):
for instance in res.instances:
ec2_tags = instance.tags
tags.add('instance_id:' + instance.id)
if 'group' in ec2_tags:
tags.add('fab-group:' + ec2_tags['group'])
if 'environment' in ec2_tags:
tags.add('fab-environment:' + ec2_tags['environment'])
if 'variant' in ec2_tags:
tags.add('fab-variant:' + ec2_tags['variant'])
return list(tags)
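# Example return value (illustrative ids and tag values):
#   ['instance_id:i-0abc1234', 'fab-environment:prod', 'fab-group:edxapp']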
from fabric.api import task, sudo, runs_once, prefix, hide, abort
from fabric.contrib import console
from fabric.colors import white, green
from .safety import noopable
@task()
@runs_once
def migrate_check(auto_migrate=False):
"""
Checks whether migrations need to be run; if they do, it will prompt to
run them before continuing.
Looks for " - Migrating" in the output of the dry run.
"""
migration_cmd = "/opt/edx/bin/django-admin.py migrate --noinput " \
"--settings=lms.envs.aws --pythonpath=/opt/wwc/edx-platform"
with prefix("export SERVICE_VARIANT=lms"):
with hide('running', 'stdout', 'stderr', 'warnings'):
dryrun_out = sudo(migration_cmd + " --db-dry-run", user="www-data")
migrate = False
for chunk in dryrun_out.split('Running migrations for '):
if 'Migrating' in chunk:
print "!!! Found Migration !!!\n" + chunk
migrate = True
if migrate:
if auto_migrate or console.confirm(
green(migration_cmd) + white('\n') +
white('Run migrations? ', bold=True), default=True):
noopable(sudo)(migration_cmd, user='www-data')
import boto
import time
from collections import namedtuple
from fabric.api import task, execute, serial
from functools import wraps, partial
from safety import noopable
from output import notify
from dogapi import dog_stats_api
from .metrics import instance_tags
from .ec2 import instance_id
MAX_SLEEP_TIME = 1
LockedElb = namedtuple('LockedElb', 'name elb lock')
def await_elb_instance_state(lb, instance_id, awaited_state):
sleep_time = 0.1
start_time = time.time()
while True:
state = lb.get_instance_health([instance_id])[0].state
if state == awaited_state:
notify("Load Balancer {lb} is in awaited state {awaited_state}, proceeding.".format(
lb=lb.dns_name,
awaited_state=awaited_state
))
break
else:
notify("Checking again in {0} seconds. Elapsed time: {1}".format(sleep_time, time.time() - start_time))
time.sleep(sleep_time)
sleep_time *= 2
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
def rolling(func):
@task
@serial
@wraps(func)
def wrapper(*args, **kwargs):
elb = boto.connect_elb()
elbs = elb.get_all_load_balancers()
execute('locks.wait_for_all_locks')
inst_id = instance_id()
tags = ['task:' + func.__name__] + instance_tags([inst_id])
active_lbs = sorted(
lb
for lb in elbs
if inst_id in [info.id for info in lb.instances]
)
timer = partial(dog_stats_api.timer, tags=tags)
# Remove this node from the LB
for lb in active_lbs:
notify("Removing {id} from {lb}".format(id=inst_id, lb=lb))
with timer('rolling.deregister_instance'):
noopable(lb.deregister_instances)([inst_id])
noopable(await_elb_instance_state)(lb, inst_id, "OutOfService")
# Execute the operation
func(*args, **kwargs)
# Add this node back to the LBs
for lb in active_lbs:
notify("Adding {id} to {lb}".format(id=inst_id, lb=lb))
with timer('rolling.register_instance'):
noopable(lb.register_instances)([inst_id])
with timer('rolling.wait_for_start'):
# Wait for the node to come online in the LBs
for lb in active_lbs:
noopable(await_elb_instance_state)(lb, inst_id, "InService")
return wrapper
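# A minimal sketch of how @rolling is meant to be used (it mirrors
# clean.mako_template_cache above; the task body here is an assumption):
#
#   @task
#   @rolling
#   def restart_gunicorn():
#       noopable(sudo)('service gunicorn restart')
#
# Each host is deregistered from its ELBs, the body runs, and the host is
# re-registered and awaited InService before fab moves on to the next host.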
import sys
from contextlib import contextmanager
from fabric.api import puts
class SquelchingStream(object):
def __init__(self, stream):
self.__dict__['stream'] = stream
self.__dict__['squelched'] = False
self.__dict__['needs_line_ending'] = False
def write(self, string):
if self.squelched:
self.stream.write('.')
self.stream.flush()
self.needs_line_ending = True
else:
if self.needs_line_ending:
self.needs_line_ending = False
self.stream.write('\n')
self.stream.write(string)
def __getattr__(self, attr):
return getattr(self.stream, attr)
def __setattr__(self, attr, val):
if attr in self.__dict__:
return object.__setattr__(self, attr, val)
return setattr(self.stream, attr, val)
sys.stdout = SquelchingStream(sys.stdout)
sys.stderr = SquelchingStream(sys.stderr)
def squelch():
sys.stdout.squelched = sys.stderr.squelched = True
def unsquelch():
sys.stdout.squelched = sys.stderr.squelched = False
@contextmanager
def unsquelched(stream=sys.stdout):
old_state = stream.squelched
stream.squelched = False
yield
stream.squelched = old_state
def notify(msg, show_prefix=None, end='\n', flush=False):
with unsquelched():
puts(msg, show_prefix, end, flush)
import os
from fabric.api import run, settings, hide, sudo
from collections import defaultdict
import yaml
import re
MIN_REVISION_LENGTH = 7
class PackageInfo:
def __init__(self):
path = os.path.abspath(__file__)
with open(os.path.join(
os.path.dirname(path), '../package_data.yaml')) as f:
package_data = yaml.load(f)
# exhaustive list of MITx repos
self.repo_dirs = package_data['repo_dirs']
self.cmd_list = {
'pre': package_data['pre_checkout_regex'],
'post': package_data['post_checkout_regex']}
self.service_repos = package_data['service_repos']
def repo_from_name(self, name):
repos = []
for repo_root in self.repo_dirs:
if os.path.basename(repo_root) == name:
repos.append(self.repo_dirs[repo_root])
if len(repos) > 1:
raise Exception('Multiple repos found for name')
elif len(repos) == 0:
raise Exception('Repo not found for name')
else:
return repos[0].split('/')[1]
def org_from_name(self, name):
repos = []
for repo_root in self.repo_dirs:
if os.path.basename(repo_root) == name:
repos.append(self.repo_dirs[repo_root])
if len(repos) > 1:
raise Exception('Multiple repos found for name')
elif len(repos) == 0:
raise Exception('Repo not found for name')
else:
return repos[0].split('/')[0]
def pre_post_actions(self, pkgs):
"""
Returns a dictionary containing a list of
commands that need to be executed
pre and post checkout for one or more package names.
return({
'pre': [ 'cmd1', 'cmd2', ... ],
'post': [ 'cmd1', 'cmd2', ... ]
})
"""
cmds = defaultdict(list)
for stage in ['pre', 'post']:
for regex, cmd_templates in self.cmd_list[stage]:
for pkg in pkgs:
match = re.match(regex, pkg)
if match is None:
continue
cmds[stage].extend(
cmd.format(*match.groups(), **match.groupdict())
for cmd in cmd_templates
if cmd not in cmds[stage]
)
return(cmds)
def installed_packages(self):
"""
Returns the list of PackageDescriptors for the packages
installed on the system.
This is determined by looking at every package directory
we know about and checking its revision.
"""
with settings(hide('running'), warn_only=True):
revisions = sudo(
"""
for path in {0}; do
if [[ -d "$path/.git" ]]; then
echo $path $(cd $path && git rev-parse HEAD 2>/dev/null)
fi
done
""".format(' '.join(self.repo_dirs))).split('\n')
packages = [revline.strip().split(' ') for revline in revisions
if ' ' in revline.strip()]
return [PackageDescriptor(os.path.basename(path), revision)
for path, revision in packages]
class PackageDescriptor(object):
def __init__(self, name, revision):
if revision != 'absent' and len(revision) < MIN_REVISION_LENGTH:
raise Exception("Must use at least {0} characters "
"in revision to pseudo-guarantee uniqueness".format(
MIN_REVISION_LENGTH))
self.name = name
# Find the repo_root by name
# This assumes that basename(repo_root) is unique
# for all repo_roots. If this is not true an exception
# will be raised
pkg_info = PackageInfo()
repo_roots = []
for repo_dir in pkg_info.repo_dirs.keys():
if os.path.basename(repo_dir) == name:
repo_roots.append(repo_dir)
if len(repo_roots) != 1:
raise Exception("Unable to look up directory for repo")
self.repo_root = repo_roots[0]
self.repo_name = pkg_info.repo_dirs[self.repo_root].split('/')[1]
self.repo_org = pkg_info.repo_dirs[self.repo_root].split('/')[0]
self.revision = revision
from fabric.api import env
from output import notify
def noopable(fun):
if env.noop:
def noop(*args, **kwargs):
notify("Would have called: {fun}({args}, {kwargs})".format(
fun=fun.__name__,
args=", ".join(repr(a) for a in args),
kwargs=", ".join("=".join([key, repr(val)]) for key, val in kwargs.items()),
))
return noop
else:
return fun
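# A minimal usage sketch: wrap any mutating call so that running `fab noop ...`
# only reports what would have happened, e.g.
#   noopable(sudo)('service nginx reload')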
from fabric.api import task, env, abort
from fabric.colors import red
import os
import re
@task(default=True)
def ssh(user=None):
if user is None:
user = env.user
if len(env.hosts) != 1:
abort(red('Please specify one host for ssh'))
for host in env.hosts:
host = re.sub(':(\d+)', r' -p\1 ', host)
os.system('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -l {0} {1}'.format(user, host))
from fabric.api import abort, env, fastprint
from fabric.colors import green, red, white
import subprocess
import shlex
import atexit
import time
import boto
import re
import socket
DOMAIN = 'm.edx.org:22'
class SSHTunnel:
port = 9000 # default starting port
tunnels = {}
def __init__(self, host, phost, user, lport=None):
if lport is not None:
SSHTunnel.port = lport
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
try:
s.connect(('localhost', SSHTunnel.port))
s.shutdown(2)
# connection was successful so try a new port
SSHTunnel.port += 1
except:
self.lport = SSHTunnel.port
break
phost = re.sub(':(\d+)', r' -p\1 ', phost)
identities = ''
if env.key_filename:
# could be a list or a string
if isinstance(env.key_filename, basestring):
lst = [env.key_filename]
else:
lst = env.key_filename
identities = ' '.join('-i {f} '.format(f=f) for f in lst)
cmd = 'ssh -o UserKnownHostsFile=/dev/null ' \
'{ids}' \
'-o StrictHostKeyChecking=no -vAN -L {lport}:{host} ' \
'{user}@{phost}'.format(ids=identities, lport=self.lport,
host=host, user=user, phost=phost)
self.p = subprocess.Popen(shlex.split(cmd),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
start_time = time.time()
atexit.register(self.p.kill)
while 'Entering interactive session' not in self.p.stderr.readline():
if time.time() > start_time + 10:
abort(red("Unable to create ssh tunnel - `{0}`".format(cmd)))
def local(self):
return 'localhost:{lport}'.format(lport=self.lport)
def setup_tunnel(all_hosts, check_tag=True,
proxy_name=None, user=None, lport=None):
"""
Given all_hosts, checks (when check_tag is True) whether any of them must
be reached through a proxy host and returns a modified list of hosts in
which tunneled hosts are replaced by their localhost:port endpoints.
"""
if user is None:
user = env.user
ec2 = boto.connect_ec2()
# the proxy hosts
proxies = {}
if check_tag:
for res in ec2.get_all_instances(filters={'tag-key': 'proxy'}):
for inst in res.instances:
host = ".".join([inst.tags['Name'], DOMAIN])
proxy = ".".join([inst.tags['proxy'], DOMAIN])
proxies.update({host: proxy})
else:
if not proxy_name:
raise Exception("Must specify a proxy_host")
proxies = {host: proxy_name for host in all_hosts}
# local tunneling ip:port
tunnels = {}
for host in all_hosts:
if host in proxies and host not in SSHTunnel.tunnels:
t = SSHTunnel(host=host, phost=proxies[host],
user=user, lport=lport)
tunnels[host] = t.local()
fastprint(green('created {0} for {1} via {2}'.format(tunnels[host],
host, proxies[host])) + white('\n'))
SSHTunnel.tunnels.update(tunnels)
return([SSHTunnel.tunnels[host] if host in SSHTunnel.tunnels else host
for host in all_hosts])
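# A minimal usage sketch (illustrative host name): hosts that carry a 'proxy'
# tag are rewritten to their local tunnel endpoints; everything else is
# returned unchanged:
#   env.hosts = setup_tunnel(['app1.m.edx.org:22'])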
from fabric.api import task, sudo, abort, parallel, runs_once, execute
from fabric.api import settings, hide
from fabric.operations import put
from fabric.utils import fastprint
from safety import noopable
from fabric.colors import blue, red
from fabric.contrib import console
from output import unsquelched
from timestamps import no_ts
from choose import multi_choose_with_input
import json
import tempfile
status_file = '/opt/wwc/status_message.json'
@task(default=True)
@runs_once
def status():
"""
Drops {0} which is a json formatted file that contains a
status message that will be displayed to all users on the
courseware for a single course, or for all courses
if 'global' is set.
Message(s) are entered or removed interactively on the console.
Example usage:
$ fab groups:prod_edx status
""".format(status_file)
with hide('running', 'stdout', 'stderr', 'warnings'):
env_json = sudo("cat /opt/wwc/lms-xml.env.json")
course_listings = json.loads(env_json)['COURSE_LISTINGS']
course_ids = [course_id for course_list in course_listings.itervalues()
for course_id in course_list]
course_ids = ['global'] + course_ids
with no_ts():
course_status = None
with settings(warn_only=True):
cur_status = noopable(sudo)('cat {0}'.format(status_file))
try:
course_status = json.loads(cur_status)
# add empty entries for courses not in the list
empty_entries = set(course_ids) - set(course_status.keys())
course_status.update({entry: '' for entry in list(empty_entries)})
except ValueError:
fastprint(red("Not a valid json file, overwritting\n"))
if course_status is None:
course_status = {course: '' for course in course_ids}
new_status = multi_choose_with_input(
'Set the status message, blank to disable:',
course_status)
if new_status is not None:
# remove empty entries
new_status = {entry: new_status[entry]
for entry in new_status if len(new_status[entry]) > 1}
with unsquelched():
if not console.confirm(
'Setting new status message:\n{0}'.format(
blue(str(new_status), bold=True)),
default=False):
abort('Operation cancelled by user')
with tempfile.NamedTemporaryFile(delete=True) as f:
f.write(json.dumps(new_status))
f.flush()
execute(update_status, f.name)
else:
abort('Operation cancelled by user')
@task
@runs_once
def remove():
"""
Removes {0}, a status banner that is displayed to all
users on the front page.
""".format(status_file)
with unsquelched():
if not console.confirm(
blue('Remove {0}?'.format(status_file), bold=True)):
abort('Operation cancelled by user')
execute(remove_status)
@task
@parallel
def remove_status():
noopable(sudo)('rm -f {0}'.format(status_file))
@task
@parallel
def update_status(fjson):
print status_file
noopable(put)(fjson, status_file, use_sudo=True)
from datetime import datetime
from contextlib import contextmanager
import sys
@contextmanager
def no_ts():
sys.stdout.ts = False
yield
sys.stdout.ts = True
class TSWrapper(object):
def __init__(self, stream):
self.o = stream
self.files = []
self.files.append(self.o)
self.newline = True
self.ts = True
def write(self, s):
d = datetime.now()
if self.ts:
buf = ""
lines = s.splitlines(True)
for line in lines:
if self.newline:
buf += d.strftime('[ %Y%m%d %H:%M:%S ] : {0}'.format(line))
else:
buf += str(line)
if line[-1] == '\n':
self.newline = True
else:
self.newline = False
else:
buf = s
for fh in self.files:
fh.write(buf)
fh.flush()
def log_to_file(self, fn):
fp = open(fn, 'a')
self.files.append(fp)
def __getattr__(self, attr):
return getattr(self.o, attr)
......@@ -5,5 +5,5 @@
[defaults]
jinja2_extensions=jinja2.ext.do
hash_behaviour=merge
host_key_checking = False
roles_path=../../../ansible-roles
../ansible.cfg
\ No newline at end of file
# config file for ansible -- http://ansible.github.com
# nearly all parameters can be overridden in ansible-playbook or with command line flags
# ansible will read ~/.ansible.cfg or /etc/ansible/ansible.cfg, whichever it finds first
[defaults]
jinja2_extensions=jinja2.ext.do
host_key_checking=False
roles_path=../../../ansible-roles
......@@ -2,7 +2,14 @@
hosts: all
sudo: True
gather_facts: True
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- certs
- role: datadog
when: enable_datadog
- role: splunkforwarder
when: enable_splunkforwarder
......@@ -2,7 +2,14 @@
hosts: all
sudo: True
gather_facts: True
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- common
- role: datadog
when: enable_datadog
- role: splunkforwarder
when: enable_splunkforwarder
......@@ -16,3 +16,5 @@
- ora
- xqueue
- xserver
nginx_default_sites:
- lms
# ansible-playbook -c ssh -vvvv --user=ubuntu -i ec2.py deployer.yml -e "@gh_users.yml" -e "@/path/to/secure/ansible/vars/hotg.yml" -e "@/path/to/configuration-secure/ansible/vars/common/common.yml" --limit="tag_aws_cloudformation_stack-name_<admin_stack_name>"
# You will need to create a gh_users.yml that contains the github names of users that should have login access to the machines.
# Setup user login on the bastion
- name: Configure Bastion
hosts: tag_role_bastion
sudo: True
gather_facts: False
roles:
- gh_users
# Configure an admin instance with jenkins and asgard.
- name: Configure instance(s)
hosts: tag_role_admin
sudo: True
gather_facts: True
roles:
- common
- gh_users
- jenkins_master
- hotg
......@@ -2,7 +2,17 @@
hosts: all
sudo: True
gather_facts: True
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- role: nginx
nginx_sites:
- discern
- discern
- role: datadog
when: enable_datadog
- role: splunkforwarder
when: enable_splunkforwarder
- name: Deploy ansible
- name: Deploy the edx_ansible role
hosts: all
sudo: True
gather_facts: True
gather_facts: False
roles:
- edx_ansible
......@@ -14,13 +14,15 @@
- ora
- xqueue
- xserver
nginx_default_sites:
- lms
- edxlocal
- mongo
- { role: 'edxapp', celery_worker: True }
- edxapp
- role: demo
tags: ['demo']
- { role: 'rabbitmq', rabbitmq_ip: '127.0.0.1' }
- { role: 'edxapp', celery_worker: True }
- oraclejdk
- elasticsearch
- forum
......@@ -29,3 +31,4 @@
- ora
- discern
- certs
- edx_ansible
......@@ -20,6 +20,8 @@
- lms
- cms
- lms-preview
nginx_default_sites:
- lms
- role: 'edxapp'
EDXAPP_LMS_NGINX_PORT: 80
EDXAPP_CMS_NGINX_PORT: 80
......@@ -38,6 +40,8 @@
- lms
- cms
- lms-preview
nginx_default_sites:
- lms
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
celery_worker: True
......
......@@ -12,6 +12,8 @@
- lms
- cms
- lms-preview
nginx_default_sites:
- lms
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
edx_platform_version: 'release'
......@@ -29,6 +31,8 @@
- lms
- cms
- lms-preview
nginx_default_sites:
- lms
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
celery_worker: True
......
......@@ -22,6 +22,8 @@
- xqueue
- xserver
- ora
nginx_default_sites:
- lms
- edxlocal
- mongo
- edxapp
......
......@@ -2,18 +2,37 @@
hosts: localhost
connection: local
gather_facts: False
pre_tasks:
vars:
keypair: continuous-integration
instance_type: m1.medium
security_group: sandbox
# ubuntu 12.04
ami: ami-d0f89fb9
region: us-east-1
zone: us-east-1b
instance_tags:
environment: sandbox
github_username: temp
Name: sandbox-temp
source: provisioning-script
owner: temp
root_ebs_size: 50
dns_name: temp
dns_zone: m.sandbox.edx.org
name_tag: sandbox-temp
elb: false
roles:
- role: launch_ec2
keypair: "{{ keypair }}"
instance_type: "{{ instance_type }}"
security_group: "{{ security_group }}"
ami_image: "{{ ami }}"
ami: "{{ ami }}"
region: "{{ region }}"
instance_tags: "{{ instance_tags }}"
root_ebs_size: "{{ root_ebs_size }}"
dns_name: "{{ dns_name }}"
dns_zone: "{{ dns_zone }}"
zone: "{{ zone }}"
terminate_instance: true
instance_profile_name: sandbox
......@@ -21,6 +40,8 @@
hosts: launched
sudo: True
gather_facts: False
vars:
elb: false
pre_tasks:
- name: Wait for cloud-init to finish
wait_for: >
......@@ -32,6 +53,7 @@
- roles/ora/defaults/main.yml
- roles/xqueue/defaults/main.yml
- roles/xserver/defaults/main.yml
- roles/forum/defaults/main.yml
roles:
# rerun common to set the hostname, nginx to set basic auth
- common
......@@ -42,6 +64,9 @@
- ora
- xqueue
- xserver
- forum
nginx_default_sites:
- lms
# gh_users hash must be passed
# in as a -e variable
- gh_users
......@@ -57,7 +82,7 @@
sudo: False
- name: register instance into an elb if one was provided
local_action:
module: ec2_elb
module: ec2_elb_local_1.3
region: "{{ region }}"
instance_id: "{{ ec2_info.instance_ids[0] }}"
state: present
......
......@@ -30,6 +30,8 @@
- lms
- cms
- lms-preview
nginx_default_sites:
- lms
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
migrate_db: '{{ RUN_EDXAPP_MIGRATION }}'
......@@ -49,6 +51,8 @@
- lms
- cms
- lms-preview
nginx_default_site:
- lms
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
- splunkforwarder
......@@ -65,6 +69,8 @@
- lms
- cms
- lms-preview
nginx_default_site:
- lms
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
celery_worker: True
......
......@@ -2,7 +2,20 @@
hosts: all
sudo: True
gather_facts: True
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- role: nginx
nginx_sites:
- lms
- cms
nginx_default_sites:
- lms
- edxapp
- role: datadog
when: enable_datadog
- role: splunkforwarder
when: enable_splunkforwarder
......@@ -2,7 +2,17 @@
hosts: all
sudo: True
gather_facts: True
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- role: nginx
nginx_sites:
- forum
- forum
- role: datadog
when: enable_datadog
- role: splunkforwarder
when: enable_splunkforwarder
---
# Deploys gerrit on to a server.
#
# Usage:
# ansible-playbook gerrit_deploy.yml -i gerrit_inventory.ini -e "secure_dir=/path/to/secure/dir"
- name: Install and configure gerrit
hosts: gerrit
sudo: True
gather_facts: True
vars_files:
- "{{ secure_dir }}/vars/gerrit.yml"
pre_tasks:
- name: update apt
apt: update_cache=yes
roles:
- gerrit
......@@ -5,4 +5,7 @@
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- role: nginx
nginx_sites:
- ora
- ora
- name: Stop all services
hosts: all
sudo: True
gather_facts: False
roles:
- stop_all_edx_services
......@@ -2,8 +2,15 @@
hosts: all
sudo: True
gather_facts: True
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- role: edxapp
celery_worker: True
- role: datadog
when: enable_datadog
- role: splunkforwarder
when: enable_splunkforwarder
......@@ -2,7 +2,17 @@
hosts: all
sudo: True
gather_facts: True
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- role: nginx
nginx_sites:
- xqueue
- role: xqueue
- role: datadog
when: enable_datadog
- role: splunkforwarder
when: enable_splunkforwarder
......@@ -2,7 +2,17 @@
hosts: all
sudo: True
gather_facts: True
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- role: nginx
nginx_sites:
- xserver
- role: xserver
- role: datadog
when: enable_datadog
- role: splunkforwarder
when: enable_splunkforwarder
......@@ -39,6 +39,8 @@
- lms
- cms
- lms-preview
nginx_default_sites:
- lms
- {'role': 'edxapp', 'openid_workaround': true, 'template_subdir': 'cme'}
# run this role last
# - in_production
......@@ -21,6 +21,8 @@
- lms
- cms
- lms-preview
nginx_default_sites:
- lms
- edxapp
- ruby
post_tasks:
......
......@@ -32,6 +32,8 @@
- lms
- cms
- lms-preview
nginx_default_sites:
- lms
- edxapp
- apache
- shibboleth
......
......@@ -24,6 +24,8 @@
- lms
- cms
- lms-preview
nginx_default_sites:
- lms
- edxapp
- apache
- shibboleth
......
......@@ -19,6 +19,8 @@
- lms
- cms
- lms-preview
nginx_default_sites:
- lms
- edxapp
- ansible_debug
#- apache
......
......@@ -18,14 +18,17 @@
nginx_sites:
- cms
- lms
- forum
- ora
- xqueue
nginx_default_sites:
- lms
- edxlocal
- mongo
- { role: 'edxapp', celery_worker: True }
- edxapp
- demo
- { role: 'rabbitmq', rabbitmq_ip: '127.0.0.1' }
- { role: 'edxapp', celery_worker: True }
- oraclejdk
- elasticsearch
- forum
......
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: ec2_elb
short_description: De-registers or registers instances from EC2 ELB(s)
description:
- This module de-registers or registers an AWS EC2 instance from the ELB(s)
that it belongs to.
- Returns fact "ec2_elbs" which is a list of elbs attached to the instance
if state=absent is passed as an argument.
- Will be marked changed when called only if there are ELBs found to operate on.
version_added: "1.2"
requirements: [ "boto" ]
author: John Jarvis
options:
state:
description:
- register or deregister the instance
required: true
instance_id:
description:
- EC2 Instance ID
required: true
ec2_elbs:
description:
- List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register.
required: false
default: None
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: None
aliases: ['ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: None
aliases: ['ec2_access_key', 'access_key' ]
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
wait:
description:
- Wait for instance registration or deregistration to complete successfully before returning.
required: false
default: yes
choices: [ "yes", "no" ]
"""
EXAMPLES = """
# basic pre_task and post_task example
pre_tasks:
- name: Gathering ec2 facts
ec2_facts:
- name: Instance De-register
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
state: 'absent'
roles:
- myrole
post_tasks:
- name: Instance Register
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
state: 'present'
with_items: ec2_elbs
"""
import time
import sys
import os
AWS_REGIONS = ['ap-northeast-1',
'ap-southeast-1',
'ap-southeast-2',
'eu-west-1',
'sa-east-1',
'us-east-1',
'us-west-1',
'us-west-2']
try:
import boto
import boto.ec2.elb
from boto.regioninfo import RegionInfo
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
class ElbManager:
"""Handles EC2 instance ELB registration and de-registration"""
def __init__(self, module, instance_id=None, ec2_elbs=None,
aws_access_key=None, aws_secret_key=None, region=None):
self.aws_access_key = aws_access_key
self.aws_secret_key = aws_secret_key
self.module = module
self.instance_id = instance_id
self.region = region
self.lbs = self._get_instance_lbs(ec2_elbs)
# if there are no ELBs to operate on
# there will be no changes made
if len(self.lbs) > 0:
self.changed = True
else:
self.changed = False
def deregister(self, wait):
"""De-register the instance from all ELBs and wait for the ELB
to report it out-of-service"""
for lb in self.lbs:
lb.deregister_instances([self.instance_id])
if wait:
self._await_elb_instance_state(lb, 'OutOfService')
def register(self, wait):
"""Register the instance for all ELBs and wait for the ELB
to report the instance in-service"""
for lb in self.lbs:
lb.register_instances([self.instance_id])
if wait:
self._await_elb_instance_state(lb, 'InService')
def exists(self, lbtest):
""" Verify that the named ELB actually exists """
found = False
for lb in self.lbs:
if lb.name == lbtest:
found=True
break
return found
def _await_elb_instance_state(self, lb, awaited_state):
"""Wait for an ELB to change state
lb: load balancer
awaited_state : state to poll for (string)"""
while True:
state = lb.get_instance_health([self.instance_id])[0].state
if state == awaited_state:
break
else:
time.sleep(1)
def _get_instance_lbs(self, ec2_elbs=None):
"""Returns a list of ELBs attached to self.instance_id
ec2_elbs: an optional list of elb names that will be used
for elb lookup instead of returning what elbs
are attached to self.instance_id"""
try:
endpoint="elasticloadbalancing.%s.amazonaws.com" % self.region
connect_region = RegionInfo(name=self.region, endpoint=endpoint)
elb = boto.ec2.elb.ELBConnection(self.aws_access_key, self.aws_secret_key, region=connect_region)
except boto.exception.NoAuthHandlerFound, e:
self.module.fail_json(msg=str(e))
elbs = elb.get_all_load_balancers()
if ec2_elbs:
lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs)
else:
lbs = []
for lb in elbs:
for info in lb.instances:
if self.instance_id == info.id:
lbs.append(lb)
return lbs
def main():
module = AnsibleModule(
argument_spec=dict(
state={'required': True,
'choices': ['present', 'absent']},
instance_id={'required': True},
ec2_elbs={'default': None, 'required': False, 'type':'list'},
aws_secret_key={'default': None, 'aliases': ['ec2_secret_key', 'secret_key'], 'no_log': True},
aws_access_key={'default': None, 'aliases': ['ec2_access_key', 'access_key']},
region={'default': None, 'required': False, 'aliases':['aws_region', 'ec2_region'], 'choices':AWS_REGIONS},
wait={'required': False, 'choices': BOOLEANS, 'default': True}
)
)
aws_secret_key = module.params['aws_secret_key']
aws_access_key = module.params['aws_access_key']
ec2_elbs = module.params['ec2_elbs']
region = module.params['region']
wait = module.params['wait']
if module.params['state'] == 'present' and 'ec2_elbs' not in module.params:
module.fail_json(msg="ELBs are required for registration")
if not aws_secret_key:
if 'AWS_SECRET_KEY' in os.environ:
aws_secret_key = os.environ['AWS_SECRET_KEY']
elif 'EC2_SECRET_KEY' in os.environ:
aws_secret_key = os.environ['EC2_SECRET_KEY']
if not aws_access_key:
if 'AWS_ACCESS_KEY' in os.environ:
aws_access_key = os.environ['AWS_ACCESS_KEY']
elif 'EC2_ACCESS_KEY' in os.environ:
aws_access_key = os.environ['EC2_ACCESS_KEY']
if not region:
if 'AWS_REGION' in os.environ:
region = os.environ['AWS_REGION']
elif 'EC2_REGION' in os.environ:
region = os.environ['EC2_REGION']
if not region:
module.fail_json(msg=str("Either region or EC2_REGION environment variable must be set."))
instance_id = module.params['instance_id']
elb_man = ElbManager(module, instance_id, ec2_elbs, aws_access_key,
aws_secret_key, region=region)
for elb in ec2_elbs:
if not elb_man.exists(elb):
msg="ELB %s does not exist" % elb
module.fail_json(msg=msg)
if module.params['state'] == 'present':
elb_man.register(wait)
elif module.params['state'] == 'absent':
elb_man.deregister(wait)
ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]}
ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts)
module.exit_json(**ec2_facts_result)
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
......@@ -121,7 +121,7 @@ options:
required: False
default: 1
aliases: []
monitor:
monitoring:
version_added: "1.1"
description:
- enable detailed monitoring (CloudWatch) for instance
......@@ -185,7 +185,7 @@ options:
default: 'present'
aliases: []
root_ebs_size:
version_added: "1.4"
version_added: "1.5"
description:
- size of the root volume in gigabytes
required: false
......@@ -193,7 +193,7 @@ options:
aliases: []
requirements: [ "boto" ]
author: Seth Vidal, Tim Gerla, Lester Wade, John Jarvis
author: Seth Vidal, Tim Gerla, Lester Wade
'''
EXAMPLES = '''
......@@ -210,17 +210,6 @@ EXAMPLES = '''
group: webserver
count: 3
# Basic provisioning example with setting the root volume size to 50GB
- local_action:
module: ec2
keypair: mykey
instance_type: c1.medium
image: emi-40603AD1
wait: yes
group: webserver
count: 3
root_ebs_size: 50
# Advanced example with tagging and CloudWatch
- local_action:
module: ec2
......@@ -231,7 +220,8 @@ EXAMPLES = '''
wait: yes
wait_timeout: 500
count: 5
instance_tags: '{"db":"postgres"}' monitoring=yes'
instance_tags: '{"db":"postgres"}'
monitoring=yes
# Multiple groups example
local_action:
......@@ -243,7 +233,8 @@ local_action:
wait: yes
wait_timeout: 500
count: 5
instance_tags: '{"db":"postgres"}' monitoring=yes'
instance_tags: '{"db":"postgres"}'
monitoring=yes
# VPC example
- local_action:
......@@ -406,6 +397,7 @@ def create_instances(module, ec2):
else:
bdm = None
# group_id and group_name are exclusive of each other
if group_id and group_name:
module.fail_json(msg = str("Use only one type of parameter (group_name) or (group_id)"))
......@@ -416,9 +408,7 @@ def create_instances(module, ec2):
if group_name:
grp_details = ec2.get_all_security_groups()
if type(group_name) == list:
# FIXME: this should be a nice list comprehension
# also not py 2.4 compliant
group_id = list(filter(lambda grp: str(grp.id) if str(tmp) in str(grp) else None, grp_details) for tmp in group_name)
group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
elif type(group_name) == str:
for grp in grp_details:
if str(group_name) in str(grp):
......@@ -501,7 +491,7 @@ def create_instances(module, ec2):
if instance_tags:
try:
ec2.create_tags(instids, module.from_json(instance_tags))
ec2.create_tags(instids, instance_tags)
except boto.exception.EC2ResponseError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
......@@ -558,6 +548,10 @@ def terminate_instances(module, ec2, instance_ids):
"""
# Whether to wait for termination to complete before returning
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
changed = False
instance_dict_array = []
......@@ -576,8 +570,30 @@ def terminate_instances(module, ec2, instance_ids):
module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e))
changed = True
return (changed, instance_dict_array, terminated_instance_ids)
# wait here until the instances are 'terminated'
if wait:
num_terminated = 0
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and num_terminated < len(terminated_instance_ids):
response = ec2.get_all_instances( \
instance_ids=terminated_instance_ids, \
filters={'instance-state-name':'terminated'})
try:
num_terminated = len(response.pop().instances)
except Exception, e:
# got a bad response of some sort, possibly due to
# stale/cached data. Wait a second and then try again
time.sleep(1)
continue
if num_terminated < len(terminated_instance_ids):
time.sleep(5)
# waiting took too long
if wait_timeout < time.time() and num_terminated < len(terminated_instance_ids):
module.fail_json(msg = "wait for instance termination timeout on %s" % time.asctime())
return (changed, instance_dict_array, terminated_instance_ids)
def main():
......@@ -593,16 +609,16 @@ def main():
image = dict(),
kernel = dict(),
count = dict(default='1'),
monitoring = dict(choices=BOOLEANS, default=False),
monitoring = dict(type='bool', default=False),
ramdisk = dict(),
wait = dict(choices=BOOLEANS, default=False),
wait = dict(type='bool', default=False),
wait_timeout = dict(default=300),
ec2_url = dict(),
aws_secret_key = dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True),
aws_access_key = dict(aliases=['ec2_access_key', 'access_key']),
ec2_secret_key = dict(aliases=['aws_secret_key', 'secret_key'], no_log=True),
ec2_access_key = dict(aliases=['aws_access_key', 'access_key']),
placement_group = dict(),
user_data = dict(),
instance_tags = dict(),
instance_tags = dict(type='dict'),
vpc_subnet_id = dict(),
private_ip = dict(),
instance_profile_name = dict(),
......@@ -612,33 +628,9 @@ def main():
)
)
ec2_url = module.params.get('ec2_url')
aws_secret_key = module.params.get('aws_secret_key')
aws_access_key = module.params.get('aws_access_key')
region = module.params.get('region')
# allow eucarc environment variables to be used if ansible vars aren't set
if not ec2_url and 'EC2_URL' in os.environ:
ec2_url = os.environ['EC2_URL']
if not aws_secret_key:
if 'AWS_SECRET_KEY' in os.environ:
aws_secret_key = os.environ['AWS_SECRET_KEY']
elif 'EC2_SECRET_KEY' in os.environ:
aws_secret_key = os.environ['EC2_SECRET_KEY']
if not aws_access_key:
if 'AWS_ACCESS_KEY' in os.environ:
aws_access_key = os.environ['AWS_ACCESS_KEY']
elif 'EC2_ACCESS_KEY' in os.environ:
aws_access_key = os.environ['EC2_ACCESS_KEY']
if not region:
if 'AWS_REGION' in os.environ:
region = os.environ['AWS_REGION']
elif 'EC2_REGION' in os.environ:
region = os.environ['EC2_REGION']
# def get_ec2_creds(module):
# return ec2_url, ec2_access_key, ec2_secret_key, region
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
# If we have a region specified, connect to its endpoint.
if region:
......@@ -672,8 +664,8 @@ def main():
module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array)
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
......@@ -15,10 +15,8 @@
#
#
- name: analytics-server | stop the analytics service
- name: stop the analytics service
service: name=analytics state=stopped
tags: deploy
- name: analytics-server | start the analytics service
- name: start the analytics service
service: name=analytics state=started
tags: deploy
#
# TODO: Needed while this repo is private
#
- name: analytics-server | upload ssh script
template:
- name: upload ssh script
template:
src=tmp/{{ as_role_name }}.git_ssh.sh.j2 dest={{ as_git_ssh }}
force=yes owner=root group=adm mode=750
tags:
- analytics-server
- deploy
- install
- update
#
# TODO: Needed while this repo is private
#
- name: analytics-server | install read-only ssh key required for checkout
copy:
- name: install read-only ssh key required for checkout
copy:
src={{ as_git_identity_path }} dest={{ as_git_identity_dest }}
force=yes owner=ubuntu group=adm mode=0600
tags:
- analytics-server
- deploy
- install
- update
- name: analytics-server | checkout code
git:
dest={{ as_code_dir }} repo={{ as_source_repo }}
- name: checkout code
git:
dest={{ as_code_dir }} repo={{ as_source_repo }}
version={{ as_version }} force=true
environment:
GIT_SSH: $as_git_ssh
notify: analytics-server | restart the analytics service
notify: analytics-server | start the analytics service
notify: restart the analytics service
notify: start the analytics service
tags:
- analytics-server
- deploy
- install
- update
#
# TODO: Needed while this repo is private
#
- name: analytics-server | update src permissions
file:
path={{ as_code_dir }} state=directory owner={{ as_user }}
- name: update src permissions
file:
path={{ as_code_dir }} state=directory owner={{ as_user }}
group={{ as_web_user }} mode=2750 recurse=yes
tags:
- analytics-server
- deploy
- install
- update
#
# TODO: Needed while this repo is private
#
- name: analytics-server | remove read-only ssh key for the content repo
- name: remove read-only ssh key for the content repo
file: path={{ as_git_identity_dest }} state=absent
tags:
- analytics-server
- deploy
- install
- update
#
# TODO: Needed while this repo is private
#
- name: analytics-server | remove ssh script
- name: remove ssh script
file: path={{ as_git_ssh }} state=absent
tags:
- analytics-server
- deploy
- install
- update
- name: analytics-server | install application requirements
pip:
- name: install application requirements
pip:
requirements={{ as_requirements_file }}
virtualenv={{ as_venv_dir }} state=present
sudo: true
sudo: true
sudo_user: "{{ as_user }}"
notify: analytics-server | start the analytics service
notify: start the analytics service
tags:
- analytics-server
- deploy
- install
- update
......@@ -11,7 +11,7 @@
#
#
# Tasks for role analytics-server
#
#
# Overview:
#
# Installs the edX analytics-server Django application which provides
......@@ -22,7 +22,7 @@
# common role
#
# Depends upon the automated role
#
#
# Example play:
#
# - name: Configure analytics instance(s)
......@@ -37,79 +37,79 @@
# - common
# - analytics-server
#
- name: analytics-server | install system packages
- name: install system packages
apt: pkg={{','.join(as_debian_pkgs)}} state=present
tags:
- analytics-server
- install
- update
- name: analytics-server | create analytics-server user {{ as_user }}
user:
name={{ as_user }} state=present shell=/bin/bash
- name: create analytics-server user {{ as_user }}
user:
name={{ as_user }} state=present shell=/bin/bash
home={{ as_home }} createhome=yes
tags:
- analytics-server
- install
- update
- name: analytics-server | setup the analytics-server env
template:
- name: setup the analytics-server env
template:
src=opt/wwc/analytics-server/{{ as_env }}.j2
dest={{ as_home }}/{{ as_env }}
dest={{ as_home }}/{{ as_env }}
owner="{{ as_user }}" group="{{ as_user }}"
tags:
- analytics-server
- install
- update
- name: analytics-server | drop a bash_profile
- name: drop a bash_profile
copy: >
src=../../common/files/bash_profile
dest={{ as_home }}/.bash_profile
owner={{ as_user }}
src=../../common/files/bash_profile
dest={{ as_home }}/.bash_profile
owner={{ as_user }}
group={{ as_user }}
# Awaiting next ansible release.
#- name: analytics-server | ensure .bashrc exists
#- name: ensure .bashrc exists
# file: path={{ as_home }}/.bashrc state=touch
# sudo: true
# sudo: true
# sudo_user: "{{ as_user }}"
# tags:
# - analytics-server
# - install
# - update
- name: analytics-server | ensure .bashrc exists
- name: ensure .bashrc exists
shell: touch {{ as_home }}/.bashrc
sudo: true
sudo: true
sudo_user: "{{ as_user }}"
tags:
- analytics-server
- install
- update
- name: analytics-server | add source of analytics-server_env to .bashrc
- name: add source of analytics-server_env to .bashrc
lineinfile:
dest={{ as_home }}/.bashrc
regexp='. {{ as_home }}/analytics-server_env'
regexp='. {{ as_home }}/analytics-server_env'
line='. {{ as_home }}/analytics_server_env'
tags:
- analytics-server
- install
- update
- name: analytics-server | add source venv to .bashrc
- name: add source venv to .bashrc
lineinfile:
dest={{ as_home }}/.bashrc
regexp='. {{ as_venv_dir }}/bin/activate'
regexp='. {{ as_venv_dir }}/bin/activate'
line='. {{ as_venv_dir }}/bin/activate'
tags:
- analytics-server
- install
- update
- name: analytics-server | install global python requirements
- name: install global python requirements
pip: name={{ item }}
with_items: as_pip_pkgs
tags:
......@@ -117,8 +117,8 @@
- install
- update
- name: analytics-server | create config
template:
- name: create config
template:
src=opt/wwc/analytics.auth.json.j2
dest=/opt/wwc/analytics.auth.json
mode=0600
......@@ -127,10 +127,10 @@
- analytics-server
- install
- update
- name: analytics-server | install service
- name: install service
template:
src=etc/init/analytics.conf.j2 dest=/etc/init/analytics.conf
owner=root group=root
- include: deploy.yml
- include: deploy.yml tags=deploy
......@@ -15,10 +15,8 @@
#
#
- name: analytics | stop the analytics service
- name: stop the analytics service
service: name=analytics state=stopped
tags: deploy
- name: analytics | start the analytics service
- name: start the analytics service
service: name=analytics state=started
tags: deploy
#
# TODO: Needed while this repo is private
#
- name: analytics | upload ssh script
template:
- name: upload ssh script
template:
src=tmp/{{ analytics_role_name }}.git_ssh.sh.j2 dest={{ analytics_git_ssh }}
force=yes owner=root group=adm mode=750
tags:
- analytics
- deploy
- install
- update
#
# TODO: Needed while this repo is private
#
- name: analytics | install read-only ssh key required for checkout
copy:
- name: install read-only ssh key required for checkout
copy:
src={{ analytics_git_identity_path }} dest={{ analytics_git_identity_dest }}
force=yes owner=ubuntu group=adm mode=0600
tags:
- analytics
- deploy
- install
- update
- name: analytics | checkout code
git:
dest={{ analytics_code_dir }} repo={{ analytics_source_repo }}
- name: checkout code
git:
dest={{ analytics_code_dir }} repo={{ analytics_source_repo }}
version={{ analytics_version }} force=true
environment:
GIT_SSH: $analytics_git_ssh
notify: analytics | restart the analytics service
notify: analytics | start the analytics service
notify: restart the analytics service
notify: start the analytics service
tags:
- analytics
- deploy
- install
- update
#
# TODO: Needed while this repo is private
#
- name: analytics | update src permissions
file:
path={{ analytics_code_dir }} state=directory owner={{ analytics_user }}
- name: update src permissions
file:
path={{ analytics_code_dir }} state=directory owner={{ analytics_user }}
group={{ analytics_web_user }} mode=2750 recurse=yes
tags:
- analytics
- deploy
- install
- update
#
# TODO: Needed while this repo is private
#
- name: analytics | remove read-only ssh key for the content repo
- name: remove read-only ssh key for the content repo
file: path={{ analytics_git_identity_dest }} state=absent
tags:
- analytics
- deploy
- install
- update
#
# TODO: Needed while this repo is private
#
- name: analytics | remove ssh script
- name: remove ssh script
file: path={{ analytics_git_ssh }} state=absent
tags:
- analytics
- deploy
- install
- update
- name: analytics | install application requirements
pip:
- name: install application requirements
pip:
requirements={{ analytics_requirements_file }}
virtualenv={{ analytics_venv_dir }} state=present
sudo: true
sudo: true
sudo_user: "{{ analytics_user }}"
notify: analytics | start the analytics service
notify: start the analytics service
tags:
- analytics
- deploy
- install
- update
......@@ -11,7 +11,7 @@
#
#
# Tasks for role analytics
#
#
# Overview:
#
# Installs the edX analytics Django application which provides
......@@ -22,7 +22,7 @@
# common role
#
# Depends upon the automated role
#
#
# Example play:
#
# - name: Configure analytics instance(s)
......@@ -37,79 +37,79 @@
# - common
# - analytics
#
- name: analytics | install system packages
- name: install system packages
apt: pkg={{','.join(analytics_debian_pkgs)}} state=present
tags:
- analytics
- install
- update
- name: analytics | create analytics user {{ analytics_user }}
user:
name={{ analytics_user }} state=present shell=/bin/bash
- name: create analytics user {{ analytics_user }}
user:
name={{ analytics_user }} state=present shell=/bin/bash
home={{ analytics_home }} createhome=yes
tags:
- analytics
- install
- update
- name: analytics | setup the analytics env
template:
- name: setup the analytics env
template:
src=opt/wwc/analytics/{{ analytics_env }}.j2
dest={{ analytics_home }}/{{ analytics_env }}
dest={{ analytics_home }}/{{ analytics_env }}
owner="{{ analytics_user }}" group="{{ analytics_user }}"
tags:
- analytics
- install
- update
- name: analytics | drop a bash_profile
- name: drop a bash_profile
copy: >
src=../../common/files/bash_profile
dest={{ analytics_home }}/.bash_profile
owner={{ analytics_user }}
src=../../common/files/bash_profile
dest={{ analytics_home }}/.bash_profile
owner={{ analytics_user }}
group={{ analytics_user }}
# Awaiting next ansible release.
#- name: analytics | ensure .bashrc exists
#- name: ensure .bashrc exists
# file: path={{ analytics_home }}/.bashrc state=touch
# sudo: true
# sudo: true
# sudo_user: "{{ analytics_user }}"
# tags:
# - analytics
# - install
# - update
- name: analytics | ensure .bashrc exists
- name: ensure .bashrc exists
shell: touch {{ analytics_home }}/.bashrc
sudo: true
sudo: true
sudo_user: "{{ analytics_user }}"
tags:
- analytics
- install
- update
- name: analytics | add source of analytics_env to .bashrc
- name: add source of analytics_env to .bashrc
lineinfile:
dest={{ analytics_home }}/.bashrc
regexp='. {{ analytics_home }}/analytics_env'
regexp='. {{ analytics_home }}/analytics_env'
line='. {{ analytics_home }}/analytics_env'
tags:
- analytics
- install
- update
- name: analytics | add source venv to .bashrc
- name: add source venv to .bashrc
lineinfile:
dest={{ analytics_home }}/.bashrc
regexp='. {{ analytics_venv_dir }}/bin/activate'
regexp='. {{ analytics_venv_dir }}/bin/activate'
line='. {{ analytics_venv_dir }}/bin/activate'
tags:
- analytics
- install
- update
- name: analytics | install global python requirements
- name: install global python requirements
pip: name={{ item }}
with_items: analytics_pip_pkgs
tags:
......@@ -117,8 +117,8 @@
- install
- update
- name: analytics | create config
template:
- name: create config
template:
src=opt/wwc/analytics.auth.json.j2
dest=/opt/wwc/analytics.auth.json
mode=0600
......@@ -127,10 +127,10 @@
- analytics
- install
- update
- name: analytics | install service
- name: install service
template:
src=etc/init/analytics.conf.j2 dest=/etc/init/analytics.conf
owner=root group=root
- include: deploy.yml
- include: deploy.yml tags=deploy
---
- name: ansible-role | check if the role exists
- name: check if the role exists
command: test -d roles/{{ role_name }}
register: role_exists
ignore_errors: yes
- name: ansible-role | prompt for overwrite
- name: prompt for overwrite
pause: prompt="Role {{ role_name }} exists. Overwrite? Touch any key to continue or <CTRL>-c, then a, to abort."
when: role_exists | success
- name: ansible-role | create role directories
- name: create role directories
file: path=roles/{{role_name}}/{{ item }} state=directory
with_items:
- tasks
......@@ -19,7 +19,7 @@
- templates
- files
- name: ansible-role | make an ansible role
- name: make an ansible role
template: src={{ item }}/main.yml.j2 dest=roles/{{ role_name }}/{{ item }}/main.yml
with_items:
- tasks
......
......@@ -7,5 +7,5 @@
# Overview:
#
#
- name: {{ role_name }} | notify me
- name: notify me
debug: msg="stub handler"
......@@ -14,6 +14,6 @@
#
#
- name: {{ role_name }} | stub ansible task
- name: stub ansible task
debug: msg="This is a stub task created by the ansible-role role"
notify: {{ role_name }} | notify me
\ No newline at end of file
notify: notify me
---
- name: apache | restart apache
- name: restart apache
service: name=apache2 state=restarted
tags: deploy
# Requires nginx package
---
- name: apache | Copying apache config {{ site_name }}
- name: Copying apache config {{ site_name }}
template: src={{ item }} dest=/etc/apache2/sites-available/{{ site_name }}
first_available_file:
- "{{ local_dir }}/apache/templates/{{ site_name }}.j2"
# seems like paths in first_available_file must be relative to the playbooks dir
- "roles/apache/templates/{{ site_name }}.j2"
notify: apache | restart apache
notify: restart apache
when: apache_role_run is defined
tags:
- apache
- update
- name: apache | Creating apache2 config link {{ site_name }}
- name: Creating apache2 config link {{ site_name }}
file: src=/etc/apache2/sites-available/{{ site_name }} dest=/etc/apache2/sites-enabled/{{ site_name }} state={{ state }} owner=root group=root
notify: apache | restart apache
notify: restart apache
when: apache_role_run is defined
tags:
- apache
......
#Installs apache and runs the lms wsgi
---
- name: apache | Installs apache and mod_wsgi from apt
- name: Installs apache and mod_wsgi from apt
apt: pkg={{item}} install_recommends=no state=present update_cache=yes
with_items:
- apache2
- libapache2-mod-wsgi
notify: apache | restart apache
notify: restart apache
tags:
- apache
- install
- name: apache | disables default site
- name: disables default site
command: a2dissite 000-default
notify: apache | restart apache
notify: restart apache
tags:
- apache
- install
- name: apache | rewrite apache ports conf
- name: rewrite apache ports conf
template: dest=/etc/apache2/ports.conf src=ports.conf.j2 owner=root group=root
notify: apache | restart apache
notify: restart apache
tags:
- apache
- install
- name: apache | Register the fact that apache role has run
- name: Register the fact that apache role has run
command: echo True
register: apache_role_run
tags:
......
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6noLNy7YVFNK6OSOFgPbnGGovgZqLsvJxjhs82jT7tZIsYOjVVCAMk0kkSnBt0etDjGSJlJ664r1aBhubZrujzxns0oOzA7J+tWQ3CiaOBLtOSffeh8a3dTWWNPCAGg9KflPaufXdd31Bf96g9ACGZR7uLYgWUP/J0jOPMCPE1RBfRNFeZ7cHlh3t/pI+JzTcyZTka4AAEsCejBKHngYxVoOk+gfxe+Qo703st0MFuoxVAMymeBGi/1lCwKsV6r9BijzuvIFyQCl2vThjoF32yHmmP8by//hmgpo5UNqG7jbmSrCJhkdh+My3SgEebn5c2QLJepOrUfrZFwz1BQ1l task@edx.org
\ No newline at end of file
......@@ -57,135 +57,87 @@
- fail: automated_sudoers_dest required for role
when: automated_sudoers_dest is not defined
- name: automated | create automated user
- name: create automated user
user:
name={{ automated_user }} state=present shell=/bin/rbash
home={{ automated_home }} createhome=yes
tags:
- automated
- install
- update
- name: automated | create sudoers file from file
- name: create sudoers file from file
copy:
dest=/etc/sudoers.d/{{ automated_sudoers_dest }}
src={{ automated_sudoers_file }} owner="root"
group="root" mode=0440 validate='visudo -cf %s'
when: automated_sudoers_file
tags:
- automated
- install
- update
- name: automated | create sudoers file from template
- name: create sudoers file from template
template:
dest=/etc/sudoers.d/{{ automated_sudoers_dest }}
src={{ automated_sudoers_template }} owner="root"
group="root" mode=0440 validate='visudo -cf %s'
when: automated_sudoers_template
tags:
- automated
- install
- update
#
# Prevent user from updating their PATH and
# environment.
#
- name: automated | update shell file mode
- name: update shell file mode
file:
path={{ automated_home }}/{{ item }} mode=0640
state=file owner="root" group={{ automated_user }}
tags:
- automated
- install
- update
with_items:
- .bashrc
- .profile
- .bash_logout
- name: automated | change ~automated ownership
- name: change ~automated ownership
file:
path={{ automated_home }} mode=0750 state=directory
owner="root" group={{ automated_user }}
tags:
- automated
- install
- update
#
# This ensures that the links are updated with each run
# and that links that were removed from the role are
# removed.
#
- name: automated | remove ~automated/bin directory
- name: remove ~automated/bin directory
file:
path={{ automated_home }}/bin state=absent
ignore_errors: yes
tags:
- automated
- install
- update
- name: automated | create ~automated/bin directory
- name: create ~automated/bin directory
file:
path={{ automated_home }}/bin state=directory mode=0750
owner="root" group={{ automated_user }}
tags:
- automated
- install
- update
- name: automated | re-write .profile
- name: re-write .profile
copy:
src=home/automator/.profile
dest={{ automated_home }}/.profile
owner="root"
group={{ automated_user }}
mode="0744"
tags:
- automated
- install
- update
- name: automated | re-write .bashrc
- name: re-write .bashrc
copy:
src=home/automator/.bashrc
dest={{ automated_home }}/.bashrc
owner="root"
group={{ automated_user }}
mode="0744"
tags:
- automated
- install
- update
- name: automated | create .ssh directory
- name: create .ssh directory
file:
path={{ automated_home }}/.ssh state=directory mode=0700
owner={{ automated_user }} group={{ automated_user }}
tags:
- automated
- install
- update
- name: automated | copy key to .ssh/authorized_keys
copy:
src=home/automator/.ssh/authorized_keys
- name: build authorized_keys file
template:
src=home/automator/.ssh/authorized_keys.j2
dest={{ automated_home }}/.ssh/authorized_keys mode=0600
owner={{ automated_user }} group={{ automated_user }}
tags:
- automated
- install
- update
- name: automated | create allowed command links
- name: create allowed command links
file:
src={{ item }} dest={{ automated_home }}/bin/{{ item.split('/').pop() }}
state=link
with_items: automated_rbash_links
tags:
- automated
- install
- update
with_items: automated_rbash_links
\ No newline at end of file
# Install browsers required to run the JavaScript
# and acceptance test suite locally without a display
---
- name: browsers | install system packages
- name: install system packages
apt: pkg={{','.join(browser_deb_pkgs)}}
state=present update_cache=yes
- name: browsers | download browser debian packages from S3
- name: download browser debian packages from S3
get_url: dest="/tmp/{{ item.name }}" url="{{ item.url }}"
register: download_deb
with_items: "{{ browser_s3_deb_pkgs }}"
with_items: browser_s3_deb_pkgs
- name: browsers | install browser debian packages
- name: install browser debian packages
shell: gdebi -nq /tmp/{{ item.name }}
when: download_deb.changed
with_items: "{{ browser_s3_deb_pkgs }}"
with_items: browser_s3_deb_pkgs
- name: browsers | Install ChromeDriver
- name: Install ChromeDriver
get_url:
url={{ chromedriver_url }}
dest=/var/tmp/chromedriver_{{ chromedriver_version }}.zip
- name: browsers | Install ChromeDriver 2
- name: Install ChromeDriver 2
shell: unzip /var/tmp/chromedriver_{{ chromedriver_version }}.zip
chdir=/var/tmp
- name: browsers | Install ChromeDriver 3
- name: Install ChromeDriver 3
shell: mv /var/tmp/chromedriver /usr/local/bin/chromedriver
- name: browsers | Install Chromedriver 4
- name: Install Chromedriver 4
file: path=/usr/local/bin/chromedriver mode=0755
- name: browsers | create xvfb upstart script
- name: create xvfb upstart script
template: src=xvfb.conf.j2 dest=/etc/init/xvfb.conf owner=root group=root
- name: browsers | start xvfb
- name: start xvfb
shell: start xvfb
ignore_errors: yes
......@@ -14,11 +14,10 @@
# Overview:
#
- name: certs | restart certs
- name: restart certs
supervisorctl_local: >
name=certs
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
tags: deploy
when: certs_installed is defined
---
- name: certs | create certificate application config
- name: create certificate application config
template: >
src=certs.env.json.j2
dest={{ certs_app_dir }}/env.json
sudo_user: "{{ certs_user }}"
notify: certs | restart certs
tags: deploy
notify: restart certs
- name: certs | create certificate auth file
- name: create certificate auth file
template: >
src=certs.auth.json.j2
dest={{ certs_app_dir }}/auth.json
sudo_user: "{{ certs_user }}"
notify: certs | restart certs
tags: deploy
notify: restart certs
- name: certs | writing supervisor script for certificates
- name: writing supervisor script for certificates
template: >
src=certs.conf.j2 dest={{ supervisor_cfg_dir }}/certs.conf
owner={{ supervisor_user }} mode=0644
notify: certs | restart certs
tags: deploy
notify: restart certs
- name: certs | create ssh script for git
- name: create ssh script for git
template: >
src={{ certs_git_ssh|basename }}.j2 dest={{ certs_git_ssh }}
owner={{ certs_user }} mode=750
notify: certs | restart certs
tags: deploy
notify: restart certs
- name: certs | install read-only ssh key for the certs repo
- name: install read-only ssh key for the certs repo
copy: >
src={{ CERTS_LOCAL_GIT_IDENTITY }} dest={{ certs_git_identity }}
force=yes owner={{ certs_user }} mode=0600
notify: certs | restart certs
tags: deploy
notify: restart certs
- name: certs | checkout certificates repo into {{ certs_code_dir }}
- name: checkout certificates repo into {{ certs_code_dir }}
git: dest={{ certs_code_dir }} repo={{ certs_repo }} version={{ certs_version }}
sudo_user: "{{ certs_user }}"
environment:
GIT_SSH: "{{ certs_git_ssh }}"
notify: certs | restart certs
tags: deploy
notify: restart certs
- name: certs | remove read-only ssh key for the certs repo
- name: remove read-only ssh key for the certs repo
file: path={{ certs_git_identity }} state=absent
notify: certs | restart certs
tags: deploy
notify: restart certs
- name : install python requirements
pip: requirements="{{ certs_requirements_file }}" virtualenv="{{ certs_venv_dir }}" state=present
sudo_user: "{{ certs_user }}"
notify: certs | restart certs
tags: deploy
notify: restart certs
# call supervisorctl update. this reloads
# the supervisorctl config and restarts
# the services if any of the configurations
# have changed.
#
- name: certs | update supervisor configuration
- name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
sudo_user: "{{ supervisor_service_user }}"
changed_when: supervisor_update.stdout != ""
- name: certs | ensure certs has started
- name: ensure certs has started
supervisorctl_local: >
name=certs
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=started
sudo_user: "{{ supervisor_service_user }}"
- name: create a symlink for venv python
file: >
src="{{ certs_venv_bin }}/{{ item }}"
dest={{ COMMON_BIN_DIR }}/{{ item }}.certs
state=link
with_items:
- python
- pip
- set_fact: certs_installed=true
......@@ -35,56 +35,46 @@
fail: msg="You must set CERTS_LOCAL_GIT_IDENTITY var for this role!"
when: not CERTS_LOCAL_GIT_IDENTITY
- name: certs | create application user
- name: create application user
user: >
name="{{ certs_user }}"
home="{{ certs_app_dir }}"
createhome=no
shell=/bin/false
notify: certs | restart certs
notify: restart certs
- name: certs | create certs app and data dirs
- name: create certs app and data dirs
file: >
path="{{ item }}"
state=directory
owner="{{ certs_user }}"
group="{{ common_web_group }}"
notify: certs | restart certs
notify: restart certs
with_items:
- "{{ certs_app_dir }}"
- "{{ certs_venvs_dir }}"
- name: certs | create certs gpg dir
- name: create certs gpg dir
file: >
path="{{ certs_gpg_dir }}" state=directory
owner="{{ common_web_user }}"
mode=0700
notify: certs | restart certs
notify: restart certs
- name: certs | copy the private gpg signing key
- name: copy the private gpg signing key
copy: >
src={{ CERTS_LOCAL_PRIVATE_KEY }}
dest={{ certs_app_dir }}/{{ CERTS_LOCAL_PRIVATE_KEY|basename }}
owner={{ common_web_user }} mode=0600
notify: certs | restart certs
notify: restart certs
register: certs_gpg_key
- name: certs | load the gpg key
- name: load the gpg key
shell: >
/usr/bin/gpg --homedir {{ certs_gpg_dir }} --import {{ certs_app_dir }}/{{ CERTS_LOCAL_PRIVATE_KEY|basename }}
sudo_user: "{{ common_web_user }}"
when: certs_gpg_key.changed
notify: certs | restart certs
notify: restart certs
- include: deploy.yml
- name: certs | create a symlink for venv python
file: >
src="{{ certs_venv_bin }}/{{ item }}"
dest={{ COMMON_BIN_DIR }}/{{ item }}.certs
state=link
notify: certs | restart certs
with_items:
- python
- pip
- include: deploy.yml tags=deploy
......@@ -37,7 +37,7 @@ common_debian_pkgs:
- python2.7-dev
common_pip_pkgs:
- virtualenv
- virtualenv==1.10.1
- virtualenvwrapper
common_web_user: www-data
......@@ -49,3 +49,11 @@ common_git_ppa: "ppa:git-core/ppa"
# Skip supervisor tasks
# Useful when supervisor is not installed (local dev)
devstack: False
common_debian_variants:
- Ubuntu
- Debian
common_redhat_variants:
- CentOS
- Red Hat Enterprise Linux
---
- name: common | restart rsyslogd
- name: restart rsyslogd
service: name=rsyslog state=restarted
sudo: True
tags: deploy
---
- name: common | Add user www-data
- name: Add user www-data
# This is the default user for nginx
user: >
name="{{ common_web_user }}"
shell=/bin/false
- name: common | Create common directories
- name: Create common directories
file: >
path={{ item }} state=directory owner=root
group=root mode=0755
......@@ -16,57 +16,57 @@
- "{{ COMMON_CFG_DIR }}"
# Need to install python-pycurl to use Ansible's apt_repository module
- name: common | Install python-pycurl
- name: Install python-pycurl
apt: pkg=python-pycurl state=present update_cache=yes
# Ensure that we get a current version of Git
# GitHub requires version 1.7.10 or later
# https://help.github.com/articles/https-cloning-errors
- name: common | Add git apt repository
- name: Add git apt repository
apt_repository: repo="{{ common_git_ppa }}"
- name: common | Install role-independent useful system packages
- name: Install role-independent useful system packages
# do this before log dir setup; rsyslog package guarantees syslog user present
apt: >
pkg={{','.join(common_debian_pkgs)}} install_recommends=yes
state=present update_cache=yes
- name: common | Create common log directory
- name: Create common log directory
file: >
path={{ COMMON_LOG_DIR }} state=directory owner=syslog
group=syslog mode=0755
- name: common | upload sudo config for key forwarding as root
- name: upload sudo config for key forwarding as root
copy: >
src=ssh_key_forward dest=/etc/sudoers.d/ssh_key_forward
validate='visudo -c -f %s' owner=root group=root mode=0440
- name: common | pip install virtualenv
- name: pip install virtualenv
pip: >
name="{{ item }}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
with_items: common_pip_pkgs
- name: common | Install rsyslog configuration for edX
- name: Install rsyslog configuration for edX
template: dest=/etc/rsyslog.d/99-edx.conf src=edx_rsyslog.j2 owner=root group=root mode=644
notify: common | restart rsyslogd
notify: restart rsyslogd
- name: common | Install logrotate configuration for edX
- name: Install logrotate configuration for edX
template: dest=/etc/logrotate.d/edx-services src=edx_logrotate.j2 owner=root group=root mode=644
- name: common | update /etc/hosts
- name: update /etc/hosts
template: src=hosts.j2 dest=/etc/hosts
when: COMMON_HOSTNAME
register: etc_hosts
- name: common | update /etc/hostname
- name: update /etc/hostname
template: src=hostname.j2 dest=/etc/hostname
when: COMMON_HOSTNAME
register: etc_hostname
- name: common | run hostname
- name: run hostname
shell: >
hostname -F /etc/hostname
when: COMMON_HOSTNAME and (etc_hosts.changed or etc_hostname.changed)
---
- name: datadog | restart the datadog service
- name: restart the datadog service
service: name=datadog-agent state=restarted
tags: deploy
---
dependencies:
- common
......@@ -2,10 +2,10 @@
#
# datadog
#
#
# Overview:
#
# Installs datadog
#
# Installs datadog
##
# Dependencies:
#
......@@ -15,43 +15,43 @@
# - datadog
#
- name: datadog | install debian needed pkgs
- name: install debian needed pkgs
apt: pkg={{ item }}
with_items: datadog_debian_pkgs
tags:
- datadog
- name: datadog | add apt key
- name: add apt key
apt_key: id=C7A7DA52 url={{datadog_apt_key}} state=present
tags:
- datadog
- name: datadog | install apt repository
- name: install apt repository
apt_repository: repo='deb http://apt.datadoghq.com/ unstable main' update_cache=yes
tags:
- datadog
- name: datadog | install datadog agent
- name: install datadog agent
apt: pkg="datadog-agent"
tags:
- datadog
- name: datadog | bootstrap config
- name: bootstrap config
shell: cp /etc/dd-agent/datadog.conf.example /etc/dd-agent/datadog.conf creates=/etc/dd-agent/datadog.conf
tags:
- datadog
- name: datadog | update api-key
- name: update api-key
lineinfile: >
dest="/etc/dd-agent/datadog.conf"
dest="/etc/dd-agent/datadog.conf"
regexp="^api_key:.*"
line="api_key:{{ datadog_api_key }}"
notify:
- datadog | restart the datadog service
- restart the datadog service
tags:
- datadog
- name: datadog | ensure started and enabled
- name: ensure started and enabled
service: name=datadog-agent state=started enabled=yes
tags:
- datadog
---
- name: demo | check out the demo course
- name: check out the demo course
git: dest={{ demo_code_dir }} repo={{ demo_repo }} version={{ demo_version }}
sudo_user: "{{ edxapp_user }}"
register: demo_checkout
tags: deploy
- name: demo | import demo course
- name: import demo course
shell: >
{{ edxapp_venv_bin }}/python ./manage.py cms --settings=aws import {{ edxapp_course_data_dir }} {{ demo_code_dir }}
chdir={{ edxapp_code_dir }}
sudo_user: "{{ common_web_user }}"
when: demo_checkout.changed
tags: deploy
- name: demo | create some test users and enroll them in the course
- name: create some test users and enroll them in the course
shell: >
{{ edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms create_user -e {{ item.email }} -p {{ item.password }} -m {{ item.mode }} -c {{ demo_course_id }}
chdir={{ edxapp_code_dir }}
sudo_user: "{{ common_web_user }}"
with_items: demo_test_users
when: demo_checkout.changed
tags: deploy
- name: demo | create staff user
- name: create staff user
shell: >
{{ edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms create_user -e staff@example.com -p edx -s -c {{ demo_course_id }}
chdir={{ edxapp_code_dir }}
sudo_user: "{{ common_web_user }}"
when: demo_checkout.changed
tags: deploy
- name: demo | add test users to the certificate whitelist
- name: add test users to the certificate whitelist
shell: >
{{ edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms cert_whitelist -a {{ item.email }} -c {{ demo_course_id }}
chdir={{ edxapp_code_dir }}
with_items: demo_test_users
when: demo_checkout.changed
tags: deploy
- name: demo | seed the forums for the demo course
- name: seed the forums for the demo course
shell: >
{{ edxapp_venv_bin }}/python ./manage.py lms --settings=aws seed_permissions_roles {{ demo_course_id }}
chdir={{ edxapp_code_dir }}
with_items: demo_test_users
when: demo_checkout.changed
tags: deploy
......@@ -30,9 +30,9 @@
# - edxapp
# - demo
- name: demo | create demo app and data dirs
- name: create demo app and data dirs
file: >
path="{{ demo_app_dir }}" state=directory
owner="{{ edxapp_user }}" group="{{ common_web_group }}"
- include: deploy.yml
- include: deploy.yml tags=deploy
......@@ -11,11 +11,10 @@
# Defaults for role devpi
#
---
- name: devpi | restart devpi
- name: restart devpi
supervisorctl_local: >
state=restarted
supervisorctl_path={{ devpi_supervisor_ctl }}
config={{ devpi_supervisor_cfg }}
name=devpi-server
sudo_user: "{{ devpi_supervisor_user }}"
tags: deploy
......@@ -30,13 +30,13 @@
# - devpi
---
- name: devpi | create devpi user
- name: create devpi user
user: >
name={{ devpi_user }}
shell=/bin/false createhome=no
notify: devpi | restart devpi
notify: restart devpi
- name: devpi | create devpi application directories
- name: create devpi application directories
file: >
path={{ item }}
state=directory
......@@ -45,9 +45,9 @@
with_items:
- "{{ devpi_app_dir }}"
- "{{ devpi_venv_dir }}"
notify: devpi | restart devpi
notify: restart devpi
- name: devpi | create the devpi data directory, needs write access by the service user
- name: create the devpi data directory, needs write access by the service user
file: >
path={{ item }}
state=directory
......@@ -56,40 +56,40 @@
with_items:
- "{{ devpi_data_dir }}"
- "{{ devpi_mirror_dir }}"
notify: devpi | restart devpi
notify: restart devpi
- name: devpi | install devpi pip pkgs
- name: install devpi pip pkgs
pip: >
name={{ item }}
state=present
virtualenv={{ devpi_venv_dir }}
sudo_user: "{{ devpi_user }}"
with_items: devpi_pip_pkgs
notify: devpi | restart devpi
notify: restart devpi
- name: devpi | writing supervisor script
- name: writing supervisor script
template: >
src=devpi.conf.j2 dest={{ devpi_supervisor_cfg_dir }}/devpi.conf
owner={{ devpi_user }} group={{ devpi_user }} mode=0644
notify: devpi | restart devpi
notify: restart devpi
- name: devpi | create a symlink for venv python, pip
- name: create a symlink for venv python, pip
file: >
src="{{ devpi_venv_bin }}/{{ item }}"
dest={{ COMMON_BIN_DIR }}/{{ item }}.devpi
state=link
notify: devpi | restart devpi
notify: restart devpi
with_items:
- python
- pip
- name: devpi | create a symlink for venv supervisor
- name: create a symlink for venv supervisor
file: >
src="{{ devpi_supervisor_venv_bin }}/supervisorctl"
dest={{ COMMON_BIN_DIR }}/{{ item }}.devpi
state=link
- name: devpi | create a symlink for supervisor config
- name: create a symlink for supervisor config
file: >
src="{{ devpi_supervisor_app_dir }}/supervisord.conf"
dest={{ COMMON_CFG_DIR }}/supervisord.conf.devpi
......@@ -100,13 +100,12 @@
# the services if any of the configurations
# have changed.
#
- name: devpi | update devpi supervisor configuration
- name: update devpi supervisor configuration
shell: "{{ devpi_supervisor_ctl }} -c {{ devpi_supervisor_cfg }} update"
register: supervisor_update
changed_when: supervisor_update.stdout != ""
tags: deploy
- name: devpi | ensure devpi is started
- name: ensure devpi is started
supervisorctl_local: >
state=started
supervisorctl_path={{ devpi_supervisor_ctl }}
......
---
- name: discern | restart discern
- name: restart discern
supervisorctl_local: >
name=discern
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
when: discern_installed is defined
with_items:
- discern
- discern_celery
tags: deploy
---
- name: discern | create supervisor scripts - discern, discern_celery
- name: create supervisor scripts - discern, discern_celery
template: >
src={{ item }}.conf.j2 dest={{ supervisor_cfg_dir }}/{{ item }}.conf
owner={{ supervisor_user }} mode=0644
......@@ -8,70 +8,56 @@
with_items: ['discern', 'discern_celery']
#Upload config files for django (auth and env)
- name: discern | create discern application config env.json file
- name: create discern application config env.json file
template: src=env.json.j2 dest={{ discern_app_dir }}/env.json
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
tags:
- deploy
- restart discern
- name: discern | create discern auth file auth.json
- name: create discern auth file auth.json
template: src=auth.json.j2 dest={{ discern_app_dir }}/auth.json
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
tags:
- deploy
- restart discern
- name: discern | git checkout discern repo into discern_code_dir
- name: git checkout discern repo into discern_code_dir
git: dest={{ discern_code_dir }} repo={{ discern_source_repo }} version={{ discern_version }}
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
tags:
- deploy
- restart discern
- name: discern | git checkout ease repo into discern_ease_code_dir
- name: git checkout ease repo into discern_ease_code_dir
git: dest={{ discern_ease_code_dir}} repo={{ discern_ease_source_repo }} version={{ discern_ease_version }}
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
tags:
- deploy
- restart discern
#Numpy has to be a pre-requirement in order for scipy to build
- name : discern | install python pre-requirements for discern and ease
- name : install python pre-requirements for discern and ease
pip: requirements={{item}} virtualenv={{ discern_venv_dir }} state=present
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
- restart discern
with_items:
- "{{ discern_pre_requirements_file }}"
- "{{ discern_ease_pre_requirements_file }}"
tags:
- deploy
- name : discern | install python requirements for discern and ease
- name : install python requirements for discern and ease
pip: requirements={{item}} virtualenv={{ discern_venv_dir }} state=present
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
- restart discern
with_items:
- "{{ discern_post_requirements_file }}"
- "{{ discern_ease_post_requirements_file }}"
tags:
- deploy
- name: discern | install ease python package
- name: install ease python package
shell: >
{{ discern_venv_dir }}/bin/activate; cd {{ discern_ease_code_dir }}; python setup.py install
notify:
- discern | restart discern
tags:
- deploy
- restart discern
- name: discern | download and install nltk
- name: download and install nltk
shell: |
set -e
curl -o {{ discern_nltk_tmp_file }} {{ discern_nltk_download_url }}
......@@ -82,36 +68,30 @@
chdir={{ discern_data_dir }}
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
tags:
- deploy
- restart discern
#Run this instead of using the ansible module because the ansible module only supports syncdb of these three, and does not
#support virtualenvs as of this comment
- name: discern | django syncdb migrate and collectstatic for discern
- name: django syncdb migrate and collectstatic for discern
shell: >
{{ discern_venv_dir }}/bin/python {{discern_code_dir}}/manage.py {{item}} --noinput --settings={{discern_settings}} --pythonpath={{discern_code_dir}}
chdir={{ discern_code_dir }}
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
- restart discern
with_items:
- syncdb
- migrate
- collectstatic
tags:
- deploy
#Have this separate from the other three because it doesn't take the noinput flag
- name: discern | django update_index for discern
- name: django update_index for discern
shell: >
{{ discern_venv_dir}}/bin/python {{discern_code_dir}}/manage.py update_index --settings={{discern_settings}} --pythonpath={{discern_code_dir}}
chdir={{ discern_code_dir }}
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
tags:
- deploy
- restart discern
# call supervisorctl update. this reloads
......@@ -119,14 +99,13 @@
# the services if any of the configurations
# have changed.
#
- name: discern | update supervisor configuration
- name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
sudo_user: "{{ supervisor_service_user }}"
changed_when: supervisor_update.stdout != ""
tags: deploy
- name: discern | ensure discern, discern_celery has started
- name: ensure discern, discern_celery has started
supervisorctl_local: >
name={{ item }}
supervisorctl_path={{ supervisor_ctl }}
......@@ -135,4 +114,14 @@
with_items:
- discern
- discern_celery
tags: deploy
- name: create a symlink for venv python
file: >
src="{{ discern_venv_bin }}/{{ item }}"
dest={{ COMMON_BIN_DIR }}/{{ item }}.discern
state=link
with_items:
- python
- pip
- set_fact: discern_installed=true
---
- name: discern | create application user
- name: create application user
user: >
name="{{ discern_user }}"
home="{{ discern_app_dir }}"
createhome=no
shell=/bin/false
notify:
- discern | restart discern
- restart discern
- name: discern | create discern app dirs owned by discern
- name: create discern app dirs owned by discern
file: >
path="{{ item }}"
state=directory
owner="{{ discern_user }}"
group="{{ common_web_group }}"
notify:
- discern | restart discern
- restart discern
with_items:
- "{{ discern_app_dir }}"
- "{{ discern_venvs_dir }}"
- name: discern | create discern data dir, owned by {{ common_web_user }}
- name: create discern data dir, owned by {{ common_web_user }}
file: >
path="{{ discern_data_dir }}" state=directory
owner="{{ common_web_user }}" group="{{ discern_user }}"
mode=0775
notify:
- discern | restart discern
- restart discern
- name: discern | install debian packages that discern needs
- name: install debian packages that discern needs
apt: pkg={{ item }} state=present
notify:
- discern | restart discern
- restart discern
with_items: discern_debian_pkgs
- name: discern | install debian packages for ease that discern needs
- name: install debian packages for ease that discern needs
apt: pkg={{ item }} state=present
notify:
- discern | restart discern
- restart discern
with_items: discern_ease_debian_pkgs
- name: discern | copy sudoers file for discern
- name: copy sudoers file for discern
copy: >
src=sudoers-discern dest=/etc/sudoers.d/discern
mode=0440 validate='visudo -cf %s' owner=root group=root
notify:
- discern | restart discern
- restart discern
#Needed if using redis to prevent memory issues
- name: discern | change memory commit settings -- needed for redis
- name: change memory commit settings -- needed for redis
command: sysctl vm.overcommit_memory=1
notify:
- discern | restart discern
- restart discern
- include: deploy.yml
- name: discern | create a symlink for venv python
file: >
src="{{ discern_venv_bin }}/python"
dest={{ COMMON_BIN_DIR }}/python.discern
state=link
- include: deploy.yml tags=deploy
......@@ -32,5 +32,6 @@ edx_ansible_venv_bin: "{{ edx_ansible_venv_dir }}/bin"
edx_ansible_user: "edx-ansible"
edx_ansible_source_repo: https://github.com/edx/configuration.git
edx_ansible_requirements_file: "{{ edx_ansible_code_dir }}/requirements.txt"
edx_ansible_var_file: "{{ edx_ansible_data_dir }}/server-vars.yml"
# edX configuration repo
configuration_version: master
......@@ -10,4 +10,4 @@
##
# Role includes for role edx_ansible
dependencies:
- supervisor
- common
---
- name: edx_ansible | git checkout edx_ansible repo into edx_ansible_code_dir
- name: git checkout edx_ansible repo into edx_ansible_code_dir
git: dest={{ edx_ansible_code_dir }} repo={{ edx_ansible_source_repo }} version={{ configuration_version }}
sudo_user: "{{ edx_ansible_user }}"
tags: deploy
- name : edx_ansible | install edx_ansible venv requirements
- name : install edx_ansible venv requirements
pip: requirements="{{ edx_ansible_requirements_file }}" virtualenv="{{ edx_ansible_venv_dir }}" state=present
sudo_user: "{{ edx_ansible_user }}"
tags: deploy
- name: create update script
template: >
dest={{ edx_ansible_app_dir}}/update
src=update.j2 owner={{ edx_ansible_user }} group={{ edx_ansible_user }} mode=755
- name: create a symlink for update.sh
file: >
src={{ edx_ansible_app_dir }}/update
dest={{ COMMON_BIN_DIR }}/update
state=link
- name: dump all vars to yaml
template: src=dumpall.yml.j2 dest={{ edx_ansible_var_file }} mode=0600
- name: clean up var file, removing all version vars
shell: sed -i -e "/{{item}}/d" {{ edx_ansible_var_file }}
with_items:
# deploy versions
- "^edx_platform_version:"
- "^edx_platform_commit:"
- "^xqueue_version:"
- "^forum_version:"
- "^xserver_version:"
- "^discern_ease_version:"
- "^ora_ease_version:"
- "^discern_version:"
- "^ora_version:"
- "^configuration_version:"
- "^ease_version:"
- "^certs_version:"
# other misc vars
- "^tags:"
- "^_original_file:"
- name: create a symlink for var file
file: >
src={{ edx_ansible_var_file }}
dest={{ COMMON_CFG_DIR }}/{{ edx_ansible_var_file|basename }}
state=link
......@@ -23,14 +23,14 @@
#
#
#
- name: edx_ansible | create application user
- name: create application user
user: >
name="{{ edx_ansible_user }}"
home="{{ edx_ansible_app_dir }}"
createhome=no
shell=/bin/false
- name: edx_ansible | create edx_ansible app and venv dir
- name: create edx_ansible app and venv dir
file: >
path="{{ item }}"
state=directory
......@@ -38,20 +38,10 @@
group="{{ common_web_group }}"
with_items:
- "{{ edx_ansible_app_dir }}"
- "{{ edx_ansible_data_dir }}"
- "{{ edx_ansible_venvs_dir }}"
- name: edx_ansible | install a bunch of system packages on which edx_ansible relies
- name: install a bunch of system packages on which edx_ansible relies
apt: pkg={{','.join(edx_ansible_debian_pkgs)}} state=present
- include: deploy.yml
- name: edx_ansible | create update script
template: >
dest={{ edx_ansible_app_dir}}/update
src=update.j2 owner={{ edx_ansible_user }} group={{ edx_ansible_user }} mode=755
- name: edxapp | create a symlink for update.sh
file: >
src={{ edx_ansible_app_dir }}/update
dest={{ COMMON_BIN_DIR }}/update
state=link
- include: deploy.yml tags=deploy
......@@ -12,24 +12,13 @@ IFS=","
-v add verbosity to edx_ansible run
-h this
<repo> - must be one of [${!repos_to_cmd[*]}]
<repo> - must be one of edx-platform, xqueue, cs_comments_service, xserver, ease, discern, edx-ora, configuration
<version> - can be a commit or tag
EO
IFS=$SAVE_IFS
}
declare -A repos_to_cmd
edx_ansible_cmd="{{ edx_ansible_venv_bin}}/ansible-playbook -i localhost, -c local --tags deploy"
repos_to_cmd["edx-platform"]="$edx_ansible_cmd edxapp.yml -e 'edx_platform_version=$2'"
repos_to_cmd["xqueue"]="$edx_ansible_cmd xqueue.yml -e 'xqueue_version=$2'"
repos_to_cmd["forums"]="$edx_ansible_cmd forums.yml -e 'forum_version=$2'"
repos_to_cmd["xserver"]="$edx_ansible_cmd forums.yml -e 'xserver_version=$2'"
repos_to_cmd["ease"]="$edx_ansible_cmd discern.yml -e 'discern_ease_version=$2' && $edx_ansible_cmd ora.yml -e 'ora_ease_version=$2'"
repos_to_cmd["discern"]="$edx_ansible_cmd discern.yml -e 'discern_version=$2'"
repos_to_cmd["edx-ora"]="$edx_ansible_cmd ora.yml -e 'ora_version=$2'"
repos_to_cmd["configuration"]="$edx_ansible_cmd edx_ansible.yml -e 'configuration_version=$2'"
PROG=${0##*/}
while getopts "vh" opt; do
case $opt in
......@@ -45,6 +34,23 @@ while getopts "vh" opt; do
done
if [[ -f {{ edx_ansible_var_file }} ]]; then
extra_args="-e@{{ edx_ansible_var_file }}"
fi
declare -A repos_to_cmd
edx_ansible_cmd="{{ edx_ansible_venv_bin}}/ansible-playbook -i localhost, -c local --tags deploy $extra_args "
repos_to_cmd["edx-platform"]="$edx_ansible_cmd edxapp.yml -e 'edx_platform_version=$2'"
repos_to_cmd["xqueue"]="$edx_ansible_cmd xqueue.yml -e 'xqueue_version=$2'"
repos_to_cmd["cs_comments_service"]="$edx_ansible_cmd forum.yml -e 'forum_version=$2'"
repos_to_cmd["xserver"]="$edx_ansible_cmd forums.yml -e 'xserver_version=$2'"
repos_to_cmd["ease"]="$edx_ansible_cmd discern.yml -e 'discern_ease_version=$2' && $edx_ansible_cmd ora.yml -e 'ora_ease_version=$2'"
repos_to_cmd["discern"]="$edx_ansible_cmd discern.yml -e 'discern_version=$2'"
repos_to_cmd["edx-ora"]="$edx_ansible_cmd ora.yml -e 'ora_version=$2'"
repos_to_cmd["configuration"]="$edx_ansible_cmd edx_ansible.yml -e 'configuration_version=$2'"
if [[ -z $1 || -z $2 ]]; then
echo
echo "ERROR: You must specify a repo and commit"
......
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role edx_service
#
#
# vars are namespaced with the module name.
#
edx_service_role_name: edx_service
#
# OS packages
#
edx_service_debian_pkgs: []
edx_service_redhat_pkgs: []
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role edx_service
#
# Overview:
#
#
- name: edx_service | notify me
debug: msg="stub handler"
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Role includes for role edx_service
#
# Example:
#
# dependencies:
# - {
# role: my_role
# my_role_var0: "foo"
# my_role_var1: "bar"
# }
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role edx_service
#
# Overview:
#
# This role performs the repetitive tasks that most edX roles
# require in our default configuration.
#
# Example play:
#
# Rather than being included in the play, this role
# is included as a dependency by other roles in the meta/main.yml
# file. The including role should add the following
# dependency definition.
#
# dependencies:
# - { role: edx_service, edx_service_name: "hotg" }
#
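# As an illustrative sketch (not part of this commit), a hypothetical
# including role named "hotg" would also define the package lists that
# the apt/yum tasks below look up via "{{ edx_service_name }}_debian_pkgs"
# and "{{ edx_service_name }}_redhat_pkgs", e.g. in its defaults/main.yml:
#
# hotg_debian_pkgs:
#   - libssl-dev      # hypothetical package, for illustration only
# hotg_redhat_pkgs: []
#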
- name: create application user
user: >
name="{{ edx_service_name }}"
home="{{ COMMON_APP_DIR }}/{{ edx_service_name }}"
createhome=no
shell=/bin/false
- name: create edx_service app and venv dir
file: >
path="{{ item }}"
state=directory
owner="{{ edx_service_name }}"
group="{{ common_web_group }}"
with_items:
- "{{ COMMON_APP_DIR }}/{{ edx_service_name }}"
- "{{ COMMON_APP_DIR }}/{{ edx_service_name }}/venvs"
- name: install a bunch of system packages on which edx_service relies
apt: pkg={{ item }} state=present
with_items: "{{ edx_service_name }}_debian_pkgs"
when: ansible_distribution in common_debian_variants
- name: install a bunch of system packages on which edx_service relies
yum: pkg={{ item }} state=present
with_items: "{{ edx_service_name }}_redhat_pkgs"
when: ansible_distribution in common_redhat_variants
\ No newline at end of file
......@@ -40,7 +40,7 @@ EDXAPP_EMAIL_BACKEND: 'django.core.mail.backends.smtp.EmailBackend'
EDXAPP_LOG_LEVEL: 'INFO'
EDXAPP_MEMCACHE: [ 'localhost:11211' ]
EDXAPP_COMMENTS_SERVICE_URL: 'http://localhost:4567'
EDXAPP_COMMENTS_SERVICE_URL: 'http://localhost:18080'
EDXAPP_COMMENTS_SERVICE_KEY: 'password'
EDXAPP_EDXAPP_SECRET_KEY: ''
......@@ -66,6 +66,7 @@ EDXAPP_FEATURES:
SUBDOMAIN_COURSE_LISTINGS: false
PREVIEW_LMS_BASE: $EDXAPP_PREVIEW_LMS_BASE
ENABLE_S3_GRADE_DOWNLOADS: true
USE_CUSTOM_THEME: $edxapp_use_custom_theme
EDXAPP_BOOK_URL: ''
# This needs to be set to localhost
......@@ -83,8 +84,11 @@ EDXAPP_RABBIT_HOSTNAME: 'localhost'
EDXAPP_XML_MAPPINGS: {}
EDXAPP_LMS_NGINX_PORT: 18000
EDXAPP_LMS_SSL_NGINX_PORT: 48000
EDXAPP_LMS_PREVIEW_NGINX_PORT: 18020
EDXAPP_CMS_NGINX_PORT: 18010
EDXAPP_CMS_SSL_NGINX_PORT: 48010
EDXAPP_LANG: 'en_US.UTF-8'
EDXAPP_TIME_ZONE: 'America/New_York'
......@@ -113,6 +117,20 @@ EDXAPP_GRADE_ROOT_PATH: '/tmp/edx-s3/grades'
# Configure rake tasks in edx-platform to skip Python/Ruby/Node installation
EDXAPP_NO_PREREQ_INSTALL: 1
# whether to setup the python codejail or not
EDXAPP_PYTHON_SANDBOX: false
# this next setting, if true, turns on actual sandbox enforcement. If not true,
# it puts the sandbox in 'complain' mode, for reporting but not enforcement
EDXAPP_SANDBOX_ENFORCE: true
# Supply authorized keys used for remote management via the automated
# role, see meta/main.yml. Ensure you know what this does before
# enabling. The boolean flag determines whether the role is included.
# This is done to make it possible to disable remote access easily by
# setting the flag to true and providing an empty array.
EDXAPP_INCLUDE_AUTOMATOR_ROLE: false
EDXAPP_AUTOMATOR_AUTHORIZED_KEYS: []
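# As a rough sketch of how this could be wired up (an assumption, not
# verbatim from this commit), the flag and key list might gate a
# conditional dependency on the automated role in meta/main.yml; the
# exact parameter names passed through are illustrative:
#
# dependencies:
#   - role: automated
#     automated_authorized_keys: "{{ EDXAPP_AUTOMATOR_AUTHORIZED_KEYS }}"
#     automated_rbash_links: "{{ edxapp_automated_rbash_links }}"
#     when: EDXAPP_INCLUDE_AUTOMATOR_ROLE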
#-------- Everything below this line is internal to the role ------------
#Use YAML references (& and *) and hash merge <<: to factor out shared settings
......@@ -160,6 +178,13 @@ edxapp_workers:
service_variant: lms
concurrency: 2
# setup for python codejail
edxapp_sandbox_venv_dir: '{{ edxapp_venvs_dir }}/edxapp-sandbox'
edxapp_sandbox_user: 'sandbox' # I think something about the codejail requires hardcoding this to sandbox:sandbox
# apparmor command
edxapp_aa_command: "{% if EDXAPP_SANDBOX_ENFORCE %}aa-enforce{% else %}aa-complain{% endif %}"
# Requirement files we explicitly
# check for changes before attempting
# to update the venv
......@@ -358,6 +383,14 @@ lms_auth_config:
lms_env_config:
<<: *edxapp_generic_env
'CODE_JAIL':
# from https://github.com/edx/codejail/blob/master/codejail/django_integration.py#L24, '' should be same as None
'python_bin': '{% if EDXAPP_PYTHON_SANDBOX %}{{ edxapp_sandbox_venv_dir }}/bin/python{% endif %}'
'limits':
'VMEM': 0
'REALTIME': 5
'user': '{{ edxapp_sandbox_user }}'
cms_auth_config:
<<: *edxapp_generic_auth
cms_env_config:
......@@ -404,9 +437,12 @@ worker_core_mult:
cms: 2
# Theming
# To turn off theming, specify edxapp_theme_name: ""
# Turn theming on and off with edxapp_use_custom_theme
# Set theme name with edxapp_theme_name
# Stanford, for example, uses edxapp_theme_name: 'stanford'
#
# TODO: change variables to ALL-CAPS, since they are meant to be externally overridden
edxapp_use_custom_theme: false
edxapp_theme_name: ""
edxapp_theme_source_repo: 'https://{{ COMMON_GIT_MIRROR }}/Stanford-Online/edx-theme.git'
edxapp_theme_version: 'HEAD'
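# For example (an illustrative override, not part of this commit), a
# deployment that wants the Stanford theme could set the following in an
# extra-vars file passed to the play; USE_CUSTOM_THEME in EDXAPP_FEATURES
# then picks up the flag automatically:
#
# edxapp_use_custom_theme: true
# edxapp_theme_name: 'stanford'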
......@@ -427,9 +463,6 @@ sandbox_base_requirements: "{{ edxapp_code_dir }}/requirements/edx-sandbox/base
sandbox_local_requirements: "{{ edxapp_code_dir }}/requirements/edx-sandbox/local.txt"
sandbox_post_requirements: "{{ edxapp_code_dir }}/requirements/edx-sandbox/post.txt"
#do we want to install the sandbox requirements into the regular virtual env
install_sandbox_reqs_into_regular_venv: true
edxapp_debian_pkgs:
- npm
# for compiling the virtualenv
......@@ -471,3 +504,9 @@ edxapp_cms_variant: cms
# Worker Settings
worker_django_settings_module: 'aws'
# This array is used by the automator role to provide
# access to a limited set of commands via rbash. The
# commands listed here will be symlinked to ~/bin/ for
# the automator user.
edxapp_automated_rbash_links:
- /usr/bin/sudo
\ No newline at end of file
---
- name: edxapp | restart edxapp
- name: restart edxapp
supervisorctl_local: >
state=restarted
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
name="edxapp:{{ item }}"
when: celery_worker is not defined and not devstack
when: edxapp_installed is defined and celery_worker is not defined and not devstack
sudo_user: "{{ supervisor_service_user }}"
with_items: service_variants_enabled
tags: deploy
- name: edxapp | restart edxapp_workers
- name: restart edxapp_workers
supervisorctl_local: >
name="edxapp_worker:{{ item.service_variant }}_{{ item.queue }}_{{ item.concurrency }}"
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
when: celery_worker is defined and not devstack
when: edxapp_installed is defined and celery_worker is defined and not devstack
with_items: edxapp_workers
sudo_user: "{{ common_web_user }}"
tags: deploy