Commit 1533c84e by John Jarvis

Bringing over fab files from the old sysadmin repo

Attempted to strip out files and tasks we no longer
need. There is more work to be done.
parent 94250149
# Additional Tasks
import cache
import clean
import ec2
import audit
import git
import hosts
import locks
import os
import ssh
import status
import migrate_check
import yaml
from dogapi import dog_stats_api, dog_http_api
from timestamps import TSWrapper
# Global tasks
import logging
from fabric.api import env, task, runs_once
from output import squelch
from datetime import datetime
import sys
import time
from fabric.api import execute, local, task, runs_once
from fabric.utils import fastprint
from fabric.colors import blue
from ssh_tunnel import setup_tunnel
# These imports are to give aliases for these tasks
from hosts import by_tags as tag
from hosts import by_tags as tags
from hosts import exemplar_from_tags as exemplar
from git import default_deploy as deploy
env.linewise = True
env.noop = False
env.use_ssh_config = True
FORMAT = '[ %(asctime)s ] : %(message)s'
logging.basicConfig(format=FORMAT, level=logging.WARNING)
# add timestamps to output
sys.stdout = TSWrapper(sys.stdout)
sys.stderr = TSWrapper(sys.stderr)
path = os.path.abspath(__file__)
with open(os.path.join(
os.path.dirname(path), '../package_data.yaml')) as f:
package_data = yaml.load(f)
dog_stats_api.start(api_key=package_data['datadog_api'], statsd=True)
dog_http_api.api_key = package_data['datadog_api']
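# With the stats client started above, any task in this fabfile can emit
# metrics. A minimal sketch (the metric name here is hypothetical); the
# rolling-deploy wrapper in modifiers.py uses this same timer API:
#
#   with dog_stats_api.timer('fab.example_step', tags=['task:example']):
#       pass  # timed work goes here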
@task
def noop():
"""
Disable modification of servers
"""
env.noop = True
dog_stats_api.stop()
@task
def quiet():
"""
Disables verbose output
"""
squelch()
@task
@runs_once
def log(fname=None):
"""
Writes a logfile to disk of the run
"""
if not fname:
d = datetime.now()
fname = d.strftime('/var/tmp/fab-%Y%m%d-%H%M%S-{0}.log'.format(
os.getpid()))
env.logfile = fname
sys.stdout.log_to_file(fname)
sys.stderr.log_to_file(fname)
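# These global tasks are meant to be chained ahead of an action task on the
# fab command line. A hypothetical invocation (tag values are made up):
#
#   fab log noop tags:environment=stage,group=edxapp clean.apt_get_clean
#
# `log` tees all output to a timestamped file under /var/tmp, and `noop`
# makes every noopable() call print what it would have done instead of
# running it.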
import logging
from fabric.api import serial, task, parallel, env, execute, runs_once, settings,sudo
from fabfile.safety import noopable
from multiprocessing import Manager
from timestamps import no_ts
from packages import PackageInfo
import tempfile
from output import notify
@task
@parallel
def collect_installed_packages(results):
"""
Collect all installed packages for the selected hosts and store them in env
"""
print env.host
pkg_info = PackageInfo()
results[env.host] = pkg_info.installed_packages()
@task
@serial
def display_installed_packages(installed_packages):
"""
Print all installed packages collected by collect_installed_packages
"""
# FIXME: env.hosts loses the port information here, not sure why
with no_ts():
for pkg in installed_packages['{0}:22'.format(env.host)]:
notify("{pkg.name} = {pkg.revision}".format(pkg=pkg))
@task(default=True)
@runs_once
def installed_packages(from_links=False):
"""
List all of the installed packages on the selected hosts
"""
installed_packages = Manager().dict()
execute(collect_installed_packages, installed_packages)
execute(display_installed_packages, installed_packages)
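# The collect/display split above is a standard Fabric pattern: results
# must live in a multiprocessing.Manager dict because @parallel tasks run
# in separate processes, so a plain dict would not be shared. A minimal
# standalone sketch of the same pattern (hostnames are hypothetical):
#
#   from multiprocessing import Manager
#   from fabric.api import env, execute, parallel, run, task
#
#   @task
#   @parallel
#   def collect(results):
#       results[env.host] = run('uname -r')
#
#   @task
#   def kernels():
#       results = Manager().dict()
#       execute(collect, results, hosts=['web1.example.com'])
#       print dict(results)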
@task
def audit_user(user, audit_output=None):
"""
Logs on to the provided hosts and runs `id` for the supplied user with
sudo. Output is logged to the provided file argument, or to a default
path built with Python's tempfile.gettempdir() and the file name format:
/tmp/audit-user-{user}.csv
The contents of this file are
host,user,command output
Note that if the file already exists, output will be appended to the
existing file.
"""
logging.info("Auditing {host}.".format(host=env.host_string))
if not audit_output:
audit_output = tempfile.gettempdir() + "/audit-user-{user}.csv".format(
user=user)
with settings(warn_only=True):
with open(audit_output, 'a') as audit:
output = noopable(sudo)("id {user}".format(user=user))
audit.write("{host},{user},{output}\n".format(
host=env.host_string,
user=user,
output=output
)
)
@task
def remove_user(user):
"""
Logs on to provided hosts and runs userdel for the supplied user with sudo.
The user's home directory is preserved.
"""
logging.info("Removing {user} user from {host}.".format(
user=user,host=env.host_string))
with settings(warn_only=True):
output = noopable(sudo)("userdel {user}".format(user=user))
logging.info("Output of userdel command on host {host} was {out}".format(
host=env.host_string,out=output
)
)
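# Hypothetical invocations, selecting hosts by tag and then auditing or
# removing an account on each of them:
#
#   fab tags:environment=stage audit.audit_user:user=olduser
#   fab tags:environment=stage audit.remove_user:user=olduser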
from fabric.api import task, runs_once, env, serial, puts, settings
from fabric.utils import fastprint
from fabric.colors import blue, red, white
from output import notify
from packages import PackageDescriptor
from output import unsquelched
from hosts import exemplar
from ssh_tunnel import setup_tunnel
from packages import PackageInfo
@task
@runs_once
def from_exemplar(**tags):
"""
Cache the set of packages installed on one host from the specified tags.
"""
host_string = setup_tunnel([exemplar(**tags)])[0]
with settings(host_string=host_string):
installed_packages()
@task
@runs_once
def limit_prefix(*prefix_list):
"""
Limits cached packages to those that
match one or more prefix strings
"""
env.package_descriptors = filter(
lambda pkg: any(pkg.name.startswith(prefix)
for prefix in prefix_list), env.package_descriptors)
@task(default=True)
@runs_once
def installed_packages(prefix=None):
"""
Cache the set of packages installed on the selected host.
"""
pkg_info = PackageInfo()
env.package_descriptors = [
package for package in pkg_info.installed_packages()
if prefix is None or package.name.startswith(prefix)
]
@task
@runs_once
def from_strings(**pkg_revs):
"""
Cache packages from strings; the cached set can then be checked with
confirm or deployed with deploy.
Each named argument specifies a package by name, and its value gives
the revision of the package to deploy.
"""
packages = []
for pkg_name, pkg_rev in pkg_revs.items():
packages.append(PackageDescriptor(pkg_name, pkg_rev))
env.package_descriptors = packages
notify(env.package_descriptors)
@task
@runs_once
def from_stdin(prefix=None):
"""
Cache a list of packages from stdin.
Package names must start with prefix, if specified (any that don't
will be skipped). Package names and revisions should be separated
by = signs, and should be one per line.
"""
if prefix:
prefix_msg = white('pkg_name', bold=True) + white(
' must start with ') + blue(prefix)
else:
prefix_msg = ''
fastprint('\n')
fastprint('\n'.join([
white('Please enter pkg_name=pkg_rev, one per line\n', bold=True),
white('pkg_rev', bold=True) + white(' is a git revision hash'),
prefix_msg,
white('Complete your selections by entering a blank line.'),
]))
fastprint('\n\n')
packages = {}
while True:
line = raw_input("> ")
if not line:
break
if '=' not in line:
fastprint(red("Expected = in '{line}'. Skipping...".format(
line=line)) + white('\n'))
continue
pkg_name, _, pkg_rev = line.partition('=')
pkg_name = pkg_name.strip()
pkg_rev = pkg_rev.strip()
if prefix and not pkg_name.startswith(prefix):
fastprint(red("'{0}' does not start with '{1}'".format(
pkg_name, prefix)) + white('\n'))
continue
packages[pkg_name] = pkg_rev
from_strings(**packages)
@task
@serial
@runs_once
def prompt(*pkg_names):
packages = {}
with unsquelched():
puts("Please supply git revisions to "
"deploy for the following packages:")
for pkg in pkg_names:
packages[pkg] = raw_input("{pkg} = ".format(pkg=pkg)).strip()
from_strings(**packages)
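# A typical caching workflow chains these tasks on the command line: pick
# an exemplar host for a tag set, cache its installed packages, then
# narrow the set. Tag and prefix values here are hypothetical:
#
#   fab cache.from_exemplar:environment=prod,group=edxapp cache.limit_prefix:edx
#
# The resulting env.package_descriptors list is what deploy-style tasks
# (see the `deploy` alias for git.default_deploy) operate on.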
from output import notify
from fabric.api import abort
from fabric.colors import blue, cyan, green, red, white
from fabric.utils import fastprint
def choose(msg, options):
choices = range(len(options))
fastprint(white(msg, bold=True) + white("\n"))
for i, target in enumerate(options):
fastprint("{0}. {1}\n".format(i, target))
fastprint("x. Cancel\n")
user_input = raw_input("> ")
if user_input == 'x':
abort("Cancelled")
try:
choice = int(user_input)
except ValueError:
fastprint(red("Choice must be an integer"))
return None
if choice not in choices:
fastprint(red("Choice must be one of {0}".format(choices)))
return None
return options[choice]
def multi_choose_with_input(msg, options):
"""
Arguments:
msg - header message for the chooser
options - dictionary of options to edit
The user selects one of the keys in the dictionary, and a new value
for it is read from stdin.
"""
selections = options.keys()
user_input = None
while True:
fastprint('\n{0}{1}'.format(white(msg, bold=True), white("\n")))
# The extra white("\n") prints are to reset
# the color for the timestamp line prefix
fastprint(white("\n"))
for i, item in enumerate(selections):
fastprint(" {0}. {1} : {2}".format(white(i, bold=True),
cyan(item), cyan(options[item], bold=True)) + white("\n"))
fastprint(blue(" a. Select all") + white("\n"))
fastprint(blue(" c. Continue") + white("\n"))
fastprint(blue(" x. Cancel") + white("\n"))
fastprint(white("\n"))
user_input = raw_input("> ")
try:
if user_input == 'c':
break
elif user_input == 'x':
return None
elif int(user_input) in range(len(selections)):
name = selections[int(user_input)]
fastprint(green('Enter new msg for ') +
cyan(name))
options[name] = raw_input(white(": "))
except ValueError:
notify("Invalid selection ->" + user_input + "<-")
return options
def multi_choose(msg, options):
fastprint(white(msg, bold=True) + white("\n"))
selected = [" " for option in options]
user_input = None
while True:
# The extra white("\n") prints are to reset
# the color for the timestamp line prefix
fastprint(white("\n"))
for i, target in enumerate(options):
fastprint(green(selected[i]))
fastprint(cyan(" {0}. {1}".format(i, target)) + white("\n"))
fastprint(blue(" a. Select all") + white("\n"))
fastprint(blue(" c. Deploy selections") + white("\n"))
fastprint(blue(" x. Cancel") + white("\n"))
fastprint(white("\n"))
user_input = raw_input("> ")
try:
if user_input == 'c':
break
elif user_input == 'a':
selected = ['*' for i in range(len(selected))]
elif user_input == 'x':
return None
elif int(user_input) in range(len(options)):
if selected[int(user_input)] == " ":
selected[int(user_input)] = "*"
else:
selected[int(user_input)] = " "
except ValueError:
notify("Invalid selection ->" + user_input + "<-")
pkgs = [options[s] for s in range(len(selected)) if selected[s] == '*']
return pkgs
from fabric.api import sudo, task, parallel
from safety import noopable
from modifiers import rolling
@task
@parallel
def apt_get_clean():
""" Runs apt-get clean on a remote server """
noopable(sudo)('apt-get clean')
@task
@rolling
def mako_template_cache():
noopable(sudo)('service gunicorn stop')
noopable(sudo)('rm -rf /tmp/tmp*mako')
noopable(sudo)('service gunicorn start')
import boto
from fabric.api import run, task, parallel, env
env.instance_ids = {}
def instance_id():
if env.host_string not in env.instance_ids:
env.instance_ids[env.host_string] = run('wget -q -O - http://169.254.169.254/latest/meta-data/instance-id')
return env.instance_ids[env.host_string]
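# instance_id() reads the EC2 instance-metadata service, which is only
# reachable from the instance itself; it is equivalent to running
#
#   wget -q -O - http://169.254.169.254/latest/meta-data/instance-id
#
# on the host. Results are memoized in env.instance_ids so each host is
# queried at most once per fab run.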
#!/bin/sh
exec ssh -i "/etc/git-identity" -o "StrictHostKeyChecking no" "$@"
import boto
from fabric.decorators import serial
from ssh_tunnel import setup_tunnel, DOMAIN  # DOMAIN is used by _fleet() below
import socket
from fabric.api import env, task, abort
from fabric.colors import red
import logging
def hosts_by_tag(tag, value):
"""
Return a list of all hosts that have the specified value for the specified
tag
"""
return hosts_by_tags(**{tag: value})
def hosts_by_tags(**tags):
"""
Return a list of all hosts that have the specified value for the specified
tags.
Tag values are allowed to include wildcards
If no variant tag is specified, this command will ignore all hosts
that have a variant specified.
"""
if 'env' in tags:
tags['environment'] = tags['env']
del(tags['env'])
ec2 = boto.connect_ec2()
hosts = []
for res in ec2.get_all_instances(filters={'tag:' + tag: value
for tag, value in tags.iteritems()
if value != '*'}):
for inst in res.instances:
if inst.state == "running":
if (inst.public_dns_name):
hosts.append(inst.public_dns_name)
else:
hosts.append(inst.private_dns_name)
print hosts
return hosts
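# Example (tag values are hypothetical): hosts_by_tags(environment='prod',
# group='edxapp') queries EC2 with the filters
#
#   {'tag:environment': 'prod', 'tag:group': 'edxapp'}
#
# A value of '*' is left out of the filter entirely, so that tag is not
# constrained at all.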
def _fleet():
ec2 = boto.connect_ec2()
hosts = []
for res in ec2.get_all_instances():
for inst in res.instances:
if inst.state == "running":
try:
instance_name = inst.tags['Name']
except KeyError:
logging.warning("Instance with id {id} and {dns} has no assigned Name.".format(id=inst.id, dns=inst.public_dns_name))
# without a Name tag there is nothing to resolve; skip this instance
continue
host_to_add = instance_name + "." + DOMAIN
# fallback to the public hostname if the m.edx.org
# name doesn't exist
try:
socket.gethostbyname(host_to_add.replace(':22',''))
except socket.error:
if inst.public_dns_name:
host_to_add = inst.public_dns_name
if host_to_add:
hosts.append(host_to_add)
return hosts
def exemplar(**tags):
"""
Return the hostname of one host from the specified set
of tags, or None if there is no such host
"""
hosts = hosts_by_tags(**tags)
if hosts:
return hosts[0]
else:
return None
@task(alias='exemplar')
def exemplar_from_tags(**tags):
env.hosts.append(exemplar(**tags))
@task(aliases=['tag', 'tags'])
def by_tags(**tags):
"""
Add all running hosts that match the tag names provided
as keyword arguments.
"""
env.hosts.extend(hosts_by_tags(**tags))
env.hosts.sort()
env.hosts = setup_tunnel(env.hosts)
@task(aliases=['fleet'])
def fleet():
"""
Return a list of all hosts available and running via the default AWS
credentials.
Your ability to operate on these hosts will depend upon the ssh credentials
that you are using to drive fab. There is likely to be a mismatch between
what hosts you can see via IAM managed AWS credentials and which hosts
you can actually connect to even if you are using highly privileged
AWS pems.
"""
hosts = _fleet()
env.hosts.extend(hosts)
env.hosts.sort()
env.hosts = setup_tunnel(env.hosts)
import os
import socket
import time
from output import notify
from safety import noopable
from fabric.api import task, run, env, settings, sudo, abort
from fabric.api import runs_once, execute, serial, hide
MAX_SLEEP_TIME = 10
LOCK_FILE = '/opt/deploy/.lock'
@task
@runs_once
def wait_for_all_locks():
execute('locks.wait_for_lock', hosts=sorted(env.hosts))
@task
@runs_once
def remove_all_locks():
execute('locks.remove_lock', hosts=sorted(env.hosts, reverse=True))
@task
@serial
def remove_lock():
noopable(sudo)("test ! -f {0} || rm {0}".format(LOCK_FILE))
@task
@serial
def wait_for_lock():
if hasattr(env, 'deploy_user'):
lock_user = env.deploy_user
else:
lock_user = env.user
LOCK_ID = 'u:{user} h:{host} pid:{pid}'.format(user=lock_user,
host=socket.gethostname(),
pid=str(os.getpid()))
sleep_time = 0.1
timeout = 120
start_time = time.time()
with settings(warn_only=True):
while True:
wait_time = time.time() - start_time
# break if the lockfile is removed or if it belongs to this pid
# if it exists lock_status will have the file's contents
with hide('running', 'stdout', 'stderr', 'warnings'):
lock_status = run("test ! -f {lfile} || "
"(cat {lfile} && "
'grep -q "{lid}" {lfile})'.format(
lfile=LOCK_FILE,
lid=LOCK_ID))
if lock_status.succeeded:
noopable(sudo)('echo "{0}" > {1}'.format(
LOCK_ID, LOCK_FILE))
notify("Took lock")
break
elif wait_time >= timeout:
abort("Timeout expired, giving up")
lock_create_time = run("stat -c %Y {0}".format(LOCK_FILE))
delta = time.time() - float(lock_create_time)
(dhour, dsec) = divmod(delta, 3600)
notify("""
!! Deploy lockfile already exists ({lockfile}) !!
Waiting: {wait}s
Lockfile info: [ {owner} ]
Lock created: {dhour}h{dmin}m ago
""".format(
lockfile=LOCK_FILE,
wait=int(timeout - wait_time),
owner=lock_status,
dhour=int(dhour),
dmin=int(dsec / 60),
))
time.sleep(sleep_time)
sleep_time *= 2
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
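# Lock acquisition backs off exponentially (0.1s, doubling, capped at
# MAX_SLEEP_TIME) and aborts after the 120s timeout. A hypothetical run
# that takes and releases the deploy locks around some task:
#
#   fab tags:environment=stage locks.wait_for_all_locks some_task locks.remove_all_locks
#
# (`some_task` is a placeholder, not a task in this repo.)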
from fabric.api import task, parallel, put, sudo
from safety import noopable
from .modifiers import rolling
from StringIO import StringIO
import json
# on() and off() below call puppet.checkin(); import the sibling puppet
# module here (assumed to live alongside these fab modules).
import puppet
__all__ = ['on', 'off', 'maintain_service', 'unmaintain_service']
services = ['lms', 'cms', 'lms-xml', 'lms-preview']
def set_maintenance(value):
noopable(put)(StringIO(json.dumps({'maintenance': value})), '/etc/facter/facts.d/mitx_maintenance.json', use_sudo=True)
@task
@parallel
def on():
"""
Enable maintenance mode
"""
set_maintenance(True)
puppet.checkin('maintenance')
@task
@parallel
def off():
"""
Disable maintenance mode
"""
set_maintenance(False)
puppet.checkin('maintenance')
@task
@rolling
def maintain_service(service):
"""
Puts a specified edxapp service into maintenance mode by replacing
its nginx sites-enabled link with a link to the maintenance vhost.
"""
if service not in services:
raise Exception("Provided service not in the service inventory. "
"Acceptable values are {services}".format(
services=services
))
noopable(sudo)("rm -f /etc/nginx/sites-enabled/{service}".format(
service=service))
noopable(sudo)("ln -s /etc/nginx/sites-available/{service}-maintenance"
" /etc/nginx/sites-enabled/{service}-maintenance".format(
service=service))
noopable(sudo)("service nginx reload")
@task
@rolling
def unmaintain_service(service):
"""
Removes a specified edxapp service from maintenance mode by replacing
the appropriate link in /etc/nginx/sites-enabled.
"""
if service not in services:
raise Exception("Provided service not in the service inventory. "
"Acceptable values are {services}".format(
services=services
))
noopable(sudo)("rm -f /etc/nginx/sites-enabled/{service}-maintenance".format(
service=service))
noopable(sudo)("ln -s /etc/nginx/sites-available/{service}"
" /etc/nginx/sites-enabled/{service}".format(
service=service))
noopable(sudo)("service nginx reload")
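# Hypothetical invocations (assuming this file is imported as the
# `maintenance` fab module):
#
#   fab tags:environment=stage maintenance.on
#   fab tags:environment=stage maintenance.maintain_service:lms
#
# maintain_service swaps one host's vhost at a time because @rolling pulls
# each host out of its ELBs before touching nginx.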
import boto
from .ec2 import instance_id
def instance_tags_for_current_host():
"""
Returns the datadog style tags for the active host
"""
return instance_tags([instance_id()])
def instance_tags(instance_ids):
"""
Returns datadog style tags for the specified instances
"""
ec2 = boto.connect_ec2()
tags = set()
for res in ec2.get_all_instances(instance_ids):
for instance in res.instances:
ec2_tags = instance.tags
tags.add('instance_id:' + instance.id)
if 'group' in ec2_tags:
tags.add('fab-group:' + ec2_tags['group'])
if 'environment' in ec2_tags:
tags.add('fab-environment:' + ec2_tags['environment'])
if 'variant' in ec2_tags:
tags.add('fab-variant:' + ec2_tags['variant'])
return list(tags)
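# For an instance tagged group=edxapp and environment=prod this returns
# something like (the instance id is made up):
#
#   ['instance_id:i-0123abcd', 'fab-group:edxapp', 'fab-environment:prod']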
from fabric.api import task, sudo, runs_once, prefix, hide, abort
from fabric.contrib import console
from fabric.colors import white, green
from .safety import noopable
@task()
@runs_once
def migrate_check(auto_migrate=False):
"""
Checks whether migrations need to be run;
if they do, it will prompt to run them before
continuing.
Looks for 'Migrating' in the output of
the dry run.
"""
migration_cmd = "/opt/edx/bin/django-admin.py migrate --noinput " \
"--settings=lms.envs.aws --pythonpath=/opt/wwc/edx-platform"
with prefix("export SERVICE_VARIANT=lms"):
with hide('running', 'stdout', 'stderr', 'warnings'):
dryrun_out = sudo(migration_cmd + " --db-dry-run", user="www-data")
migrate = False
for chunk in dryrun_out.split('Running migrations for '):
if 'Migrating' in chunk:
print "!!! Found Migration !!!\n" + chunk
migrate = True
if migrate:
if auto_migrate or console.confirm(
green(migration_cmd) + white('\n') +
white('Run migrations? ', bold=True), default=True):
noopable(sudo)(migration_cmd, user='www-data')
import boto
import time
from collections import namedtuple
from fabric.api import task, execute, serial
from functools import wraps, partial
from safety import noopable
from output import notify
from dogapi import dog_stats_api
from .metrics import instance_tags
from .ec2 import instance_id
MAX_SLEEP_TIME = 1
LockedElb = namedtuple('LockedElb', 'name elb lock')
def await_elb_instance_state(lb, instance_id, awaited_state):
sleep_time = 0.1
start_time = time.time()
while True:
state = lb.get_instance_health([instance_id])[0].state
if state == awaited_state:
notify("Load Balancer {lb} is in awaited state {awaited_state}, proceeding.".format(
lb=lb.dns_name,
awaited_state=awaited_state
))
break
else:
notify("Checking again in {0} seconds. Elapsed time: {1}".format(sleep_time, time.time() - start_time))
time.sleep(sleep_time)
sleep_time *= 2
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
def rolling(func):
@task
@serial
@wraps(func)
def wrapper(*args, **kwargs):
elb = boto.connect_elb()
elbs = elb.get_all_load_balancers()
execute('locks.wait_for_all_locks')
inst_id = instance_id()
tags = ['task:' + func.__name__] + instance_tags([inst_id])  # instance_tags expects a list of ids
active_lbs = sorted(
lb
for lb in elbs
if inst_id in [info.id for info in lb.instances]
)
timer = partial(dog_stats_api.timer, tags=tags)
# Remove this node from the LB
for lb in active_lbs:
notify("Removing {id} from {lb}".format(id=inst_id, lb=lb))
with timer('rolling.deregister_instance'):
noopable(lb.deregister_instances)([inst_id])
noopable(await_elb_instance_state)(lb, inst_id, "OutOfService")
# Execute the operation
func(*args, **kwargs)
# Add this node back to the LBs
for lb in active_lbs:
notify("Adding {id} to {lb}".format(id=inst_id, lb=lb))
with timer('rolling.register_instance'):
noopable(lb.register_instances)([inst_id])
with timer('rolling.wait_for_start'):
# Wait for the node to come online in the LBs
for lb in active_lbs:
noopable(await_elb_instance_state)(lb, inst_id, "InService")
return wrapper
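# Wrapping a task with @rolling turns it into a serial, one-host-at-a-time
# operation: deregister from every ELB the host belongs to, wait for
# OutOfService, run the task body, re-register, and wait for InService,
# with each phase timed in datadog. A sketch of defining one (this example
# task is hypothetical, not part of this repo):
#
#   @task
#   @rolling
#   def restart_gunicorn():
#       noopable(sudo)('service gunicorn restart')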
import sys
from contextlib import contextmanager
from fabric.api import puts
class SquelchingStream(object):
def __init__(self, stream):
self.__dict__['stream'] = stream
self.__dict__['squelched'] = False
self.__dict__['needs_line_ending'] = False
def write(self, string):
if self.squelched:
self.stream.write('.')
self.stream.flush()
self.needs_line_ending = True
else:
if self.needs_line_ending:
self.needs_line_ending = False
self.stream.write('\n')
self.stream.write(string)
def __getattr__(self, attr):
return getattr(self.stream, attr)
def __setattr__(self, attr, val):
if attr in self.__dict__:
return object.__setattr__(self, attr, val)
return setattr(self.stream, attr, val)
sys.stdout = SquelchingStream(sys.stdout)
sys.stderr = SquelchingStream(sys.stderr)
def squelch():
sys.stdout.squelched = sys.stderr.squelched = True
def unsquelch():
sys.stdout.squelched = sys.stderr.squelched = False
@contextmanager
def unsquelched(stream=sys.stdout):
old_state = stream.squelched
stream.squelched = False
yield
stream.squelched = old_state
def notify(msg, show_prefix=None, end='\n', flush=False):
with unsquelched():
puts(msg, show_prefix, end, flush)
import os
from fabric.api import run, settings, hide, sudo
from collections import defaultdict
import yaml
import re
MIN_REVISION_LENGTH = 7
class PackageInfo:
def __init__(self):
path = os.path.abspath(__file__)
with open(os.path.join(
os.path.dirname(path), '../package_data.yaml')) as f:
package_data = yaml.load(f)
# exhaustive list of MITx repos
self.repo_dirs = package_data['repo_dirs']
self.cmd_list = {
'pre': package_data['pre_checkout_regex'],
'post': package_data['post_checkout_regex']}
self.service_repos = package_data['service_repos']
def repo_from_name(self, name):
repos = []
for repo_root in self.repo_dirs:
if os.path.basename(repo_root) == name:
repos.append(self.repo_dirs[repo_root])
if len(repos) > 1:
raise Exception('Multiple repos found for name')
elif len(repos) == 0:
raise Exception('Repo not found for name')
else:
return repos[0].split('/')[1]
def org_from_name(self, name):
repos = []
for repo_root in self.repo_dirs:
if os.path.basename(repo_root) == name:
repos.append(self.repo_dirs[repo_root])
if len(repos) > 1:
raise Exception('Multiple repos found for name')
elif len(repos) == 0:
raise Exception('Repo not found for name')
else:
return repos[0].split('/')[0]
def pre_post_actions(self, pkgs):
"""
Returns a dictionary containing a list of
commands that need to be executed
pre and post checkout for one or more package names.
return({
'pre': [ 'cmd1', 'cmd2', ... ],
'post': [ 'cmd1', 'cmd2', ... ]
})
"""
cmds = defaultdict(list)
for stage in ['pre', 'post']:
for regex, cmd_templates in self.cmd_list[stage]:
for pkg in pkgs:
match = re.match(regex, pkg)
if match is None:
continue
cmds[stage].extend(
cmd.format(*match.groups(), **match.groupdict())
for cmd in cmd_templates
if cmd not in cmds[stage]
)
return(cmds)
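# The command data comes from package_data.yaml. Based on how cmd_list is
# consumed above, a hypothetical entry looks like:
#
#   pre_checkout_regex:
#     - ['^edx-platform$', ['service gunicorn stop']]
#   post_checkout_regex:
#     - ['^edx-platform$', ['service gunicorn start']]
#
# i.e. a list of [regex, [command template, ...]] pairs, where templates
# may reference regex groups via str.format.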
def installed_packages(self):
"""
Returns the list of PackageDescriptors for the packages
installed on the system.
This is determined by looking at every package directory
we know about and checking its revision.
"""
with settings(hide('running'), warn_only=True):
revisions = sudo(
"""
for path in {0}; do
if [[ -d "$path/.git" ]]; then
echo $path $(cd $path && git rev-parse HEAD 2>/dev/null)
fi
done
""".format(' '.join(self.repo_dirs))).split('\n')
packages = [revline.strip().split(' ') for revline in revisions
if ' ' in revline.strip()]
return [PackageDescriptor(os.path.basename(path), revision)
for path, revision in packages]
class PackageDescriptor(object):
def __init__(self, name, revision):
if revision != 'absent' and len(revision) < MIN_REVISION_LENGTH:
raise Exception("Must use at least {0} characters "
"in revision to pseudo-guarantee uniqueness".format(
MIN_REVISION_LENGTH))
self.name = name
# Find the repo_root by name
# This assumes that basename(repo_root) is unique
# for all repo_roots. If this is not true an exception
# will be raised
pkg_info = PackageInfo()
repo_roots = []
for repo_dir in pkg_info.repo_dirs.keys():
if os.path.basename(repo_dir) == name:
repo_roots.append(repo_dir)
if len(repo_roots) != 1:
raise Exception("Unable to look up directory for repo")
self.repo_root = repo_roots[0]
self.repo_name = pkg_info.repo_dirs[self.repo_root].split('/')[1]
self.repo_org = pkg_info.repo_dirs[self.repo_root].split('/')[0]
self.revision = revision
from fabric.api import env
from output import notify
def noopable(fun):
if env.noop:
def noop(*args, **kwargs):
notify("Would have called: {fun}({args}, {kwargs})".format(
fun=fun.__name__,
args=", ".join(repr(a) for a in args),
kwargs=", ".join("=".join([key, repr(val)]) for key, val in kwargs.items()),
))
return noop
else:
return fun
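# Example: after `fab noop ...`, env.noop is True and a call such as
# noopable(sudo)('apt-get clean') prints
#
#   Would have called: sudo('apt-get clean', )
#
# instead of executing anything on the remote host.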
from fabric.api import task, env, abort
from fabric.colors import red
import os
import re
@task(default=True)
def ssh(user=None):
if user is None:
user = env.user
if len(env.hosts) != 1:
abort(red('Please specify one host for ssh'))
for host in env.hosts:
host = re.sub(r':(\d+)', r' -p\1 ', host)
os.system('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -l {0} {1}'.format(user, host))
from fabric.api import abort, env, fastprint
from fabric.colors import green, red, white
import subprocess
import shlex
import atexit
import time
import boto
import re
import socket
DOMAIN = 'm.edx.org:22'
class SSHTunnel:
port = 9000 # default starting port
tunnels = {}
def __init__(self, host, phost, user, lport=None):
if lport is not None:
SSHTunnel.port = lport
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
try:
s.connect(('localhost', SSHTunnel.port))
s.shutdown(2)
# connection was successful so try a new port
SSHTunnel.port += 1
except socket.error:
self.lport = SSHTunnel.port
break
phost = re.sub(r':(\d+)', r' -p\1 ', phost)
identities = ''
if env.key_filename:
# could be a list or a string
if isinstance(env.key_filename, basestring):
lst = [env.key_filename]
else:
lst = env.key_filename
identities = ' '.join('-i {f} '.format(f=f) for f in lst)
cmd = 'ssh -o UserKnownHostsFile=/dev/null ' \
'{ids}' \
'-o StrictHostKeyChecking=no -vAN -L {lport}:{host} ' \
'{user}@{phost}'.format(ids=identities, lport=self.lport,
host=host, user=user, phost=phost)
self.p = subprocess.Popen(shlex.split(cmd),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
start_time = time.time()
atexit.register(self.p.kill)
while 'Entering interactive session' not in self.p.stderr.readline():
if time.time() > start_time + 10:
abort(red("Unable to create ssh tunnel - `{0}`".format(cmd)))
def local(self):
return 'localhost:{lport}'.format(lport=self.lport)
def setup_tunnel(all_hosts, check_tag=True,
proxy_name=None, user=None, lport=None):
"""
Given a list of hosts, check whether any of them sit behind a proxy
host (via the 'proxy' EC2 tag) when check_tag is True, and return a
modified list of hosts in which tunneled hosts are replaced by their
localhost:port tunnel endpoints.
"""
if user is None:
user = env.user
ec2 = boto.connect_ec2()
# the proxy hosts
proxies = {}
if check_tag:
for res in ec2.get_all_instances(filters={'tag-key': 'proxy'}):
for inst in res.instances:
host = ".".join([inst.tags['Name'], DOMAIN])
proxy = ".".join([inst.tags['proxy'], DOMAIN])
proxies.update({host: proxy})
else:
if not proxy_name:
raise Exception("Must specify a proxy_host")
proxies = {host: proxy_name for host in all_hosts}
# local tunneling ip:port
tunnels = {}
for host in all_hosts:
if host in proxies and host not in SSHTunnel.tunnels:
t = SSHTunnel(host=host, phost=proxies[host],
user=user, lport=lport)
tunnels[host] = t.local()
fastprint(green('created {0} for {1} via {2}'.format(tunnels[host],
host, proxies[host])) + white('\n'))
SSHTunnel.tunnels.update(tunnels)
return([SSHTunnel.tunnels[host] if host in SSHTunnel.tunnels else host
for host in all_hosts])
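# Hosts whose EC2 instance carries a 'proxy' tag are reached through an
# ssh tunnel: setup_tunnel spawns `ssh ... -L <lport>:<host> <user>@<proxy>`
# per host and rewrites that entry in the host list to localhost:<lport>,
# so the rest of the fabfile can treat tunneled and direct hosts alike.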
from fabric.api import task, sudo, abort, parallel, runs_once, execute
from fabric.api import settings, hide
from fabric.operations import put
from fabric.utils import fastprint
from safety import noopable
from fabric.colors import blue, red
from fabric.contrib import console
from output import unsquelched
from timestamps import no_ts
from choose import multi_choose_with_input
import json
import tempfile
status_file = '/opt/wwc/status_message.json'
@task(default=True)
@runs_once
def status():
"""
Drops {0} which is a json formatted file that contains a
status message that will be displayed to all users on the
courseware for a single course, or for all courses
if 'global' is set.
Message(s) are entered or removed interactively on the console.
Example usage:
$ fab groups:prod_edx status
""".format(status_file)
with hide('running', 'stdout', 'stderr', 'warnings'):
env_json = sudo("cat /opt/wwc/lms-xml.env.json")
course_listings = json.loads(env_json)['COURSE_LISTINGS']
course_ids = [course_id for course_list in course_listings.itervalues()
for course_id in course_list]
course_ids = ['global'] + course_ids
with no_ts():
course_status = None
with settings(warn_only=True):
cur_status = noopable(sudo)('cat {0}'.format(status_file))
try:
course_status = json.loads(cur_status)
# add empty entries for courses not in the list
empty_entries = set(course_ids) - set(course_status.keys())
course_status.update({entry: '' for entry in list(empty_entries)})
except ValueError:
fastprint(red("Not a valid json file, overwritting\n"))
if course_status is None:
course_status = {course: '' for course in course_ids}
new_status = multi_choose_with_input(
'Set the status message, blank to disable:',
course_status)
if new_status is not None:
# remove empty entries
new_status = {entry: new_status[entry]
for entry in new_status if len(new_status[entry]) > 1}
with unsquelched():
if not console.confirm(
'Setting new status message:\n{0}'.format(
blue(str(new_status), bold=True)),
default=False):
abort('Operation cancelled by user')
with tempfile.NamedTemporaryFile(delete=True) as f:
f.write(json.dumps(new_status))
f.flush()
execute(update_status, f.name)
else:
abort('Operation cancelled by user')
@task
@runs_once
def remove():
"""
Removes {0}, a status banner that is displayed to all
users on the front page.
""".format(status_file)
with unsquelched():
if not console.confirm(
blue('Remove {0}?'.format(status_file), bold=True)):
abort('Operation cancelled by user')
execute(remove_status)
@task
@parallel
def remove_status():
noopable(sudo)('rm -f {0}'.format(status_file))
@task
@parallel
def update_status(fjson):
print status_file
noopable(put)(fjson, status_file, use_sudo=True)
from datetime import datetime
from contextlib import contextmanager
import sys
@contextmanager
def no_ts():
# suspend timestamps, restoring the previous setting on exit
old_ts = sys.stdout.ts
sys.stdout.ts = False
try:
yield
finally:
sys.stdout.ts = old_ts
class TSWrapper(object):
def __init__(self, stream):
self.o = stream
self.files = []
self.files.append(self.o)
self.newline = True
self.ts = True
def write(self, s):
d = datetime.now()
if self.ts:
buf = ""
lines = s.splitlines(True)
for line in lines:
if self.newline:
buf += d.strftime('[ %Y%m%d %H:%M:%S ] : {0}'.format(line))
else:
buf += str(line)
if line[-1] == '\n':
self.newline = True
else:
self.newline = False
else:
buf = s
for fh in self.files:
fh.write(buf)
fh.flush()
def log_to_file(self, fn):
fp = open(fn, 'a')
self.files.append(fp)
def __getattr__(self, attr):
return getattr(self.o, attr)
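# Every line written through TSWrapper picks up a timestamp prefix, e.g.
#
#   [ 20130101 12:00:00 ] : Took lock
#
# The no_ts() context manager suspends the prefix for blocks that draw
# interactive menus (see status.py and audit.py).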