Commit ac462780 by Gabe Mulley

run tasks using ansible

parent 6b0ca795
"""Remote Task
Usage:
remote-task [--job-flow-id=ID] [--branch=BRANCH] [--repo=REPO] [--name=NAME] [--verbose] [<launch-task-args>]
Options:
--job-flow-id=ID EMR job flow to run the task
--branch=BRANCH git branch to checkout before running the task [default: release]
--repo=REPO git repository to clone
--name=NAME an identifier for this task
--verbose display very verbose output
<launch-task-args> a single-quoted string of arguments to pass-through to launch-task on the remote machine
"""
import os
from subprocess import Popen
import sys
import uuid
from docopt import docopt
def main():
    """Entry point for the ``remote-task`` console script.

    Parses the docopt usage string at the top of this module, moves into the
    directory that ships the ansible playbooks, converts the CLI options into
    ansible ``--extra-vars`` and launches the playbook run.
    """
    cli_args = docopt(__doc__)
    change_directory_to_ansible_script_home()
    ansible_extra_vars = convert_cli_arguments_to_ansible_extra_vars(cli_args)
    verbose = cli_args.get('--verbose', False)
    run_ansible_playbook(verbose, ansible_extra_vars)
def change_directory_to_ansible_script_home():
    """Make the installed ansible playbook directory the current directory.

    The playbooks are installed as package data under
    ``<sys.prefix>/share/edx.analytics.tasks`` (see setup.cfg ``data_files``);
    ansible is expected to run from the root of that directory.
    """
    playbook_home = os.path.join(sys.prefix, 'share', 'edx.analytics.tasks')
    os.chdir(playbook_home)
def convert_cli_arguments_to_ansible_extra_vars(arguments):
    """Build the ``--extra-vars`` string for ansible from docopt results.

    Arguments:
        arguments (dict): the dictionary produced by ``docopt``.

    Returns:
        str: space-separated ``key='value'`` pairs. The ``name`` var carries
        the EMR job flow id (it selects the ``mr_<name>_master`` host group),
        ``uuid`` identifies this run, and ``task_arguments`` gets stdout and
        stderr redirected to per-run files in /tmp on the remote host.
    """
    uid = arguments.get('--name') or str(uuid.uuid4())
    redirection = ' >/tmp/{0}.out 2>/tmp/{0}.err'.format(uid)
    extra_vars = {
        'name': arguments['--job-flow-id'],
        'branch': arguments.get('--branch', 'release'),
        'task_arguments': arguments.get('<launch-task-args>', '') + redirection,
        'uuid': uid,
    }
    # The repository override is optional; only pass it through when given.
    if arguments['--repo']:
        extra_vars['repo'] = arguments['--repo']

    pairs = []
    for key in extra_vars:
        pairs.append("{}='{}'".format(key, extra_vars[key]))
    return ' '.join(pairs)
def run_ansible_playbook(verbose, extra_vars):
    """Execute ``ansible-playbook`` against the EC2 dynamic inventory.

    Arguments:
        verbose (bool): when True, pass ``-vvvv`` for very verbose output.
        extra_vars (str): a ``key='value'`` string handed to ``-e``.

    Blocks until the playbook run finishes.
    """
    ansible_playbook_path = os.path.join(sys.prefix, 'bin', 'ansible-playbook')
    command = [
        ansible_playbook_path, '-i', 'ec2.py', 'task.yml', '-e', extra_vars
    ]
    if verbose:
        command.append('-vvvv')
    env = dict(os.environ)
    env.update({
        # Forward the local SSH agent so the remote host can authenticate
        # to the git server without its own credentials.
        'ANSIBLE_SSH_ARGS': '-o ForwardAgent=yes'
    })
    # BUG FIX: the original used open('/dev/null', 'rw') -- 'rw' is not a
    # valid mode (Python 3 rejects it; Python 2 passed it through to the
    # platform fopen with unspecified results). The handle is only ever used
    # as stdin, so read-only is correct; os.devnull is also portable.
    with open(os.devnull, 'r') as devnull:
        proc = Popen(command, stdin=devnull, env=env)
        proc.communicate()
......@@ -3,5 +3,7 @@ boto==2.22.1
pbr==0.5.23
stevedore==0.13
tornado==3.1.1
ansible==1.4.4
docopt==0.6.1
-e git+https://github.com/spotify/luigi.git@a33756c781b9bf7e51384f0eb19d6a25050ef136#egg=luigi
......@@ -15,10 +15,13 @@ packages =
namespace_packages =
edx
edx.analytics
data_files =
share/edx.analytics.tasks = share/*
[entry_points]
console_scripts =
launch-task = edx.analytics.tasks.main:main
remote-task = edx.analytics.tasks.remote:main
edx.analytics.tasks =
s3-copy = edx.analytics.tasks.s3:S3Copy
s3-sync = edx.analytics.tasks.s3:S3Sync
......
This directory contains ansible playbooks and their supporting files. It is intended to be unpacked; ansible is then executed from the root of this directory.
# Settings for the ec2.py dynamic inventory script (read by its
# read_settings() method).
[ec2]
# AWS regions to query; 'all' queries every region except those in
# regions_exclude.
regions=us-east-1
# Instance attribute used as the inventory hostname.
destination_variable=route53_domain_name
# Fallback attribute considered when choosing the destination address.
backup_destination_variable=public_dns_name
# Directory for the cache files (ansible-ec2.cache / ansible-ec2.index).
cache_path=/tmp
# Cache lifetime in seconds before the AWS API is queried again.
cache_max_age=300
regions_exclude=
# Look up Route53 records and group instances by their domain names.
route53=True
\ No newline at end of file
#!/usr/bin/env python
'''
EC2 external inventory script
=================================
Generates inventory that Ansible can understand by making API request to
AWS EC2 using the Boto library.
NOTE: This script assumes Ansible is being executed where the environment
variables needed for Boto have already been set:
export AWS_ACCESS_KEY_ID='AK123'
export AWS_SECRET_ACCESS_KEY='abc123'
If you're using eucalyptus you need to set the above variables and
you need to define:
export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
When run against a specific host, this script returns the following variables:
- ec2_ami_launch_index
- ec2_architecture
- ec2_association
- ec2_attachTime
- ec2_attachment
- ec2_attachmentId
- ec2_client_token
- ec2_deleteOnTermination
- ec2_description
- ec2_deviceIndex
- ec2_dns_name
- ec2_eventsSet
- ec2_group_name
- ec2_hypervisor
- ec2_id
- ec2_image_id
- ec2_instanceState
- ec2_instance_type
- ec2_ipOwnerId
- ec2_ip_address
- ec2_item
- ec2_kernel
- ec2_key_name
- ec2_launch_time
- ec2_monitored
- ec2_monitoring
- ec2_networkInterfaceId
- ec2_ownerId
- ec2_persistent
- ec2_placement
- ec2_platform
- ec2_previous_state
- ec2_private_dns_name
- ec2_private_ip_address
- ec2_publicIp
- ec2_public_dns_name
- ec2_ramdisk
- ec2_reason
- ec2_region
- ec2_requester_id
- ec2_root_device_name
- ec2_root_device_type
- ec2_security_group_ids
- ec2_security_group_names
- ec2_shutdown_state
- ec2_sourceDestCheck
- ec2_spot_instance_request_id
- ec2_state
- ec2_state_code
- ec2_state_reason
- ec2_status
- ec2_subnet_id
- ec2_tenancy
- ec2_virtualization_type
- ec2_vpc_id
These variables are pulled out of a boto.ec2.instance object. There is a lack of
consistency with variable spellings (camelCase and underscores) since this
just loops through all variables the object exposes. It is preferred to use the
ones with underscores when multiple exist.
In addition, if an instance has AWS Tags associated with it, each tag is a new
variable named:
- ec2_tag_[Key] = [Value]
Security groups are comma-separated in 'ec2_security_group_ids' and
'ec2_security_group_names'.
'''
# (c) 2012, Peter Sankauskas
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import sys
import os
import argparse
import re
from time import time
import boto
from boto import ec2
from boto import rds
from boto import route53
import ConfigParser
try:
import json
except ImportError:
import simplejson as json
class Ec2Inventory(object):
    """Ansible dynamic inventory built from AWS EC2 (and RDS) via boto.

    Instantiating the class runs the whole program: parse CLI arguments,
    read settings from ec2.ini, refresh or reuse the on-disk cache, and
    print the requested inventory JSON to stdout.
    """

    def __init__(self):
        ''' Main execution path '''

        # Inventory grouped by instance IDs, tags, security groups, regions,
        # and availability zones
        self.inventory = {}

        # Index of hostname (address) to instance ID
        self.index = {}

        # Read settings and parse CLI arguments
        self.parse_cli_args()
        self.read_settings()

        # Cache
        if self.args.refresh_cache:
            self.do_api_calls_update_cache()
        elif not self.is_cache_valid():
            self.do_api_calls_update_cache()

        # Data to print
        if self.args.host:
            data_to_print = self.get_host_info()

        elif self.args.list:
            # Display list of instances for inventory
            if len(self.inventory) == 0:
                # Fresh process with a valid cache: serve the cached JSON.
                data_to_print = self.get_inventory_from_cache()
            else:
                data_to_print = self.json_format_dict(self.inventory, True)

        print data_to_print

    def is_cache_valid(self):
        ''' Determines if the cache files have expired, or if it is still valid '''

        if os.path.isfile(self.cache_path_cache):
            mod_time = os.path.getmtime(self.cache_path_cache)
            current_time = time()
            if (mod_time + self.cache_max_age) > current_time:
                # Both files must exist; the index is written alongside the
                # inventory cache, so only check that it is present.
                if os.path.isfile(self.cache_path_index):
                    return True

        return False

    def read_settings(self):
        ''' Reads the settings from the ec2.ini file '''

        config = ConfigParser.SafeConfigParser()
        config.read(self.args.inifile)

        # is eucalyptus?
        self.eucalyptus_host = None
        self.eucalyptus = False
        if config.has_option('ec2', 'eucalyptus'):
            self.eucalyptus = config.getboolean('ec2', 'eucalyptus')

        if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):
            self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')

        # Regions
        self.regions = []
        configRegions = config.get('ec2', 'regions')
        configRegions_exclude = config.get('ec2', 'regions_exclude')
        if (configRegions == 'all'):
            if self.eucalyptus_host:
                self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)
            else:
                for regionInfo in ec2.regions():
                    # NOTE(review): this is a substring test against the raw
                    # exclude string, not a parsed list -- e.g. excluding
                    # 'us-east' would also drop 'us-east-1'. Confirm intended.
                    if regionInfo.name not in configRegions_exclude:
                        self.regions.append(regionInfo.name)
        else:
            self.regions = configRegions.split(",")

        # Destination addresses
        self.destination_variable = config.get('ec2', 'destination_variable')
        self.backup_destination_variable = config.get('ec2', 'backup_destination_variable')

        # Route53
        self.route53_enabled = config.getboolean('ec2', 'route53')
        self.route53_excluded_zones = []
        if config.has_option('ec2', 'route53_excluded_zones'):
            self.route53_excluded_zones.extend(
                config.get('ec2', 'route53_excluded_zones', '').split(','))

        # Cache related
        cache_path = config.get('ec2', 'cache_path')
        self.cache_path_cache = cache_path + "/ansible-ec2.cache"
        self.cache_path_index = cache_path + "/ansible-ec2.index"
        self.cache_max_age = config.getint('ec2', 'cache_max_age')

    def parse_cli_args(self):
        ''' Command line argument processing '''

        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
        parser.add_argument('--list', action='store_true', default=True,
                            help='List instances (default: True)')
        parser.add_argument('--host', action='store',
                            help='Get all the variables about a specific instance')
        parser.add_argument('--refresh-cache', action='store_true', default=False,
                            help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
        # The ini file can be overridden via the ANSIBLE_EC2_INI environment
        # variable; by default it sits next to this script.
        default_inifile = os.environ.get("ANSIBLE_EC2_INI", os.path.dirname(os.path.realpath(__file__))+'/ec2.ini')
        parser.add_argument('--inifile', dest='inifile', help='Path to init script to use', default=default_inifile)
        self.args = parser.parse_args()

    def do_api_calls_update_cache(self):
        ''' Do API calls to each region, and save data in cache files '''

        # Route53 records are fetched first so add_instance() can consult them.
        if self.route53_enabled:
            self.get_route53_records()

        for region in self.regions:
            self.get_instances_by_region(region)
            self.get_rds_instances_by_region(region)

        self.write_to_cache(self.inventory, self.cache_path_cache)
        self.write_to_cache(self.index, self.cache_path_index)

    def get_instances_by_region(self, region):
        ''' Makes an AWS EC2 API call to the list of instances in a particular
        region '''

        try:
            if self.eucalyptus:
                conn = boto.connect_euca(host=self.eucalyptus_host)
                conn.APIVersion = '2010-08-31'
            else:
                conn = ec2.connect_to_region(region)

            # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
            if conn is None:
                print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
                sys.exit(1)

            reservations = conn.get_all_instances()
            for reservation in reservations:
                instances = sorted(reservation.instances)
                for instance in instances:
                    self.add_instance(instance, region)

        except boto.exception.BotoServerError as e:
            # Eucalyptus errors are tolerated; plain AWS errors are fatal.
            if not self.eucalyptus:
                print "Looks like AWS is down again:"
                print e
                sys.exit(1)

    def get_rds_instances_by_region(self, region):
        ''' Makes an AWS API call to the list of RDS instances in a particular
        region '''

        try:
            conn = rds.connect_to_region(region)
            if conn:
                instances = conn.get_all_dbinstances()
                for instance in instances:
                    self.add_rds_instance(instance, region)
        except boto.exception.BotoServerError as e:
            print "Looks like AWS RDS is down: "
            print e
            sys.exit(1)

    def get_instance(self, region, instance_id):
        ''' Gets details about a specific instance '''
        if self.eucalyptus:
            conn = boto.connect_euca(self.eucalyptus_host)
            conn.APIVersion = '2010-08-31'
        else:
            conn = ec2.connect_to_region(region)

        # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
        if conn is None:
            print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
            sys.exit(1)

        # A reservation for a single instance id contains exactly that
        # instance; return the first (only) one found.
        reservations = conn.get_all_instances([instance_id])
        for reservation in reservations:
            for instance in reservation.instances:
                return instance

    def add_instance(self, instance, region):
        ''' Adds an instance to the inventory and index, as long as it is
        addressable '''

        # Only want running instances
        if instance.state != 'running':
            return

        # Determine route53 domain names
        route53_names = self.get_instance_route53_names(instance) if self.route53_enabled else set()
        if len(route53_names) > 0:
            # Expose the first domain name as an instance attribute so it can
            # be selected via destination_variable=route53_domain_name.
            instance.route53_domain_name = route53_names[0]

        # Select the best destination address
        dest = None
        # NOTE(review): the backup variable is checked last, so when both
        # attributes exist the backup OVERWRITES the preferred one -- confirm
        # whether the intent was "use backup only when primary is missing".
        for dest_variable in [self.destination_variable, self.backup_destination_variable]:
            if hasattr(instance, dest_variable):
                dest = getattr(instance, dest_variable)

        if not dest:
            # Skip instances we cannot address (e.g. private VPC subnet)
            return

        # Add to index
        self.index[dest] = [region, instance.id]

        # Inventory: Group by instance ID (always a group of 1)
        self.inventory[instance.id] = [dest]

        # Inventory: Group by region
        self.push(self.inventory, region, dest)

        # Inventory: Group by availability zone
        self.push(self.inventory, instance.placement, dest)

        # Inventory: Group by instance type
        self.push(self.inventory, self.to_safe('type_' + instance.instance_type), dest)

        # Inventory: Group by key pair
        if instance.key_name:
            self.push(self.inventory, self.to_safe('key_' + instance.key_name), dest)

        # Inventory: Group by security group
        try:
            for group in instance.groups:
                key = self.to_safe("security_group_" + group.name)
                self.push(self.inventory, key, dest)
        except AttributeError:
            print 'Package boto seems a bit older.'
            print 'Please upgrade boto >= 2.3.0.'
            sys.exit(1)

        # Inventory: Group by tag keys
        for k, v in instance.tags.iteritems():
            key = self.to_safe("tag_" + k + "=" + v)
            self.push(self.inventory, key, dest)
            # Also keep a one-member group holding the first host seen with
            # this tag, so plays can target a single representative.
            self.keep_first(self.inventory, 'first_in_' + key, dest)
            if k == 'ansible:groups':
                # The 'ansible:groups' tag holds a comma-separated list of
                # extra group names for this host.
                for group in v.split(','):
                    self.push(self.inventory, group, dest)
            elif k == 'aws:elasticmapreduce:job-flow-id':
                # EMR cluster members get groups like 'mr_<flow-id>_master',
                # which the task playbook uses to find the master node.
                role = instance.tags.get('aws:elasticmapreduce:instance-group-role')
                if role:
                    self.push(self.inventory, 'mr_{name}_{role}'.format(name=v, role=role.lower()), dest)

        # Inventory: Group by Route53 domain names
        for name in route53_names:
            self.push(self.inventory, name, dest)

    def add_rds_instance(self, instance, region):
        ''' Adds an RDS instance to the inventory and index, as long as it is
        addressable '''

        # Only want available instances
        if instance.status != 'available':
            return

        # Select the best destination address
        dest = instance.endpoint[0]

        if not dest:
            # Skip instances we cannot address (e.g. private VPC subnet)
            return

        # Add to index
        self.index[dest] = [region, instance.id]

        # Inventory: Group by instance ID (always a group of 1)
        self.inventory[instance.id] = [dest]

        # Inventory: Group by region
        self.push(self.inventory, region, dest)

        # Inventory: Group by availability zone
        self.push(self.inventory, instance.availability_zone, dest)

        # Inventory: Group by instance type
        self.push(self.inventory, self.to_safe('type_' + instance.instance_class), dest)

        # Inventory: Group by security group
        try:
            if instance.security_group:
                key = self.to_safe("security_group_" + instance.security_group.name)
                self.push(self.inventory, key, dest)
        except AttributeError:
            print 'Package boto seems a bit older.'
            print 'Please upgrade boto >= 2.3.0.'
            sys.exit(1)

        # Inventory: Group by engine
        self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest)

        # Inventory: Group by parameter group
        self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest)

    def get_route53_records(self):
        ''' Get and store the map of resource records to domain names that
        point to them. '''

        r53_conn = route53.Route53Connection()
        zone_response = r53_conn.get_all_hosted_zones()
        all_zones = zone_response.get('ListHostedZonesResponse', {}).get('HostedZones', [])
        # Zone names come back with a trailing dot; strip it before matching
        # against the configured exclusion list.
        route53_zones = [ zone for zone in all_zones if zone['Name'][:-1]
                          not in self.route53_excluded_zones ]

        self.route53_records = {}

        for zone in route53_zones:
            rrsets = r53_conn.get_all_rrsets(zone['Id'].split('/')[-1])

            for record_set in rrsets:
                record_name = record_set.name

                if record_name.endswith('.'):
                    record_name = record_name[:-1]

                for resource in record_set.resource_records:
                    # Map each record value (address/target) to the set of
                    # domain names that point at it.
                    self.route53_records.setdefault(resource, set())
                    self.route53_records[resource].add(record_name)

    def get_instance_route53_names(self, instance):
        ''' Check if an instance is referenced in the records we have from
        Route53. If it is, return the list of domain names pointing to said
        instance. If nothing points to it, return an empty list. '''

        instance_attributes = [ 'public_dns_name', 'private_dns_name',
                                'ip_address', 'private_ip_address' ]

        name_list = set()

        for attrib in instance_attributes:
            try:
                value = getattr(instance, attrib)
            except AttributeError:
                continue

            if value in self.route53_records:
                name_list.update(self.route53_records[value])

        return list(name_list)

    def get_host_info(self):
        ''' Get variables about a specific host '''

        if len(self.index) == 0:
            # Need to load index from cache
            self.load_index_from_cache()

        if not self.args.host in self.index:
            # try updating the cache
            self.do_api_calls_update_cache()
            if not self.args.host in self.index:
                # host migh not exist anymore
                return self.json_format_dict({}, True)

        (region, instance_id) = self.index[self.args.host]

        instance = self.get_instance(region, instance_id)
        instance_vars = {}
        for key in vars(instance):
            value = getattr(instance, key)
            key = self.to_safe('ec2_' + key)

            # Handle complex types
            if type(value) in [int, bool]:
                instance_vars[key] = value
            elif type(value) in [str, unicode]:
                instance_vars[key] = value.strip()
            elif type(value) == type(None):
                instance_vars[key] = ''
            elif key == 'ec2_region':
                instance_vars[key] = value.name
            elif key == 'ec2_tags':
                for k, v in value.iteritems():
                    key = self.to_safe('ec2_tag_' + k)
                    instance_vars[key] = v
                    if k.startswith('ansible:var:'):
                        # 'ansible:var:foo' tags are additionally exposed
                        # directly under the variable name 'foo'.
                        key = self.to_safe(k[12:])
                        instance_vars[key] = v
            elif key == 'ec2_groups':
                group_ids = []
                group_names = []
                for group in value:
                    group_ids.append(group.id)
                    group_names.append(group.name)
                instance_vars["ec2_security_group_ids"] = ','.join(group_ids)
                instance_vars["ec2_security_group_names"] = ','.join(group_names)
            else:
                pass
                # TODO Product codes if someone finds them useful
                #print key
                #print type(value)
                #print value

        return self.json_format_dict(instance_vars, True)

    def push(self, my_dict, key, element):
        ''' Pushed an element onto an array that may not have been defined in
        the dict '''

        if key in my_dict:
            my_dict[key].append(element);
        else:
            my_dict[key] = [element]

    def keep_first(self, my_dict, key, element):
        # Store only the FIRST element ever seen for this key; later calls
        # with the same key are ignored (unlike push(), which appends).
        if key not in my_dict:
            my_dict[key] = [element]

    def get_inventory_from_cache(self):
        ''' Reads the inventory from the cache file and returns it as a JSON
        object '''

        # NOTE(review): the handle is never closed; harmless in this
        # short-lived script but a 'with' block would be tidier.
        cache = open(self.cache_path_cache, 'r')
        json_inventory = cache.read()
        return json_inventory

    def load_index_from_cache(self):
        ''' Reads the index from the cache file sets self.index '''

        # NOTE(review): same unclosed-handle pattern as
        # get_inventory_from_cache().
        cache = open(self.cache_path_index, 'r')
        json_index = cache.read()
        self.index = json.loads(json_index)

    def write_to_cache(self, data, filename):
        ''' Writes data in JSON format to a file '''

        json_data = self.json_format_dict(data, True)
        cache = open(filename, 'w')
        cache.write(json_data)
        cache.close()

    def to_safe(self, word):
        ''' Converts 'bad' characters in a string to underscores so they can be
        used as Ansible groups '''

        return re.sub("[^A-Za-z0-9\-]", "_", word)

    def json_format_dict(self, data, pretty=False):
        ''' Converts a dict to a JSON object and dumps it as a formatted
        string '''

        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)
# Run the script only when executed directly (which is how ansible invokes a
# dynamic inventory). The guard prevents AWS API calls from firing as an
# import side effect (e.g. under pydoc or a test runner).
if __name__ == '__main__':
    Ec2Inventory()
---
# Default hadoop settings for the luigi client configuration, matching the
# EMR image layout (Apache Hadoop 1.0.3 under /home/hadoop). These values
# are interpolated into client.cfg.j2 by the luigi role.
luigi_hadoop_version: apache1
luigi_hadoop_command: /home/hadoop/bin/hadoop
luigi_hadoop_streaming_jar: /home/hadoop/.versions/1.0.3/share/hadoop/contrib/streaming/hadoop-streaming.jar
---
# Tasks for the "luigi" role: install the luigi client configuration that
# points luigi at the local hadoop installation.
- name: configuration directory created
  file: path=/etc/luigi state=directory mode=755
- name: configuration file written
  template: src=client.cfg.j2 dest=/etc/luigi/client.cfg mode=644
# Luigi client configuration, rendered to /etc/luigi/client.cfg by the
# luigi role; the values come from the role's hadoop default variables.
[hadoop]
version: {{ luigi_hadoop_version }}
command: {{ luigi_hadoop_command }}
streaming-jar: {{ luigi_hadoop_streaming_jar }}
---
# Run an analytics task on the master node of an EMR job flow. The 'name'
# extra-var is the job flow id; the ec2.py inventory exposes the master as
# the host group "mr_<name>_master".

- name: Configure luigi
  hosts: "mr_{{ name }}_master"
  gather_facts: False
  sudo: True
  roles:
    - luigi

- name: Run a task
  hosts: "mr_{{ name }}_master"
  gather_facts: False
  sudo: False
  vars:
    - repo: git@github.com:edx/analytics-tasks.git
    - branch: master
    # Per-run scratch space, keyed by the uuid extra-var so concurrent runs
    # do not collide.
    - working_dir: "/tmp/task-work-{{ uuid }}"
    - working_repo_dir: "{{ working_dir }}/repo"
    - working_venv_dir: "{{ working_dir }}/venv"
    - task_arguments: ''
    # Pinned git server host key so the first clone is not blocked by an
    # interactive known_hosts prompt.
    - git_server_hostname: github.com
    - git_server_ip_address: 207.97.227.239
    # BUG FIX: removed a stray doubled trailing quote that corrupted the
    # public key value (it ended with =='' instead of ==').
    - git_server_public_key: 'ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ=='
  tasks:
    - name: known_hosts file exists
      command: touch /home/{{ ansible_ssh_user }}/.ssh/known_hosts creates=/home/{{ ansible_ssh_user }}/.ssh/known_hosts
    - name: git server in known_hosts file
      lineinfile: >
        dest=/home/{{ ansible_ssh_user }}/.ssh/known_hosts
        regexp=^github.com
        line="{{ git_server_hostname }},{{ git_server_ip_address }} {{ git_server_public_key }}"
    - name: working directories created
      file: path={{ item }} state=directory
      with_items:
        - "{{ working_dir }}"
        - "{{ working_repo_dir }}"
        - "{{ working_venv_dir }}"
    - name: analytics tasks repository checked out
      git: repo={{ repo }} dest={{ working_repo_dir }}
    - name: branch fetched
      command: git fetch origin {{ branch }} chdir={{ working_repo_dir }}
    - name: branch checked out
      command: git checkout FETCH_HEAD chdir={{ working_repo_dir }}
    - name: bootstrap pip
      command: sudo apt-get install -q -y python-pip
      sudo: True
    - name: virtualenv installed
      pip: name=virtualenv version=1.10.1
      sudo: True
    - name: virtualenv created
      command: >
        virtualenv {{ working_venv_dir }}
    - name: virtualenv initialized
      shell: >
        . {{ working_venv_dir }}/bin/activate && make install
        chdir={{ working_repo_dir }}
    # Fire and forget: the task may run for hours, so it is launched
    # asynchronously (output goes to the /tmp redirection baked into
    # task_arguments by remote-task).
    - name: task run
      shell: >
        {{ working_venv_dir }}/bin/launch-task {{ task_arguments }} chdir={{ working_repo_dir }}
      async: 10000000000
      poll: 0
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment