Commit 1c732af3 by Feanil Patel

Merge pull request #2001 from edx/feanil/pre_merge_cleanup

Feanil/pre merge cleanup
parents a66f98ee 3e908bb4
@@ -17,7 +17,7 @@
     policies: "{{ role_policies }}"
 - name: Manage ELB security group
-  ec2_group_devel:
+  ec2_group:
     profile: "{{ profile }}"
     description: "{{ elb_security_group.description }}"
     name: "{{ elb_security_group.name }}"
@@ -28,7 +28,7 @@
   register: elb_sec_group
 - name: Manage service security group
-  ec2_group_devel:
+  ec2_group:
     profile: "{{ profile }}"
     description: "{{ service_security_group.description }}"
     name: "{{ service_security_group.name }}"
@@ -106,7 +106,7 @@
 # target: "local" # simplifying generalization of instance-id, gateway-id or local
 - name: Manage ELB
-  ec2_elb_lb_1.8:
+  ec2_elb_lb:
     profile: "{{ profile }}"
     region: "{{ aws_region }}"
     scheme: "{{ elb_scheme }}"
@@ -121,7 +121,7 @@
 # Service related components
 #
 - name: Manage the launch configuration
-  ec2_lc_local:
+  ec2_lc:
     profile: "{{ profile }}"
     region: "{{ aws_region }}"
     name: "{{ service_config.name }}"
@@ -141,7 +141,7 @@
   register: service_vpc_zone_identifier_string
 - name: Manage ASG
-  ec2_asg_1.7.1:
+  ec2_asg:
     profile: "{{ profile }}"
     region: "{{ aws_region }}"
     name: "{{ asg_name }}"
@@ -157,7 +157,7 @@
   when: auto_scaling_service
 - name: Manage scaling policies
-  ec2_scaling_policy_devel:
+  ec2_scaling_policy:
     state: "{{ item.state }}"
     profile: "{{ item.profile }}"
     region: "{{ item.region }}"
...
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: ec2_asg
short_description: Create or delete AWS Autoscaling Groups
description:
- Can create or delete AWS Autoscaling Groups
- Works with the ec2_lc module to manage Launch Configurations
version_added: "1.6"
author: Gareth Rushgrove
options:
state:
description:
- Create or delete the Auto Scaling group
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for group to be created or deleted
required: true
load_balancers:
description:
- List of ELB names to use for the group
required: false
availability_zones:
description:
- List of availability zone names in which to create the group. Defaults to all the availability zones in the region if vpc_zone_identifier is not set.
required: false
launch_config_name:
description:
- Name of the Launch configuration to use for the group. See the ec2_lc module for managing these.
required: false
min_size:
description:
- Minimum number of instances in group
required: false
max_size:
description:
- Maximum number of instances in group
required: false
desired_capacity:
description:
- Desired number of instances in group
required: false
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
vpc_zone_identifier:
description:
- List of VPC subnets to use
required: false
default: None
tags:
description:
- A list of tags to add to the Auto Scale Group. Optional key is 'propagate_at_launch', which defaults to true.
required: false
default: None
version_added: "1.7"
health_check_period:
description:
- Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
required: false
default: 300 seconds
version_added: "1.7"
health_check_type:
description:
- The service you want the health status from, Amazon EC2 or Elastic Load Balancer.
required: false
default: EC2
version_added: "1.7"
choices: ['EC2', 'ELB']
extends_documentation_fragment: aws
"""
EXAMPLES = '''
- ec2_asg:
name: special
load_balancers: 'lb1,lb2'
availability_zones: 'eu-west-1a,eu-west-1b'
launch_config_name: 'lc-1'
min_size: 1
max_size: 10
desired_capacity: 5
vpc_zone_identifier: 'subnet-abcd1234,subnet-1a2b3c4d'
tags:
- environment: production
propagate_at_launch: no
# deprecated method of expressing tags:
tags:
- key: environment
value: production
propagate_at_launch: no
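# A minimal sketch of deleting the same group. Only name and state are
# needed here; min_size, max_size and launch_config_name are enforced
# only for create/update.
- ec2_asg:
    name: special
    state: absent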
'''
import sys
import time
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
try:
import boto.ec2.autoscale
from boto.ec2.autoscale import AutoScaleConnection, AutoScalingGroup, Tag
from boto.exception import BotoServerError
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
ASG_ATTRIBUTES = ('launch_config_name', 'max_size', 'min_size', 'desired_capacity',
'vpc_zone_identifier', 'availability_zones',
'health_check_period', 'health_check_type')
def enforce_required_arguments(module):
''' Because many arguments are not required for autoscaling group deletion,
they cannot be mandatory module arguments, so we enforce them here for
create/update. '''
missing_args = []
for arg in ('min_size', 'max_size', 'launch_config_name'):
if module.params[arg] is None:
missing_args.append(arg)
if missing_args:
module.fail_json(msg="Missing required arguments for autoscaling group create/update: %s" % ",".join(missing_args))
def get_properties(autoscaling_group):
properties = dict((attr, getattr(autoscaling_group, attr)) for attr in ASG_ATTRIBUTES)
if autoscaling_group.instances:
properties['instances'] = [i.instance_id for i in autoscaling_group.instances]
properties['load_balancers'] = autoscaling_group.load_balancers
return properties
def create_autoscaling_group(connection, module):
group_name = module.params.get('name')
load_balancers = module.params['load_balancers']
availability_zones = module.params['availability_zones']
launch_config_name = module.params.get('launch_config_name')
min_size = module.params['min_size']
max_size = module.params['max_size']
desired_capacity = module.params.get('desired_capacity')
vpc_zone_identifier = module.params.get('vpc_zone_identifier')
set_tags = module.params.get('tags')
health_check_period = module.params.get('health_check_period')
health_check_type = module.params.get('health_check_type')
as_groups = connection.get_all_groups(names=[group_name])
if not vpc_zone_identifier and not availability_zones:
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
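# Build boto Tag objects from the requested tags; both the deprecated
# {key: ..., value: ...} form and the newer single-key mapping form are accepted.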
asg_tags = []
for tag in set_tags:
if tag.has_key('key') and tag.has_key('value'): # this block is to support the deprecated form
asg_tags.append(Tag(key=tag.get('key'),
value=tag.get('value'),
propagate_at_launch=bool(tag.get('propagate_at_launch', True)),
resource_id=group_name))
else:
for k,v in tag.iteritems():
if k !='propagate_at_launch':
asg_tags.append(Tag(key=k,
value=v,
propagate_at_launch=bool(tag.get('propagate_at_launch', True)),
resource_id=group_name))
if not as_groups:
if not vpc_zone_identifier and not availability_zones:
availability_zones = module.params['availability_zones'] = [zone.name for zone in ec2_connection.get_all_zones()]
enforce_required_arguments(module)
launch_configs = connection.get_all_launch_configurations(names=[launch_config_name])
ag = AutoScalingGroup(
group_name=group_name,
load_balancers=load_balancers,
availability_zones=availability_zones,
launch_config=launch_configs[0],
min_size=min_size,
max_size=max_size,
desired_capacity=desired_capacity,
vpc_zone_identifier=vpc_zone_identifier,
connection=connection,
tags=asg_tags,
health_check_period=health_check_period,
health_check_type=health_check_type)
try:
connection.create_auto_scaling_group(ag)
asg_properties = get_properties(ag)
module.exit_json(changed=True, **asg_properties)
except BotoServerError, e:
module.fail_json(msg=str(e))
else:
as_group = as_groups[0]
changed = False
for attr in ASG_ATTRIBUTES:
if module.params.get(attr) and getattr(as_group, attr) != module.params.get(attr):
changed = True
setattr(as_group, attr, module.params.get(attr))
if len(set_tags) > 0:
existing_tags = as_group.tags
existing_tag_map = dict((tag.key, tag) for tag in existing_tags)
for tag in set_tags:
if tag.has_key('key') and tag.has_key('value'): # this is to support deprecated method
if 'key' not in tag:
continue
if ( not tag['key'] in existing_tag_map or
existing_tag_map[tag['key']].value != tag['value'] or
('propagate_at_launch' in tag and
existing_tag_map[tag['key']].propagate_at_launch != tag['propagate_at_launch']) ):
changed = True
continue
else:
for k,v in tag.iteritems():
if k !='propagate_at_launch':
if ( not k in existing_tag_map or
existing_tag_map[k].value != v or
('propagate_at_launch' in tag and
existing_tag_map[k].propagate_at_launch != tag['propagate_at_launch']) ):
changed = True
continue
if changed:
connection.create_or_update_tags(asg_tags)
# handle loadbalancers separately because None != []
load_balancers = module.params.get('load_balancers') or []
if load_balancers and as_group.load_balancers != load_balancers:
changed = True
as_group.load_balancers = module.params.get('load_balancers')
try:
if changed:
as_group.update()
asg_properties = get_properties(as_group)
module.exit_json(changed=changed, **asg_properties)
except BotoServerError, e:
module.fail_json(msg=str(e))
result = as_groups[0]
module.exit_json(changed=changed, name=result.name,
autoscaling_group_arn=result.autoscaling_group_arn,
availability_zones=result.availability_zones,
created_time=str(result.created_time),
default_cooldown=result.default_cooldown,
health_check_period=result.health_check_period,
health_check_type=result.health_check_type,
instance_id=result.instance_id,
instances=[instance.instance_id for instance in result.instances],
launch_config_name=result.launch_config_name,
load_balancers=result.load_balancers,
min_size=result.min_size, max_size=result.max_size,
placement_group=result.placement_group,
tags=result.tags,
termination_policies=result.termination_policies,
vpc_zone_identifier=result.vpc_zone_identifier)
def delete_autoscaling_group(connection, module):
group_name = module.params.get('name')
groups = connection.get_all_groups(names=[group_name])
if groups:
group = groups[0]
group.max_size = 0
group.min_size = 0
group.desired_capacity = 0
group.update()
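# Poll until all instances have been terminated before deleting the
# now-empty group.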
instances = True
while instances:
tmp_groups = connection.get_all_groups(names=[group_name])
if tmp_groups:
tmp_group = tmp_groups[0]
if not tmp_group.instances:
instances = False
time.sleep(10)
group.delete()
module.exit_json(changed=True)
else:
module.exit_json(changed=False)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
load_balancers=dict(type='list'),
availability_zones=dict(type='list'),
launch_config_name=dict(type='str'),
min_size=dict(type='int'),
max_size=dict(type='int'),
desired_capacity=dict(type='int'),
vpc_zone_identifier=dict(type='str'),
state=dict(default='present', choices=['present', 'absent']),
tags=dict(type='list', default=[]),
health_check_period=dict(type='int', default=300),
health_check_type=dict(default='EC2', choices=['EC2', 'ELB']),
)
)
module = AnsibleModule(argument_spec=argument_spec)
state = module.params.get('state')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
if not connection:
module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region))
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
if state == 'present':
create_autoscaling_group(connection, module)
elif state == 'absent':
delete_autoscaling_group(connection, module)
main()
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: ec2_elb_lb
short_description: Creates or destroys Amazon ELB.
description:
- Returns information about the load balancer.
- Will be marked changed when called only if state is changed.
version_added: "1.5"
author: Jim Dalton
options:
state:
description:
- Create or destroy the ELB
required: true
name:
description:
- The name of the ELB
required: true
listeners:
description:
- List of ports/protocols for this ELB to listen on (see example)
required: false
purge_listeners:
description:
- Purge existing listeners on ELB that are not found in listeners
required: false
default: true
zones:
description:
- List of availability zones to enable on this ELB
required: false
purge_zones:
description:
- Purge existing availability zones on ELB that are not found in zones
required: false
default: false
security_group_ids:
description:
- A list of security groups to apply to the elb
required: false
default: None
version_added: "1.6"
health_check:
description:
- An associative array of health check configuration settings (see example)
required: false
default: None
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
subnets:
description:
- A list of VPC subnets to use when creating ELB. Zones should be empty if using this.
required: false
default: None
aliases: []
version_added: "1.6"
purge_subnets:
description:
- Purge existing subnet on ELB that are not found in subnets
required: false
default: false
scheme:
description:
- The scheme to use when creating the ELB. For a private VPC-visible ELB use 'internal'.
required: false
default: 'internet-facing'
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
aliases: []
version_added: "1.5"
extends_documentation_fragment: aws
"""
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic provisioning example
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
- protocol: https
load_balancer_port: 443
instance_protocol: http # optional, defaults to value of protocol setting
instance_port: 80
# ssl certificate required for https or ssl
ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert"
# Basic VPC provisioning example
- local_action:
module: ec2_elb_lb
name: "test-vpc"
scheme: internal
state: present
subnets:
- subnet-abcd1234
- subnet-1a2b3c4d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
# Configure a health check
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
health_check:
ping_protocol: http # options are http, https, ssl, tcp
ping_port: 80
ping_path: "/index.html" # not required for tcp or ssl
response_timeout: 5 # seconds
interval: 30 # seconds
unhealthy_threshold: 2
healthy_threshold: 10
# Ensure ELB is gone
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: absent
# Normally, this module will purge any listeners that exist on the ELB
# but aren't specified in the listeners parameter. If purge_listeners is
# false it leaves them alone
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_listeners: no
# Normally, this module will leave availability zones that are enabled
# on the ELB alone. If purge_zones is true, then any extraneous zones
# will be removed
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_zones: yes
# Creates an ELB and assigns a list of subnets to it.
- local_action:
module: ec2_elb_lb
state: present
name: 'New ELB'
security_group_ids: 'sg-123456, sg-67890'
region: us-west-2
subnets: 'subnet-123456, subnet-67890'
purge_subnets: yes
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
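# A minimal sketch of tagging an ELB. The tags parameter is accepted by this
# module's argument spec as a list of key/value pairs and applied via do_tags.
- local_action:
    module: ec2_elb_lb
    name: "test-please-delete"
    state: present
    zones:
      - us-east-1a
    listeners:
      - protocol: http
        load_balancer_port: 80
        instance_port: 80
    tags:
      - key: environment
        value: production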
"""
import sys
import os
try:
import boto
import boto.ec2.elb
from boto.ec2.elb.healthcheck import HealthCheck
from boto.regioninfo import RegionInfo
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
class ElbManager(object):
"""Handles ELB creation and destruction"""
def __init__(self, module, name, listeners=None, purge_listeners=None,
zones=None, purge_zones=None, security_group_ids=None,
health_check=None, subnets=None, purge_subnets=None,
scheme="internet-facing", tags=[], region=None, **aws_connect_params):
self.module = module
self.name = name
self.listeners = listeners
self.purge_listeners = purge_listeners
self.zones = zones
self.purge_zones = purge_zones
self.security_group_ids = security_group_ids
self.health_check = health_check
self.subnets = subnets
self.purge_subnets = purge_subnets
self.scheme = scheme
self.tags = tags
self.aws_connect_params = aws_connect_params
self.region = region
self.changed = False
self.status = 'gone'
self.elb_conn = self._get_elb_connection()
self.elb = self._get_elb()
def ensure_ok(self):
"""Create the ELB"""
if not self.elb:
# Zones and listeners will be added at creation
self._create_elb()
else:
self._set_zones()
self._set_security_groups()
self._set_elb_listeners()
self._set_subnets()
self._set_health_check()
self.do_tags()
def ensure_gone(self):
"""Destroy the ELB"""
if self.elb:
self._delete_elb()
def get_info(self):
try:
check_elb = self.elb_conn.get_all_load_balancers(self.name)[0]
except:
check_elb = None
if not check_elb:
info = {
'name': self.name,
'status': self.status
}
else:
info = {
'name': check_elb.name,
'dns_name': check_elb.dns_name,
'zones': check_elb.availability_zones,
'security_group_ids': check_elb.security_groups,
'status': self.status,
'subnets': self.subnets,
'scheme': check_elb.scheme
}
if check_elb.health_check:
info['health_check'] = {
'target': check_elb.health_check.target,
'interval': check_elb.health_check.interval,
'timeout': check_elb.health_check.timeout,
'healthy_threshold': check_elb.health_check.healthy_threshold,
'unhealthy_threshold': check_elb.health_check.unhealthy_threshold,
}
if check_elb.listeners:
info['listeners'] = [l.get_complex_tuple()
for l in check_elb.listeners]
elif self.status == 'created':
# When creating a new ELB, listeners don't show in the
# immediately returned result, so just include the
# ones that were added
info['listeners'] = [self._listener_as_tuple(l)
for l in self.listeners]
else:
info['listeners'] = []
return info
def _get_elb(self):
elbs = self.elb_conn.get_all_load_balancers()
for elb in elbs:
if self.name == elb.name:
self.status = 'ok'
return elb
def _get_elb_connection(self):
try:
return connect_to_aws(boto.ec2.elb, self.region,
**self.aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
self.module.fail_json(msg=str(e))
def _delete_elb(self):
# True if succeeds, exception raised if not
result = self.elb_conn.delete_load_balancer(name=self.name)
if result:
self.changed = True
self.status = 'deleted'
def _create_elb(self):
listeners = [self._listener_as_tuple(l) for l in self.listeners]
self.elb = self.elb_conn.create_load_balancer(name=self.name,
zones=self.zones,
security_groups=self.security_group_ids,
complex_listeners=listeners,
subnets=self.subnets,
scheme=self.scheme)
if self.elb:
self.changed = True
self.status = 'created'
def _create_elb_listeners(self, listeners):
"""Takes a list of listener tuples and creates them"""
# True if succeeds, exception raised if not
self.changed = self.elb_conn.create_load_balancer_listeners(self.name,
complex_listeners=listeners)
def _delete_elb_listeners(self, listeners):
"""Takes a list of listener tuples and deletes them from the elb"""
ports = [l[0] for l in listeners]
# True if succeeds, exception raised if not
self.changed = self.elb_conn.delete_load_balancer_listeners(self.name,
ports)
def _set_elb_listeners(self):
"""
Creates listeners specified by self.listeners; overwrites existing
listeners on these ports; removes extraneous listeners
"""
listeners_to_add = []
listeners_to_remove = []
listeners_to_keep = []
# Check for any listeners we need to create or overwrite
for listener in self.listeners:
listener_as_tuple = self._listener_as_tuple(listener)
# First we loop through existing listeners to see if one is
# already specified for this port
existing_listener_found = None
for existing_listener in self.elb.listeners:
# Since ELB allows only one listener on each incoming port, a
# single match on the incoming port is all we're looking for
if existing_listener[0] == listener['load_balancer_port']:
existing_listener_found = existing_listener.get_complex_tuple()
break
if existing_listener_found:
# Does it match exactly?
if listener_as_tuple != existing_listener_found:
# The ports are the same but something else is different,
# so we'll remove the existing one and add the new one
listeners_to_remove.append(existing_listener_found)
listeners_to_add.append(listener_as_tuple)
else:
# We already have this listener, so we're going to keep it
listeners_to_keep.append(existing_listener_found)
else:
# We didn't find an existing listener, so just add the new one
listeners_to_add.append(listener_as_tuple)
# Check for any extraneous listeners we need to remove, if desired
if self.purge_listeners:
for existing_listener in self.elb.listeners:
existing_listener_tuple = existing_listener.get_complex_tuple()
if existing_listener_tuple in listeners_to_remove:
# Already queued for removal
continue
if existing_listener_tuple in listeners_to_keep:
# Keep this one around
continue
# Since we're not already removing it and we don't need to keep
# it, let's get rid of it
listeners_to_remove.append(existing_listener_tuple)
if listeners_to_remove:
self._delete_elb_listeners(listeners_to_remove)
if listeners_to_add:
self._create_elb_listeners(listeners_to_add)
def _listener_as_tuple(self, listener):
"""Formats listener as a 4- or 5-tuples, in the order specified by the
ELB API"""
# N.B. string manipulations on protocols below (str(), upper()) is to
# ensure format matches output from ELB API
listener_list = [
listener['load_balancer_port'],
listener['instance_port'],
str(listener['protocol'].upper()),
]
# Instance protocol is not required by ELB API; it defaults to match
# load balancer protocol. We'll mimic that behavior here
if 'instance_protocol' in listener:
listener_list.append(str(listener['instance_protocol'].upper()))
else:
listener_list.append(str(listener['protocol'].upper()))
if 'ssl_certificate_id' in listener:
listener_list.append(str(listener['ssl_certificate_id']))
return tuple(listener_list)
def _enable_zones(self, zones):
try:
self.elb.enable_zones(zones)
except boto.exception.BotoServerError, e:
if "Invalid Availability Zone" in e.error_message:
self.module.fail_json(msg=e.error_message)
else:
self.module.fail_json(msg="an unknown server error occurred, please try again later")
self.changed = True
def _disable_zones(self, zones):
try:
self.elb.disable_zones(zones)
except boto.exception.BotoServerError, e:
if "Invalid Availability Zone" in e.error_message:
self.module.fail_json(msg=e.error_message)
else:
self.module.fail_json(msg="an unknown server error occurred, please try again later")
self.changed = True
def _attach_subnets(self, subnets):
self.elb_conn.attach_lb_to_subnets(self.name, subnets)
self.changed = True
def _detach_subnets(self, subnets):
self.elb_conn.detach_lb_from_subnets(self.name, subnets)
self.changed = True
def _set_subnets(self):
"""Determine which subnets need to be attached or detached on the ELB"""
if self.subnets:
if self.purge_subnets:
subnets_to_detach = list(set(self.elb.subnets) - set(self.subnets))
subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
else:
subnets_to_detach = None
subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
if subnets_to_attach:
self._attach_subnets(subnets_to_attach)
if subnets_to_detach:
self._detach_subnets(subnets_to_detach)
def _set_zones(self):
"""Determine which zones need to be enabled or disabled on the ELB"""
if self.zones:
if self.purge_zones:
zones_to_disable = list(set(self.elb.availability_zones) -
set(self.zones))
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
else:
zones_to_disable = None
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
if zones_to_enable:
self._enable_zones(zones_to_enable)
# N.B. This must come second, in case it would have removed all zones
if zones_to_disable:
self._disable_zones(zones_to_disable)
def _set_security_groups(self):
if self.security_group_ids != None and set(self.elb.security_groups) != set(self.security_group_ids):
self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids)
self.changed = True
def _set_health_check(self):
"""Set health check values on ELB as needed"""
if self.health_check:
# This just makes it easier to compare each of the attributes
# and look for changes. Keys are attributes of the current
# health_check; values are desired values of new health_check
health_check_config = {
"target": self._get_health_check_target(),
"timeout": self.health_check['response_timeout'],
"interval": self.health_check['interval'],
"unhealthy_threshold": self.health_check['unhealthy_threshold'],
"healthy_threshold": self.health_check['healthy_threshold'],
}
update_health_check = False
# The health_check attribute is *not* set on newly created
# ELBs! So we have to create our own.
if not self.elb.health_check:
self.elb.health_check = HealthCheck()
for attr, desired_value in health_check_config.iteritems():
if getattr(self.elb.health_check, attr) != desired_value:
setattr(self.elb.health_check, attr, desired_value)
update_health_check = True
if update_health_check:
self.elb.configure_health_check(self.elb.health_check)
self.changed = True
def _get_health_check_target(self):
"""Compose target string from healthcheck parameters"""
protocol = self.health_check['ping_protocol'].upper()
path = ""
if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check:
path = self.health_check['ping_path']
return "%s:%s%s" % (protocol, self.health_check['ping_port'], path)
## can be removed if https://github.com/ansible/ansible/pull/9113 is merged upstream
def is_taggable(self, object):
from boto.ec2.ec2object import TaggedEC2Object
if not object or not issubclass(object.__class__, TaggedEC2Object):
return False
return True
def do_tags(self):
"""
General function for adding tags to objects that are subclasses
of boto.ec2.ec2object.TaggedEC2Object. Currently updates
existing tags, as the API overwrites them, but does not remove
orphans.
:param module:
:param object:
:param tags:
"""
dry_run = True if self.module.check_mode else False
if (self.is_taggable(self.elb)):
tag_dict = {}
for tag in self.tags:
tag_dict[tag['key']] = tag['value']
self.elb.add_tags(tag_dict, dry_run)
else:
pass
#self.module.fail_json(msg="ELB object is not a subclass of TaggedEC2Object")
## end can be removed
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state={'required': True, 'choices': ['present', 'absent']},
name={'required': True},
listeners={'default': None, 'required': False, 'type': 'list'},
purge_listeners={'default': True, 'required': False, 'type': 'bool'},
zones={'default': None, 'required': False, 'type': 'list'},
purge_zones={'default': False, 'required': False, 'type': 'bool'},
security_group_ids={'default': None, 'required': False, 'type': 'list'},
health_check={'default': None, 'required': False, 'type': 'dict'},
subnets={'default': None, 'required': False, 'type': 'list'},
purge_subnets={'default': False, 'required': False, 'type': 'bool'},
scheme={'default': 'internet-facing', 'required': False},
tags={'default': None, 'required': False, 'type': 'list'}
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
name = module.params['name']
state = module.params['state']
listeners = module.params['listeners']
purge_listeners = module.params['purge_listeners']
zones = module.params['zones']
purge_zones = module.params['purge_zones']
security_group_ids = module.params['security_group_ids']
health_check = module.params['health_check']
subnets = module.params['subnets']
purge_subnets = module.params['purge_subnets']
scheme = module.params['scheme']
tags = module.params['tags']
if state == 'present' and not listeners:
module.fail_json(msg="At least one port is required for ELB creation")
if state == 'present' and not (zones or subnets):
module.fail_json(msg="At least one availability zone or subnet is required for ELB creation")
elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
purge_zones, security_group_ids, health_check,
subnets, purge_subnets,
scheme, tags, region=region, **aws_connect_params)
if state == 'present':
elb_man.ensure_ok()
elif state == 'absent':
elb_man.ensure_gone()
ansible_facts = {'ec2_elb': 'info'}
ec2_facts_result = dict(changed=elb_man.changed,
elb=elb_man.get_info(),
ansible_facts=ansible_facts)
module.exit_json(**ec2_facts_result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
#!/usr/bin/python
# -*- coding: utf-8 -*-
DOCUMENTATION = '''
---
module: ec2_group
version_added: "1.3"
short_description: maintain an ec2 VPC security group.
description:
- maintains ec2 security groups. This module has a dependency on python-boto >= 2.5
options:
name:
description:
- Name of the security group.
required: true
description:
description:
- Description of the security group.
required: true
vpc_id:
description:
- ID of the VPC to create the group in.
required: false
rules:
description:
- List of firewall inbound rules to enforce in this group (see example).
required: false
rules_egress:
description:
- List of firewall outbound rules to enforce in this group (see example).
required: false
version_added: "1.6"
region:
description:
- the EC2 region to use
required: false
default: null
aliases: []
state:
version_added: "1.4"
description:
- create or delete security group
required: false
default: 'present'
aliases: []
extends_documentation_fragment: aws
notes:
- If a rule declares a group_name and that group doesn't exist, it will be
automatically created. In that case, group_desc should be provided as well.
The module will refuse to create a depended-on group without a description.
'''
EXAMPLES = '''
- name: example ec2 group
local_action:
module: ec2_group
name: example
description: an example EC2 group
vpc_id: 12345
region: eu-west-1
aws_secret_key: SECRET
aws_access_key: ACCESS
rules:
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
- proto: tcp
from_port: 22
to_port: 22
cidr_ip: 10.0.0.0/8
- proto: udp
from_port: 10050
to_port: 10050
cidr_ip: 10.0.0.0/8
- proto: udp
from_port: 10051
to_port: 10051
group_id: sg-12345678
- proto: all
# the containing group name may be specified here
group_name: example
rules_egress:
- proto: tcp
from_port: 80
to_port: 80
group_name: example-other
# description to use if example-other needs to be created
group_desc: other example EC2 group
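# A minimal sketch of removing the group once nothing references it; name and
# description are still required by the argument spec.
- name: remove example ec2 group
  local_action:
    module: ec2_group
    name: example
    description: an example EC2 group
    region: eu-west-1
    state: absent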
'''
import sys
import time
try:
import boto.ec2
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def addRulesToLookup(rules, prefix, dict):
for rule in rules:
for grant in rule.grants:
dict["%s-%s-%s-%s-%s-%s" % (prefix, rule.ip_protocol, rule.from_port, rule.to_port,
grant.group_id, grant.cidr_ip)] = rule
def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id):
"""
Returns tuple of (group_id, ip) after validating rule params.
rule: Dict describing a rule.
name: Name of the security group being managed.
groups: Dict of all available security groups.
AWS accepts an ip range or a security group as target of a rule. This
function validates the rule specification and returns either a non-None
group_id or a non-None ip range.
"""
group_id = None
group_name = None
ip = None
target_group_created = False
if 'group_id' in rule and 'cidr_ip' in rule:
module.fail_json(msg="Specify group_id OR cidr_ip, not both")
elif 'group_name' in rule and 'cidr_ip' in rule:
module.fail_json(msg="Specify group_name OR cidr_ip, not both")
elif 'group_id' in rule and 'group_name' in rule:
module.fail_json(msg="Specify group_id OR group_name, not both")
elif 'group_id' in rule:
group_id = rule['group_id']
elif 'group_name' in rule:
group_name = rule['group_name']
if group_name in groups:
group_id = groups[group_name].id
elif group_name == name:
group_id = group.id
groups[group_id] = group
groups[group_name] = group
else:
if not rule.get('group_desc', '').strip():
module.fail_json(msg="group %s will be automatically created by rule %s and no description was provided" % (group_name, rule))
if not module.check_mode:
auto_group = ec2.create_security_group(group_name, rule['group_desc'], vpc_id=vpc_id)
group_id = auto_group.id
groups[group_id] = auto_group
groups[group_name] = auto_group
target_group_created = True
elif 'cidr_ip' in rule:
ip = rule['cidr_ip']
return group_id, ip, target_group_created
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
description=dict(required=True),
vpc_id=dict(),
rules=dict(),
rules_egress=dict(),
state = dict(default='present', choices=['present', 'absent']),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
name = module.params['name']
description = module.params['description']
vpc_id = module.params['vpc_id']
rules = module.params['rules']
rules_egress = module.params['rules_egress']
state = module.params.get('state')
changed = False
ec2 = ec2_connect(module)
# find the group if present
group = None
groups = {}
for curGroup in ec2.get_all_security_groups():
groups[curGroup.id] = curGroup
groups[curGroup.name] = curGroup
if curGroup.name == name and (vpc_id is None or curGroup.vpc_id == vpc_id):
group = curGroup
# Ensure requested group is absent
if state == 'absent':
if group:
'''found a match, delete it'''
try:
group.delete()
except Exception, e:
module.fail_json(msg="Unable to delete security group '%s' - %s" % (group, e))
else:
group = None
changed = True
else:
'''no match found, no changes required'''
# Ensure requested group is present
elif state == 'present':
if group:
'''existing group found'''
# check the group parameters are correct
group_in_use = False
rs = ec2.get_all_instances()
for r in rs:
for i in r.instances:
group_in_use |= reduce(lambda x, y: x | (y.name == name), i.groups, False)
if group.description != description:
if group_in_use:
module.fail_json(msg="Group description does not match, but it is in use so cannot be changed.")
# if the group doesn't exist, create it now
else:
'''no match found, create it'''
if not module.check_mode:
group = ec2.create_security_group(name, description, vpc_id=vpc_id)
# When a group is created, an egress_rule ALLOW ALL
# to 0.0.0.0/0 is added automatically but it's not
# reflected in the object returned by the AWS API
# call. We re-read the group for getting an updated object
# amazon sometimes takes a couple seconds to update the security group so wait till it exists
while len(ec2.get_all_security_groups(filters={ 'group_id': group.id, })) == 0:
time.sleep(0.1)
group = ec2.get_all_security_groups(group_ids=(group.id,))[0]
changed = True
else:
module.fail_json(msg="Unsupported state requested: %s" % state)
# create a lookup for all existing rules on the group
if group:
# Manage ingress rules
groupRules = {}
addRulesToLookup(group.rules, 'in', groupRules)
# Now, go through all provided rules and ensure they are there.
if rules:
for rule in rules:
group_id, ip, target_group_created = get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id)
if target_group_created:
changed = True
if rule['proto'] in ('all', '-1', -1):
rule['proto'] = -1
rule['from_port'] = None
rule['to_port'] = None
# If rule already exists, don't later delete it
ruleId = "%s-%s-%s-%s-%s-%s" % ('in', rule['proto'], rule['from_port'], rule['to_port'], group_id, ip)
if ruleId in groupRules:
del groupRules[ruleId]
# Otherwise, add new rule
else:
grantGroup = None
if group_id:
grantGroup = groups[group_id]
if not module.check_mode:
group.authorize(rule['proto'], rule['from_port'], rule['to_port'], ip, grantGroup)
changed = True
# Finally, remove anything left in the groupRules -- these will be defunct rules
for rule in groupRules.itervalues():
for grant in rule.grants:
grantGroup = None
if grant.group_id:
grantGroup = groups[grant.group_id]
if not module.check_mode:
group.revoke(rule.ip_protocol, rule.from_port, rule.to_port, grant.cidr_ip, grantGroup)
changed = True
# Manage egress rules
groupRules = {}
addRulesToLookup(group.rules_egress, 'out', groupRules)
# Now, go through all provided rules and ensure they are there.
if rules_egress:
for rule in rules_egress:
group_id, ip, target_group_created = get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id)
if target_group_created:
changed = True
if rule['proto'] in ('all', '-1', -1):
rule['proto'] = -1
rule['from_port'] = None
rule['to_port'] = None
# If rule already exists, don't later delete it
ruleId = "%s-%s-%s-%s-%s-%s" % ('out', rule['proto'], rule['from_port'], rule['to_port'], group_id, ip)
if ruleId in groupRules:
del groupRules[ruleId]
# Otherwise, add new rule
else:
grantGroup = None
if group_id:
grantGroup = groups[group_id].id
if not module.check_mode:
ec2.authorize_security_group_egress(
group_id=group.id,
ip_protocol=rule['proto'],
from_port=rule['from_port'],
to_port=rule['to_port'],
src_group_id=grantGroup,
cidr_ip=ip)
changed = True
elif vpc_id and not module.check_mode:
# when using a vpc, but no egress rules are specified,
# we add in a default allow all out rule, which was the
# default behavior before egress rules were added
default_egress_rule = 'out--1-None-None-None-0.0.0.0/0'
if default_egress_rule not in groupRules:
ec2.authorize_security_group_egress(
group_id=group.id,
ip_protocol=-1,
from_port=None,
to_port=None,
src_group_id=None,
cidr_ip='0.0.0.0/0'
)
changed = True
else:
# make sure the default egress rule is not removed
del groupRules[default_egress_rule]
# Finally, remove anything left in the groupRules -- these will be defunct rules
for rule in groupRules.itervalues():
for grant in rule.grants:
grantGroup = None
if grant.group_id:
grantGroup = groups[grant.group_id].id
if not module.check_mode:
ec2.revoke_security_group_egress(
group_id=group.id,
ip_protocol=rule.ip_protocol,
from_port=rule.from_port,
to_port=rule.to_port,
src_group_id=grantGroup,
cidr_ip=grant.cidr_ip)
changed = True
if group:
module.exit_json(changed=changed, group_id=group.id)
else:
module.exit_json(changed=changed, group_id=None)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
DOCUMENTATION = '''
---
module: ec2_group
version_added: "1.3"
short_description: maintain an ec2 VPC security group.
description:
- maintains ec2 security groups. This module has a dependency on python-boto >= 2.5
options:
name:
description:
- Name of the security group.
required: true
description:
description:
- Description of the security group.
required: true
vpc_id:
description:
- ID of the VPC to create the group in.
required: false
rules:
description:
- List of firewall inbound rules to enforce in this group (see example).
required: false
rules_egress:
description:
- List of firewall outbound rules to enforce in this group (see example).
required: false
version_added: "1.6"
tags:
description:
- List of tags to apply to this security group
required: false
version_added: "1.8"
region:
description:
- the EC2 region to use
required: false
default: null
aliases: []
state:
version_added: "1.4"
description:
- create or delete security group
required: false
default: 'present'
aliases: []
extends_documentation_fragment: aws
notes:
- If a rule declares a group_name and that group doesn't exist, it will be
automatically created. In that case, group_desc should be provided as well.
The module will refuse to create a depended-on group without a description.
'''
EXAMPLES = '''
- name: example ec2 group
local_action:
module: ec2_group
name: example
description: an example EC2 group
vpc_id: 12345
region: eu-west-1
aws_secret_key: SECRET
aws_access_key: ACCESS
rules:
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
- proto: tcp
from_port: 22
to_port: 22
cidr_ip: 10.0.0.0/8
- proto: udp
from_port: 10050
to_port: 10050
cidr_ip: 10.0.0.0/8
- proto: udp
from_port: 10051
to_port: 10051
group_id: sg-12345678
- proto: all
# the containing group name may be specified here
group_name: example
rules_egress:
- proto: tcp
from_port: 80
to_port: 80
group_name: example-other
# description to use if example-other needs to be created
group_desc: other example EC2 group
tags:
- key: environment
value: production
'''
import sys
import time
try:
import boto.ec2
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def addRulesToLookup(rules, prefix, dict):
for rule in rules:
for grant in rule.grants:
dict["%s-%s-%s-%s-%s-%s" % (prefix, rule.ip_protocol, rule.from_port, rule.to_port,
grant.group_id, grant.cidr_ip)] = rule
def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id):
"""
Returns tuple of (group_id, ip) after validating rule params.
rule: Dict describing a rule.
name: Name of the security group being managed.
groups: Dict of all available security groups.
AWS accepts an ip range or a security group as target of a rule. This
function validates the rule specification and returns either a non-None
group_id or a non-None ip range.
"""
group_id = None
group_name = None
ip = None
target_group_created = False
if 'group_id' in rule and 'cidr_ip' in rule:
module.fail_json(msg="Specify group_id OR cidr_ip, not both")
elif 'group_name' in rule and 'cidr_ip' in rule:
module.fail_json(msg="Specify group_name OR cidr_ip, not both")
elif 'group_id' in rule and 'group_name' in rule:
module.fail_json(msg="Specify group_id OR group_name, not both")
elif 'group_id' in rule:
group_id = rule['group_id']
elif 'group_name' in rule:
group_name = rule['group_name']
if group_name in groups:
group_id = groups[group_name].id
elif group_name == name:
group_id = group.id
groups[group_id] = group
groups[group_name] = group
else:
if not rule.get('group_desc', '').strip():
module.fail_json(msg="group %s will be automatically created by rule %s and no description was provided" % (group_name, rule))
if not module.check_mode:
auto_group = ec2.create_security_group(group_name, rule['group_desc'], vpc_id=vpc_id)
group_id = auto_group.id
groups[group_id] = auto_group
groups[group_name] = auto_group
target_group_created = True
elif 'cidr_ip' in rule:
ip = rule['cidr_ip']
return group_id, ip, target_group_created
## can be removed if https://github.com/ansible/ansible/pull/9113 is merged upstream
def is_taggable(object):
from boto.ec2.ec2object import TaggedEC2Object
if not object or not issubclass(object.__class__, TaggedEC2Object):
return False
return True
def do_tags(module, object, tags):
"""
General function for adding tags to objects that are subclasses
of boto.ec2.ec2object.TaggedEC2Object. Currently updates
existing tags, as the API overwrites them, but does not remove
orphans.
:param module:
:param object:
:param tags:
"""
dry_run = True if module.check_mode else False
if (is_taggable(object)):
tag_dict = {}
for tag in tags:
tag_dict[tag['key']] = tag['value']
object.add_tags(tag_dict, dry_run)
else:
module.fail_json(msg="Security group object is not a subclass of TaggedEC2Object")
## end can be removed
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
description=dict(required=True),
vpc_id=dict(),
rules=dict(),
rules_egress=dict(),
tags=dict(type='list', default=[]),
state = dict(default='present', choices=['present', 'absent']),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
name = module.params['name']
description = module.params['description']
vpc_id = module.params['vpc_id']
rules = module.params['rules']
rules_egress = module.params['rules_egress']
tags = module.params['tags']
state = module.params.get('state')
changed = False
ec2 = ec2_connect(module)
# find the group if present
group = None
groups = {}
for curGroup in ec2.get_all_security_groups():
groups[curGroup.id] = curGroup
groups[curGroup.name] = curGroup
if curGroup.name == name and (vpc_id is None or curGroup.vpc_id == vpc_id):
group = curGroup
# Ensure requested group is absent
if state == 'absent':
if group:
'''found a match, delete it'''
try:
group.delete()
except Exception, e:
module.fail_json(msg="Unable to delete security group '%s' - %s" % (group, e))
else:
group = None
changed = True
else:
'''no match found, no changes required'''
# Ensure requested group is present
elif state == 'present':
if group:
'''existing group found'''
# check the group parameters are correct
group_in_use = False
rs = ec2.get_all_instances()
for r in rs:
for i in r.instances:
group_in_use |= reduce(lambda x, y: x | (y.name == name), i.groups, False)
if group.description != description:
if group_in_use:
module.fail_json(msg="Group description does not match, but it is in use so cannot be changed.")
# if the group doesn't exist, create it now
else:
'''no match found, create it'''
if not module.check_mode:
group = ec2.create_security_group(name, description, vpc_id=vpc_id)
# When a group is created, an egress_rule ALLOW ALL
# to 0.0.0.0/0 is added automatically but it's not
# reflected in the object returned by the AWS API
# call. We re-read the group for getting an updated object
# amazon sometimes takes a couple seconds to update the security group so wait till it exists
while len(ec2.get_all_security_groups(filters={ 'group_id': group.id, })) == 0:
time.sleep(0.1)
group = ec2.get_all_security_groups(group_ids=(group.id,))[0]
changed = True
# tag the security group using the do_tags helper defined above
do_tags(module, group, tags)
else:
module.fail_json(msg="Unsupported state requested: %s" % state)
# create a lookup for all existing rules on the group
if group:
# Manage ingress rules
groupRules = {}
addRulesToLookup(group.rules, 'in', groupRules)
# Now, go through all provided rules and ensure they are there.
if rules:
for rule in rules:
group_id, ip, target_group_created = get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id)
if target_group_created:
changed = True
if rule['proto'] == 'all':
rule['proto'] = -1
rule['from_port'] = None
rule['to_port'] = None
# If rule already exists, don't later delete it
ruleId = "%s-%s-%s-%s-%s-%s" % ('in', rule['proto'], rule['from_port'], rule['to_port'], group_id, ip)
if ruleId in groupRules:
del groupRules[ruleId]
# Otherwise, add new rule
else:
grantGroup = None
if group_id:
grantGroup = groups[group_id]
if not module.check_mode:
group.authorize(rule['proto'], rule['from_port'], rule['to_port'], ip, grantGroup)
changed = True
# Finally, remove anything left in the groupRules -- these will be defunct rules
for rule in groupRules.itervalues():
for grant in rule.grants:
grantGroup = None
if grant.group_id:
grantGroup = groups[grant.group_id]
if not module.check_mode:
group.revoke(rule.ip_protocol, rule.from_port, rule.to_port, grant.cidr_ip, grantGroup)
changed = True
# Manage egress rules
groupRules = {}
addRulesToLookup(group.rules_egress, 'out', groupRules)
# Now, go through all provided rules and ensure they are there.
if rules_egress:
for rule in rules_egress:
group_id, ip, target_group_created = get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id)
if target_group_created:
changed = True
if rule['proto'] == 'all':
rule['proto'] = -1
rule['from_port'] = None
rule['to_port'] = None
# If rule already exists, don't later delete it
ruleId = "%s-%s-%s-%s-%s-%s" % ('out', rule['proto'], rule['from_port'], rule['to_port'], group_id, ip)
if ruleId in groupRules:
del groupRules[ruleId]
# Otherwise, add new rule
else:
grantGroup = None
if group_id:
grantGroup = groups[group_id].id
if not module.check_mode:
ec2.authorize_security_group_egress(
group_id=group.id,
ip_protocol=rule['proto'],
from_port=rule['from_port'],
to_port=rule['to_port'],
src_group_id=grantGroup,
cidr_ip=ip)
changed = True
elif vpc_id and not module.check_mode:
# when using a vpc, but no egress rules are specified,
# we add in a default allow all out rule, which was the
# default behavior before egress rules were added
default_egress_rule = 'out--1-None-None-None-0.0.0.0/0'
if default_egress_rule not in groupRules:
ec2.authorize_security_group_egress(
group_id=group.id,
ip_protocol=-1,
from_port=None,
to_port=None,
src_group_id=None,
cidr_ip='0.0.0.0/0'
)
changed = True
else:
# make sure the default egress rule is not removed
del groupRules[default_egress_rule]
# Finally, remove anything left in the groupRules -- these will be defunct rules
for rule in groupRules.itervalues():
for grant in rule.grants:
grantGroup = None
if grant.group_id:
grantGroup = groups[grant.group_id].id
if not module.check_mode:
ec2.revoke_security_group_egress(
group_id=group.id,
ip_protocol=rule.ip_protocol,
from_port=rule.from_port,
to_port=rule.to_port,
src_group_id=grantGroup,
cidr_ip=grant.cidr_ip)
changed = True
if group:
module.exit_json(changed=changed, group_id=group.id)
else:
module.exit_json(changed=changed, group_id=None)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: ec2_lc
short_description: Create or delete AWS Autoscaling Launch Configurations
description:
- Can create or delete AWS Autoscaling Launch Configurations
- Works with the ec2_asg module to manage Autoscaling Groups
version_added: "1.6"
author: Gareth Rushgrove
options:
state:
description:
- Create or delete the launch configuration
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for configuration
required: true
instance_type:
description:
- instance type to use for the instance
required: true
default: null
aliases: []
image_id:
description:
- The AMI unique identifier to be used for the group
required: false
key_name:
description:
- The SSH key name to be used for access to managed instances
required: false
security_groups:
description:
- A list of security groups to apply to the instances
required: false
instance_profile_name:
description:
- The name or ARN of the instance profile associated with the IAM role for the instance
required: false
default: null
version_added: "1.7"
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
volumes:
description:
- a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume.
required: false
default: null
aliases: []
user_data:
description:
- opaque blob of data which is made available to the ec2 instance
required: false
default: null
aliases: []
kernel_id:
description:
- Kernel id for the EC2 instance
required: false
default: null
aliases: []
spot_price:
description:
- The spot price you are bidding. Only applies for an autoscaling group with spot instances.
required: false
default: null
instance_monitoring:
description:
- whether instances in group are launched with detailed monitoring.
required: false
default: false
aliases: []
assign_public_ip:
description:
- Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. Specifies whether to assign a public IP address to each instance launched in an Amazon VPC.
required: false
default: false
aliases: []
version_added: "1.8"
ramdisk_id:
description:
- A RAM disk id for the instances.
required: false
default: null
aliases: []
version_added: "1.8"
ebs_optimized:
description:
- Specifies whether the instance is optimized for EBS I/O (true) or not (false).
required: false
default: false
aliases: []
version_added: "1.8"
extends_documentation_fragment: aws
"""
EXAMPLES = '''
- ec2_lc:
name: special
image_id: ami-XXX
key_name: default
security_groups: 'group,group2'
instance_type: t1.micro
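# A minimal sketch combining the volumes, spot_price and assign_public_ip
# options documented above; the name and AMI id are placeholders.
- ec2_lc:
    name: special-spot
    image_id: ami-XXX
    instance_type: t1.micro
    spot_price: 0.05
    assign_public_ip: yes
    instance_monitoring: yes
    volumes:
      - device_name: /dev/sda1
        volume_size: 20
        device_type: standard
        delete_on_termination: true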
'''
import sys
import time
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
try:
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
import boto.ec2.autoscale
from boto.ec2.autoscale import LaunchConfiguration
from boto.exception import BotoServerError
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def create_block_device(module, volume):
# Not aware of a way to determine this programmatically
# http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/
MAX_IOPS_TO_SIZE_RATIO = 30
if 'snapshot' not in volume and 'ephemeral' not in volume:
if 'volume_size' not in volume:
module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume')
if 'snapshot' in volume:
if 'device_type' in volume and volume.get('device_type') == 'io1' and 'iops' not in volume:
module.fail_json(msg='io1 volumes must have an iops value set')
if 'ephemeral' in volume:
if 'snapshot' in volume:
module.fail_json(msg='Cannot set both ephemeral and snapshot')
return BlockDeviceType(snapshot_id=volume.get('snapshot'),
ephemeral_name=volume.get('ephemeral'),
size=volume.get('volume_size'),
volume_type=volume.get('device_type'),
delete_on_termination=volume.get('delete_on_termination', False),
iops=volume.get('iops'))
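# For reference, a single entry from the 'volumes' list consumed above might look like
# (illustrative values only):
#   {'device_name': '/dev/sdf', 'snapshot': 'snap-XXX', 'volume_size': 50,
#    'device_type': 'io1', 'iops': 500, 'delete_on_termination': True}
# An entry whose volume_size is explicitly set to 0 is skipped by create_launch_config below.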
def create_launch_config(connection, module):
name = module.params.get('name')
image_id = module.params.get('image_id')
key_name = module.params.get('key_name')
security_groups = module.params['security_groups']
user_data = module.params.get('user_data')
volumes = module.params['volumes']
instance_type = module.params.get('instance_type')
spot_price = module.params.get('spot_price')
instance_monitoring = module.params.get('instance_monitoring')
assign_public_ip = module.params.get('assign_public_ip')
kernel_id = module.params.get('kernel_id')
ramdisk_id = module.params.get('ramdisk_id')
instance_profile_name = module.params.get('instance_profile_name')
ebs_optimized = module.params.get('ebs_optimized')
bdm = BlockDeviceMapping()
if volumes:
for volume in volumes:
if 'device_name' not in volume:
module.fail_json(msg='Device name must be set for volume')
# Minimum volume size is 1GB. We'll use volume size explicitly set to 0
# to be a signal not to create this volume
if 'volume_size' not in volume or int(volume['volume_size']) > 0:
bdm[volume['device_name']] = create_block_device(module, volume)
lc = LaunchConfiguration(
name=name,
image_id=image_id,
key_name=key_name,
security_groups=security_groups,
user_data=user_data,
block_device_mappings=[bdm],
instance_type=instance_type,
kernel_id=kernel_id,
spot_price=spot_price,
instance_monitoring=instance_monitoring,
associate_public_ip_address = assign_public_ip,
ramdisk_id=ramdisk_id,
instance_profile_name=instance_profile_name,
ebs_optimized=ebs_optimized,
)
if instance_profile_name:
lc.instance_profile_name = instance_profile_name
launch_configs = connection.get_all_launch_configurations(names=[name])
changed = False
if not launch_configs:
try:
connection.create_launch_configuration(lc)
launch_configs = connection.get_all_launch_configurations(names=[name])
changed = True
except BotoServerError, e:
module.fail_json(msg=str(e))
result = launch_configs[0]
module.exit_json(changed=changed, name=result.name, created_time=str(result.created_time),
image_id=result.image_id, arn=result.launch_configuration_arn,
security_groups=result.security_groups, instance_type=instance_type)
def delete_launch_config(connection, module):
name = module.params.get('name')
launch_configs = connection.get_all_launch_configurations(names=[name])
if launch_configs:
launch_configs[0].delete()
module.exit_json(changed=True)
else:
module.exit_json(changed=False)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
image_id=dict(type='str'),
key_name=dict(type='str'),
security_groups=dict(type='list'),
user_data=dict(type='str'),
kernel_id=dict(type='str'),
volumes=dict(type='list'),
instance_type=dict(type='str'),
instance_profile_name=dict(type='str'),
state=dict(default='present', choices=['present', 'absent']),
spot_price=dict(type='float'),
ramdisk_id=dict(type='str'),
ebs_optimized=dict(default=False, type='bool'),
instance_monitoring=dict(default=False, type='bool'),
assign_public_ip=dict(default=False, type='bool')
)
)
module = AnsibleModule(argument_spec=argument_spec)
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
state = module.params.get('state')
if state == 'present':
create_launch_config(connection, module)
elif state == 'absent':
delete_launch_config(connection, module)
main()
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
module: ec2_metric_alarm
short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'"
description:
- Can create or delete AWS metric alarms
- Metrics you wish to alarm on must already exist
version_added: "1.6"
author: Zacharie Eakin
options:
state:
description:
- register or deregister the alarm
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for the alarm
required: true
metric:
description:
- Name of the monitored metric (e.g. CPUUtilization)
- Metric must already exist
required: false
namespace:
description:
- Name of the metric's namespace (e.g. AWS/EC2), which determines the category it appears under in CloudWatch
required: false
choices: ['AWS/AutoScaling','AWS/Billing','AWS/DynamoDB','AWS/ElastiCache','AWS/EBS','AWS/EC2','AWS/ELB','AWS/ElasticMapReduce','AWS/OpsWorks','AWS/Redshift','AWS/RDS','AWS/Route53','AWS/SNS','AWS/SQS','AWS/StorageGateway']
statistic:
description:
- Operation applied to the metric
- Works in conjunction with period and evaluation_periods to determine the comparison value
required: false
choices: ['SampleCount','Average','Sum','Minimum','Maximum']
comparison:
description:
- Determines how the metric value is compared against the threshold
required: false
choices: ['<=','<','>','>=']
threshold:
description:
- Sets the min/max bound for triggering the alarm
required: false
period:
description:
- The time (in seconds) between metric evaluations
required: false
evaluation_periods:
description:
- The number of consecutive periods over which the metric is evaluated before the alarm state is determined
required: false
unit:
description:
- The threshold's unit of measurement
required: false
choices: ['Seconds','Microseconds','Milliseconds','Bytes','Kilobytes','Megabytes','Gigabytes','Terabytes','Bits','Kilobits','Megabits','Gigabits','Terabits','Percent','Count','Bytes/Second','Kilobytes/Second','Megabytes/Second','Gigabytes/Second','Terabytes/Second','Bits/Second','Kilobits/Second','Megabits/Second','Gigabits/Second','Terabits/Second','Count/Second','None']
description:
description:
- A longer description of the alarm
required: false
dimensions:
description:
- The dimensions (e.g. InstanceId) the alarm applies to
required: false
alarm_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'alarm' status
required: false
insufficient_data_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'insufficient_data' status
required: false
ok_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'ok' status
required: false
extends_documentation_fragment: aws
"""
EXAMPLES = '''
- name: create alarm
ec2_metric_alarm:
state: present
region: ap-southeast-2
name: "cpu-low"
metric: "CPUUtilization"
namespace: "AWS/EC2"
statistic: Average
comparison: "<="
threshold: 5.0
period: 300
evaluation_periods: 3
unit: "Percent"
description: "This will alarm when a bamboo slave's cpu usage average is lower than 5% for 15 minutes "
dimensions: {'InstanceId':'i-XXX'}
alarm_actions: ["action1","action2"]
'''
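# In the example above the alarm window is period * evaluation_periods = 300s * 3 = 15 minutes,
# matching the description. A matching teardown task (a sketch; it simply reuses the name and
# region from the example above) would be:
#
# - name: remove alarm
#   ec2_metric_alarm:
#     state: absent
#     region: ap-southeast-2
#     name: "cpu-low"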
import sys
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
try:
import boto.ec2.cloudwatch
from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm
from boto.exception import BotoServerError
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def create_metric_alarm(connection, module):
name = module.params.get('name')
metric = module.params.get('metric')
namespace = module.params.get('namespace')
statistic = module.params.get('statistic')
comparison = module.params.get('comparison')
threshold = module.params.get('threshold')
period = module.params.get('period')
evaluation_periods = module.params.get('evaluation_periods')
unit = module.params.get('unit')
description = module.params.get('description')
dimensions = module.params.get('dimensions')
alarm_actions = module.params.get('alarm_actions')
insufficient_data_actions = module.params.get('insufficient_data_actions')
ok_actions = module.params.get('ok_actions')
alarms = connection.describe_alarms(alarm_names=[name])
if not alarms:
alm = MetricAlarm(
name=name,
metric=metric,
namespace=namespace,
statistic=statistic,
comparison=comparison,
threshold=threshold,
period=period,
evaluation_periods=evaluation_periods,
unit=unit,
description=description,
dimensions=dimensions,
alarm_actions=alarm_actions,
insufficient_data_actions=insufficient_data_actions,
ok_actions=ok_actions
)
try:
connection.create_alarm(alm)
changed = True
alarms = connection.describe_alarms(alarm_names=[name])
except BotoServerError, e:
module.fail_json(msg=str(e))
else:
alarm = alarms[0]
changed = False
for attr in ('comparison','metric','namespace','statistic','threshold','period','evaluation_periods','unit','description'):
if getattr(alarm, attr) != module.params.get(attr):
changed = True
setattr(alarm, attr, module.params.get(attr))
# work around a boto quirk: when modifying an existing alarm the comparison operator must be
# re-submitted as the full CloudWatch name rather than the symbolic form ('<=', '<', '>', '>=')
# returned by describe_alarms
comparison = alarm.comparison
comparisons = {'<=' : 'LessThanOrEqualToThreshold', '<' : 'LessThanThreshold', '>=' : 'GreaterThanOrEqualToThreshold', '>' : 'GreaterThanThreshold'}
alarm.comparison = comparisons[comparison]
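# e.g. a stored comparison of '<=' is re-submitted as 'LessThanOrEqualToThreshold'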
dim1 = module.params.get('dimensions')
dim2 = alarm.dimensions
for keys in dim1:
if not isinstance(dim1[keys], list):
dim1[keys] = [dim1[keys]]
if dim1[keys] != dim2.get(keys):
changed = True
setattr(alarm, 'dimensions', dim1)
for attr in ('alarm_actions','insufficient_data_actions','ok_actions'):
action = module.params.get(attr) or []
if getattr(alarm, attr) != action:
changed = True
setattr(alarm, attr, action)
try:
if changed:
connection.create_alarm(alarm)
except BotoServerError, e:
module.fail_json(msg=str(e))
result = alarms[0]
module.exit_json(changed=changed, name=result.name,
actions_enabled=result.actions_enabled,
alarm_actions=result.alarm_actions,
alarm_arn=result.alarm_arn,
comparison=result.comparison,
description=result.description,
dimensions=result.dimensions,
evaluation_periods=result.evaluation_periods,
insufficient_data_actions=result.insufficient_data_actions,
last_updated=result.last_updated,
metric=result.metric,
namespace=result.namespace,
ok_actions=result.ok_actions,
period=result.period,
state_reason=result.state_reason,
state_value=result.state_value,
statistic=result.statistic,
threshold=result.threshold,
unit=result.unit)
def delete_metric_alarm(connection, module):
name = module.params.get('name')
alarms = connection.describe_alarms(alarm_names=[name])
if alarms:
try:
connection.delete_alarms([name])
module.exit_json(changed=True)
except BotoServerError, e:
module.fail_json(msg=str(e))
else:
module.exit_json(changed=False)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
metric=dict(type='str'),
namespace=dict(type='str', choices=['AWS/AutoScaling', 'AWS/Billing', 'AWS/DynamoDB', 'AWS/ElastiCache', 'AWS/EBS', 'AWS/EC2',
'AWS/ELB', 'AWS/ElasticMapReduce', 'AWS/OpsWorks', 'AWS/Redshift', 'AWS/RDS', 'AWS/Route53', 'AWS/SNS', 'AWS/SQS', 'AWS/StorageGateway']),
statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']),
comparison=dict(type='str', choices=['<=', '<', '>', '>=']),
threshold=dict(type='float'),
period=dict(type='int'),
unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', 'Terabits/Second', 'Count/Second', 'None']),
evaluation_periods=dict(type='int'),
description=dict(type='str'),
dimensions=dict(type='dict'),
alarm_actions=dict(type='list'),
insufficient_data_actions=dict(type='list'),
ok_actions=dict(type='list'),
state=dict(default='present', choices=['present', 'absent']),
region=dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS),
)
)
module = AnsibleModule(argument_spec=argument_spec)
state = module.params.get('state')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
if state == 'present':
create_metric_alarm(connection, module)
elif state == 'absent':
delete_metric_alarm(connection, module)
main()
#!/usr/bin/python
DOCUMENTATION = """
module: ec2_scaling_policy
short_description: Create or delete AWS scaling policies for Autoscaling groups
description:
- Can create or delete scaling policies for autoscaling groups
- Referenced autoscaling groups must already exist
version_added: "1.6"
author: Zacharie Eakin
options:
state:
description:
- register or deregister the policy
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for the scaling policy
required: true
asg_name:
description:
- Name of the associated autoscaling group
required: true
adjustment_type:
description:
- The type of change in capacity of the autoscaling group
required: false
choices: ['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity']
scaling_adjustment:
description:
- The amount by which the autoscaling group is adjusted by the policy
required: false
min_adjustment_step:
description:
- Minimum amount of adjustment when the policy is triggered (only used with a PercentChangeInCapacity adjustment_type)
required: false
cooldown:
description:
- The minimum period of time (in seconds) between successive autoscaling actions
required: false
extends_documentation_fragment: aws
"""
EXAMPLES = '''
- ec2_scaling_policy:
state: present
region: US-XXX
name: "scaledown-policy"
adjustment_type: "ChangeInCapacity"
asg_name: "slave-pool"
scaling_adjustment: -1
min_adjustment_step: 1
cooldown: 300
'''
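# A scale-up counterpart (again only a sketch; the names mirror the placeholder example above)
# differs mainly in the sign of the adjustment, and omits min_adjustment_step since that setting
# is only consulted for PercentChangeInCapacity policies:
#
# - ec2_scaling_policy:
#     state: present
#     region: US-XXX
#     name: "scaleup-policy"
#     adjustment_type: "ChangeInCapacity"
#     asg_name: "slave-pool"
#     scaling_adjustment: 1
#     cooldown: 300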
import sys
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
try:
import boto.ec2.autoscale
from boto.ec2.autoscale import ScalingPolicy
from boto.exception import BotoServerError
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def create_scaling_policy(connection, module):
sp_name = module.params.get('name')
adjustment_type = module.params.get('adjustment_type')
asg_name = module.params.get('asg_name')
scaling_adjustment = module.params.get('scaling_adjustment')
min_adjustment_step = module.params.get('min_adjustment_step')
cooldown = module.params.get('cooldown')
scalingPolicies = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])
if not scalingPolicies:
sp = ScalingPolicy(
name=sp_name,
adjustment_type=adjustment_type,
as_name=asg_name,
scaling_adjustment=scaling_adjustment,
min_adjustment_step=min_adjustment_step,
cooldown=cooldown)
try:
connection.create_scaling_policy(sp)
policy = connection.get_all_policies(policy_names=[sp_name])[0]
module.exit_json(changed=True, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment, cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
except BotoServerError, e:
module.fail_json(msg=str(e))
else:
policy = scalingPolicies[0]
changed = False
# min_adjustment_step attribute is only relevant if the adjustment_type
# is set to percentage change in capacity, so it is a special case
if getattr(policy, 'adjustment_type') == 'PercentChangeInCapacity':
if getattr(policy, 'min_adjustment_step') != module.params.get('min_adjustment_step'):
changed = True
# set the min adjustment step in case the user decided to change their
# adjustment type to percentage
setattr(policy, 'min_adjustment_step', module.params.get('min_adjustment_step'))
# check the remaining attributes
for attr in ('adjustment_type','scaling_adjustment','cooldown'):
if getattr(policy, attr) != module.params.get(attr):
changed = True
setattr(policy, attr, module.params.get(attr))
try:
if changed:
connection.create_scaling_policy(policy)
policy = connection.get_all_policies(policy_names=[sp_name])[0]
module.exit_json(changed=changed, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment, cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
except BotoServerError, e:
module.fail_json(msg=str(e))
def delete_scaling_policy(connection, module):
sp_name = module.params.get('name')
asg_name = module.params.get('asg_name')
scalingPolicies = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])
if scalingPolicies:
try:
connection.delete_policy(sp_name, asg_name)
module.exit_json(changed=True)
except BotoServerError, e:
module.fail_json(msg=str(e))
else:
module.exit_json(changed=False)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name = dict(required=True, type='str'),
adjustment_type = dict(type='str', choices=['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity']),
asg_name = dict(required=True, type='str'),
scaling_adjustment = dict(type='int'),
min_adjustment_step = dict(type='int'),
cooldown = dict(type='int'),
region = dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS),
state=dict(default='present', choices=['present', 'absent']),
)
)
module = AnsibleModule(argument_spec=argument_spec)
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
state = module.params.get('state')
try:
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
if not connection:
module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region))
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg = str(e))
if state == 'present':
create_scaling_policy(connection, module)
elif state == 'absent':
delete_scaling_policy(connection, module)
main()