Commit 3038bb94 by Feanil Patel

Merge pull request #420 from edx/feanil/vpc_deploys

Feanil/vpc deploys
parents bdf60fd6 9069160c
@@ -2709,14 +2709,12 @@
       "SecurityGroupEgress":[
         {
           "IpProtocol":"tcp",
-          "FromPort":"80",
-          "ToPort":"80",
-          "CidrIp":"0.0.0.0/0"
-        },
-        {
-          "IpProtocol":"tcp",
-          "FromPort":"443",
-          "ToPort":"443",
+          "FromPort":{
+            "Ref":"EdxappServerPort"
+          },
+          "ToPort":{
+            "Ref":"EdxappServerPort"
+          },
           "CidrIp":"0.0.0.0/0"
         }
       ]
@@ -2995,8 +2993,12 @@
       },
       {
         "LoadBalancerPort":"443",
-        "InstancePort":"443",
-        "Protocol":"HTTP"
+        "InstancePort": { "Ref": "XqueueServerPort" },
+        "Protocol":"HTTPS",
+        "InstanceProtocol":"HTTP",
+        "SSLCertificateId": {
+          "Ref": "SSLCertificateARN"
+        }
       }
     ],
     "HealthCheck":{
@@ -3046,14 +3048,8 @@
       "SecurityGroupEgress":[
         {
           "IpProtocol":"tcp",
-          "FromPort":"80",
-          "ToPort":"80",
-          "CidrIp":"0.0.0.0/0"
-        },
-        {
-          "IpProtocol":"tcp",
-          "FromPort":"443",
-          "ToPort":"443",
+          "FromPort": { "Ref": "XqueueServerPort" },
+          "ToPort": { "Ref": "XqueueServerPort" },
           "CidrIp":"0.0.0.0/0"
         }
       ]
@@ -3870,6 +3866,24 @@
           "Ref":"DBSecurityGroup"
         }
       ],
+      "Tags":[
+        {
+          "Key":"role",
+          "Value":"rds"
+        },
+        {
+          "Key":"environment",
+          "Value":{
+            "Ref":"EnvironmentTag"
+          }
+        },
+        {
+          "Key":"deployment",
+          "Value":{
+            "Ref":"DeploymentTag"
+          }
+        }
+      ],
       "MultiAZ":"true"
     }
   },
@@ -4432,14 +4446,8 @@
       "SecurityGroupEgress":[
         {
           "IpProtocol":"tcp",
-          "FromPort":"80",
-          "ToPort":"80",
-          "CidrIp":"0.0.0.0/0"
-        },
-        {
-          "IpProtocol":"tcp",
-          "FromPort":"443",
-          "ToPort":"443",
+          "FromPort": { "Ref": "ForumServerPort" },
+          "ToPort": { "Ref": "ForumServerPort" },
           "CidrIp":"0.0.0.0/0"
         }
       ]
......
@@ -269,7 +269,8 @@ class Ec2Inventory(object):
             reservations = conn.get_all_instances()
             for reservation in reservations:
-                for instance in reservation.instances:
+                instances = sorted(reservation.instances)
+                for instance in instances:
                     self.add_instance(instance, region)
         except boto.exception.BotoServerError as e:
@@ -363,6 +364,7 @@ class Ec2Inventory(object):
         for k, v in instance.tags.iteritems():
             key = self.to_safe("tag_" + k + "=" + v)
             self.push(self.inventory, key, dest)
+            self.keep_first(self.inventory, 'first_in_' + key, dest)

         # Inventory: Group by Route53 domain names if enabled
         if self.route53_enabled:
@@ -532,6 +534,9 @@ class Ec2Inventory(object):
         else:
             my_dict[key] = [element]

+    def keep_first(self, my_dict, key, element):
+        if key not in my_dict:
+            my_dict[key] = [element]
+
     def get_inventory_from_cache(self):
         ''' Reads the inventory from the cache file and returns it as a JSON
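Side note (not part of the diff): together with the existing push helper, keep_first means every tag_role_X group lists all matching instances while first_in_tag_role_X only ever holds the first one seen, which is what the playbook below targets for one-off work such as DB migrations. A minimal, self-contained sketch of that behaviour, using simplified versions of the two helpers and hypothetical host names:

    # Illustrative only -- simplified stand-ins for the helpers in ec2.py.
    def push(my_dict, key, element):
        if key in my_dict:
            my_dict[key].append(element)   # existing group: append
        else:
            my_dict[key] = [element]       # new group: create

    def keep_first(my_dict, key, element):
        if key not in my_dict:             # only the first element is ever recorded
            my_dict[key] = [element]

    inventory = {}
    for host in ["10.0.0.11", "10.0.0.12", "10.0.0.13"]:   # hypothetical edxapp hosts
        push(inventory, "tag_role_edxapp", host)
        keep_first(inventory, "first_in_tag_role_edxapp", host)

    print(inventory["tag_role_edxapp"])           # ['10.0.0.11', '10.0.0.12', '10.0.0.13']
    print(inventory["first_in_tag_role_edxapp"])  # ['10.0.0.11']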
......
---
- hosts: first_in_tag_role_mongo
  sudo: True
  vars_files:
    - "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
    - "{{ secure_dir }}/vars/users.yml"
  roles:
    - common
    - role: 'mongo'
      mongo_create_users: yes
#- hosts: tag_role_mongo:!first_in_tag_role_mongo
#  sudo: True
#  vars_files:
#    - "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
#    - "{{ secure_dir }}/vars/users.yml"
#  roles:
#    - common
#    - mongo
- hosts: first_in_tag_role_edxapp
  sudo: True
  serial: 1
  vars_files:
    - "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
    - "{{ secure_dir }}/vars/common/common.yml"
    - "{{ secure_dir }}/vars/users.yml"
  roles:
    - common
    - datadog
    - supervisor
    - role: nginx
      nginx_sites:
        - lms
        - cms
        - lms-preview
    - role: 'edxapp'
      edxapp_lms_env: 'lms.envs.load_test'
      migrate_db: 'yes'
      openid_workaround: 'yes'
      edx_platform_commit: 'HEAD'
    - splunkforwarder
- hosts: tag_role_edxapp:!first_in_tag_role_edxapp
  sudo: True
  serial: 1
  vars_files:
    - "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
    - "{{ secure_dir }}/vars/common/common.yml"
    - "{{ secure_dir }}/vars/users.yml"
  roles:
    - common
    - datadog
    - supervisor
    - role: nginx
      nginx_sites:
        - lms
        - cms
        - lms-preview
    - role: 'edxapp'
      edxapp_lms_env: 'lms.envs.load_test'
      edx_platform_commit: 'HEAD'
    - splunkforwarder
- hosts: tag_role_worker
  sudo: True
  vars_files:
    - "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
    - "{{ secure_dir }}/vars/common/common.yml"
    - "{{ secure_dir }}/vars/users.yml"
  roles:
    - common
    - datadog
    - supervisor
    - role: nginx
      nginx_sites:
        - lms
        - cms
        - lms-preview
    - role: 'edxapp'
      edxapp_lms_env: 'lms.envs.load_test'
      celery_worker: True
      edx_platform_commit: 'HEAD'
    - splunkforwarder
- hosts: tag_role_xserver
  sudo: True
  vars_files:
    - "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
    - "{{ secure_dir }}/vars/common/common.yml"
    - "{{ secure_dir }}/vars/users.yml"
  roles:
    - common
    - supervisor
    - role: nginx
      nginx_sites:
        - xserver
    - xserver
    - splunkforwarder
- hosts: tag_role_rabbitmq
  serial: 1
  sudo: True
  vars_files:
    - "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
    - "{{ secure_dir }}/vars/common/common.yml"
    - "{{ secure_dir }}/vars/users.yml"
  roles:
    - common
    - supervisor
    - rabbitmq
    - splunkforwarder
- hosts: first_in_tag_role_xqueue
  sudo: True
  vars_files:
    - "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
    - "{{ secure_dir }}/vars/common/common.yml"
    - "{{ secure_dir }}/vars/users.yml"
  roles:
    - common
    - supervisor
    - role: nginx
      nginx_sites:
        - xqueue
    - role: xqueue
      migrate_db: 'yes'
    - splunkforwarder
- hosts: tag_role_xqueue:!first_in_tag_role_xqueue
  sudo: True
  vars_files:
    - "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
    - "{{ secure_dir }}/vars/common/common.yml"
    - "{{ secure_dir }}/vars/users.yml"
  roles:
    - common
    - supervisor
    - role: nginx
      nginx_sites:
        - xqueue
    - xqueue
    - splunkforwarder
- hosts: tag_role_forum
  sudo: True
  vars_files:
    - "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
    - "{{ secure_dir }}/vars/common/common.yml"
    - "{{ secure_dir }}/vars/users.yml"
  roles:
    - common
    - supervisor
    - oraclejdk
    - elasticsearch
    - forum
@@ -5,30 +5,6 @@
 # TODO: the supervisor ansible module does not support
 # stopping and starting services by group.

-- name: edxapp | stop the edxapp services (supervisor)
-  supervisorctl: >
-    name="edxapp:{{ item }}"
-    supervisorctl_path={{ supervisor_ctl }}
-    config={{ supervisor_cfg }}
-    state=stopped
-  when: celery_worker is not defined
-  with_items: service_variants_enabled
-  sudo_user: "{{ common_web_user }}"
-  tags:
-    - deploy
-
-- name: edxapp | stop the celery worker services (supervisor)
-  supervisorctl: >
-    name="edxapp_worker:{{ item.service_variant }}_{{ item.queue }}_{{ item.concurrency }}"
-    supervisorctl_path={{ supervisor_ctl }}
-    config={{ supervisor_cfg }}
-    state=stopped
-  when: celery_worker is defined
-  with_items: edxapp_workers
-  sudo_user: "{{ common_web_user }}"
-  tags:
-    - deploy
-
 # Do A Checkout
 - name: edxapp | checkout edx-platform repo into {{edxapp_code_dir}}
   git: dest={{edxapp_code_dir}} repo={{edx_platform_repo}} version={{edx_platform_commit}}
@@ -202,24 +178,24 @@
 # gather_assets and db migrations
 - include: service_variant_config.yml

-- name: edxapp | start the edxapp services (supervisor)
+- name: edxapp | restart the edxapp services (supervisor)
   supervisorctl: >
     name="edxapp:{{ item }}"
     supervisorctl_path={{ supervisor_ctl }}
     config={{ supervisor_cfg }}
-    state=started
+    state=restarted
   when: celery_worker is not defined
   with_items: service_variants_enabled
   sudo_user: "{{ common_web_user }}"
   tags:
     - deploy

-- name: edxapp | start the celery worker services (supervisor)
+- name: edxapp | restart the celery worker services (supervisor)
   supervisorctl: >
     name="edxapp_worker:{{ item.service_variant }}_{{ item.queue }}_{{ item.concurrency }}"
     supervisorctl_path={{ supervisor_ctl }}
     config={{ supervisor_cfg }}
-    state=started
+    state=restarted
   when: celery_worker is defined
   with_items: edxapp_workers
   sudo_user: "{{ common_web_user }}"
......
@@ -11,15 +11,21 @@ forum_gem_root: "{{ forum_rbenv_dir }}/.gem"
 forum_gem_bin: "{{ forum_gem_root }}/bin"
 forum_path: "{{ forum_code_dir }}/bin:{{ forum_rbenv_bin }}:{{ forum_rbenv_shims }}:{{ forum_gem_bin }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"

+FORUM_MONGO_USER: "cs_comments_service"
+FORUM_MONGO_PASSWORD: "password"
+FORUM_MONGO_HOST: "localhost"
+FORUM_MONGO_PORT: "27017"
+FORUM_SINATRA_ENV: "development"
+
 forum_environment:
   RBENV_ROOT: "{{ forum_rbenv_root }}"
   GEM_HOME: "{{ forum_gem_root }}"
   GEM_PATH: "{{ forum_gem_root }}"
   PATH: "{{ forum_path }}"
-  MONGOHQ_USER: "{{ forum_mongo_user }}"
-  MONGOHQ_PASS: "{{ forum_mongo_password }}"
+  MONGOHQ_USER: "{{ FORUM_MONGO_USER }}"
+  MONGOHQ_PASS: "{{ FORUM_MONGO_PASSWORD }}"
   RACK_ENV: "{{ forum_rack_env }}"
-  SINATRA_ENV: "{{ forum_sinatra_env }}"
+  SINATRA_ENV: "{{ FORUM_SINATRA_ENV }}"
   API_KEY: "{{ forum_api_key }}"
   SEARCH_SERVER: "{{ forum_elasticsearch_url }}"
   MONGOHQ_URL: "{{ forum_mongo_url }}"
@@ -29,14 +35,9 @@ forum_user: "forum"
 forum_ruby_version: "1.9.3-p448"
 forum_source_repo: "https://github.com/edx/cs_comments_service.git"
 forum_version: "HEAD"
-forum_mongo_user: "cs_comments_service"
-forum_mongo_password: "password"
-forum_mongo_host: "localhost"
-forum_mongo_port: "27010"
 forum_mongo_database: "cs_comments_service"
-forum_mongo_url: "mongodb://{{ forum_mongo_user }}:{{ forum_mongo_password }}@{{ forum_mongo_host }}:{{ forum_mongo_port }}/{{ forum_mongo_database }}"
+forum_mongo_url: "mongodb://{{ FORUM_MONGO_USER }}:{{ FORUM_MONGO_PASSWORD }}@{{ FORUM_MONGO_HOST }}:{{ FORUM_MONGO_PORT }}/{{ forum_mongo_database }}"
 forum_rack_env: "development"
-forum_sinatra_env: "development"
 forum_api_key: "password"
 forum_elasticsearch_host: "localhost"
 forum_elasticsearch_port: "9200"
@@ -52,7 +53,7 @@ forum_elasticsearch_url: "http://{{ forum_elasticsearch_host }}:{{ forum_elasticsearch_port }}"
 #
 forum_services:
   - {service: "sinatra", host: "localhost", port: "4567"}
-  - {service: "mongo", host: "{{ forum_mongo_host }}", port: "27017"}
-  - {service: "mongo", host: "{{ forum_mongo_host }}", port: "28017"}
+  - {service: "mongo", host: "{{ FORUM_MONGO_HOST }}", port: "27017"}
+  - {service: "mongo", host: "{{ FORUM_MONGO_HOST }}", port: "28017"}
   - {service: "elasticsearch", host: "{{ forum_elasticsearch_host }}", port: "9200"}
   - {service: "elasticsearch", host: "{{ forum_elasticsearch_host }}", port: "9300"}
 ---
+- name: forum | stop the forum service
+  supervisorctl: >
+    name=forum
+    supervisorctl_path={{ supervisor_ctl }}
+    config={{ supervisor_cfg }}
+    state=stopped
+  tags:
+    - deploy
+
 - name: forum | create the supervisor wrapper
   template: >
     src={{ forum_supervisor_wrapper|basename }}.j2
......
 mongo_logappend: true
 mongo_version: 2.4.7
-mongo_bind_ip: 127.0.0.1
 mongo_port: "27017"
 mongo_extra_conf: ''
 mongo_key_file: '/etc/mongodb_key'
@@ -21,12 +20,17 @@ MONGO_USERS:
     database: edxapp

 MONGO_CLUSTERED: !!null
+MONGO_BIND_IP: 127.0.0.1

 ##
 mongo_logpath: "{{ mongo_log_dir }}/mongodb.log"
 mongo_dbpath: "{{ mongo_data_dir }}/mongodb"

+# Have to use this conditional instead of ignore_errors
+# because the mongo_user module fails and doesn't ignore errors.
+mongo_create_users: !!null
+
 # If the system is running out of an Amazon Web Services
 # cloudformation stack, this group name can be used to pull out
 # the name of the stack the mongo server resides in.
......
@@ -66,8 +66,8 @@
   shell: /usr/bin/mongo /tmp/repset_init.js
   when: MONGO_CLUSTERED

-# Ignoring errors here because slave instances will fail this command
-# since slaveOk is false in ansible 1.3.
+# Ignore errors doesn't work because the module throws an exception
+# it doesn't catch.
 - name: mongo | create a mongodb user
   mongodb_user: >
     database={{ item.database }}
@@ -75,3 +75,4 @@
     password={{ item.password }}
     state=present
   with_items: MONGO_USERS
+  when: mongo_create_users
@@ -13,7 +13,7 @@ logappend={{ mongo_logappend }}
 otherwise only to the specified local ip.
 #}
 {% if mongo_clustered is not defined %}
-bind_ip = {{ mongo_bind_ip }}
+bind_ip = {{ MONGO_BIND_IP }}
 {% endif %}

 port = {{ mongo_port }}
......
@@ -120,3 +120,8 @@
     - "{{ supervisor_cfg }}"
     - "{{ supervisor_cfg_dir }}"
   notify: supervisor | restart supervisor
+
+- name: supervisor | start supervisor
+  service: >
+    name={{supervisor_service}}
+    state=started
-# Stop xqueue service.
-- name: xqueue | stop xqueue service
-  supervisorctl: >
-    name=xqueue
-    supervisorctl_path={{ supervisor_ctl }}
-    config={{ supervisor_cfg }}
-    state=stopped
-  tags:
-    - deploy
-
 - name: xqueue | stop xqueue consumer service
   supervisorctl: >
     name=xqueue_consumer
@@ -84,20 +74,20 @@
   tags:
     - deploy

-- name: xqueue | start xqueue
+- name: xqueue | restart xqueue
   supervisorctl: >
     name=xqueue
     supervisorctl_path={{ supervisor_ctl }}
     config={{ supervisor_cfg }}
-    state=started
+    state=restarted
   tags:
     - deploy

-- name: xqueue | start xqueue consumer
+- name: xqueue | restart xqueue consumer
   supervisorctl: >
     name=xqueue_consumer
     supervisorctl_path={{ supervisor_ctl }}
     config={{ supervisor_cfg }}
-    state=started
+    state=restarted
   tags:
     - deploy
-- name: xserver | stop xserver
-  supervisorctl: >
-    name=xserver
-    supervisorctl_path={{ supervisor_ctl }}
-    config={{ supervisor_cfg }}
-    state=stopped
-  tags:
-    - deploy
-
 - name: xserver | checkout code
   git: dest={{xserver_code_dir}} repo={{xserver_source_repo}} version={{xserver_version}}
   sudo_user: "{{ xserver_user }}"
@@ -20,15 +11,6 @@
   tags:
     - deploy

-- name: xserver | ensuring xserver is activated in supervisor
-  supervisorctl: >
-    name="xserver"
-    supervisorctl_path={{ supervisor_ctl }}
-    config={{ supervisor_cfg }}
-    state=present
-  tags:
-    - deploy
-
 - name: xserver | install requirements
   pip: requirements="{{xserver_requirements_file}}" virtualenv="{{ xserver_venv_dir }}" state=present
   sudo_user: "{{ xserver_user }}"
@@ -74,11 +56,11 @@
   tags:
     - deploy

-- name: xserver | start xserver
+- name: xserver | restart xserver
  supervisorctl: >
    name=xserver
    supervisorctl_path={{ supervisor_ctl }}
    config={{ supervisor_cfg }}
-    state=started
+    state=restarted
  tags:
    - deploy
@@ -84,7 +84,9 @@ def _ssh_config(args):
     for reservation in reservations:
         for instance in reservation.instances:

-            if 'group' in instance.tags:
+            if 'role' in instance.tags:
+                logical_id = instance.tags['role']
+            elif 'group' in instance.tags:
                 logical_id = instance.tags['group']
             else:
                 logical_id = instance.tags['aws:cloudformation:logical-id']
......
@@ -78,8 +78,14 @@ def elbs_for_stack_name(stack_name):
         if elb.vpc_id == vpc_id:
             yield elb

+def rdss_for_stack_name(stack_name):
+    vpc_id = vpc_for_stack_name(stack_name)
+    rds = boto.connect_rds()
+    for instance in rds.get_all_dbinstances():
+        if hasattr(instance, 'VpcId') and instance.VpcId == vpc_id:
+            yield instance
+
-def ensure_service_dns(elb, prefix, zone):
+def ensure_service_dns(generated_dns_name, prefix, zone):
     dns_template = "{prefix}.{zone_name}"

     # Have to remove the trailing period that is on zone names.
@@ -87,7 +93,7 @@ def ensure_service_dns(elb, prefix, zone):
     dns_name = dns_template.format(prefix=prefix,
                                    zone_name=zone_name)

-    add_or_update_record(zone, dns_name, 'CNAME', 600, [elb.dns_name])
+    add_or_update_record(zone, dns_name, 'CNAME', 600, [generated_dns_name])

 if __name__ == "__main__":
@@ -102,7 +108,7 @@ if __name__ == "__main__":
     stack_name = args.stackname

     # Create DNS for edxapp and xqueue.
-    dns_settings = {
+    elb_dns_settings = {
         'edxapp': ['courses', 'studio'],
         'xqueue': ['xqueue'],
         'rabbit': ['rabbit'],
@@ -118,9 +124,18 @@ if __name__ == "__main__":
     stack_elbs = elbs_for_stack_name(stack_name)
     for elb in stack_elbs:
-        for role, dns_prefixes in dns_settings.items():
+        for role, dns_prefixes in elb_dns_settings.items():
             #FIXME this breaks when the service name is in the stack name ie. testforumstack.
             # Get the tags for the instances in this elb and compare the service against the role tag.
             if role in elb.dns_name.lower():
                 for prefix in dns_prefixes:
-                    ensure_service_dns(elb, prefix, zone)
+                    ensure_service_dns(elb.dns_name, prefix, zone)
+
+    # Add a DNS name for the RDS
+    stack_rdss = list(rdss_for_stack_name(stack_name))
+    if len(stack_rdss) != 1:
+        msg = "Didn't find exactly one RDS in this VPC(Found {})"
+        raise Exception(msg.format(len(stack_rdss)))
+    else:
+        ensure_service_dns(stack_rdss[0].endpoint[0], 'rds', zone)
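One clarification on the RDS piece (my note, not from the commit): in boto 2 a DBInstance's endpoint attribute is an (address, port) tuple once the instance is available, so stack_rdss[0].endpoint[0] is the hostname the new rds.<zone> CNAME points at. A rough, self-contained sketch of the record ensure_service_dns ends up creating, using made-up stack and zone names:

    # Hypothetical values for illustration only.
    rds_endpoint = ("mystack-rds.abc123xyz.us-east-1.rds.amazonaws.com", 3306)  # boto2 (address, port)
    zone_name = "vpc.example.com."[:-1]   # ensure_service_dns strips the trailing period from zone names
    dns_name = "{prefix}.{zone_name}".format(prefix="rds", zone_name=zone_name)
    print(dns_name, "-> CNAME", rds_endpoint[0], "(TTL 600)")
    # rds.vpc.example.com -> CNAME mystack-rds.abc123xyz.us-east-1.rds.amazonaws.com (TTL 600)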