Commit 3038bb94 by Feanil Patel

Merge pull request #420 from edx/feanil/vpc_deploys

Feanil/vpc deploys
parents bdf60fd6 9069160c
......@@ -2709,14 +2709,12 @@
"SecurityGroupEgress":[
{
"IpProtocol":"tcp",
"FromPort":"80",
"ToPort":"80",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":"443",
"ToPort":"443",
"FromPort":{
"Ref":"EdxappServerPort"
},
"ToPort":{
"Ref":"EdxappServerPort"
},
"CidrIp":"0.0.0.0/0"
}
]
......@@ -2995,8 +2993,12 @@
},
{
"LoadBalancerPort":"443",
"InstancePort":"443",
"Protocol":"HTTP"
"InstancePort": { "Ref": "XqueueServerPort" },
"Protocol":"HTTPS",
"InstanceProtocol":"HTTP",
"SSLCertificateId": {
"Ref": "SSLCertificateARN"
}
}
],
"HealthCheck":{
......@@ -3046,14 +3048,8 @@
"SecurityGroupEgress":[
{
"IpProtocol":"tcp",
"FromPort":"80",
"ToPort":"80",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":"443",
"ToPort":"443",
"FromPort": { "Ref": "XqueueServerPort" },
"ToPort": { "Ref": "XqueueServerPort" },
"CidrIp":"0.0.0.0/0"
}
]
......@@ -3870,6 +3866,24 @@
"Ref":"DBSecurityGroup"
}
],
"Tags":[
{
"Key":"role",
"Value":"rds"
},
{
"Key":"environment",
"Value":{
"Ref":"EnvironmentTag"
}
},
{
"Key":"deployment",
"Value":{
"Ref":"DeploymentTag"
}
}
],
"MultiAZ":"true"
}
},
......@@ -4432,14 +4446,8 @@
"SecurityGroupEgress":[
{
"IpProtocol":"tcp",
"FromPort":"80",
"ToPort":"80",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":"443",
"ToPort":"443",
"FromPort": { "Ref": "ForumServerPort" },
"ToPort": { "Ref": "ForumServerPort" },
"CidrIp":"0.0.0.0/0"
}
]
......
......@@ -269,7 +269,8 @@ class Ec2Inventory(object):
reservations = conn.get_all_instances()
for reservation in reservations:
for instance in reservation.instances:
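# Sort the instances so the "first_in_*" groups populated via
# keep_first() below are stable across inventory runs.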
instances = sorted(reservation.instances)
for instance in instances:
self.add_instance(instance, region)
except boto.exception.BotoServerError as e:
......@@ -363,6 +364,7 @@ class Ec2Inventory(object):
for k, v in instance.tags.iteritems():
key = self.to_safe("tag_" + k + "=" + v)
self.push(self.inventory, key, dest)
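# Also record the first host seen for this tag, creating
# "first_in_tag_*" groups that playbooks can target for one-off tasks.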
self.keep_first(self.inventory, 'first_in_' + key, dest)
# Inventory: Group by Route53 domain names if enabled
if self.route53_enabled:
......@@ -532,6 +534,9 @@ class Ec2Inventory(object):
else:
my_dict[key] = [element]
def keep_first(self, my_dict, key, element):
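''' Like push(), but only keeps the first element seen for a key. '''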
if key not in my_dict:
my_dict[key] = [element]
def get_inventory_from_cache(self):
''' Reads the inventory from the cache file and returns it as a JSON
......
---
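# The "first_in_tag_role_*" groups are generated by ec2.py's keep_first();
# each holds a single host per role, used to run one-time work (mongo user
# creation, edxapp/xqueue migrations) before the rest of the group.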
- hosts: first_in_tag_role_mongo
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- role: 'mongo'
mongo_create_users: yes
#- hosts: tag_role_mongo:!first_in_tag_role_mongo
# sudo: True
# vars_files:
# - "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
# - "{{ secure_dir }}/vars/users.yml"
# roles:
# - common
# - mongo
- hosts: first_in_tag_role_edxapp
sudo: True
serial: 1
vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- datadog
- supervisor
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
migrate_db: 'yes'
openid_workaround: 'yes'
edx_platform_commit: 'HEAD'
- splunkforwarder
- hosts: tag_role_edxapp:!first_in_tag_role_edxapp
sudo: True
serial: 1
vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- datadog
- supervisor
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
edx_platform_commit: 'HEAD'
- splunkforwarder
- hosts: tag_role_worker
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- datadog
- supervisor
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
celery_worker: True
edx_platform_commit: 'HEAD'
- splunkforwarder
- hosts: tag_role_xserver
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- role: nginx
nginx_sites:
- xserver
- xserver
- splunkforwarder
- hosts: tag_role_rabbitmq
serial: 1
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- rabbitmq
- splunkforwarder
- hosts: first_in_tag_role_xqueue
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- role: nginx
nginx_sites:
- xqueue
- role: xqueue
migrate_db: 'yes'
- splunkforwarder
- hosts: tag_role_xqueue:!first_in_tag_role_xqueue
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- role: nginx
nginx_sites:
- xqueue
- xqueue
- splunkforwarder
- hosts: tag_role_forum
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- oraclejdk
- elasticsearch
- forum
......@@ -5,30 +5,6 @@
# TODO: the supervisor ansible module does not support
# stopping and starting services by group.
- name: edxapp | stop the edxapp services (supervisor)
supervisorctl: >
name="edxapp:{{ item }}"
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=stopped
when: celery_worker is not defined
with_items: service_variants_enabled
sudo_user: "{{ common_web_user }}"
tags:
- deploy
- name: edxapp | stop the celery worker services (supervisor)
supervisorctl: >
name="edxapp_worker:{{ item.service_variant }}_{{ item.queue }}_{{ item.concurrency }}"
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=stopped
when: celery_worker is defined
with_items: edxapp_workers
sudo_user: "{{ common_web_user }}"
tags:
- deploy
# Do A Checkout
- name: edxapp | checkout edx-platform repo into {{edxapp_code_dir}}
git: dest={{edxapp_code_dir}} repo={{edx_platform_repo}} version={{edx_platform_commit}}
......@@ -202,24 +178,24 @@
# gather_assets and db migrations
- include: service_variant_config.yml
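# The separate "stop" tasks at the top of this file were removed;
# supervisorctl state=restarted now stops and starts in a single task.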
- name: edxapp | start the edxapp services (supervisor)
- name: edxapp | restart the edxapp services (supervisor)
supervisorctl: >
name="edxapp:{{ item }}"
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=started
state=restarted
when: celery_worker is not defined
with_items: service_variants_enabled
sudo_user: "{{ common_web_user }}"
tags:
- deploy
- name: edxapp | start the celery worker services (supervisor)
- name: edxapp | restart the celery worker services (supervisor)
supervisorctl: >
name="edxapp_worker:{{ item.service_variant }}_{{ item.queue }}_{{ item.concurrency }}"
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=started
state=restarted
when: celery_worker is defined
with_items: edxapp_workers
sudo_user: "{{ common_web_user }}"
......
......@@ -11,15 +11,21 @@ forum_gem_root: "{{ forum_rbenv_dir }}/.gem"
forum_gem_bin: "{{ forum_gem_root }}/bin"
forum_path: "{{ forum_code_dir }}/bin:{{ forum_rbenv_bin }}:{{ forum_rbenv_shims }}:{{ forum_gem_bin }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
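# Upper-case FORUM_* variables replace the lower-case forum_mongo_*
# defaults removed below and are meant to be overridden externally.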
FORUM_MONGO_USER: "cs_comments_service"
FORUM_MONGO_PASSWORD: "password"
FORUM_MONGO_HOST: "localhost"
FORUM_MONGO_PORT: "27017"
FORUM_SINATRA_ENV: "development"
forum_environment:
RBENV_ROOT: "{{ forum_rbenv_root }}"
GEM_HOME: "{{ forum_gem_root }}"
GEM_PATH: "{{ forum_gem_root }}"
PATH: "{{ forum_path }}"
MONGOHQ_USER: "{{ forum_mongo_user }}"
MONGOHQ_PASS: "{{ forum_mongo_password }}"
MONGOHQ_USER: "{{ FORUM_MONGO_USER }}"
MONGOHQ_PASS: "{{ FORUM_MONGO_PASSWORD }}"
RACK_ENV: "{{ forum_rack_env }}"
SINATRA_ENV: "{{ forum_sinatra_env }}"
SINATRA_ENV: "{{ FORUM_SINATRA_ENV }}"
API_KEY: "{{ forum_api_key }}"
SEARCH_SERVER: "{{ forum_elasticsearch_url }}"
MONGOHQ_URL: "{{ forum_mongo_url }}"
......@@ -29,14 +35,9 @@ forum_user: "forum"
forum_ruby_version: "1.9.3-p448"
forum_source_repo: "https://github.com/edx/cs_comments_service.git"
forum_version: "HEAD"
forum_mongo_user: "cs_comments_service"
forum_mongo_password: "password"
forum_mongo_host: "localhost"
forum_mongo_port: "27010"
forum_mongo_database: "cs_comments_service"
forum_mongo_url: "mongodb://{{ forum_mongo_user }}:{{ forum_mongo_password }}@{{ forum_mongo_host }}:{{ forum_mongo_port }}/{{ forum_mongo_database }}"
forum_mongo_url: "mongodb://{{ FORUM_MONGO_USER }}:{{ FORUM_MONGO_PASSWORD }}@{{ FORUM_MONGO_HOST }}:{{ FORUM_MONGO_PORT }}/{{ forum_mongo_database }}"
forum_rack_env: "development"
forum_sinatra_env: "development"
forum_api_key: "password"
forum_elasticsearch_host: "localhost"
forum_elasticsearch_port: "9200"
......@@ -52,7 +53,7 @@ forum_elasticsearch_url: "http://{{ forum_elasticsearch_host }}:{{ forum_elastic
#
forum_services:
- {service: "sinatra", host: "localhost", port: "4567"}
- {service: "mongo", host: "{{ forum_mongo_host }}", port: "27017"}
- {service: "mongo", host: "{{ forum_mongo_host }}", port: "28017"}
- {service: "mongo", host: "{{ FORUM_MONGO_HOST }}", port: "27017"}
- {service: "mongo", host: "{{ FORUM_MONGO_HOST }}", port: "28017"}
- {service: "elasticsearch", host: "{{ forum_elasticsearch_host }}", port: "9200"}
- {service: "elasticsearch", host: "{{ forum_elasticsearch_host }}", port: "9300"}
---
- name: forum | stop the forum service
supervisorctl: >
name=forum
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=stopped
tags:
- deploy
- name: forum | create the supervisor wrapper
template: >
src={{ forum_supervisor_wrapper|basename }}.j2
......
mongo_logappend: true
mongo_version: 2.4.7
mongo_bind_ip: 127.0.0.1
mongo_port: "27017"
mongo_extra_conf: ''
mongo_key_file: '/etc/mongodb_key'
......@@ -21,12 +20,17 @@ MONGO_USERS:
database: edxapp
MONGO_CLUSTERED: !!null
MONGO_BIND_IP: 127.0.0.1
##
mongo_logpath: "{{ mongo_log_dir }}/mongodb.log"
mongo_dbpath: "{{ mongo_data_dir }}/mongodb"
# Have to use this conditional instead of ignore_errors
# because the mongo_user module fails and doesn't ignore errors.
mongo_create_users: !!null
# If the system is running out of an Amazon Web Services
# CloudFormation stack, this group name can be used to pull out
# the name of the stack the mongo server resides in.
......
......@@ -66,8 +66,8 @@
shell: /usr/bin/mongo /tmp/repset_init.js
when: MONGO_CLUSTERED
# Ignoring errors here because slave instances will fail this command
# since slaveOk is false in ansible 1.3.
# ignore_errors doesn't work here because the module throws an exception
# that it doesn't catch.
- name: mongo | create a mongodb user
mongodb_user: >
database={{ item.database }}
......@@ -75,3 +75,4 @@
password={{ item.password }}
state=present
with_items: MONGO_USERS
when: mongo_create_users
......@@ -13,7 +13,7 @@ logappend={{ mongo_logappend }}
otherwise only to the specified local ip.
#}
{% if mongo_clustered is not defined %}
bind_ip = {{ mongo_bind_ip }}
bind_ip = {{ MONGO_BIND_IP }}
{% endif %}
port = {{ mongo_port }}
......
......@@ -120,3 +120,8 @@
- "{{ supervisor_cfg }}"
- "{{ supervisor_cfg_dir }}"
notify: supervisor | restart supervisor
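# Make sure supervisor itself is running so that later
# supervisorctl deploy tasks have a daemon to talk to.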
- name: supervisor | start supervisor
service: >
name={{supervisor_service}}
state=started
# Stop xqueue service.
- name: xqueue | stop xqueue service
supervisorctl: >
name=xqueue
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=stopped
tags:
- deploy
- name: xqueue | stop xqueue consumer service
supervisorctl: >
name=xqueue_consumer
......@@ -84,20 +74,20 @@
tags:
- deploy
- name: xqueue | start xqueue
- name: xqueue | restart xqueue
supervisorctl: >
name=xqueue
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=started
state=restarted
tags:
- deploy
- name: xqueue | start xqueue consumer
- name: xqueue | restart xqueue consumer
supervisorctl: >
name=xqueue_consumer
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=started
state=restarted
tags:
- deploy
- name: xserver | stop xserver
supervisorctl: >
name=xserver
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=stopped
tags:
- deploy
- name: xserver | checkout code
git: dest={{xserver_code_dir}} repo={{xserver_source_repo}} version={{xserver_version}}
sudo_user: "{{ xserver_user }}"
......@@ -20,15 +11,6 @@
tags:
- deploy
- name: xserver | ensuring xserver is activated in supervisor
supervisorctl: >
name="xserver"
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=present
tags:
- deploy
- name: xserver | install requirements
pip: requirements="{{xserver_requirements_file}}" virtualenv="{{ xserver_venv_dir }}" state=present
sudo_user: "{{ xserver_user }}"
......@@ -74,11 +56,11 @@
tags:
- deploy
- name: xserver | start xserver
- name: xserver | restart xserver
supervisorctl: >
name=xserver
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=started
state=restarted
tags:
- deploy
......@@ -84,7 +84,9 @@ def _ssh_config(args):
for reservation in reservations:
for instance in reservation.instances:
if 'group' in instance.tags:
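# Prefer the 'role' tag, fall back to 'group', and finally to the
# CloudFormation logical id when neither tag is present.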
if 'role' in instance.tags:
logical_id = instance.tags['role']
elif 'group' in instance.tags:
logical_id = instance.tags['group']
else:
logical_id = instance.tags['aws:cloudformation:logical-id']
......
......@@ -78,8 +78,14 @@ def elbs_for_stack_name(stack_name):
if elb.vpc_id == vpc_id:
yield elb
def rdss_for_stack_name(stack_name):
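# Yield only the RDS instances that live in this stack's VPC; the
# hasattr guard skips instances that expose no VpcId attribute.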
vpc_id = vpc_for_stack_name(stack_name)
rds = boto.connect_rds()
for instance in rds.get_all_dbinstances():
if hasattr(instance, 'VpcId') and instance.VpcId == vpc_id:
yield instance
def ensure_service_dns(elb, prefix, zone):
def ensure_service_dns(generated_dns_name, prefix, zone):
dns_template = "{prefix}.{zone_name}"
# Have to remove the trailing period that is on zone names.
......@@ -87,7 +93,7 @@ def ensure_service_dns(elb, prefix, zone):
dns_name = dns_template.format(prefix=prefix,
zone_name=zone_name)
add_or_update_record(zone, dns_name, 'CNAME', 600, [elb.dns_name])
add_or_update_record(zone, dns_name, 'CNAME', 600, [generated_dns_name])
if __name__ == "__main__":
......@@ -102,7 +108,7 @@ if __name__ == "__main__":
stack_name = args.stackname
# Create DNS for edxapp and xqueue.
dns_settings = {
elb_dns_settings = {
'edxapp': ['courses', 'studio'],
'xqueue': ['xqueue'],
'rabbit': ['rabbit'],
......@@ -118,9 +124,18 @@ if __name__ == "__main__":
stack_elbs = elbs_for_stack_name(stack_name)
for elb in stack_elbs:
for role, dns_prefixes in dns_settings.items():
for role, dns_prefixes in elb_dns_settings.items():
#FIXME this breaks when the service name is in the stack name, e.g. testforumstack.
# Get the tags for the instances in this elb and compare the service against the role tag.
if role in elb.dns_name.lower():
for prefix in dns_prefixes:
ensure_service_dns(elb, prefix, zone)
ensure_service_dns(elb.dns_name, prefix, zone)
# Add a DNS name for the RDS
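# instance.endpoint is a (hostname, port) pair, so endpoint[0] is the
# hostname the CNAME should point at.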
stack_rdss = list(rdss_for_stack_name(stack_name))
if len(stack_rdss) != 1:
msg = "Didn't find exactly one RDS in this VPC(Found {})"
raise Exception(msg.format(len(stack_rdss)))
else:
ensure_service_dns(stack_rdss[0].endpoint[0], 'rds', zone)