Commit d1bc29c7 by John Jarvis

Merge pull request #678 from edx/jarv/ansible-1.4

Jarv/ansible 1.4
parents 0553e057 67d0776d
@@ -121,7 +121,7 @@ options:
     required: False
     default: 1
     aliases: []
-  monitor:
+  monitoring:
     version_added: "1.1"
     description:
       - enable detailed monitoring (CloudWatch) for instance
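With the option renamed from monitor to monitoring, a play enables detailed CloudWatch monitoring like this (a minimal sketch; the keypair, instance type, and AMI id are placeholder values):

- local_action:
    module: ec2
    keypair: mykey
    instance_type: m1.small
    image: ami-xxxxxxxx
    monitoring: yes
    wait: yes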
@@ -185,7 +185,7 @@ options:
     default: 'present'
     aliases: []
   root_ebs_size:
-    version_added: "1.4"
+    version_added: "1.5"
     desription:
       - size of the root volume in gigabytes
     required: false
@@ -193,7 +193,7 @@ options:
     aliases: []
 requirements: [ "boto" ]
-author: Seth Vidal, Tim Gerla, Lester Wade, John Jarvis
+author: Seth Vidal, Tim Gerla, Lester Wade
 '''

 EXAMPLES = '''
@@ -210,17 +210,6 @@ EXAMPLES = '''
     group: webserver
     count: 3

-# Basic provisioning example with setting the root volume size to 50GB
-- local_action:
-    module: ec2
-    keypair: mykey
-    instance_type: c1.medium
-    image: emi-40603AD1
-    wait: yes
-    group: webserver
-    count: 3
-    root_ebs_size: 50
-
 # Advanced example with tagging and CloudWatch
 - local_action:
     module: ec2
@@ -231,7 +220,8 @@ EXAMPLES = '''
     wait: yes
     wait_timeout: 500
     count: 5
-    instance_tags: '{"db":"postgres"}' monitoring=yes'
+    instance_tags: '{"db":"postgres"}'
+    monitoring=yes

 # Multiple groups example
 local_action:
@@ -243,7 +233,8 @@ local_action:
     wait: yes
     wait_timeout: 500
     count: 5
-    instance_tags: '{"db":"postgres"}' monitoring=yes'
+    instance_tags: '{"db":"postgres"}'
+    monitoring=yes

 # VPC example
 - local_action:
@@ -406,6 +397,7 @@ def create_instances(module, ec2):
     else:
         bdm = None
+
     # group_id and group_name are exclusive of each other
     if group_id and group_name:
         module.fail_json(msg = str("Use only one type of parameter (group_name) or (group_id)"))
@@ -416,9 +408,7 @@ def create_instances(module, ec2):
     if group_name:
         grp_details = ec2.get_all_security_groups()
         if type(group_name) == list:
-            # FIXME: this should be a nice list comprehension
-            # also not py 2.4 compliant
-            group_id = list(filter(lambda grp: str(grp.id) if str(tmp) in str(grp) else None, grp_details) for tmp in group_name)
+            group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
         elif type(group_name) == str:
             for grp in grp_details:
                 if str(group_name) in str(grp):
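The replacement comprehension resolves each requested group name to its id in a single pass over get_all_security_groups(). That list branch is what a playbook exercises when it passes the module's group parameter as a list rather than a single name (a sketch; the group names and other values are placeholders):

- local_action:
    module: ec2
    keypair: mykey
    instance_type: m1.small
    image: ami-xxxxxxxx
    group: ['webserver', 'database']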
@@ -501,7 +491,7 @@ def create_instances(module, ec2):
     if instance_tags:
         try:
-            ec2.create_tags(instids, module.from_json(instance_tags))
+            ec2.create_tags(instids, instance_tags)
         except boto.exception.EC2ResponseError as e:
             module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
@@ -558,6 +548,10 @@ def terminate_instances(module, ec2, instance_ids):
     """

+    # Whether to wait for termination to complete before returning
+    wait = module.params.get('wait')
+    wait_timeout = int(module.params.get('wait_timeout'))
+
     changed = False
     instance_dict_array = []
@@ -576,8 +570,30 @@ def terminate_instances(module, ec2, instance_ids):
             module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e))
         changed = True

+    # wait here until the instances are 'terminated'
+    if wait:
+        num_terminated = 0
+        wait_timeout = time.time() + wait_timeout
+        while wait_timeout > time.time() and num_terminated < len(terminated_instance_ids):
+            response = ec2.get_all_instances( \
+                instance_ids=terminated_instance_ids, \
+                filters={'instance-state-name':'terminated'})
+            try:
+                num_terminated = len(response.pop().instances)
+            except Exception, e:
+                # got a bad response of some sort, possibly due to
+                # stale/cached data. Wait a second and then try again
+                time.sleep(1)
+                continue
+
+            if num_terminated < len(terminated_instance_ids):
+                time.sleep(5)
+
+        # waiting took too long
+        if wait_timeout < time.time() and num_terminated < len(terminated_instance_ids):
+            module.fail_json(msg = "wait for instance termination timeout on %s" % time.asctime())
+
     return (changed, instance_dict_array, terminated_instance_ids)
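With this block, termination honors the same wait/wait_timeout parameters as provisioning: the loop polls get_all_instances with an instance-state-name=terminated filter until every id is accounted for or the deadline passes. A play that blocks until the instances are actually gone might look like this (a minimal sketch, assuming state=absent with instance_ids selects instances for termination as elsewhere in this module; the instance id is a placeholder):

- local_action:
    module: ec2
    state: absent
    instance_ids: ['i-xxxxxxxx']
    wait: yes
    wait_timeout: 300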
 def main():
@@ -593,16 +609,16 @@ def main():
         image = dict(),
         kernel = dict(),
         count = dict(default='1'),
-        monitoring = dict(choices=BOOLEANS, default=False),
+        monitoring = dict(type='bool', default=False),
         ramdisk = dict(),
-        wait = dict(choices=BOOLEANS, default=False),
+        wait = dict(type='bool', default=False),
         wait_timeout = dict(default=300),
         ec2_url = dict(),
-        aws_secret_key = dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True),
-        aws_access_key = dict(aliases=['ec2_access_key', 'access_key']),
+        ec2_secret_key = dict(aliases=['aws_secret_key', 'secret_key'], no_log=True),
+        ec2_access_key = dict(aliases=['aws_access_key', 'access_key']),
         placement_group = dict(),
         user_data = dict(),
-        instance_tags = dict(),
+        instance_tags = dict(type='dict'),
         vpc_subnet_id = dict(),
         private_ip = dict(),
         instance_profile_name = dict(),
@@ -612,33 +628,9 @@ def main():
         )
     )
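Two usage consequences follow from this argument spec: instance_tags is now declared type='dict', so a playbook can pass tags as a native YAML mapping instead of a JSON string, and the credential parameters are primarily ec2_* with the aws_* spellings kept as aliases (a sketch; all values are placeholders):

- local_action:
    module: ec2
    keypair: mykey
    instance_type: m1.small
    image: ami-xxxxxxxx
    ec2_access_key: '{{ my_access_key }}'
    ec2_secret_key: '{{ my_secret_key }}'
    instance_tags:
      db: postgres
      environment: staging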
-    ec2_url = module.params.get('ec2_url')
-    aws_secret_key = module.params.get('aws_secret_key')
-    aws_access_key = module.params.get('aws_access_key')
-    region = module.params.get('region')
-
-    # allow eucarc environment variables to be used if ansible vars aren't set
-    if not ec2_url and 'EC2_URL' in os.environ:
-        ec2_url = os.environ['EC2_URL']
-
-    if not aws_secret_key:
-        if 'AWS_SECRET_KEY' in os.environ:
-            aws_secret_key = os.environ['AWS_SECRET_KEY']
-        elif 'EC2_SECRET_KEY' in os.environ:
-            aws_secret_key = os.environ['EC2_SECRET_KEY']
-
-    if not aws_access_key:
-        if 'AWS_ACCESS_KEY' in os.environ:
-            aws_access_key = os.environ['AWS_ACCESS_KEY']
-        elif 'EC2_ACCESS_KEY' in os.environ:
-            aws_access_key = os.environ['EC2_ACCESS_KEY']
-
-    if not region:
-        if 'AWS_REGION' in os.environ:
-            region = os.environ['AWS_REGION']
-        elif 'EC2_REGION' in os.environ:
-            region = os.environ['EC2_REGION']
+    # def get_ec2_creds(module):
+    #    return ec2_url, ec2_access_key, ec2_secret_key, region
+    ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)

     # If we have a region specified, connect to its endpoint.
     if region:
@@ -672,8 +664,8 @@ def main():
     module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array)

-# this is magic, see lib/ansible/module_common.py
-#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *

 main()
@@ -15,8 +15,8 @@
 #
 #

-- name: analytics-server | stop the analytics service
+- name: stop the analytics service
   service: name=analytics state=stopped

-- name: analytics-server | start the analytics service
+- name: start the analytics service
   service: name=analytics state=started

 #
 # TODO: Needed while this repo is private
 #
-- name: analytics-server | upload ssh script
+- name: upload ssh script
   template:
     src=tmp/{{ as_role_name }}.git_ssh.sh.j2 dest={{ as_git_ssh }}
     force=yes owner=root group=adm mode=750
@@ -13,7 +13,7 @@
 #
 # TODO: Needed while this repo is private
 #
-- name: analytics-server | install read-only ssh key required for checkout
+- name: install read-only ssh key required for checkout
   copy:
     src={{ as_git_identity_path }} dest={{ as_git_identity_dest }}
     force=yes owner=ubuntu group=adm mode=0600
@@ -22,14 +22,14 @@
     - install
     - update

-- name: analytics-server | checkout code
+- name: checkout code
   git:
     dest={{ as_code_dir }} repo={{ as_source_repo }}
     version={{ as_version }} force=true
   environment:
     GIT_SSH: $as_git_ssh
-  notify: analytics-server | restart the analytics service
-  notify: analytics-server | start the analytics service
+  notify: restart the analytics service
+  notify: start the analytics service
   tags:
     - analytics-server
     - install
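Handlers are matched by their exact name string, so dropping the role prefix from task names only works because the handler names in the same change drop it too, and every notify: has to track the new spelling. Note also that two notify: keys on one task is duplicate-key YAML, where the second silently wins; the list form expresses the intent unambiguously (a sketch reusing the names above):

- name: checkout code
  git:
    dest={{ as_code_dir }} repo={{ as_source_repo }}
    version={{ as_version }} force=true
  notify:
    - restart the analytics service
    - start the analytics service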
@@ -38,7 +38,7 @@
 #
 # TODO: Needed while this repo is private
 #
-- name: analytics-server | update src permissions
+- name: update src permissions
   file:
     path={{ as_code_dir }} state=directory owner={{ as_user }}
     group={{ as_web_user }} mode=2750 recurse=yes
@@ -50,7 +50,7 @@
 #
 # TODO: Needed while this repo is private
 #
-- name: analytics-server | remove read-only ssh key for the content repo
+- name: remove read-only ssh key for the content repo
   file: path={{ as_git_identity_dest }} state=absent
   tags:
     - analytics-server
@@ -60,20 +60,20 @@
 #
 # TODO: Needed while this repo is private
 #
-- name: analytics-server | remove ssh script
+- name: remove ssh script
   file: path={{ as_git_ssh }} state=absent
   tags:
     - analytics-server
     - install
     - update

-- name: analytics-server | install application requirements
+- name: install application requirements
   pip:
     requirements={{ as_requirements_file }}
     virtualenv={{ as_venv_dir }} state=present
   sudo: true
   sudo_user: "{{ as_user }}"
-  notify: analytics-server | start the analytics service
+  notify: start the analytics service
   tags:
     - analytics-server
     - install
...
@@ -37,14 +37,14 @@
 # - common
 # - analytics-server
 #
-- name: analytics-server | install system packages
+- name: install system packages
   apt: pkg={{','.join(as_debian_pkgs)}} state=present
   tags:
     - analytics-server
     - install
     - update

-- name: analytics-server | create analytics-server user {{ as_user }}
+- name: create analytics-server user {{ as_user }}
   user:
     name={{ as_user }} state=present shell=/bin/bash
     home={{ as_home }} createhome=yes
@@ -53,7 +53,7 @@
     - install
     - update

-- name: analytics-server | setup the analytics-server env
+- name: setup the analytics-server env
   template:
     src=opt/wwc/analytics-server/{{ as_env }}.j2
     dest={{ as_home }}/{{ as_env }}
@@ -63,7 +63,7 @@
     - install
     - update

-- name: analytics-server | drop a bash_profile
+- name: drop a bash_profile
   copy: >
     src=../../common/files/bash_profile
     dest={{ as_home }}/.bash_profile
@@ -71,7 +71,7 @@
     group={{ as_user }}

 # Awaiting next ansible release.
-#- name: analytics-server | ensure .bashrc exists
+#- name: ensure .bashrc exists
 #  file: path={{ as_home }}/.bashrc state=touch
 #  sudo: true
 #  sudo_user: "{{ as_user }}"
@@ -80,7 +80,7 @@
 #    - install
 #    - update

-- name: analytics-server | ensure .bashrc exists
+- name: ensure .bashrc exists
   shell: touch {{ as_home }}/.bashrc
   sudo: true
   sudo_user: "{{ as_user }}"
@@ -89,7 +89,7 @@
     - install
     - update

-- name: analytics-server | add source of analytics-server_env to .bashrc
+- name: add source of analytics-server_env to .bashrc
   lineinfile:
     dest={{ as_home }}/.bashrc
     regexp='. {{ as_home }}/analytics-server_env'
@@ -99,7 +99,7 @@
     - install
     - update

-- name: analytics-server | add source venv to .bashrc
+- name: add source venv to .bashrc
   lineinfile:
     dest={{ as_home }}/.bashrc
     regexp='. {{ as_venv_dir }}/bin/activate'
@@ -109,7 +109,7 @@
     - install
     - update

-- name: analytics-server | install global python requirements
+- name: install global python requirements
   pip: name={{ item }}
   with_items: as_pip_pkgs
   tags:
@@ -117,7 +117,7 @@
     - install
     - update

-- name: analytics-server | create config
+- name: create config
   template:
     src=opt/wwc/analytics.auth.json.j2
     dest=/opt/wwc/analytics.auth.json
@@ -128,7 +128,7 @@
     - install
     - update

-- name: analytics-server | install service
+- name: install service
   template:
     src=etc/init/analytics.conf.j2 dest=/etc/init/analytics.conf
     owner=root group=root
...
@@ -15,8 +15,8 @@
 #
 #

-- name: analytics | stop the analytics service
+- name: stop the analytics service
   service: name=analytics state=stopped

-- name: analytics | start the analytics service
+- name: start the analytics service
   service: name=analytics state=started

 #
 # TODO: Needed while this repo is private
 #
-- name: analytics | upload ssh script
+- name: upload ssh script
   template:
     src=tmp/{{ analytics_role_name }}.git_ssh.sh.j2 dest={{ analytics_git_ssh }}
     force=yes owner=root group=adm mode=750
@@ -13,7 +13,7 @@
 #
 # TODO: Needed while this repo is private
 #
-- name: analytics | install read-only ssh key required for checkout
+- name: install read-only ssh key required for checkout
   copy:
     src={{ analytics_git_identity_path }} dest={{ analytics_git_identity_dest }}
     force=yes owner=ubuntu group=adm mode=0600
@@ -22,14 +22,14 @@
     - install
     - update

-- name: analytics | checkout code
+- name: checkout code
   git:
     dest={{ analytics_code_dir }} repo={{ analytics_source_repo }}
     version={{ analytics_version }} force=true
   environment:
     GIT_SSH: $analytics_git_ssh
-  notify: analytics | restart the analytics service
-  notify: analytics | start the analytics service
+  notify: restart the analytics service
+  notify: start the analytics service
   tags:
     - analytics
     - install
@@ -38,7 +38,7 @@
 #
 # TODO: Needed while this repo is private
 #
-- name: analytics | update src permissions
+- name: update src permissions
   file:
     path={{ analytics_code_dir }} state=directory owner={{ analytics_user }}
     group={{ analytics_web_user }} mode=2750 recurse=yes
@@ -50,7 +50,7 @@
 #
 # TODO: Needed while this repo is private
 #
-- name: analytics | remove read-only ssh key for the content repo
+- name: remove read-only ssh key for the content repo
   file: path={{ analytics_git_identity_dest }} state=absent
   tags:
     - analytics
@@ -60,20 +60,20 @@
 #
 # TODO: Needed while this repo is private
 #
-- name: analytics | remove ssh script
+- name: remove ssh script
   file: path={{ analytics_git_ssh }} state=absent
   tags:
     - analytics
     - install
     - update

-- name: analytics | install application requirements
+- name: install application requirements
   pip:
     requirements={{ analytics_requirements_file }}
     virtualenv={{ analytics_venv_dir }} state=present
   sudo: true
   sudo_user: "{{ analytics_user }}"
-  notify: analytics | start the analytics service
+  notify: start the analytics service
   tags:
     - analytics
     - install
...
@@ -37,14 +37,14 @@
 # - common
 # - analytics
 #
-- name: analytics | install system packages
+- name: install system packages
   apt: pkg={{','.join(analytics_debian_pkgs)}} state=present
   tags:
     - analytics
     - install
     - update

-- name: analytics | create analytics user {{ analytics_user }}
+- name: create analytics user {{ analytics_user }}
   user:
     name={{ analytics_user }} state=present shell=/bin/bash
     home={{ analytics_home }} createhome=yes
@@ -53,7 +53,7 @@
     - install
     - update

-- name: analytics | setup the analytics env
+- name: setup the analytics env
   template:
     src=opt/wwc/analytics/{{ analytics_env }}.j2
     dest={{ analytics_home }}/{{ analytics_env }}
@@ -63,7 +63,7 @@
     - install
     - update

-- name: analytics | drop a bash_profile
+- name: drop a bash_profile
   copy: >
     src=../../common/files/bash_profile
     dest={{ analytics_home }}/.bash_profile
@@ -71,7 +71,7 @@
     group={{ analytics_user }}

 # Awaiting next ansible release.
-#- name: analytics | ensure .bashrc exists
+#- name: ensure .bashrc exists
 #  file: path={{ analytics_home }}/.bashrc state=touch
 #  sudo: true
 #  sudo_user: "{{ analytics_user }}"
@@ -80,7 +80,7 @@
 #    - install
 #    - update

-- name: analytics | ensure .bashrc exists
+- name: ensure .bashrc exists
   shell: touch {{ analytics_home }}/.bashrc
   sudo: true
   sudo_user: "{{ analytics_user }}"
@@ -89,7 +89,7 @@
     - install
     - update

-- name: analytics | add source of analytics_env to .bashrc
+- name: add source of analytics_env to .bashrc
   lineinfile:
     dest={{ analytics_home }}/.bashrc
     regexp='. {{ analytics_home }}/analytics_env'
@@ -99,7 +99,7 @@
     - install
     - update

-- name: analytics | add source venv to .bashrc
+- name: add source venv to .bashrc
   lineinfile:
     dest={{ analytics_home }}/.bashrc
     regexp='. {{ analytics_venv_dir }}/bin/activate'
@@ -109,7 +109,7 @@
     - install
     - update

-- name: analytics | install global python requirements
+- name: install global python requirements
   pip: name={{ item }}
   with_items: analytics_pip_pkgs
   tags:
@@ -117,7 +117,7 @@
     - install
     - update

-- name: analytics | create config
+- name: create config
   template:
     src=opt/wwc/analytics.auth.json.j2
     dest=/opt/wwc/analytics.auth.json
@@ -128,7 +128,7 @@
     - install
     - update

-- name: analytics | install service
+- name: install service
   template:
     src=etc/init/analytics.conf.j2 dest=/etc/init/analytics.conf
     owner=root group=root
...
 ---
-- name: ansible-role | check if the role exists
+- name: check if the role exists
   command: test -d roles/{{ role_name }}
   register: role_exists
   ignore_errors: yes

-- name: ansible-role | prompt for overwrite
+- name: prompt for overwrite
   pause: prompt="Role {{ role_name }} exists. Overwrite? Touch any key to continue or <CTRL>-c, then a, to abort."
   when: role_exists | success

-- name: ansible-role | create role directories
+- name: create role directories
   file: path=roles/{{role_name}}/{{ item }} state=directory
   with_items:
     - tasks
@@ -19,7 +19,7 @@
     - templates
     - files

-- name: ansible-role | make an ansible role
+- name: make an ansible role
   template: src={{ item }}/main.yml.j2 dest=roles/{{ role_name }}/{{ item }}/main.yml
   with_items:
     - tasks
...
@@ -7,5 +7,5 @@
 # Overview:
 #
 #
-- name: {{ role_name }} | notify me
+- name: notify me
   debug: msg="stub handler"
@@ -14,6 +14,6 @@
 #
 #
-- name: {{ role_name }} | stub ansible task
+- name: stub ansible task
   debug: msg="This is a stub task created by the ansible-role role"
-  notify: {{ role_name }} | notify me
+  notify: notify me
\ No newline at end of file
 ---
-- name: apache | restart apache
+- name: restart apache
   service: name=apache2 state=restarted

 # Requires nginx package
 ---
-- name: apache | Copying apache config {{ site_name }}
+- name: Copying apache config {{ site_name }}
   template: src={{ item }} dest=/etc/apache2/sites-available/{{ site_name }}
   first_available_file:
     - "{{ local_dir }}/apache/templates/{{ site_name }}.j2"
     # seems like paths in first_available_file must be relative to the playbooks dir
     - "roles/apache/templates/{{ site_name }}.j2"
-  notify: apache | restart apache
+  notify: restart apache
   when: apache_role_run is defined
   tags:
     - apache
     - update

-- name: apache | Creating apache2 config link {{ site_name }}
+- name: Creating apache2 config link {{ site_name }}
   file: src=/etc/apache2/sites-available/{{ site_name }} dest=/etc/apache2/sites-enabled/{{ site_name }} state={{ state }} owner=root group=root
-  notify: apache | restart apache
+  notify: restart apache
   when: apache_role_run is defined
   tags:
     - apache
...

 #Installs apache and runs the lms wsgi
 ---
-- name: apache | Installs apache and mod_wsgi from apt
+- name: Installs apache and mod_wsgi from apt
   apt: pkg={{item}} install_recommends=no state=present update_cache=yes
   with_items:
     - apache2
     - libapache2-mod-wsgi
-  notify: apache | restart apache
+  notify: restart apache
   tags:
     - apache
     - install

-- name: apache | disables default site
+- name: disables default site
   command: a2dissite 000-default
-  notify: apache | restart apache
+  notify: restart apache
   tags:
     - apache
     - install

-- name: apache | rewrite apache ports conf
+- name: rewrite apache ports conf
   template: dest=/etc/apache2/ports.conf src=ports.conf.j2 owner=root group=root
-  notify: apache | restart apache
+  notify: restart apache
   tags:
     - apache
     - install

-- name: apache | Register the fact that apache role has run
+- name: Register the fact that apache role has run
   command: echo True
   register: apache_role_run
   tags:
...
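The last apache task exists only to register apache_role_run, which the config tasks above guard on with when: apache_role_run is defined — a register-then-test pattern for asserting that one part of a role ran before another is applied. A minimal sketch of the pattern, using the names from the role above:

- name: Register the fact that apache role has run
  command: echo True
  register: apache_role_run

- name: Copying apache config {{ site_name }}
  template: src={{ item }} dest=/etc/apache2/sites-available/{{ site_name }}
  when: apache_role_run is defined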
@@ -57,7 +57,7 @@
 - fail: automated_sudoers_dest required for role
   when: automated_sudoers_dest is not defined

-- name: automated | create automated user
+- name: create automated user
   user:
     name={{ automated_user }} state=present shell=/bin/rbash
     home={{ automated_home }} createhome=yes
@@ -66,7 +66,7 @@
     - install
     - update

-- name: automated | create sudoers file from file
+- name: create sudoers file from file
   copy:
     dest=/etc/sudoers.d/{{ automated_sudoers_dest }}
     src={{ automated_sudoers_file }} owner="root"
@@ -77,7 +77,7 @@
     - install
     - update

-- name: automated | create sudoers file from template
+- name: create sudoers file from template
   template:
     dest=/etc/sudoers.d/{{ automated_sudoers_dest }}
     src={{ automated_sudoers_template }} owner="root"
@@ -92,7 +92,7 @@
 # Prevent user from updating their PATH and
 # environment.
 #
-- name: automated | update shell file mode
+- name: update shell file mode
   file:
     path={{ automated_home }}/{{ item }} mode=0640
     state=file owner="root" group={{ automated_user }}
@@ -105,7 +105,7 @@
     - .profile
     - .bash_logout

-- name: automated | change ~automated ownership
+- name: change ~automated ownership
   file:
     path={{ automated_home }} mode=0750 state=directory
     owner="root" group={{ automated_user }}
@@ -119,7 +119,7 @@
 # and that links that were remove from the role are
 # removed.
 #
-- name: automated | remove ~automated/bin directory
+- name: remove ~automated/bin directory
   file:
     path={{ automated_home }}/bin state=absent
   ignore_errors: yes
@@ -128,7 +128,7 @@
     - install
     - update

-- name: automated | create ~automated/bin directory
+- name: create ~automated/bin directory
   file:
     path={{ automated_home }}/bin state=directory mode=0750
     owner="root" group={{ automated_user }}
@@ -137,7 +137,7 @@
     - install
     - update

-- name: automated | re-write .profile
+- name: re-write .profile
   copy:
     src=home/automator/.profile
     dest={{ automated_home }}/.profile
@@ -149,7 +149,7 @@
     - install
     - update

-- name: automated | re-write .bashrc
+- name: re-write .bashrc
   copy:
     src=home/automator/.bashrc
     dest={{ automated_home }}/.bashrc
@@ -161,7 +161,7 @@
     - install
     - update

-- name: automated | create .ssh directory
+- name: create .ssh directory
   file:
     path={{ automated_home }}/.ssh state=directory mode=0700
     owner={{ automated_user }} group={{ automated_user }}
@@ -170,7 +170,7 @@
     - install
     - update

-- name: automated | copy key to .ssh/authorized_keys
+- name: copy key to .ssh/authorized_keys
   copy:
     src=home/automator/.ssh/authorized_keys
     dest={{ automated_home }}/.ssh/authorized_keys mode=0600
@@ -180,7 +180,7 @@
     - install
     - update

-- name: automated | create allowed command links
+- name: create allowed command links
   file:
     src={{ item }} dest={{ automated_home }}/bin/{{ item.split('/').pop() }}
     state=link
...
 # Install browsers required to run the JavaScript
 # and acceptance test suite locally without a display
 ---
-- name: browsers | install system packages
+- name: install system packages
   apt: pkg={{','.join(browser_deb_pkgs)}}
     state=present update_cache=yes

-- name: browsers | download browser debian packages from S3
+- name: download browser debian packages from S3
   get_url: dest="/tmp/{{ item.name }}" url="{{ item.url }}"
   register: download_deb
-  with_items: "{{ browser_s3_deb_pkgs }}"
+  with_items: browser_s3_deb_pkgs

-- name: browsers | install browser debian packages
+- name: install browser debian packages
   shell: gdebi -nq /tmp/{{ item.name }}
   when: download_deb.changed
-  with_items: "{{ browser_s3_deb_pkgs }}"
+  with_items: browser_s3_deb_pkgs

-- name: browsers | Install ChromeDriver
+- name: Install ChromeDriver
   get_url:
     url={{ chromedriver_url }}
     dest=/var/tmp/chromedriver_{{ chromedriver_version }}.zip

-- name: browsers | Install ChromeDriver 2
+- name: Install ChromeDriver 2
   shell: unzip /var/tmp/chromedriver_{{ chromedriver_version }}.zip
     chdir=/var/tmp

-- name: browsers | Install ChromeDriver 3
+- name: Install ChromeDriver 3
   shell: mv /var/tmp/chromedriver /usr/local/bin/chromedriver

-- name: browsers | Install Chromedriver 4
+- name: Install Chromedriver 4
   file: path=/usr/local/bin/chromedriver mode=0755

-- name: browsers | create xvfb upstart script
+- name: create xvfb upstart script
   template: src=xvfb.conf.j2 dest=/etc/init/xvfb.conf owner=root group=root

-- name: browsers | start xvfb
+- name: start xvfb
   shell: start xvfb
   ignore_errors: yes
@@ -14,7 +14,7 @@
 # Overview:
 #

-- name: certs | restart certs
+- name: restart certs
   supervisorctl_local: >
     name=certs
     supervisorctl_path={{ supervisor_ctl }}
...

 ---
-- name: certs | create certificate application config
+- name: create certificate application config
   template: >
     src=certs.env.json.j2
     dest={{ certs_app_dir }}/env.json
   sudo_user: "{{ certs_user }}"
-  notify: certs | restart certs
+  notify: restart certs

-- name: certs | create certificate auth file
+- name: create certificate auth file
   template: >
     src=certs.auth.json.j2
     dest={{ certs_app_dir }}/auth.json
   sudo_user: "{{ certs_user }}"
-  notify: certs | restart certs
+  notify: restart certs

-- name: certs | writing supervisor script for certificates
+- name: writing supervisor script for certificates
   template: >
     src=certs.conf.j2 dest={{ supervisor_cfg_dir }}/certs.conf
     owner={{ supervisor_user }} mode=0644
-  notify: certs | restart certs
+  notify: restart certs

-- name: certs | create ssh script for git
+- name: create ssh script for git
   template: >
     src={{ certs_git_ssh|basename }}.j2 dest={{ certs_git_ssh }}
     owner={{ certs_user }} mode=750
-  notify: certs | restart certs
+  notify: restart certs

-- name: certs | install read-only ssh key for the certs repo
+- name: install read-only ssh key for the certs repo
   copy: >
     src={{ CERTS_LOCAL_GIT_IDENTITY }} dest={{ certs_git_identity }}
     force=yes owner={{ certs_user }} mode=0600
-  notify: certs | restart certs
+  notify: restart certs

-- name: certs | checkout certificates repo into {{ certs_code_dir }}
+- name: checkout certificates repo into {{ certs_code_dir }}
   git: dest={{ certs_code_dir }} repo={{ certs_repo }} version={{ certs_version }}
   sudo_user: "{{ certs_user }}"
   environment:
     GIT_SSH: "{{ certs_git_ssh }}"
-  notify: certs | restart certs
+  notify: restart certs

-- name: certs | remove read-only ssh key for the certs repo
+- name: remove read-only ssh key for the certs repo
   file: path={{ certs_git_identity }} state=absent
-  notify: certs | restart certs
+  notify: restart certs

 - name : install python requirements
   pip: requirements="{{ certs_requirements_file }}" virtualenv="{{ certs_venv_dir }}" state=present
   sudo_user: "{{ certs_user }}"
-  notify: certs | restart certs
+  notify: restart certs

 # call supervisorctl update. this reloads
 # the supervisorctl config and restarts
 # the services if any of the configurations
 # have changed.
 #
-- name: certs | update supervisor configuration
+- name: update supervisor configuration
   shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
   register: supervisor_update
   sudo_user: "{{ supervisor_service_user }}"
   changed_when: supervisor_update.stdout != ""
   when: start_services

-- name: certs | ensure certs has started
+- name: ensure certs has started
   supervisorctl_local: >
     name=certs
     supervisorctl_path={{ supervisor_ctl }}
@@ -69,12 +69,12 @@
   sudo_user: "{{ supervisor_service_user }}"
   when: start_services

-- name: certs | create a symlink for venv python
+- name: create a symlink for venv python
   file: >
     src="{{ certs_venv_bin }}/{{ item }}"
     dest={{ COMMON_BIN_DIR }}/{{ item }}.certs
     state=link
-  notify: certs | restart certs
+  notify: restart certs
   with_items:
     - python
     - pip
...
@@ -35,46 +35,46 @@
   fail: msg="You must set CERTS_LOCAL_GIT_IDENTITY var for this role!"
   when: not CERTS_LOCAL_GIT_IDENTITY

-- name: certs | create application user
+- name: create application user
   user: >
     name="{{ certs_user }}"
     home="{{ certs_app_dir }}"
     createhome=no
     shell=/bin/false
-  notify: certs | restart certs
+  notify: restart certs

-- name: certs | create certs app and data dirs
+- name: create certs app and data dirs
   file: >
     path="{{ item }}"
     state=directory
     owner="{{ certs_user }}"
     group="{{ common_web_group }}"
-  notify: certs | restart certs
+  notify: restart certs
   with_items:
     - "{{ certs_app_dir }}"
     - "{{ certs_venvs_dir }}"

-- name: certs | create certs gpg dir
+- name: create certs gpg dir
   file: >
     path="{{ certs_gpg_dir }}" state=directory
     owner="{{ common_web_user }}"
     mode=0700
-  notify: certs | restart certs
+  notify: restart certs

-- name: certs | copy the private gpg signing key
+- name: copy the private gpg signing key
   copy: >
     src={{ CERTS_LOCAL_PRIVATE_KEY }}
     dest={{ certs_app_dir }}/{{ CERTS_LOCAL_PRIVATE_KEY|basename }}
     owner={{ common_web_user }} mode=0600
-  notify: certs | restart certs
+  notify: restart certs
   register: certs_gpg_key

-- name: certs | load the gpg key
+- name: load the gpg key
   shell: >
     /usr/bin/gpg --homedir {{ certs_gpg_dir }} --import {{ certs_app_dir }}/{{ CERTS_LOCAL_PRIVATE_KEY|basename }}
   sudo_user: "{{ common_web_user }}"
   when: certs_gpg_key.changed
-  notify: certs | restart certs
+  notify: restart certs

 - include: deploy.yml tags=deploy
 ---
-- name: common | restart rsyslogd
+- name: restart rsyslogd
   service: name=rsyslog state=restarted
   sudo: True

 ---
-- name: common | Add user www-data
+- name: Add user www-data
   # This is the default user for nginx
   user: >
     name="{{ common_web_user }}"
     shell=/bin/false

-- name: common | Create common directories
+- name: Create common directories
   file: >
     path={{ item }} state=directory owner=root
     group=root mode=0755
@@ -16,57 +16,57 @@
     - "{{ COMMON_CFG_DIR }}"

 # Need to install python-pycurl to use Ansible's apt_repository module
-- name: common | Install python-pycurl
+- name: Install python-pycurl
   apt: pkg=python-pycurl state=present update_cache=yes

 # Ensure that we get a current version of Git
 # GitHub requires version 1.7.10 or later
 # https://help.github.com/articles/https-cloning-errors
-- name: common | Add git apt repository
+- name: Add git apt repository
   apt_repository: repo="{{ common_git_ppa }}"

-- name: common | Install role-independent useful system packages
+- name: Install role-independent useful system packages
   # do this before log dir setup; rsyslog package guarantees syslog user present
   apt: >
     pkg={{','.join(common_debian_pkgs)}} install_recommends=yes
     state=present update_cache=yes

-- name: common | Create common log directory
+- name: Create common log directory
   file: >
     path={{ COMMON_LOG_DIR }} state=directory owner=syslog
     group=syslog mode=0755

-- name: common | upload sudo config for key forwarding as root
+- name: upload sudo config for key forwarding as root
   copy: >
     src=ssh_key_forward dest=/etc/sudoers.d/ssh_key_forward
     validate='visudo -c -f %s' owner=root group=root mode=0440

-- name: common | pip install virtualenv
+- name: pip install virtualenv
   pip: >
     name="{{ item }}" state=present
     extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
   with_items: common_pip_pkgs

-- name: common | Install rsyslog configuration for edX
+- name: Install rsyslog configuration for edX
   template: dest=/etc/rsyslog.d/99-edx.conf src=edx_rsyslog.j2 owner=root group=root mode=644
-  notify: common | restart rsyslogd
+  notify: restart rsyslogd

-- name: common | Install logrotate configuration for edX
+- name: Install logrotate configuration for edX
   template: dest=/etc/logrotate.d/edx-services src=edx_logrotate.j2 owner=root group=root mode=644

-- name: common | update /etc/hosts
+- name: update /etc/hosts
   template: src=hosts.j2 dest=/etc/hosts
   when: COMMON_HOSTNAME
   register: etc_hosts

-- name: common | update /etc/hostname
+- name: update /etc/hostname
   template: src=hostname.j2 dest=/etc/hostname
   when: COMMON_HOSTNAME
   register: etc_hostname

-- name: common | run hostname
+- name: run hostname
   shell: >
     hostname -F /etc/hostname
   when: COMMON_HOSTNAME and (etc_hosts.changed or etc_hostname.changed)
 ---
-- name: datadog | restart the datadog service
+- name: restart the datadog service
   service: name=datadog-agent state=restarted
@@ -15,43 +15,43 @@
 # - datadog
 #

-- name: datadog | install debian needed pkgs
+- name: install debian needed pkgs
   apt: pkg={{ item }}
   with_items: datadog_debian_pkgs
   tags:
     - datadog

-- name: datadog | add apt key
+- name: add apt key
   apt_key: id=C7A7DA52 url={{datadog_apt_key}} state=present
   tags:
     - datadog

-- name: datadog | install apt repository
+- name: install apt repository
   apt_repository: repo='deb http://apt.datadoghq.com/ unstable main' update_cache=yes
   tags:
     - datadog

-- name: datadog | install datadog agent
+- name: install datadog agent
   apt: pkg="datadog-agent"
   tags:
     - datadog

-- name: datadog | bootstrap config
+- name: bootstrap config
   shell: cp /etc/dd-agent/datadog.conf.example /etc/dd-agent/datadog.conf creates=/etc/dd-agent/datadog.conf
   tags:
     - datadog

-- name: datadog | update api-key
+- name: update api-key
   lineinfile: >
     dest="/etc/dd-agent/datadog.conf"
     regexp="^api_key:.*"
     line="api_key:{{ datadog_api_key }}"
   notify:
-    - datadog | restart the datadog service
+    - restart the datadog service
   tags:
     - datadog

-- name: datadog | ensure started and enabled
+- name: ensure started and enabled
   service: name=datadog-agent state=started enabled=yes
   tags:
     - datadog
 ---
-- name: demo | check out the demo course
+- name: check out the demo course
   git: dest={{ demo_code_dir }} repo={{ demo_repo }} version={{ demo_version }}
   sudo_user: "{{ edxapp_user }}"
   register: demo_checkout

-- name: demo | import demo course
+- name: import demo course
   shell: >
     {{ edxapp_venv_bin }}/python ./manage.py cms --settings=aws import {{ edxapp_course_data_dir }} {{ demo_code_dir }}
     chdir={{ edxapp_code_dir }}
   sudo_user: "{{ common_web_user }}"
   when: demo_checkout.changed

-- name: demo | create some test users and enroll them in the course
+- name: create some test users and enroll them in the course
   shell: >
     {{ edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms create_user -e {{ item.email }} -p {{ item.password }} -m {{ item.mode }} -c {{ demo_course_id }}
     chdir={{ edxapp_code_dir }}
@@ -20,21 +20,21 @@
   with_items: demo_test_users
   when: demo_checkout.changed

-- name: demo | create staff user
+- name: create staff user
   shell: >
     {{ edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms create_user -e staff@example.com -p edx -s -c {{ demo_course_id }}
     chdir={{ edxapp_code_dir }}
   sudo_user: "{{ common_web_user }}"
   when: demo_checkout.changed

-- name: demo | add test users to the certificate whitelist
+- name: add test users to the certificate whitelist
   shell: >
     {{ edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms cert_whitelist -a {{ item.email }} -c {{ demo_course_id }}
     chdir={{ edxapp_code_dir }}
   with_items: demo_test_users
   when: demo_checkout.changed

-- name: demo | seed the forums for the demo course
+- name: seed the forums for the demo course
   shell: >
     {{ edxapp_venv_bin }}/python ./manage.py lms --settings=aws seed_permissions_roles {{ demo_course_id }}
     chdir={{ edxapp_code_dir }}
...
@@ -30,7 +30,7 @@
 # - edxapp
 # - demo

-- name: demo | create demo app and data dirs
+- name: create demo app and data dirs
   file: >
     path="{{ demo_app_dir }}" state=directory
     owner="{{ edxapp_user }}" group="{{ common_web_group }}"
...
@@ -11,7 +11,7 @@
 # Defaults for role devpi
 #
 ---
-- name: devpi | restart devpi
+- name: restart devpi
   supervisorctl_local: >
     state=restarted
     supervisorctl_path={{ devpi_supervisor_ctl }}
...
@@ -30,13 +30,13 @@
 # - devpi
 ---

-- name: devpi | create devpi user
+- name: create devpi user
   user: >
     name={{ devpi_user }}
     shell=/bin/false createhome=no
-  notify: devpi | restart devpi
+  notify: restart devpi

-- name: devpi | create devpi application directories
+- name: create devpi application directories
   file: >
     path={{ item }}
     state=directory
@@ -45,9 +45,9 @@
   with_items:
     - "{{ devpi_app_dir }}"
     - "{{ devpi_venv_dir }}"
-  notify: devpi | restart devpi
+  notify: restart devpi

-- name: devpi | create the devpi data directory, needs write access by the service user
+- name: create the devpi data directory, needs write access by the service user
   file: >
     path={{ item }}
     state=directory
@@ -56,40 +56,40 @@
   with_items:
     - "{{ devpi_data_dir }}"
     - "{{ devpi_mirror_dir }}"
-  notify: devpi | restart devpi
+  notify: restart devpi

-- name: devpi | install devpi pip pkgs
+- name: install devpi pip pkgs
   pip: >
     name={{ item }}
     state=present
     virtualenv={{ devpi_venv_dir }}
   sudo_user: "{{ devpi_user }}"
   with_items: devpi_pip_pkgs
-  notify: devpi | restart devpi
+  notify: restart devpi

-- name: devpi | writing supervisor script
+- name: writing supervisor script
   template: >
     src=devpi.conf.j2 dest={{ devpi_supervisor_cfg_dir }}/devpi.conf
     owner={{ devpi_user }} group={{ devpi_user }} mode=0644
-  notify: devpi | restart devpi
+  notify: restart devpi

-- name: devpi | create a symlink for venv python, pip
+- name: create a symlink for venv python, pip
   file: >
     src="{{ devpi_venv_bin }}/{{ item }}"
     dest={{ COMMON_BIN_DIR }}/{{ item }}.devpi
     state=link
-  notify: devpi | restart devpi
+  notify: restart devpi
   with_items:
     - python
     - pip

-- name: devpi | create a symlink for venv supervisor
+- name: create a symlink for venv supervisor
   file: >
     src="{{ devpi_supervisor_venv_bin }}/supervisorctl"
     dest={{ COMMON_BIN_DIR }}/{{ item }}.devpi
     state=link

-- name: devpi | create a symlink for supervisor config
+- name: create a symlink for supervisor config
   file: >
     src="{{ devpi_supervisor_app_dir }}/supervisord.conf"
     dest={{ COMMON_CFG_DIR }}/supervisord.conf.devpi
@@ -100,12 +100,12 @@
 # the services if any of the configurations
 # have changed.
 #
-- name: devpi | update devpi supervisor configuration
+- name: update devpi supervisor configuration
   shell: "{{ devpi_supervisor_ctl }} -c {{ devpi_supervisor_cfg }} update"
   register: supervisor_update
   changed_when: supervisor_update.stdout != ""

-- name: devpi | ensure devpi is started
+- name: ensure devpi is started
   supervisorctl_local: >
     state=started
     supervisorctl_path={{ devpi_supervisor_ctl }}
...
 ---
-- name: discern | restart discern
+- name: restart discern
   supervisorctl_local: >
     name=discern
     supervisorctl_path={{ supervisor_ctl }}
...
--- ---
- name: discern | create supervisor scripts - discern, discern_celery - name: create supervisor scripts - discern, discern_celery
template: > template: >
src={{ item }}.conf.j2 dest={{ supervisor_cfg_dir }}/{{ item }}.conf src={{ item }}.conf.j2 dest={{ supervisor_cfg_dir }}/{{ item }}.conf
owner={{ supervisor_user }} mode=0644 owner={{ supervisor_user }} mode=0644
...@@ -8,56 +8,56 @@ ...@@ -8,56 +8,56 @@
with_items: ['discern', 'discern_celery'] with_items: ['discern', 'discern_celery']
#Upload config files for django (auth and env) #Upload config files for django (auth and env)
- name: discern | create discern application config env.json file - name: create discern application config env.json file
template: src=env.json.j2 dest={{ discern_app_dir }}/env.json template: src=env.json.j2 dest={{ discern_app_dir }}/env.json
sudo_user: "{{ discern_user }}" sudo_user: "{{ discern_user }}"
notify: notify:
- discern | restart discern - restart discern
- name: discern | create discern auth file auth.json - name: create discern auth file auth.json
template: src=auth.json.j2 dest={{ discern_app_dir }}/auth.json template: src=auth.json.j2 dest={{ discern_app_dir }}/auth.json
sudo_user: "{{ discern_user }}" sudo_user: "{{ discern_user }}"
notify: notify:
- discern | restart discern - restart discern
- name: discern | git checkout discern repo into discern_code_dir - name: git checkout discern repo into discern_code_dir
git: dest={{ discern_code_dir }} repo={{ discern_source_repo }} version={{ discern_version }} git: dest={{ discern_code_dir }} repo={{ discern_source_repo }} version={{ discern_version }}
sudo_user: "{{ discern_user }}" sudo_user: "{{ discern_user }}"
notify: notify:
- discern | restart discern - restart discern
- name: discern | git checkout ease repo into discern_ease_code_dir - name: git checkout ease repo into discern_ease_code_dir
git: dest={{ discern_ease_code_dir}} repo={{ discern_ease_source_repo }} version={{ discern_ease_version }} git: dest={{ discern_ease_code_dir}} repo={{ discern_ease_source_repo }} version={{ discern_ease_version }}
sudo_user: "{{ discern_user }}" sudo_user: "{{ discern_user }}"
notify: notify:
- discern | restart discern - restart discern
#Numpy has to be a pre-requirement in order for scipy to build #Numpy has to be a pre-requirement in order for scipy to build
- name : discern | install python pre-requirements for discern and ease - name : install python pre-requirements for discern and ease
pip: requirements={{item}} virtualenv={{ discern_venv_dir }} state=present pip: requirements={{item}} virtualenv={{ discern_venv_dir }} state=present
sudo_user: "{{ discern_user }}" sudo_user: "{{ discern_user }}"
notify: notify:
- discern | restart discern - restart discern
with_items: with_items:
- "{{ discern_pre_requirements_file }}" - "{{ discern_pre_requirements_file }}"
- "{{ discern_ease_pre_requirements_file }}" - "{{ discern_ease_pre_requirements_file }}"
-- name : discern | install python requirements for discern and ease
+- name : install python requirements for discern and ease
   pip: requirements={{item}} virtualenv={{ discern_venv_dir }} state=present
   sudo_user: "{{ discern_user }}"
   notify:
-    - discern | restart discern
+    - restart discern
   with_items:
     - "{{ discern_post_requirements_file }}"
     - "{{ discern_ease_post_requirements_file }}"
-- name: discern | install ease python package
+- name: install ease python package
   shell: >
     . {{ discern_venv_dir }}/bin/activate; cd {{ discern_ease_code_dir }}; python setup.py install
   notify:
-    - discern | restart discern
+    - restart discern
-- name: discern | download and install nltk
+- name: download and install nltk
   shell: |
     set -e
     curl -o {{ discern_nltk_tmp_file }} {{ discern_nltk_download_url }}
@@ -68,30 +68,30 @@
     chdir={{ discern_data_dir }}
   sudo_user: "{{ discern_user }}"
   notify:
-    - discern | restart discern
+    - restart discern
 #Run this instead of using the ansible module because the ansible module only supports syncdb of these three, and does not
 #support virtualenvs as of this comment
-- name: discern | django syncdb migrate and collectstatic for discern
+- name: django syncdb migrate and collectstatic for discern
   shell: >
     {{ discern_venv_dir }}/bin/python {{discern_code_dir}}/manage.py {{item}} --noinput --settings={{discern_settings}} --pythonpath={{discern_code_dir}}
     chdir={{ discern_code_dir }}
   sudo_user: "{{ discern_user }}"
   notify:
-    - discern | restart discern
+    - restart discern
   with_items:
     - syncdb
     - migrate
     - collectstatic
 #Have this separate from the other three because it doesn't take the noinput flag
-- name: discern | django update_index for discern
+- name: django update_index for discern
   shell: >
     {{ discern_venv_dir}}/bin/python {{discern_code_dir}}/manage.py update_index --settings={{discern_settings}} --pythonpath={{discern_code_dir}}
     chdir={{ discern_code_dir }}
   sudo_user: "{{ discern_user }}"
   notify:
-    - discern | restart discern
+    - restart discern
 # call supervisorctl update. this reloads
@@ -99,14 +99,14 @@
 # the services if any of the configurations
 # have changed.
 #
-- name: discern | update supervisor configuration
+- name: update supervisor configuration
   shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
   register: supervisor_update
   sudo_user: "{{ supervisor_service_user }}"
   when: start_services
   changed_when: supervisor_update.stdout != ""
-- name: discern | ensure discern, discern_celery has started
+- name: ensure discern, discern_celery has started
   supervisorctl_local: >
     name={{ item }}
     supervisorctl_path={{ supervisor_ctl }}
@@ -117,7 +117,7 @@
     - discern
     - discern_celery
-- name: discern | create a symlink for venv python
+- name: create a symlink for venv python
   file: >
     src="{{ discern_venv_bin }}/python"
     dest={{ COMMON_BIN_DIR }}/python.discern
...
 ---
-- name: discern | create application user
+- name: create application user
   user: >
     name="{{ discern_user }}"
     home="{{ discern_app_dir }}"
     createhome=no
     shell=/bin/false
   notify:
-    - discern | restart discern
+    - restart discern
-- name: discern | create discern app dirs owned by discern
+- name: create discern app dirs owned by discern
   file: >
     path="{{ item }}"
     state=directory
     owner="{{ discern_user }}"
     group="{{ common_web_group }}"
   notify:
-    - discern | restart discern
+    - restart discern
   with_items:
     - "{{ discern_app_dir }}"
     - "{{ discern_venvs_dir }}"
-- name: discern | create discern data dir, owned by {{ common_web_user }}
+- name: create discern data dir, owned by {{ common_web_user }}
   file: >
     path="{{ discern_data_dir }}" state=directory
     owner="{{ common_web_user }}" group="{{ discern_user }}"
     mode=0775
   notify:
-    - discern | restart discern
+    - restart discern
-- name: discern | install debian packages that discern needs
+- name: install debian packages that discern needs
   apt: pkg={{ item }} state=present
   notify:
-    - discern | restart discern
+    - restart discern
   with_items: discern_debian_pkgs
-- name: discern | install debian packages for ease that discern needs
+- name: install debian packages for ease that discern needs
   apt: pkg={{ item }} state=present
   notify:
-    - discern | restart discern
+    - restart discern
   with_items: discern_ease_debian_pkgs
-- name: discern | copy sudoers file for discern
+- name: copy sudoers file for discern
   copy: >
     src=sudoers-discern dest=/etc/sudoers.d/discern
     mode=0440 validate='visudo -cf %s' owner=root group=root
   notify:
-    - discern | restart discern
+    - restart discern
 #Needed if using redis to prevent memory issues
-- name: discern | change memory commit settings -- needed for redis
+- name: change memory commit settings -- needed for redis
   command: sysctl vm.overcommit_memory=1
   notify:
-    - discern | restart discern
+    - restart discern
 - include: deploy.yml tags=deploy
 ---
-- name: edx_ansible | git checkout edx_ansible repo into edx_ansible_code_dir
+- name: git checkout edx_ansible repo into edx_ansible_code_dir
   git: dest={{ edx_ansible_code_dir }} repo={{ edx_ansible_source_repo }} version={{ configuration_version }}
   sudo_user: "{{ edx_ansible_user }}"
-- name : edx_ansible | install edx_ansible venv requirements
+- name : install edx_ansible venv requirements
   pip: requirements="{{ edx_ansible_requirements_file }}" virtualenv="{{ edx_ansible_venv_dir }}" state=present
   sudo_user: "{{ edx_ansible_user }}"
-- name: edx_ansible | create update script
+- name: create update script
   template: >
     dest={{ edx_ansible_app_dir}}/update
     src=update.j2 owner={{ edx_ansible_user }} group={{ edx_ansible_user }} mode=755
-- name: edx_ansible | create a symlink for update.sh
+- name: create a symlink for update.sh
   file: >
     src={{ edx_ansible_app_dir }}/update
     dest={{ COMMON_BIN_DIR }}/update
     state=link
-- name: edx_ansible | dump all vars to yaml
+- name: dump all vars to yaml
   template: src=dumpall.yml.j2 dest={{ edx_ansible_var_file }} mode=0600
-- name: edx_ansible | clean up var file, removing all version vars
+- name: clean up var file, removing all version vars
   shell: sed -i -e "/{{item}}/d" {{ edx_ansible_var_file }}
   with_items:
     - edx_platform_version
@@ -37,10 +37,10 @@
     - ease_version
     - certs_version
-- name: edx_ansible | remove the special _original_file var
+- name: remove the special _original_file var
   shell: sed -i -e "/_original_file/d" {{ edx_ansible_var_file }}
-- name: edxapp | create a symlink for var file
+- name: create a symlink for var file
   file: >
     src={{ edx_ansible_var_file }}
     dest={{ COMMON_CFG_DIR }}/{{ edx_ansible_var_file|basename }}
...
@@ -23,14 +23,14 @@
 #
 #
 #
-- name: edx_ansible | create application user
+- name: create application user
   user: >
     name="{{ edx_ansible_user }}"
     home="{{ edx_ansible_app_dir }}"
     createhome=no
     shell=/bin/false
-- name: edx_ansible | create edx_ansible app and venv dir
+- name: create edx_ansible app and venv dir
   file: >
     path="{{ item }}"
     state=directory
@@ -41,7 +41,7 @@
     - "{{ edx_ansible_data_dir }}"
     - "{{ edx_ansible_venvs_dir }}"
-- name: edx_ansible | install a bunch of system packages on which edx_ansible relies
+- name: install a bunch of system packages on which edx_ansible relies
   apt: pkg={{','.join(edx_ansible_debian_pkgs)}} state=present
 - include: deploy.yml tags=deploy
 ---
-- name: edxapp | restart edxapp
+- name: restart edxapp
   supervisorctl_local: >
     state=restarted
     supervisorctl_path={{ supervisor_ctl }}
@@ -9,7 +9,7 @@
   sudo_user: "{{ supervisor_service_user }}"
   with_items: service_variants_enabled
-- name: edxapp | restart edxapp_workers
+- name: restart edxapp_workers
   supervisorctl_local: >
     name="edxapp_worker:{{ item.service_variant }}_{{ item.queue }}_{{ item.concurrency }}"
     supervisorctl_path={{ supervisor_ctl }}
...
-- name: edxapp | setup the edxapp env
+- name: setup the edxapp env
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
   template: >
     src=edxapp_env.j2 dest={{ edxapp_app_dir }}/edxapp_env
     owner={{ edxapp_user }} group={{ common_web_user }}
     mode=0644
 # Do A Checkout
-- name: edxapp | checkout edx-platform repo into {{edxapp_code_dir}}
+- name: checkout edx-platform repo into {{edxapp_code_dir}}
   git: dest={{edxapp_code_dir}} repo={{edx_platform_repo}} version={{edx_platform_version}}
   register: chkout
   sudo_user: "{{ edxapp_user }}"
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
-- name: edxapp | git clean after checking out edx-platform
+- name: git clean after checking out edx-platform
   shell: cd {{edxapp_code_dir}} && git clean -xdf
   sudo_user: "{{ edxapp_user }}"
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
-- name: edxapp | checkout theme
+- name: checkout theme
   git: dest={{ edxapp_app_dir }}/themes/{{edxapp_theme_name}} repo={{edxapp_theme_source_repo}} version={{edxapp_theme_version}}
   when: edxapp_theme_name != ''
   sudo_user: "{{ edxapp_user }}"
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
-- name: edxapp | create checksum for requirements, package.json and Gemfile
+- name: create checksum for requirements, package.json and Gemfile
   shell: >
     /usr/bin/md5sum {{ " ".join(edxapp_chksum_req_files) }} 2>/dev/null > /var/tmp/edxapp.req.new
   sudo_user: "{{ edxapp_user }}"
@@ -47,16 +47,16 @@
 # Substitute github mirror in all requirements files
 # This is run on every single deploy
-- name: edxapp | Updating requirement files for git mirror
+- name: Updating requirement files for git mirror
   command: |
     /bin/sed -i -e 's/github\.com/{{ COMMON_GIT_MIRROR }}/g' {{ " ".join(edxapp_all_req_files) }}
   sudo_user: "{{ edxapp_user }}"
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
 # Ruby plays that need to be run after platform updates.
-- name: edxapp | gem install bundler
+- name: gem install bundler
   shell: >
     gem install bundle
     chdir={{ edxapp_code_dir }}
@@ -64,10 +64,10 @@
   environment: "{{ edxapp_environment }}"
   sudo_user: "{{ edxapp_user }}"
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
-- name: edxapp | bundle install
+- name: bundle install
   shell: >
     bundle install --binstubs
     chdir={{ edxapp_code_dir }}
@@ -75,32 +75,32 @@
   sudo_user: "{{ edxapp_user }}"
   environment: "{{ edxapp_environment }}"
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
 # Set the npm registry
-- name: edxapp | Set the npm registry
+- name: Set the npm registry
   shell:
     npm config set registry 'http://registry.npmjs.org'
     creates="{{ edxapp_app_dir }}/.npmrc"
   sudo_user: "{{ edxapp_user }}"
   environment: "{{ edxapp_environment }}"
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
 # Node plays that need to be run after platform updates.
-- name: edxapp | Install edx-platform npm dependencies
+- name: Install edx-platform npm dependencies
   shell: npm install chdir={{ edxapp_code_dir }}
   sudo_user: "{{ edxapp_user }}"
   environment: "{{ edxapp_environment }}"
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
 # Install the python pre requirements into {{ edxapp_venv_dir }}
-- name : edxapp | install python pre-requirements
+- name : install python pre-requirements
   pip: >
     requirements="{{pre_requirements_file}}"
     virtualenv="{{edxapp_venv_dir}}"
@@ -109,12 +109,12 @@
   sudo_user: "{{ edxapp_user }}"
   environment: "{{ edxapp_environment }}"
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
   when: not inst.stat.exists or new.stat.md5 != inst.stat.md5
 # Install the python modules into {{ edxapp_venv_dir }}
-- name : edxapp | install python base-requirements
+- name : install python base-requirements
 # Need to use shell rather than pip so that we can maintain the context of our current working directory; some
 # requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
 # installs everything into that virtual environment.
@@ -124,12 +124,12 @@
   environment: "{{ edxapp_environment }}"
   sudo_user: "{{ edxapp_user }}"
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
   when: not inst.stat.exists or new.stat.md5 != inst.stat.md5
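The body of the base-requirements task is hidden by the hunk boundary above; per the comment it is a shell-driven pip run executed from the repo root rather than the pip module. A reconstruction under that assumption (the requirements variable name is a guess):

    - name : install python base-requirements
      shell: >
        {{ edxapp_venv_dir }}/bin/pip install --exists-action w -r {{ base_requirements_file }}
        chdir={{ edxapp_code_dir }}
      sudo_user: "{{ edxapp_user }}"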
 # Install the python post requirements into {{ edxapp_venv_dir }}
-- name : edxapp | install python post-requirements
+- name : install python post-requirements
   pip: >
     requirements="{{post_requirements_file}}"
     virtualenv="{{edxapp_venv_dir}}"
@@ -138,12 +138,12 @@
   sudo_user: "{{ edxapp_user }}"
   environment: "{{ edxapp_environment }}"
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
   when: not inst.stat.exists or new.stat.md5 != inst.stat.md5
 # Install the final python modules into {{ edxapp_venv_dir }}
-- name : edxapp | install python post-post requirements
+- name : install python post-post requirements
 # Need to use shell rather than pip so that we can maintain the context of our current working directory; some
 # requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
 # installs everything into that virtual environment.
@@ -156,12 +156,12 @@
     - "{{ local_requirements_file }}"
   sudo_user: "{{ edxapp_user }}"
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
 # Install the sandbox python modules into {{ edxapp_venv_dir }}
-- name : edxapp | install sandbox requirements into regular venv
+- name : install sandbox requirements into regular venv
 # Need to use shell rather than pip so that we can maintain the context of our current working directory; some
 # requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
 # installs everything into that virtual environment.
@@ -175,20 +175,20 @@
   sudo_user: "{{ edxapp_user }}"
   when: "not EDXAPP_PYTHON_SANDBOX and (not inst.stat.exists or new.stat.md5 != inst.stat.md5)"
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
 # The next few tasks set up the python code sandbox
 # need to disable this profile, otherwise the pip inside the sandbox venv has no permissions
 # to install anything
-- name: edxapp | code sandbox | put sandbox apparmor profile in complain mode
+- name: code sandbox | put sandbox apparmor profile in complain mode
   command: /usr/sbin/aa-complain /etc/apparmor.d/code.sandbox
   when: EDXAPP_PYTHON_SANDBOX
   tags:
     - edxapp-sandbox
-- name: edxapp | code sandbox | Install base sandbox requirements and create sandbox virtualenv
+- name: code sandbox | Install base sandbox requirements and create sandbox virtualenv
   pip: >
     requirements="{{sandbox_base_requirements}}"
     virtualenv="{{edxapp_sandbox_venv_dir}}"
@@ -197,12 +197,12 @@
   sudo_user: "{{ edxapp_sandbox_user }}"
   when: EDXAPP_PYTHON_SANDBOX
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
   tags:
     - edxapp-sandbox
-- name: edxapp | code sandbox | Install sandbox requirements into sandbox venv
+- name: code sandbox | Install sandbox requirements into sandbox venv
   shell: >
     {{ edxapp_sandbox_venv_dir }}/bin/pip install -i {{ edxapp_pypi_local_mirror }} --exists-action w --use-mirrors -r {{ item }}
     chdir={{ edxapp_code_dir }}
@@ -214,50 +214,50 @@
   register: sandbox_install_output
   changed_when: "'installed' in sandbox_install_output"
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
   tags:
     - edxapp-sandbox
-- name: edxapp | code sandbox | put code sandbox into aa-enforce or aa-complain mode, depending on EDXAPP_SANDBOX_ENFORCE
+- name: code sandbox | put code sandbox into aa-enforce or aa-complain mode, depending on EDXAPP_SANDBOX_ENFORCE
   command: /usr/sbin/{{ edxapp_aa_command }} /etc/apparmor.d/code.sandbox
   when: EDXAPP_PYTHON_SANDBOX
   tags:
     - edxapp-sandbox
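edxapp_aa_command is what lets the same task flip the profile between enforcing and permissive. A hypothetical definition consistent with the task names above (the actual vars file is not part of this diff):

    # e.g. in the role's vars, hypothetical
    edxapp_aa_command: "{% if EDXAPP_SANDBOX_ENFORCE %}aa-enforce{% else %}aa-complain{% endif %}"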
-- name: edxapp | compiling all py files in the edx-platform repo
+- name: compiling all py files in the edx-platform repo
   shell: "{{ edxapp_venv_bin }}/python -m compileall {{ edxapp_code_dir }}"
   sudo_user: "{{ edxapp_user }}"
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
 # alternative would be to give {{ common_web_user }} read access
 # to the virtualenv but that permission change will require
 # root access.
-- name: edxapp | give other read permissions to the virtualenv
+- name: give other read permissions to the virtualenv
   command: chmod -R o+r "{{ edxapp_venv_dir }}"
   sudo_user: "{{ edxapp_user }}"
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
-- name: edxapp | create checksum for installed requirements
+- name: create checksum for installed requirements
   shell: cp /var/tmp/edxapp.req.new /var/tmp/edxapp.req.installed
   sudo_user: "{{ edxapp_user }}"
-  notify: "edxapp | restart edxapp"
+  notify: "restart edxapp"
 # https://code.launchpad.net/~wligtenberg/django-openid-auth/mysql_fix/+merge/22726
 # This is necessary for when syncdb is run and the django_openid_auth module is installed,
 # not sure if this fix will ever get merged
-- name: edxapp | openid workaround
+- name: openid workaround
   shell: sed -i -e 's/claimed_id = models.TextField(max_length=2047, unique=True/claimed_id = models.TextField(max_length=2047/' {{ edxapp_venv_dir }}/lib/python2.7/site-packages/django_openid_auth/models.py
   when: openid_workaround is defined
   sudo_user: "{{ edxapp_user }}"
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
 # creates the supervisor jobs for the
 # service variants configured, runs
@@ -269,14 +269,14 @@
 # the services if any of the configurations
 # have changed.
-- name: edxapp | update supervisor configuration
+- name: update supervisor configuration
   shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
   register: supervisor_update
   sudo_user: "{{ supervisor_service_user }}"
   changed_when: supervisor_update.stdout != ""
   when: start_services and not devstack
-- name: edxapp | ensure edxapp has started
+- name: ensure edxapp has started
   supervisorctl_local: >
     state=started
     supervisorctl_path={{ supervisor_ctl }}
@@ -286,7 +286,7 @@
   when: start_services and celery_worker is not defined and not devstack
   with_items: service_variants_enabled
-- name: edxapp | ensure edxapp_workers has started
+- name: ensure edxapp_workers has started
   supervisorctl_local: >
     name="edxapp_worker:{{ item.service_variant }}_{{ item.queue }}_{{ item.concurrency }}"
     supervisorctl_path={{ supervisor_ctl }}
...
@@ -4,27 +4,27 @@
 ---
-- name: edxapp | Install logrotate configuration for tracking file
+- name: Install logrotate configuration for tracking file
   template: dest=/etc/logrotate.d/tracking.log src=edx_logrotate_tracking_log.j2 owner=root group=root mode=644
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
-- name: edxapp | create application user
+- name: create application user
   user: >
     name="{{ edxapp_user }}" home="{{ edxapp_app_dir }}"
     createhome=no shell=/bin/false
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
-- name: edxapp | create edxapp user dirs
+- name: create edxapp user dirs
   file: >
     path="{{ item }}" state=directory
     owner="{{ edxapp_user }}" group="{{ common_web_group }}"
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
   with_items:
     - "{{ edxapp_app_dir }}"
     - "{{ edxapp_data_dir }}"
@@ -32,36 +32,36 @@
     - "{{ edxapp_theme_dir }}"
     - "{{ edxapp_staticfile_dir }}"
-- name: edxapp | create edxapp log dir
+- name: create edxapp log dir
   file: >
     path="{{ edxapp_log_dir }}" state=directory
     owner="{{ common_log_user }}" group="{{ common_log_user }}"
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
-- name: edxapp | create web-writable edxapp data dirs
+- name: create web-writable edxapp data dirs
   file: >
     path="{{ item }}" state=directory
     owner="{{ common_web_user }}" group="{{ edxapp_user }}"
     mode="0775"
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
   with_items:
     - "{{ edxapp_course_data_dir }}"
     - "{{ edxapp_upload_dir }}"
-- name: edxapp | install system packages on which LMS and CMS rely
+- name: install system packages on which LMS and CMS rely
   apt: pkg={{','.join(edxapp_debian_pkgs)}} state=present
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
-- name: edxapp | create log directories for service variants
+- name: create log directories for service variants
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
   file: >
     path={{ edxapp_log_dir }}/{{ item }} state=directory
     owner={{ common_log_user }} group={{ common_log_user }}
...
-- name: edxapp | code sandbox | Create edxapp sandbox user
+- name: code sandbox | Create edxapp sandbox user
   user: name={{ edxapp_sandbox_user }} shell=/bin/false home={{ edxapp_sandbox_venv_dir }}
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
   tags:
     - edxapp-sandbox
-- name: edxapp | code sandbox | Install apparmor utils system pkg
+- name: code sandbox | Install apparmor utils system pkg
   apt: pkg=apparmor-utils state=present
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
   tags:
     - edxapp-sandbox
-- name: edxapp | code sandbox | write out apparmor code sandbox config
+- name: code sandbox | write out apparmor code sandbox config
   template: src=code.sandbox.j2 dest=/etc/apparmor.d/code.sandbox mode=0644 owner=root group=root
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
   tags:
     - edxapp-sandbox
-- name: edxapp | code sandbox | write out sandbox user sudoers config
+- name: code sandbox | write out sandbox user sudoers config
   template: src=95-sandbox-sudoer.j2 dest=/etc/sudoers.d/95-{{ edxapp_sandbox_user }} mode=0440 owner=root group=root validate='visudo -c -f %s'
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
   tags:
     - edxapp-sandbox
 # we bootstrap and enable the apparmor service here. in deploy.yml we disable, deploy, then re-enable
 # so we need to enable it in main.yml
-- name: edxapp | code sandbox | start apparmor service
+- name: code sandbox | start apparmor service
   service: name=apparmor state=started
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
   tags:
     - edxapp-sandbox
-- name: edxapp | code sandbox | (bootstrap) load code sandbox profile
+- name: code sandbox | (bootstrap) load code sandbox profile
   command: apparmor_parser -r /etc/apparmor.d/code.sandbox
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
   tags:
     - edxapp-sandbox
-- name: edxapp | code sandbox | (bootstrap) put code sandbox into aa-enforce or aa-complain mode depending on EDXAPP_SANDBOX_ENFORCE
+- name: code sandbox | (bootstrap) put code sandbox into aa-enforce or aa-complain mode depending on EDXAPP_SANDBOX_ENFORCE
   command: /usr/sbin/{{ edxapp_aa_command }} /etc/apparmor.d/code.sandbox
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
   tags:
     - edxapp-sandbox
@@ -5,8 +5,8 @@
   sudo_user: "{{ edxapp_user }}"
   with_items: service_variants_enabled
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
 - name: "create {{ item }} auth file"
   template: >
@@ -14,8 +14,8 @@
     dest={{ edxapp_app_dir }}/{{ item }}.auth.json
   sudo_user: "{{ edxapp_user }}"
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
   with_items: service_variants_enabled
 # write the supervisor scripts for the service variants
@@ -28,7 +28,7 @@
   when: celery_worker is not defined and not devstack
   sudo_user: "{{ supervisor_user }}"
-- name: edxapp | writing edxapp supervisor script
+- name: writing edxapp supervisor script
   template: >
     src=edxapp.conf.j2 dest={{ supervisor_cfg_dir }}/edxapp.conf
     owner={{ supervisor_user }}
@@ -37,7 +37,7 @@
 # write the supervisor script for celery workers
-- name: edxapp | writing celery worker supervisor script
+- name: writing celery worker supervisor script
   template: >
     src=workers.conf.j2 dest={{ supervisor_cfg_dir }}/workers.conf
     owner={{ supervisor_user }}
@@ -47,7 +47,7 @@
 # Gather assets using rake if possible
-- name: edxapp | gather {{ item }} static assets with rake
+- name: gather {{ item }} static assets with rake
   shell: >
     SERVICE_VARIANT={{ item }} rake {{ item }}:gather_assets:aws
     executable=/bin/bash
@@ -56,23 +56,23 @@
   when: celery_worker is not defined and not devstack and item != "lms-preview"
   with_items: service_variants_enabled
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
   environment: "{{ edxapp_environment }}"
-- name: edxapp | syncdb and migrate
+- name: syncdb and migrate
   shell: SERVICE_VARIANT=lms {{ edxapp_venv_bin}}/django-admin.py syncdb --migrate --noinput --settings=lms.envs.aws --pythonpath={{ edxapp_code_dir }}
   when: migrate_db is defined and migrate_db|lower == "yes"
   sudo_user: "{{ edxapp_user }}"
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
-- name: edxapp | db migrate
+- name: db migrate
   shell: SERVICE_VARIANT=lms {{ edxapp_venv_bin }}/django-admin.py migrate --noinput --settings=lms.envs.aws --pythonpath={{ edxapp_code_dir }}
   when: migrate_only is defined and migrate_only|lower == "yes"
   notify:
-    - "edxapp | restart edxapp"
+    - "restart edxapp"
-    - "edxapp | restart edxapp_workers"
+    - "restart edxapp_workers"
   sudo_user: "{{ edxapp_user }}"
@@ -10,33 +10,33 @@
 # http://downloads.mysql.com/archives/mysql-5.1/mysql-5.1.62.tar.gz
 #
 ---
-- name: edxlocal| install packages needed for single server
+- name: install packages needed for single server
   apt: pkg={{','.join(edxlocal_debian_pkgs)}} install_recommends=yes state=present
-- name: edxlocal | create a database for edxapp
+- name: create a database for edxapp
   mysql_db: >
     db=edxapp
     state=present
     encoding=utf8
-- name: edxlocal | create a database for xqueue
+- name: create a database for xqueue
   mysql_db: >
     db=xqueue
     state=present
    encoding=utf8
-- name: edxlocal | create a database for ora
+- name: create a database for ora
   mysql_db: >
     db=ora
     state=present
    encoding=utf8
-- name: edxlocal | create a database for discern
+- name: create a database for discern
   mysql_db: >
     db=discern
    state=present
    encoding=utf8
-- name: edxlocal | install memcached
+- name: install memcached
   apt: pkg=memcached state=present
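These mysql_db tasks rely on the module defaults, connecting over the local socket, which works on a fresh single-server install. If the local MySQL root account had a password, each task would need explicit credentials; a sketch with illustrative values:

    - name: create a database for edxapp
      mysql_db: >
        db=edxapp state=present encoding=utf8
        login_user=root login_password={{ MYSQL_ROOT_PASSWORD }}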
@@ -14,13 +14,13 @@
 # - oraclejdk
 # - elasticsearch
-- name: elasticsearch | download elasticsearch
+- name: download elasticsearch
   get_url: >
     url={{ elasticsearch_url }}
     dest=/var/tmp/{{ elasticsearch_file }}
     force=no
-- name: elasticsearch | install elasticsearch from local package
+- name: install elasticsearch from local package
   shell: >
     dpkg -i /var/tmp/elasticsearch-{{ elasticsearch_version }}.deb
     executable=/bin/bash
@@ -29,7 +29,7 @@
     - elasticsearch
     - install
-- name: elasticsearch | Ensure elasticsearch is enabled and started
+- name: Ensure elasticsearch is enabled and started
   service: name=elasticsearch state=started enabled=yes
   tags:
     - elasticsearch
...
 ---
-- name: forum | restart the forum service
+- name: restart the forum service
   supervisorctl_local: >
     name=forum
     supervisorctl_path={{ supervisor_ctl }}
...
 ---
-- name: forum | create the supervisor config
+- name: create the supervisor config
   template: >
     src=forum.conf.j2 dest={{ supervisor_cfg_dir }}/forum.conf
     owner={{ supervisor_user }}
@@ -9,41 +9,41 @@
   when: not devstack
   register: forum_supervisor
-- name: forum | create the supervisor wrapper
+- name: create the supervisor wrapper
   template: >
     src={{ forum_supervisor_wrapper|basename }}.j2
     dest={{ forum_supervisor_wrapper }}
     mode=0755
   sudo_user: "{{ forum_user }}"
   when: not devstack
-  notify: forum | restart the forum service
+  notify: restart the forum service
-- name: forum | git checkout forum repo into {{ forum_code_dir }}
+- name: git checkout forum repo into {{ forum_code_dir }}
   git: dest={{ forum_code_dir }} repo={{ forum_source_repo }} version={{ forum_version }}
   sudo_user: "{{ forum_user }}"
-  notify: forum | restart the forum service
+  notify: restart the forum service
 # TODO: This is done as the common_web_user
 # since the process owner needs write access
 # to the rbenv
-- name: forum | install comments service bundle
+- name: install comments service bundle
   shell: bundle install chdir={{ forum_code_dir }}
   sudo_user: "{{ common_web_user }}"
   environment: "{{ forum_environment }}"
-  notify: forum | restart the forum service
+  notify: restart the forum service
 # call supervisorctl update. this reloads
 # the supervisorctl config and restarts
 # the services if any of the configurations
 # have changed.
 #
-- name: forum | update supervisor configuration
+- name: update supervisor configuration
   shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
   register: supervisor_update
   changed_when: supervisor_update.stdout != ""
   when: start_services and not devstack
-- name: forum | ensure forum is started
+- name: ensure forum is started
   supervisorctl_local: >
     name=forum
     supervisorctl_path={{ supervisor_ctl }}
...
@@ -21,26 +21,26 @@
 # rbenv_ruby_version: "{{ forum_ruby_version }}"
 # - forum
-- name: forum | create application user
+- name: create application user
   user: >
     name="{{ forum_user }}" home="{{ forum_app_dir }}"
     createhome=no
     shell=/bin/false
-  notify: forum | restart the forum service
+  notify: restart the forum service
-- name: forum | create forum app dir
+- name: create forum app dir
   file: >
     path="{{ forum_app_dir }}" state=directory
     owner="{{ forum_user }}" group="{{ common_web_group }}"
-  notify: forum | restart the forum service
+  notify: restart the forum service
-- name: forum | setup the forum env
+- name: setup the forum env
   template: >
     src=forum_env.j2 dest={{ forum_app_dir }}/forum_env
     owner={{ forum_user }} group={{ common_web_user }}
     mode=0644
   notify:
-    - forum | restart the forum service
+    - restart the forum service
 - include: deploy.yml tags=deploy
 ---
-- name: forum | test that the required service are listening
+- name: test that the required services are listening
   wait_for: port={{ item.port }} host={{ item.host }} timeout=30
-  with_items: "{{ forum_services }}"
+  with_items: forum_services
   when: not devstack
-- name: forum | test that mongo replica set members are listing
+- name: test that mongo replica set members are listening
   wait_for: port={{ FORUM_MONGO_PORT }} host={{ item }} timeout=30
-  with_items: "{{ FORUM_MONGO_HOSTS }}"
+  with_items: FORUM_MONGO_HOSTS
   when: not devstack
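The two loops above imply the variables' shapes: forum_services is a list of host/port mappings and FORUM_MONGO_HOSTS a plain list of hostnames. Hypothetical definitions consistent with that, with illustrative values:

    forum_services:
      - { host: "localhost", port: "4567" }
    FORUM_MONGO_HOSTS:
      - "mongo1.example.com"
      - "mongo2.example.com"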
@@ -28,39 +28,39 @@
 ---
-- name: gh_mirror | install pip packages
+- name: install pip packages
   pip: name={{ item }} state=present
   with_items: gh_mirror_pip_pkgs
-- name: gh_mirror | install debian packages
+- name: install debian packages
   apt: >
     pkg={{ ",".join(gh_mirror_debian_pkgs) }}
     state=present
     update_cache=yes
-- name: gh_mirror | create gh_mirror user
+- name: create gh_mirror user
   user: >
     name={{ gh_mirror_user }}
     state=present
-- name: gh_mirror | create the gh_mirror data directory
+- name: create the gh_mirror data directory
   file: >
     path={{ gh_mirror_data_dir }}
     state=directory
     owner={{ gh_mirror_user }}
     group={{ gh_mirror_group }}
-- name: gh_mirror | create the gh_mirror app directory
+- name: create the gh_mirror app directory
   file: >
     path={{ gh_mirror_app_dir }}
     state=directory
-- name: gh_mirror | create org config
+- name: create org config
   template: src=orgs.yml.j2 dest={{ gh_mirror_app_dir }}/orgs.yml
 - name: copying sync scripts
   copy: src={{ item }} dest={{ gh_mirror_app_dir }}/{{ item }}
-  with_items: "{{ gh_mirror_app_files }}"
+  with_items: gh_mirror_app_files
 - name: creating cron job to update repos
   cron:
...
@@ -12,34 +12,34 @@
 # - mark
-- name: gh_users | creating default .bashrc
+- name: creating default .bashrc
   template: >
     src=default.bashrc.j2 dest=/etc/skel/.bashrc
     mode=0644 owner=root group=root
-- name: gh_users | create gh group
+- name: create gh group
   group: name=gh state=present
 # TODO: give limited sudo access to this group
-- name: gh_users | grant full sudo access to gh group
+- name: grant full sudo access to gh group
   copy: >
     content="%gh ALL=(ALL) NOPASSWD:ALL"
     dest=/etc/sudoers.d/gh owner=root group=root
     mode=0440 validate='visudo -cf %s'
-- name: gh_users | create github users
+- name: create github users
   user:
     name={{ item }} groups=gh
     shell=/bin/bash
   with_items: gh_users
-- name: gh_users | create .ssh directory
+- name: create .ssh directory
   file:
     path=/home/{{ item }}/.ssh state=directory mode=0700
     owner={{ item }}
   with_items: gh_users
-- name: gh_users | copy github key[s] to .ssh/authorized_keys
+- name: copy github key[s] to .ssh/authorized_keys
   get_url:
     url=https://github.com/{{ item }}.keys
     dest=/home/{{ item }}/.ssh/authorized_keys mode=0600
...
 ---
 # Install and configure simple glusterFS shared storage
-- name: gluster | all | Install common packages
+- name: all | Install common packages
   apt: name={{ item }} state=present
   with_items:
     - glusterfs-client
@@ -9,20 +9,20 @@
     - nfs-common
   tags: gluster
-- name: gluster | all | Install server packages
+- name: all | Install server packages
   apt: name=glusterfs-server state=present
   when: >
"{{ ansible_default_ipv4.address }}" "{{ gluster_peers|join(' ') }}" "{{ ansible_default_ipv4.address }}" "{{ gluster_peers|join(' ') }}"
   tags: gluster
-- name: gluster | all | enable server
+- name: all | enable server
   service: name=glusterfs-server state=started enabled=yes
   when: >
     "{{ ansible_default_ipv4.address }}" in "{{ gluster_peers|join(' ') }}"
   tags: gluster
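The quoted when: expressions above compare two interpolated strings, so membership is really a substring test (10.0.0.1 would also match a peer list containing 10.0.0.11). Assuming gluster_peers is a plain list of addresses, the same check reads more directly, and exactly, as a bare Jinja expression:

    - name: all | enable server
      service: name=glusterfs-server state=started enabled=yes
      when: ansible_default_ipv4.address in gluster_peers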
 # Ignoring error below so that we can move the data folder and have it be a link
-- name: gluster | all | create folders
+- name: all | create folders
   file: path={{ item.path }} state=directory
   with_items: gluster_volumes
   when: >
@@ -30,39 +30,39 @@
   ignore_errors: yes
   tags: gluster
-- name: gluster | primary | create peers
+- name: primary | create peers
   command: gluster peer probe {{ item }}
   with_items: gluster_peers
   when: ansible_default_ipv4.address == gluster_primary_ip
   tags: gluster
-- name: gluster | primary | create volumes
+- name: primary | create volumes
   command: gluster volume create {{ item.name }} replica {{ item.replicas }} transport tcp {% for server in gluster_peers %}{{ server }}:{{ item.path }} {% endfor %}
   with_items: gluster_volumes
   when: ansible_default_ipv4.address == gluster_primary_ip
   ignore_errors: yes # There should be better error checking here
   tags: gluster
-- name: gluster | primary | start volumes
+- name: primary | start volumes
   command: gluster volume start {{ item.name }}
   with_items: gluster_volumes
   when: ansible_default_ipv4.address == gluster_primary_ip
   ignore_errors: yes # There should be better error checking here
   tags: gluster
-- name: gluster | primary | set security
+- name: primary | set security
   command: gluster volume set {{ item.name }} auth.allow {{ item.security }}
   with_items: gluster_volumes
   when: ansible_default_ipv4.address == gluster_primary_ip
   tags: gluster
-- name: gluster | primary | set performance cache
+- name: primary | set performance cache
   command: gluster volume set {{ item.name }} performance.cache-size {{ item.cache_size }}
   with_items: gluster_volumes
   when: ansible_default_ipv4.address == gluster_primary_ip
   tags: gluster
-- name: gluster | all | mount volume
+- name: all | mount volume
   mount: >
     name={{ item.mount_location }}
     src={{ gluster_primary_ip }}:{{ item.name }}
@@ -74,7 +74,7 @@
# This required due to an annoying bug in Ubuntu and gluster where it tries to mount the system # This required due to an annoying bug in Ubuntu and gluster where it tries to mount the system
# before the network stack is up and can't lookup 127.0.0.1 # before the network stack is up and can't lookup 127.0.0.1
- name: gluster | all | sleep mount - name: all | sleep mount
lineinfile: > lineinfile: >
dest=/etc/rc.local dest=/etc/rc.local
line='sleep 5; /bin/mount -a' line='sleep 5; /bin/mount -a'
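
Note: every gluster task above iterates over the same few role variables. A minimal sketch of what they might look like, inferred from the fields the tasks reference -- each key below is consumed by a task above, but the concrete values are illustrative assumptions:

    # Sketch: example variables for the gluster role (values are assumptions).
    gluster_primary_ip: 10.0.0.10     # host that probes peers and creates volumes
    gluster_peers:                    # all gluster servers, including the primary
      - 10.0.0.10
      - 10.0.0.11
    gluster_volumes:
      - name: data                    # gluster volume name
        path: /mnt/brick/data         # brick directory ("create folders")
        replicas: 2                   # replica count for "create volumes"
        security: "10.0.0.*"          # auth.allow value ("set security")
        cache_size: 128MB             # performance.cache-size ("set performance cache")
        mount_location: /mnt/data     # client mount point ("mount volume")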
...
@@ -14,11 +14,11 @@
  # Overview:
  #
  #
- - name: haproxy | restart haproxy
+ - name: restart haproxy
    service: name=haproxy state=restarted
- - name: haproxy | reload haproxy
+ - name: reload haproxy
    service: name=haproxy state=reloaded
- - name: haproxy | restart rsyslog
+ - name: restart rsyslog
    service: name=rsyslog state=restarted
@@ -17,26 +17,26 @@
  # so it allows for a configuration template to be overridden
  # with a variable
- - name: haproxy | Install haproxy
+ - name: Install haproxy
    apt: pkg=haproxy state={{ pkgs.haproxy.state }}
-   notify: haproxy | restart haproxy
+   notify: restart haproxy
- - name: haproxy | Server configuration file
+ - name: Server configuration file
    template: >
      src={{ haproxy_template_dir }}/haproxy.cfg.j2 dest=/etc/haproxy/haproxy.cfg
      owner=root group=root mode=0644
-   notify: haproxy | reload haproxy
+   notify: reload haproxy
- - name: haproxy | Enabled in default
+ - name: Enabled in default
    lineinfile: dest=/etc/default/haproxy regexp=^ENABLED=.$ line=ENABLED=1
-   notify: haproxy | restart haproxy
+   notify: restart haproxy
- - name: haproxy | install logrotate
+ - name: install logrotate
    template: src=haproxy.logrotate.j2 dest=/etc/logrotate.d/haproxy mode=0644
- - name: haproxy | install rsyslog conf
+ - name: install rsyslog conf
    template: src=haproxy.rsyslog.j2 dest=/etc/rsyslog.d/haproxy.conf mode=0644
-   notify: haproxy | restart rsyslog
+   notify: restart rsyslog
- - name: haproxy | make sure haproxy has started
+ - name: make sure haproxy has started
    service: name=haproxy state=started
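
Note: the pattern running through this whole commit is dropping the "rolename | " prefix from task and handler names. The invariant that must survive the rename is that each notify: string matches its handler's name: exactly, or the handler never fires. A minimal sketch of the post-rename pairing (the file paths are the conventional role layout, an assumption here):

    # roles/haproxy/handlers/main.yml (sketch)
    - name: restart haproxy
      service: name=haproxy state=restarted

    # roles/haproxy/tasks/main.yml -- notify now uses the bare handler name
    - name: Install haproxy
      apt: pkg=haproxy state={{ pkgs.haproxy.state }}
      notify: restart haproxy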
  ---
- - name: jenkins_master | restart Jenkins
+ - name: restart Jenkins
    service: name=jenkins state=restarted
- - name: jenkins_master | start nginx
+ - name: start nginx
    service: name=nginx state=started
- - name: jenkins_master | reload nginx
+ - name: reload nginx
    service: name=nginx state=reloaded
  ---
- - name: jenkins_master | install jenkins specific system packages
+ - name: install jenkins specific system packages
    apt:
      pkg={{','.join(jenkins_debian_pkgs)}}
      state=present update_cache=yes
    tags:
      - jenkins
- - name: jenkins_master | install jenkins extra system packages
+ - name: install jenkins extra system packages
    apt:
      pkg={{','.join(JENKINS_EXTRA_PKGS)}}
      state=present update_cache=yes
    tags:
      - jenkins
- - name: jenkins_master | create jenkins group
+ - name: create jenkins group
    group: name={{ jenkins_group }} state=present
- - name: jenkins_master | add the jenkins user to the group
+ - name: add the jenkins user to the group
    user: name={{ jenkins_user }} append=yes groups={{ jenkins_group }}
  # Should be resolved in the next release, but until then we need to do this
  # https://issues.jenkins-ci.org/browse/JENKINS-20407
- - name: jenkins_master | workaround for JENKINS-20407
+ - name: workaround for JENKINS-20407
    command: "mkdir -p /var/run/jenkins"
- - name: jenkins_master | download Jenkins package
+ - name: download Jenkins package
    get_url: url="{{ jenkins_deb_url }}" dest="/tmp/{{ jenkins_deb }}"
- - name: jenkins_master | install Jenkins package
+ - name: install Jenkins package
    command: dpkg -i --force-depends "/tmp/{{ jenkins_deb }}"
- - name: jenkins_master | stop Jenkins
+ - name: stop Jenkins
    service: name=jenkins state=stopped
  # Move /var/lib/jenkins to Jenkins home (on the EBS)
- - name: jenkins_master | move /var/lib/jenkins
+ - name: move /var/lib/jenkins
    command: mv /var/lib/jenkins {{ jenkins_home }}
      creates={{ jenkins_home }}
- - name: jenkins_master | set owner for Jenkins home
+ - name: set owner for Jenkins home
    file: path={{ jenkins_home }} recurse=yes state=directory
      owner={{ jenkins_user }} group={{ jenkins_group }}
  # Symlink /var/lib/jenkins to {{ COMMON_DATA_DIR }}/jenkins
  # since Jenkins will expect its files to be in /var/lib/jenkins
- - name: jenkins_master | symlink /var/lib/jenkins
+ - name: symlink /var/lib/jenkins
    file: src={{ jenkins_home }} dest=/var/lib/jenkins state=link
      owner={{ jenkins_user }} group={{ jenkins_group }}
    notify:
-     - jenkins_master | restart Jenkins
+     - restart Jenkins
- - name: jenkins_master | make plugins directory
+ - name: make plugins directory
    sudo_user: jenkins
    shell: mkdir -p {{ jenkins_home }}/plugins
  # We first download the plugins to a temp directory and include
  # the version in the file name. That way, if we increment
  # the version, the plugin will be updated in Jenkins
- - name: jenkins_master | download Jenkins plugins
+ - name: download Jenkins plugins
    get_url: url=http://updates.jenkins-ci.org/download/plugins/{{ item.name }}/{{ item.version }}/{{ item.name }}.hpi
      dest=/tmp/{{ item.name }}_{{ item.version }}
-   with_items: "{{ jenkins_plugins }}"
+   with_items: jenkins_plugins
- - name: jenkins_master | install Jenkins plugins
+ - name: install Jenkins plugins
    command: cp /tmp/{{ item.name }}_{{ item.version }} {{ jenkins_home }}/plugins/{{ item.name }}.hpi
-   with_items: "{{ jenkins_plugins }}"
+   with_items: jenkins_plugins
- - name: jenkins_master | set Jenkins plugin permissions
+ - name: set Jenkins plugin permissions
    file: path={{ jenkins_home }}/plugins/{{ item.name }}.hpi
      owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
-   with_items: "{{ jenkins_plugins }}"
+   with_items: jenkins_plugins
    notify:
-     - jenkins_master | restart Jenkins
+     - restart Jenkins
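
Note: the three plugin tasks above all expect jenkins_plugins to be a list of name/version pairs (those are the only fields they read). A sketch, with assumed plugin names and versions:

    # Sketch: structure consumed by the plugin download/install/permission tasks.
    jenkins_plugins:
      - { name: git, version: "1.5.0" }
      - { name: github, version: "1.8" }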
  # We had to fork some plugins to work around
  # certain issues. If these changes get merged
  # upstream, we may be able to use the regular plugin install process.
  # Until then, we compile and install the forks ourselves.
- - name: jenkins_master | checkout custom plugin repo
+ - name: checkout custom plugin repo
    git: repo={{ item.repo_url }} dest=/tmp/{{ item.repo_name }} version={{ item.version }}
-   with_items: "{{ jenkins_custom_plugins }}"
+   with_items: jenkins_custom_plugins
- - name: jenkins_master | compile custom plugins
+ - name: compile custom plugins
    command: mvn -Dmaven.test.skip=true install chdir=/tmp/{{ item.repo_name }}
-   with_items: "{{ jenkins_custom_plugins }}"
+   with_items: jenkins_custom_plugins
- - name: jenkins_master | install custom plugins
+ - name: install custom plugins
    command: mv /tmp/{{ item.repo_name }}/target/{{ item.package }}
      {{ jenkins_home }}/plugins/{{ item.package }}
-   with_items: "{{ jenkins_custom_plugins }}"
+   with_items: jenkins_custom_plugins
    notify:
-     - jenkins_master | restart Jenkins
+     - restart Jenkins
- - name: jenkins_master | set custom plugin permissions
+ - name: set custom plugin permissions
    file: path={{ jenkins_home }}/plugins/{{ item.package }}
      owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
-   with_items: "{{ jenkins_custom_plugins }}"
+   with_items: jenkins_custom_plugins
  # Plugins that are bundled with Jenkins are "pinned".
  # Jenkins will overwrite updated plugins with its built-in version
  # unless we create a ".pinned" file for the plugin.
  # See https://issues.jenkins-ci.org/browse/JENKINS-13129
- - name: jenkins_master | create plugin pin files
+ - name: create plugin pin files
    command: touch {{ jenkins_home }}/plugins/{{ item }}.jpi.pinned
      creates={{ jenkins_home }}/plugins/{{ item }}.jpi.pinned
-   with_items: "{{ jenkins_bundled_plugins }}"
+   with_items: jenkins_bundled_plugins
- - name: jenkins_master | setup nginix vhost
+ - name: set up nginx vhost
    template:
      src=etc/nginx/sites-available/jenkins.j2
      dest=/etc/nginx/sites-available/jenkins
- - name: jenkins_master | enable jenkins vhost
+ - name: enable jenkins vhost
    file:
      src=/etc/nginx/sites-available/jenkins
      dest=/etc/nginx/sites-enabled/jenkins
      state=link
-   notify: jenkins_master | start nginx
+   notify: start nginx
  ---
- - name: jenkins_worker | Install Java
+ - name: Install Java
    apt: pkg=openjdk-7-jre-headless state=present
- - name: jenkins_worker | Download JSCover
+ - name: Download JSCover
    get_url: url={{ jscover_url }} dest=/var/tmp/jscover.zip
- - name: jenkins_worker | Unzip JSCover
+ - name: Unzip JSCover
    shell: unzip /var/tmp/jscover.zip -d /var/tmp/jscover
      creates=/var/tmp/jscover
- - name: jenkins_worker | Install JSCover JAR
+ - name: Install JSCover JAR
    command: cp /var/tmp/jscover/target/dist/JSCover-all.jar /usr/local/bin/JSCover-all-{{ jscover_version }}.jar
      creates=/usr/local/bin/JSCover-all-{{ jscover_version }}.jar
- - name: jenkins_worker | Set JSCover permissions
+ - name: Set JSCover permissions
    file: path="/usr/local/bin/JSCover-all-{{ jscover_version }}.jar" state=file
      owner=root group=root mode=0755
  ---
  # Install scripts requiring a GitHub OAuth token
- - name: jenkins_worker | Install requests Python library
+ - name: Install requests Python library
    pip: name=requests state=present
- - fail: jenkins_worker | OAuth token not defined
+ - fail: msg="OAuth token not defined"
    when: github_oauth_token is not defined
- - name: jenkins_worker | Install Python GitHub PR auth script
+ - name: Install Python GitHub PR auth script
    template: src="github_pr_auth.py.j2" dest="/usr/local/bin/github_pr_auth.py"
      owner=root group=root
      mode=755
- - name: jenkins_worker | Install Python GitHub post status script
+ - name: Install Python GitHub post status script
    template: src="github_post_status.py.j2" dest="/usr/local/bin/github_post_status.py"
      owner=root group=root
      mode=755
  # Create wheelhouse to enable fast virtualenv creation
- - name: jenkins_worker | Create wheel virtualenv
+ - name: Create wheel virtualenv
    command: /usr/local/bin/virtualenv {{ jenkins_venv }} creates={{ jenkins_venv }}
    sudo_user: "{{ jenkins_user }}"
- - name: jenkins_worker | Install wheel
+ - name: Install wheel
    pip: name=wheel virtualenv={{ jenkins_venv }} virtualenv_command=/usr/local/bin/virtualenv
    sudo_user: "{{ jenkins_user }}"
- - name: jenkins_worker | Create wheelhouse dir
+ - name: Create wheelhouse dir
    file:
      path={{ jenkins_wheel_dir }} state=directory
      owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
  # (need to install each one in the venv to satisfy dependencies)
- - name: jenkins_worker | Create wheel archives
+ - name: Create wheel archives
    shell:
      "{{ jenkins_pip }} wheel --wheel-dir={{ jenkins_wheel_dir }} \"${item.pkg}\" &&
      {{ jenkins_pip }} install --use-wheel --no-index --find-links={{ jenkins_wheel_dir }} \"${item.pkg}\"
      creates={{ jenkins_wheel_dir }}/${item.wheel}"
    sudo_user: "{{ jenkins_user }}"
-   with_items: "{{ jenkins_wheels }}"
+   with_items: jenkins_wheels
- - name: jenkins_worker | Add wheel_venv.sh script
+ - name: Add wheel_venv.sh script
    template:
      src=wheel_venv.sh.j2 dest={{ jenkins_home }}/wheel_venv.sh
      owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
  ---
- - name: jenkins_worker | Create jenkins group
+ - name: Create jenkins group
    group: name={{ jenkins_group }} state=present
  # The Jenkins account needs a login shell because Jenkins uses scp
- - name: jenkins_worker | Add the jenkins user to the group and configure shell
+ - name: Add the jenkins user to the group and configure shell
    user: name={{ jenkins_user }} append=yes group={{ jenkins_group }} shell=/bin/bash
  # Because of a bug in the latest release of the EC2 plugin
  # we need to use a key generated by Amazon (not imported)
  # To satisfy this, we allow users to log in as Jenkins
  # using the same keypair the instance was started with.
- - name: jenkins_worker | Create .ssh directory
+ - name: Create .ssh directory
    file:
      path={{ jenkins_home }}/.ssh state=directory
      owner={{ jenkins_user }} group={{ jenkins_group }}
    ignore_errors: yes
- - name: jenkins_worker | Copy ssh keys for jenkins
+ - name: Copy ssh keys for jenkins
    command: cp /home/ubuntu/.ssh/authorized_keys /home/{{ jenkins_user }}/.ssh/authorized_keys
    ignore_errors: yes
- - name: jenkins_worker | Set key permissions
+ - name: Set key permissions
    file:
      path={{ jenkins_home }}/.ssh/authorized_keys
      owner={{ jenkins_user }} group={{ jenkins_group }} mode=400
    ignore_errors: yes
- - name: jenkins_worker | Install system packages
+ - name: Install system packages
    apt: pkg={{','.join(jenkins_debian_pkgs)}}
      state=present update_cache=yes
- - name: jenkins_worker | Add script to set up environment variables
+ - name: Add script to set up environment variables
    template:
      src=jenkins_env.j2 dest={{ jenkins_home }}/jenkins_env
      owner={{ jenkins_user }} group={{ jenkins_group }} mode=0500
  # Need to add GitHub to known_hosts to avoid
  # being prompted when using git through ssh
- - name: jenkins_worker | Add github.com to known_hosts if it does not exist
+ - name: Add github.com to known_hosts if it does not exist
    shell: >
      ssh-keygen -f {{ jenkins_home }}/.ssh/known_hosts -H -F github.com | grep -q found || ssh-keyscan -H github.com > {{ jenkins_home }}/.ssh/known_hosts
@@ -3,7 +3,7 @@
  # Will terminate an instance if one and only one already exists
  # with the same name
- - name: launch_ec2 | lookup tags for terminating existing instance
+ - name: lookup tags for terminating existing instance
    local_action:
      module: ec2_lookup
      region: "{{ region }}"
@@ -12,7 +12,7 @@
    register: tag_lookup
    when: terminate_instance == true
- - name: launch_ec2 | checking for other instances
+ - name: checking for other instances
    debug: msg="Too many results returned, not terminating!"
    when: terminate_instance == true and tag_lookup.instance_ids|length > 1
@@ -34,7 +34,7 @@
      state: absent
    when: terminate_instance == true and elb and tag_lookup.instance_ids|length == 1
- - name: launch_ec2 | Launch ec2 instance
+ - name: Launch ec2 instance
    local_action:
      module: ec2_local
      keypair: "{{ keypair }}"
@@ -49,7 +49,7 @@
      instance_profile_name: "{{ instance_profile_name }}"
    register: ec2
- - name: launch_ec2 | Add DNS name
+ - name: Add DNS name
    local_action:
      module: route53
      overwrite: yes
@@ -59,9 +59,9 @@
      ttl: 300
      record: "{{ dns_name }}.{{ dns_zone }}"
      value: "{{ item.public_dns_name }}"
-   with_items: "{{ ec2.instances }}"
+   with_items: ec2.instances
- - name: launch_ec2 | Add DNS name studio
+ - name: Add DNS name studio
    local_action:
      module: route53
      overwrite: yes
@@ -71,9 +71,9 @@
      ttl: 300
      record: "studio.{{ dns_name }}.{{ dns_zone }}"
      value: "{{ item.public_dns_name }}"
-   with_items: "{{ ec2.instances }}"
+   with_items: ec2.instances
- - name: launch_ec2 | Add DNS name preview
+ - name: Add DNS name preview
    local_action:
      module: route53
      overwrite: yes
@@ -83,17 +83,17 @@
      ttl: 300
      record: "preview.{{ dns_name }}.{{ dns_zone }}"
      value: "{{ item.public_dns_name }}"
-   with_items: "{{ ec2.instances }}"
+   with_items: ec2.instances
- - name: launch_ec2 | Add new instance to host group
+ - name: Add new instance to host group
    local_action: >
      add_host
      hostname={{ item.public_ip }}
      groupname=launched
-   with_items: "{{ ec2.instances }}"
+   with_items: ec2.instances
- - name: launch_ec2 | Wait for SSH to come up
+ - name: Wait for SSH to come up
    local_action: >
      wait_for
      host={{ item.public_dns_name }}
@@ -101,4 +101,4 @@
      port=22
      delay=60
      timeout=320
-   with_items: "{{ ec2.instances }}"
+   with_items: ec2.instances
@@ -16,14 +16,14 @@
  - fail: msg="secure_dir not defined. This is a path to the secure ora config file."
    when: secure_dir is not defined
- - name: legacy_ora | create ora application config
+ - name: create ora application config
    copy:
      src={{secure_dir}}/files/{{COMMON_ENV_TYPE}}/legacy_ora/ora.env.json
      dest={{ora_app_dir}}/env.json
    sudo_user: "{{ ora_user }}"
    register: env_state
- - name: legacy_ora | create ora auth file
+ - name: create ora auth file
    copy:
      src={{secure_dir}}/files/{{COMMON_ENV_TYPE}}/legacy_ora/ora.auth.json
      dest={{ora_app_dir}}/auth.json
@@ -31,13 +31,13 @@
    register: auth_state
  # Restart ORA Services
- - name: legacy_ora | restart edx-ora
+ - name: restart edx-ora
    service:
      name=edx-ora
      state=restarted
    when: env_state.changed or auth_state.changed
- - name: legacy_ora | restart edx-ora-celery
+ - name: restart edx-ora-celery
    service:
      name=edx-ora-celery
      state=restarted
...
  ---
- - name: local_dev | install useful system packages
+ - name: install useful system packages
    apt:
      pkg={{','.join(local_dev_pkgs)}} install_recommends=yes
      state=present update_cache=yes
- - name: local_dev | set login shell for app accounts
+ - name: set login shell for app accounts
    user: name={{ item.user }} shell="/bin/bash"
-   with_items: "{{ localdev_accounts }}"
+   with_items: localdev_accounts
  # Ensure forum user has permissions to access .gem and .rbenv.
  # This is a little twisty: the forum role sets the owner and group to www-data,
  # so we add the forum user to the www-data group and give group write permissions.
- - name: local_dev | add forum user to www-data group
+ - name: add forum user to www-data group
    user: name={{ forum_user }} groups={{ common_web_group }} append=yes
- - name: local_dev | set forum rbenv and gem permissions
+ - name: set forum rbenv and gem permissions
    file:
      path={{ item }} state=directory mode=770
    with_items:
@@ -22,32 +22,32 @@
      - "{{ forum_app_dir }}/.rbenv"
  # Create scripts to configure environment
- - name: local_dev | create login scripts
+ - name: create login scripts
    template:
      src=app_bashrc.j2 dest={{ item.home }}/.bashrc
      owner={{ item.user }} mode=755
-   with_items: "{{ localdev_accounts }}"
+   with_items: localdev_accounts
  # Default to the correct git config
  # No more accidentally force pushing to master! :)
- - name: local_dev | configure git
+ - name: configure git
    copy:
      src=gitconfig dest={{ item.home }}/.gitconfig
      owner={{ item.user }} mode=700
-   with_items: "{{ localdev_accounts }}"
+   with_items: localdev_accounts
  # Configure X11 for application users
- - name: local_dev | preserve DISPLAY for sudo
+ - name: preserve DISPLAY for sudo
    copy:
      src=x11_display dest=/etc/sudoers.d/x11_display
      owner=root group=root mode=0440
- - name: local_dev | login share X11 auth to app users
+ - name: login share X11 auth to app users
    template:
      src=share_x11.j2 dest={{ localdev_home }}/share_x11
      owner={{ localdev_user }} mode=0700
- - name: local_dev | update bashrc with X11 share script
+ - name: update bashrc with X11 share script
    lineinfile:
      dest={{ localdev_home }}/.bashrc
      regexp=". {{ localdev_home }}/share_x11"
...
  ---
- - name: mongo | install python pymongo for mongo_user ansible module
+ - name: install python pymongo for mongo_user ansible module
    pip: >
      name=pymongo state=present
      version=2.6.3 extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
- - name: mongo | add the mongodb signing key
+ - name: add the mongodb signing key
    apt_key: >
      id=7F0CEB10
      url=http://docs.mongodb.org/10gen-gpg-key.asc
      state=present
- - name: mongo | add the mongodb repo to the sources list
+ - name: add the mongodb repo to the sources list
    apt_repository: >
      repo='deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen'
      state=present
- - name: mongo | install mongo server and recommends
+ - name: install mongo server and recommends
    apt: >
      pkg=mongodb-10gen={{ mongo_version }}
      state=present install_recommends=yes
      update_cache=yes
- - name: mongo | create mongo dirs
+ - name: create mongo dirs
    file: >
      path="{{ item }}" state=directory
      owner="{{ mongo_user }}"
@@ -32,14 +32,14 @@
      - "{{ mongo_dbpath }}"
      - "{{ mongo_log_dir }}"
- - name: mongo | stop mongo service
+ - name: stop mongo service
    service: name=mongodb state=stopped
- - name: mongo | move mongodb to {{ mongo_data_dir }}
+ - name: move mongodb to {{ mongo_data_dir }}
    command: mv /var/lib/mongodb {{ mongo_data_dir }}/. creates={{ mongo_data_dir }}/mongodb
- - name: mongo | copy mongodb key file
+ - name: copy mongodb key file
    copy: >
      src={{ secure_dir }}/files/mongo_key
      dest={{ mongo_key_file }}
@@ -48,27 +48,27 @@
      group=mongodb
    when: MONGO_CLUSTERED
- - name: mongo | copy configuration template
+ - name: copy configuration template
    template: src=mongodb.conf.j2 dest=/etc/mongodb.conf backup=yes
    notify: restart mongo
- - name: mongo | start mongo service
+ - name: start mongo service
    service: name=mongodb state=started
- - name: mongo | wait for mongo server to start
+ - name: wait for mongo server to start
    wait_for: port=27017 delay=2
- - name: mongo | Create the file to initialize the mongod replica set
+ - name: Create the file to initialize the mongod replica set
    template: src=repset_init.j2 dest=/tmp/repset_init.js
    when: MONGO_CLUSTERED
- - name: mongo | Initialize the replication set
+ - name: Initialize the replication set
    shell: /usr/bin/mongo /tmp/repset_init.js
    when: MONGO_CLUSTERED
  # ignore_errors doesn't work here because the module throws an exception
  # that it doesn't catch.
- - name: mongo | create a mongodb user
+ - name: create a mongodb user
    mongodb_user: >
      database={{ item.database }}
      name={{ item.user }}
...
  ---
- - name: nginx | restart nginx
+ - name: restart nginx
    service: name=nginx state=restarted
- - name: nginx | reload nginx
+ - name: reload nginx
    service: name=nginx state=reloaded
@@ -2,7 +2,7 @@
  # - common/tasks/main.yml
  ---
- - name: nginx | create nginx app dirs
+ - name: create nginx app dirs
    file: >
      path="{{ item }}"
      state=directory
@@ -12,9 +12,9 @@
      - "{{ nginx_app_dir }}"
      - "{{ nginx_sites_available_dir }}"
      - "{{ nginx_sites_enabled_dir }}"
-   notify: nginx | restart nginx
+   notify: restart nginx
- - name: nginx | create nginx data dirs
+ - name: create nginx data dirs
    file: >
      path="{{ item }}"
      state=directory
@@ -23,66 +23,66 @@
    with_items:
      - "{{ nginx_data_dir }}"
      - "{{ nginx_log_dir }}"
-   notify: nginx | restart nginx
+   notify: restart nginx
- - name: nginx | Install nginx packages
+ - name: Install nginx packages
    apt: pkg={{','.join(nginx_debian_pkgs)}} state=present
-   notify: nginx | restart nginx
+   notify: restart nginx
- - name: nginx | Server configuration file
+ - name: Server configuration file
    template: >
      src=nginx.conf.j2 dest=/etc/nginx/nginx.conf
      owner=root group={{ common_web_user }} mode=0644
-   notify: nginx | reload nginx
+   notify: reload nginx
- - name: nginx | Creating common nginx configuration
+ - name: Creating common nginx configuration
    template: >
      src=edx-release.j2 dest={{ nginx_sites_available_dir }}/edx-release
      owner=root group=root mode=0600
-   notify: nginx | reload nginx
+   notify: reload nginx
- - name: nginx | Creating link for common nginx configuration
+ - name: Creating link for common nginx configuration
    file: >
      src={{ nginx_sites_available_dir }}/edx-release
      dest={{ nginx_sites_enabled_dir }}/edx-release
      state=link owner=root group=root
-   notify: nginx | reload nginx
+   notify: reload nginx
- - name: nginx | Copying nginx configs for {{ nginx_sites }}
+ - name: Copying nginx configs for {{ nginx_sites }}
    template: >
      src={{ item }}.j2 dest={{ nginx_sites_available_dir }}/{{ item }}
      owner=root group={{ common_web_user }} mode=0640
-   notify: nginx | reload nginx
+   notify: reload nginx
    with_items: nginx_sites
- - name: nginx | Creating nginx config links for {{ nginx_sites }}
+ - name: Creating nginx config links for {{ nginx_sites }}
    file: >
      src={{ nginx_sites_available_dir }}/{{ item }}
      dest={{ nginx_sites_enabled_dir }}/{{ item }}
      state=link owner=root group=root
-   notify: nginx | reload nginx
+   notify: reload nginx
    with_items: nginx_sites
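
Note: nginx_sites is just a list of site names; each entry needs a matching <name>.j2 template in the role's templates directory for the copy task above to render. A sketch, with assumed site names:

    # Sketch: sites rendered into sites-available and linked into sites-enabled.
    nginx_sites:
      - lms
      - cms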
- - name: nginx | Write out htpasswd file
+ - name: Write out htpasswd file
    htpasswd: >
      name={{ NGINX_HTPASSWD_USER }}
      password={{ NGINX_HTPASSWD_PASS }}
      path={{ nginx_htpasswd_file }}
    when: NGINX_HTPASSWD_USER and NGINX_HTPASSWD_PASS
- - name: nginx | Create nginx log file location (just in case)
+ - name: Create nginx log file location (just in case)
    file: >
      path={{ nginx_log_dir }} state=directory
      owner={{ common_web_user }} group={{ common_web_user }}
- - name: nginx | copy ssl cert
+ - name: copy ssl cert
    copy: >
      src={{ NGINX_SSL_CERTIFICATE }}
      dest=/etc/ssl/certs/{{ item|basename }}
      owner=root group=root mode=0644
    when: NGINX_ENABLE_SSL and NGINX_SSL_CERTIFICATE != 'ssl-cert-snakeoil.pem'
- - name: nginx | copy ssl key
+ - name: copy ssl key
    copy: >
      src={{ NGINX_SSL_KEY }}
      dest=/etc/ssl/private/{{ item|basename }}
@@ -91,18 +91,18 @@
  # removing default link
- - name: nginx | Removing default nginx config and restart (enabled)
+ - name: Removing default nginx config and restart (enabled)
    file: path={{ nginx_sites_enabled_dir }}/default state=absent
-   notify: nginx | reload nginx
+   notify: reload nginx
  # Note that nginx logs to /var/log until it reads its configuration, so /etc/logrotate.d/nginx is still good
- - name: nginx | Set up nginx access log rotation
+ - name: Set up nginx access log rotation
    template: >
      dest=/etc/logrotate.d/nginx-access src=edx_logrotate_nginx_access.j2
      owner=root group=root mode=644
- - name: nginx | Set up nginx access log rotation
+ - name: Set up nginx error log rotation
    template: >
      dest=/etc/logrotate.d/nginx-error src=edx_logrotate_nginx_error.j2
      owner=root group=root mode=644
@@ -110,10 +110,10 @@
  # If tasks that notify restart nginx don't change the state of the remote system
  # their corresponding notifications don't get run. If nginx has been stopped for
  # any reason, this will ensure that it is started up again.
- - name: nginx | make sure nginx has started
+ - name: make sure nginx has started
    service: name=nginx state=started
    when: start_services
- - name: nginx | make sure nginx has stopped
+ - name: make sure nginx has stopped
    service: name=nginx state=stopped
    when: not start_services
  ---
- - name: notifier | restart notifier-scheduler
+ - name: restart notifier-scheduler
    supervisorctl_local: >
      name=notifier-scheduler
      state=restarted
      config={{ supervisor_cfg }}
      supervisorctl_path={{ supervisor_ctl }}
- - name: notifier | restart notifier-celery-workers
+ - name: restart notifier-celery-workers
    supervisorctl_local: >
      name=notifier-celery-workers
      state=restarted
...
  ---
- - name: notifier | checkout code
+ - name: checkout code
    git:
      dest={{ NOTIFIER_CODE_DIR }} repo={{ NOTIFIER_SOURCE_REPO }}
      version={{ NOTIFIER_VERSION }}
    sudo: true
    sudo_user: "{{ NOTIFIER_USER }}"
    notify:
-     - notifier | restart notifier-scheduler
+     - restart notifier-scheduler
-     - notifier | restart notifier-celery-workers
+     - restart notifier-celery-workers
- - name: notifier | source repo group perms
+ - name: source repo group perms
    file:
      path={{ NOTIFIER_SOURCE_REPO }} mode=2775 state=directory
- - name: notifier | install application requirements
+ - name: install application requirements
    pip:
      requirements="{{ NOTIFIER_REQUIREMENTS_FILE }}"
      virtualenv="{{ NOTIFIER_VENV_DIR }}" state=present
    sudo: true
    sudo_user: "{{ NOTIFIER_USER }}"
    notify:
-     - notifier | restart notifier-scheduler
+     - restart notifier-scheduler
-     - notifier | restart notifier-celery-workers
+     - restart notifier-celery-workers
  # Syncdb for whatever reason always creates the file owned by www-data:www-data, and then
  # complains it can't write because it's running as notifier. So this touches the file into
  # place with the proper perms first.
- - name: notifier | fix permissions on notifer db file
+ - name: fix permissions on notifier db file
    file: >
      path={{ NOTIFIER_DB_DIR }}/notifier.db state=touch owner={{ NOTIFIER_USER }} group={{ NOTIFIER_WEB_USER }}
      mode=0664
    sudo: true
    notify:
-     - notifier | restart notifier-scheduler
+     - restart notifier-scheduler
-     - notifier | restart notifier-celery-workers
+     - restart notifier-celery-workers
    tags:
      - deploy
- - name: notifier | syncdb
+ - name: syncdb
    shell: >
      cd {{ NOTIFIER_CODE_DIR }} && {{ NOTIFIER_VENV_DIR }}/bin/python manage.py syncdb
    sudo: true
    sudo_user: "{{ NOTIFIER_USER }}"
    environment: notifier_env_vars
    notify:
-     - notifier | restart notifier-scheduler
+     - restart notifier-scheduler
-     - notifier | restart notifier-celery-workers
+     - restart notifier-celery-workers
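
Note: the syncdb task runs with environment: notifier_env_vars, so that variable must resolve to a dict of environment variables. A sketch of a plausible shape -- the keys and values here are assumptions, not the role's actual settings:

    # Sketch: environment passed to manage.py syncdb.
    notifier_env_vars:
      NOTIFIER_ENV: Production
      NOTIFIER_DB_DIR: "{{ NOTIFIER_DB_DIR }}"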
@@ -17,86 +17,86 @@
  # - common
  # - notifier
  #
- - name: notifier | install notifier specific system packages
+ - name: install notifier specific system packages
    apt: pkg={{','.join(notifier_debian_pkgs)}} state=present
- - name: notifier | check if incommon ca is installed
+ - name: check if incommon ca is installed
    command: test -e /usr/share/ca-certificates/incommon/InCommonServerCA.crt
    register: incommon_present
    ignore_errors: yes
- - name: common | create incommon ca directory
+ - name: create incommon ca directory
    file:
      path="/usr/share/ca-certificates/incommon" mode=2775 state=directory
    when: incommon_present|failed
- - name: common | retrieve incommon server CA
+ - name: retrieve incommon server CA
    shell: curl https://www.incommon.org/cert/repository/InCommonServerCA.txt -o /usr/share/ca-certificates/incommon/InCommonServerCA.crt
    when: incommon_present|failed
- - name: common | add InCommon ca cert
+ - name: add InCommon ca cert
    lineinfile:
      dest=/etc/ca-certificates.conf
      regexp='incommon/InCommonServerCA.crt'
      line='incommon/InCommonServerCA.crt'
- - name: common | update ca certs globally
+ - name: update ca certs globally
    shell: update-ca-certificates
- - name: notifier | create notifier user {{ NOTIFIER_USER }}
+ - name: create notifier user {{ NOTIFIER_USER }}
    user:
      name={{ NOTIFIER_USER }} state=present shell=/bin/bash
      home={{ NOTIFIER_HOME }} createhome=yes
- - name: notifier | setup the notifier env
+ - name: setup the notifier env
    template:
      src=notifier_env.j2 dest={{ NOTIFIER_HOME }}/notifier_env
      owner="{{ NOTIFIER_USER }}" group="{{ NOTIFIER_USER }}"
- - name: notifier | drop a bash_profile
+ - name: drop a bash_profile
    copy: >
      src=../../common/files/bash_profile
      dest={{ NOTIFIER_HOME }}/.bash_profile
      owner={{ NOTIFIER_USER }}
      group={{ NOTIFIER_USER }}
- - name: notifier | ensure .bashrc exists
+ - name: ensure .bashrc exists
    shell: touch {{ NOTIFIER_HOME }}/.bashrc
    sudo: true
    sudo_user: "{{ NOTIFIER_USER }}"
- - name: notifier | add source of notifier_env to .bashrc
+ - name: add source of notifier_env to .bashrc
    lineinfile:
      dest={{ NOTIFIER_HOME }}/.bashrc
      regexp='. {{ NOTIFIER_HOME }}/notifier_env'
      line='. {{ NOTIFIER_HOME }}/notifier_env'
- - name: notifier | add source venv to .bashrc
+ - name: add source venv to .bashrc
    lineinfile:
      dest={{ NOTIFIER_HOME }}/.bashrc
      regexp='. {{ NOTIFIER_VENV_DIR }}/bin/activate'
      line='. {{ NOTIFIER_VENV_DIR }}/bin/activate'
- - name: notifier | create notifier DB directory
+ - name: create notifier DB directory
    file:
      path="{{ NOTIFIER_DB_DIR }}" mode=2775 state=directory owner={{ NOTIFIER_USER }} group={{ NOTIFIER_WEB_USER }}
- - name: notifier | create notifier/bin directory
+ - name: create notifier/bin directory
    file:
      path="{{ NOTIFIER_HOME }}/bin" mode=2775 state=directory owner={{ NOTIFIER_USER }} group={{ NOTIFIER_USER }}
- - name: notifier | supervisord config for celery workers
+ - name: supervisord config for celery workers
    template: >
      src=edx/app/supervisor/conf.d/notifier-celery-workers.conf.j2
      dest="{{ supervisor_cfg_dir }}/notifier-celery-workers.conf"
    sudo_user: "{{ supervisor_user }}"
-   notify: notifier | restart notifier-celery-workers
+   notify: restart notifier-celery-workers
- - name: notifier | supervisord config for scheduler
+ - name: supervisord config for scheduler
    template: >
      src=edx/app/supervisor/conf.d/notifier-scheduler.conf.j2
      dest="{{ supervisor_cfg_dir }}/notifier-scheduler.conf"
    sudo_user: "{{ supervisor_user }}"
-   notify: notifier | restart notifier-scheduler
+   notify: restart notifier-scheduler
  - include: deploy.yml tags=deploy
  ---
- - name: ora | restart ora
+ - name: restart ora
    supervisorctl_local: >
      name=ora
      supervisorctl_path={{ supervisor_ctl }}
@@ -7,7 +7,7 @@
      state=restarted
    when: start_services and ora_installed is defined and not devstack
- - name: ora | restart ora_celery
+ - name: restart ora_celery
    supervisorctl_local: >
      name=ora_celery
      supervisorctl_path={{ supervisor_ctl }}
...
- - name: ora | create supervisor scripts - ora, ora_celery
+ - name: create supervisor scripts - ora, ora_celery
    template: >
      src={{ item }}.conf.j2 dest={{ supervisor_cfg_dir }}/{{ item }}.conf
      owner={{ supervisor_user }} group={{ common_web_user }} mode=0644
    notify:
-     - ora | restart ora
+     - restart ora
-     - ora | restart ora_celery
+     - restart ora_celery
    with_items: ['ora', 'ora_celery']
    when: not devstack
  - include: ease.yml
- - name: ora | create ora application config
+ - name: create ora application config
    template: src=ora.env.json.j2 dest={{ora_app_dir}}/ora.env.json
    sudo_user: "{{ ora_user }}"
- - name: ora | create ora auth file
+ - name: create ora auth file
    template: src=ora.auth.json.j2 dest={{ora_app_dir}}/ora.auth.json
    sudo_user: "{{ ora_user }}"
- - name: ora | setup the ora env
+ - name: setup the ora env
    notify:
-     - "ora | restart ora"
+     - "restart ora"
-     - "ora | restart ora_celery"
+     - "restart ora_celery"
    template: >
      src=ora_env.j2 dest={{ ora_app_dir }}/ora_env
      owner={{ ora_user }} group={{ common_web_user }}
      mode=0644
  # Do A Checkout
- - name: ora | git checkout ora repo into {{ ora_app_dir }}
+ - name: git checkout ora repo into {{ ora_app_dir }}
    git: dest={{ ora_code_dir }} repo={{ ora_source_repo }} version={{ ora_version }}
    sudo_user: "{{ ora_user }}"
    notify:
-     - ora | restart ora
+     - restart ora
-     - ora | restart ora_celery
+     - restart ora_celery
  # TODO: Check git.py _run_if_changed() to see if the logic there to skip running certain
  # portions of the deploy needs to be incorporated here.
  # Install the python pre requirements into {{ ora_venv_dir }}
- - name: ora | install python pre-requirements
+ - name: install python pre-requirements
    pip: requirements="{{ ora_pre_requirements_file }}" virtualenv="{{ ora_venv_dir }}" state=present
    sudo_user: "{{ ora_user }}"
    notify:
-     - ora | restart ora
+     - restart ora
-     - ora | restart ora_celery
+     - restart ora_celery
  # Install the python post requirements into {{ ora_venv_dir }}
- - name: ora | install python post-requirements
+ - name: install python post-requirements
    pip: requirements="{{ ora_post_requirements_file }}" virtualenv="{{ ora_venv_dir }}" state=present
    sudo_user: "{{ ora_user }}"
    notify:
-     - ora | restart ora
+     - restart ora
-     - ora | restart ora_celery
+     - restart ora_celery
  # Needed if using redis to prevent memory issues
- - name: ora | change memory commit settings -- needed for redis
+ - name: change memory commit settings -- needed for redis
    command: sysctl vm.overcommit_memory=1
    notify:
-     - ora | restart ora
+     - restart ora
-     - ora | restart ora_celery
+     - restart ora_celery
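
Note: as written, the sysctl command above only lasts until the next reboot. If persistence is wanted, Ansible's sysctl module (available well before this release) can also write the value to /etc/sysctl.conf; a sketch of that variant:

    # Sketch: persistent form of the overcommit setting above.
    - name: change memory commit settings -- needed for redis
      sysctl: name=vm.overcommit_memory value=1 state=present reload=yes
      notify:
        - restart ora
        - restart ora_celery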
- - name: ora | syncdb and migrate
+ - name: syncdb and migrate
    shell: SERVICE_VARIANT=ora {{ora_venv_dir}}/bin/django-admin.py syncdb --migrate --noinput --settings=edx_ora.aws --pythonpath={{ora_code_dir}}
    when: migrate_db is defined and migrate_db|lower == "yes"
    sudo_user: "{{ ora_user }}"
    notify:
-     - ora | restart ora
+     - restart ora
-     - ora | restart ora_celery
+     - restart ora_celery
- - name: ora | create users
+ - name: create users
    shell: SERVICE_VARIANT=ora {{ora_venv_dir}}/bin/django-admin.py update_users --settings=edx_ora.aws --pythonpath={{ora_code_dir}}
    sudo_user: "{{ ora_user }}"
    notify:
-     - ora | restart ora
+     - restart ora
-     - ora | restart ora_celery
+     - restart ora_celery
  # call supervisorctl update. this reloads
@@ -83,13 +83,13 @@
  # the services if any of the configurations
  # have changed.
  #
- - name: ora | update supervisor configuration
+ - name: update supervisor configuration
    shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
    register: supervisor_update
    when: start_services and not devstack
    changed_when: supervisor_update.stdout != ""
- - name: ora | ensure ora is started
+ - name: ensure ora is started
    supervisorctl_local: >
      name=ora
      supervisorctl_path={{ supervisor_ctl }}
@@ -97,7 +97,7 @@
      state=started
    when: start_services and not devstack
- - name: ora | ensure ora_celery is started
+ - name: ensure ora_celery is started
    supervisorctl_local: >
      name=ora_celery
      supervisorctl_path={{ supervisor_ctl }}
...
  # Do A Checkout
- - name: ora | git checkout ease repo into its base dir
+ - name: git checkout ease repo into its base dir
    git: dest={{ora_ease_code_dir}} repo={{ora_ease_source_repo}} version={{ora_ease_version}}
    sudo_user: "{{ ora_user }}"
    notify:
-     - ora | restart ora
+     - restart ora
-     - ora | restart ora_celery
+     - restart ora_celery
- - name: ora | install ease system packages
+ - name: install ease system packages
    apt: pkg={{item}} state=present
    with_items: ora_ease_debian_pkgs
    notify:
-     - ora | restart ora
+     - restart ora
-     - ora | restart ora_celery
+     - restart ora_celery
  # Install the python pre requirements into {{ ora_ease_venv_dir }}
- - name: ora | install ease python pre-requirements
+ - name: install ease python pre-requirements
    pip: requirements="{{ora_ease_pre_requirements_file}}" virtualenv="{{ora_ease_venv_dir}}" state=present
    sudo_user: "{{ ora_user }}"
    notify:
-     - ora | restart ora
+     - restart ora
-     - ora | restart ora_celery
+     - restart ora_celery
  # Install the python post requirements into {{ ora_ease_venv_dir }}
- - name: ora | install ease python post-requirements
+ - name: install ease python post-requirements
    pip: requirements="{{ora_ease_post_requirements_file}}" virtualenv="{{ora_ease_venv_dir}}" state=present
    sudo_user: "{{ ora_user }}"
    notify:
-     - ora | restart ora
+     - restart ora
-     - ora | restart ora_celery
+     - restart ora_celery
- - name: ora | install ease python package
+ - name: install ease python package
    shell: >
      . {{ ora_ease_venv_dir }}/bin/activate; cd {{ ora_ease_code_dir }}; python setup.py install
    sudo_user: "{{ ora_user }}"
    notify:
-     - ora | restart ora
+     - restart ora
-     - ora | restart ora_celery
+     - restart ora_celery
- - name: ora | download and install nltk
+ - name: download and install nltk
    shell: |
      set -e
      curl -o {{ ora_nltk_tmp_file }} {{ ora_nltk_download_url }}
@@ -49,5 +49,5 @@
      chdir={{ ora_data_dir }}
    sudo_user: "{{ common_web_user }}"
    notify:
-     - ora | restart ora
+     - restart ora
-     - ora | restart ora_celery
+     - restart ora_celery
...@@ -3,49 +3,49 @@ ...@@ -3,49 +3,49 @@
# - common/tasks/main.yml # - common/tasks/main.yml
--- ---
- name: ora | create application user - name: create application user
user: > user: >
name="{{ ora_user }}" home="{{ ora_app_dir }}" name="{{ ora_user }}" home="{{ ora_app_dir }}"
createhome=no shell=/bin/false createhome=no shell=/bin/false
notify: notify:
- ora | restart ora - restart ora
- ora | restart ora_celery - restart ora_celery
- name: ora | create ora app dir - name: create ora app dir
file: > file: >
path="{{ item }}" state=directory path="{{ item }}" state=directory
owner="{{ ora_user }}" group="{{ common_web_group }}" owner="{{ ora_user }}" group="{{ common_web_group }}"
notify: notify:
- ora | restart ora - restart ora
- ora | restart ora_celery - restart ora_celery
with_items: with_items:
- "{{ ora_venvs_dir }}" - "{{ ora_venvs_dir }}"
- "{{ ora_app_dir }}" - "{{ ora_app_dir }}"
- name: ora | create ora data dir, owned by {{ common_web_user }} - name: create ora data dir, owned by {{ common_web_user }}
file: > file: >
path="{{ item }}" state=directory path="{{ item }}" state=directory
owner="{{ common_web_user }}" group="{{ common_web_group }}" owner="{{ common_web_user }}" group="{{ common_web_group }}"
notify: notify:
- ora | restart ora - restart ora
- ora | restart ora_celery - restart ora_celery
with_items: with_items:
- "{{ ora_data_dir }}" - "{{ ora_data_dir }}"
- "{{ ora_data_course_dir }}" - "{{ ora_data_course_dir }}"
- "{{ ora_app_dir }}/ml_models" - "{{ ora_app_dir }}/ml_models"
- name: ora | install debian packages that ora needs - name: install debian packages that ora needs
apt: pkg={{item}} state=present apt: pkg={{item}} state=present
notify: notify:
- ora | restart ora - restart ora
- ora | restart ora_celery - restart ora_celery
with_items: ora_debian_pkgs with_items: ora_debian_pkgs
- name: ora | install debian packages for ease that ora needs - name: install debian packages for ease that ora needs
apt: pkg={{item}} state=present apt: pkg={{item}} state=present
notify: notify:
- ora | restart ora - restart ora
- ora | restart ora_celery - restart ora_celery
with_items: ora_ease_debian_pkgs with_items: ora_ease_debian_pkgs
- include: deploy.yml tags=deploy - include: deploy.yml tags=deploy
......
...@@ -12,12 +12,12 @@ ...@@ -12,12 +12,12 @@
# - common # - common
# - oraclejdk # - oraclejdk
- name: oraclejdk | check for Oracle Java version {{ oraclejdk_base }} - name: check for Oracle Java version {{ oraclejdk_base }}
command: test -d /usr/lib/jvm/{{ oraclejdk_base }} command: test -d /usr/lib/jvm/{{ oraclejdk_base }}
ignore_errors: true ignore_errors: true
register: oraclejdk_present register: oraclejdk_present
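# test -d exits non-zero when the JVM directory is missing; ignore_errors lets the play continue, and the registered result gates the install tasks below via oraclejdk_present|failed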
- name: oraclejdk | download Oracle Java - name: download Oracle Java
shell: > shell: >
curl -b gpw_e24=http%3A%2F%2Fwww.oracle.com -O -L {{ oraclejdk_url }} curl -b gpw_e24=http%3A%2F%2Fwww.oracle.com -O -L {{ oraclejdk_url }}
executable=/bin/bash executable=/bin/bash
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
creates=/var/tmp/{{ oraclejdk_file }} creates=/var/tmp/{{ oraclejdk_file }}
when: oraclejdk_present|failed when: oraclejdk_present|failed
- name: oraclejdk | install Oracle Java - name: install Oracle Java
shell: > shell: >
mkdir -p /usr/lib/jvm && tar -C /usr/lib/jvm -zxvf /var/tmp/{{ oraclejdk_file }} mkdir -p /usr/lib/jvm && tar -C /usr/lib/jvm -zxvf /var/tmp/{{ oraclejdk_file }}
creates=/usr/lib/jvm/{{ oraclejdk_base }} creates=/usr/lib/jvm/{{ oraclejdk_base }}
...@@ -34,10 +34,10 @@ ...@@ -34,10 +34,10 @@
sudo: true sudo: true
when: oraclejdk_present|failed when: oraclejdk_present|failed
- name: oraclejdk | create symlink expected by elasticsearch - name: create symlink expected by elasticsearch
file: src=/usr/lib/jvm/{{ oraclejdk_base }} dest={{ oraclejdk_link }} state=link file: src=/usr/lib/jvm/{{ oraclejdk_base }} dest={{ oraclejdk_link }} state=link
when: oraclejdk_present|failed when: oraclejdk_present|failed
- name: oraclejdk | add JAVA_HOME for Oracle Java - name: add JAVA_HOME for Oracle Java
template: src=java.sh.j2 dest=/etc/profile.d/java.sh owner=root group=root mode=0755 template: src=java.sh.j2 dest=/etc/profile.d/java.sh owner=root group=root mode=0755
when: oraclejdk_present|failed when: oraclejdk_present|failed
...@@ -3,80 +3,80 @@ ...@@ -3,80 +3,80 @@
# There is a bug with initializing multiple nodes in the HA cluster at once # There is a bug with initializing multiple nodes in the HA cluster at once
# http://rabbitmq.1065348.n5.nabble.com/Rabbitmq-boot-failure-with-quot-tables-not-present-quot-td24494.html # http://rabbitmq.1065348.n5.nabble.com/Rabbitmq-boot-failure-with-quot-tables-not-present-quot-td24494.html
- name: rabbitmq | trust rabbit repository - name: trust rabbit repository
apt_key: url={{rabbitmq_apt_key}} state=present apt_key: url={{rabbitmq_apt_key}} state=present
- name: rabbitmq | install python-software-properties if debian - name: install python-software-properties if debian
apt: pkg={{",".join(rabbitmq_debian_pkgs)}} state=present apt: pkg={{",".join(rabbitmq_debian_pkgs)}} state=present
- name: rabbitmq | add rabbit repository - name: add rabbit repository
apt_repository: repo="{{rabbitmq_repository}}" state=present apt_repository: repo="{{rabbitmq_repository}}" state=present
- name: rabbitmq | install rabbitmq - name: install rabbitmq
apt: pkg={{rabbitmq_pkg}} state=present update_cache=yes apt: pkg={{rabbitmq_pkg}} state=present update_cache=yes
- name: rabbitmq | stop rabbit cluster - name: stop rabbit cluster
service: name=rabbitmq-server state=stopped service: name=rabbitmq-server state=stopped
# in case there are lingering processes, ignore errors # in case there are lingering processes, ignore errors
# silently # silently
- name: rabbitmq | send sigterm to any running rabbitmq processes - name: send sigterm to any running rabbitmq processes
shell: pkill -u rabbitmq || true shell: pkill -u rabbitmq || true
# Defaulting to /var/lib/rabbitmq # Defaulting to /var/lib/rabbitmq
- name: rabbitmq | create cookie directory - name: create cookie directory
file: > file: >
path={{rabbitmq_cookie_dir}} path={{rabbitmq_cookie_dir}}
owner=rabbitmq group=rabbitmq mode=0755 state=directory owner=rabbitmq group=rabbitmq mode=0755 state=directory
- name: rabbitmq | add rabbitmq erlang cookie - name: add rabbitmq erlang cookie
template: > template: >
src=erlang.cookie.j2 dest={{rabbitmq_cookie_location}} src=erlang.cookie.j2 dest={{rabbitmq_cookie_location}}
owner=rabbitmq group=rabbitmq mode=0400 owner=rabbitmq group=rabbitmq mode=0400
register: erlang_cookie register: erlang_cookie
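# all nodes in a RabbitMQ cluster must share the same erlang cookie, which is why it is templated out rather than left to the package-generated default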
# Defaulting to /etc/rabbitmq # Defaulting to /etc/rabbitmq
- name: rabbitmq | create rabbitmq config directory - name: create rabbitmq config directory
file: > file: >
path={{rabbitmq_config_dir}} path={{rabbitmq_config_dir}}
owner=root group=root mode=0755 state=directory owner=root group=root mode=0755 state=directory
- name: rabbitmq | add rabbitmq environment configuration - name: add rabbitmq environment configuration
template: > template: >
src=rabbitmq-env.conf.j2 dest={{rabbitmq_config_dir}}/rabbitmq-env.conf src=rabbitmq-env.conf.j2 dest={{rabbitmq_config_dir}}/rabbitmq-env.conf
owner=root group=root mode=0644 owner=root group=root mode=0644
- name: rabbitmq | add rabbitmq cluster configuration - name: add rabbitmq cluster configuration
template: > template: >
src=rabbitmq.config.j2 dest={{rabbitmq_config_dir}}/rabbitmq.config src=rabbitmq.config.j2 dest={{rabbitmq_config_dir}}/rabbitmq.config
owner=root group=root mode=0644 owner=root group=root mode=0644
register: cluster_configuration register: cluster_configuration
- name: rabbitmq | install plugins - name: install plugins
rabbitmq_plugin: rabbitmq_plugin:
names={{",".join(rabbitmq_plugins)}} state=enabled names={{",".join(rabbitmq_plugins)}} state=enabled
# When rabbitmq starts up it creates a folder of metadata at '/var/lib/rabbitmq/mnesia'. # When rabbitmq starts up it creates a folder of metadata at '/var/lib/rabbitmq/mnesia'.
# This folder should be deleted before clustering is setup because it retains data # This folder should be deleted before clustering is setup because it retains data
# that can conflict with the clustering information. # that can conflict with the clustering information.
- name: rabbitmq | remove mnesia configuration - name: remove mnesia configuration
file: path={{rabbitmq_mnesia_folder}} state=absent file: path={{rabbitmq_mnesia_folder}} state=absent
when: erlang_cookie.changed or cluster_configuration.changed or rabbitmq_refresh when: erlang_cookie.changed or cluster_configuration.changed or rabbitmq_refresh
- name: rabbitmq | start rabbit nodes - name: start rabbit nodes
service: name=rabbitmq-server state=restarted service: name=rabbitmq-server state=restarted
- name: rabbitmq | wait for rabbit to start - name: wait for rabbit to start
wait_for: port={{ rabbitmq_management_port }} delay=2 wait_for: port={{ rabbitmq_management_port }} delay=2
- name: rabbitmq | remove guest user - name: remove guest user
rabbitmq_user: user="guest" state=absent rabbitmq_user: user="guest" state=absent
- name: rabbitmq | add vhosts - name: add vhosts
rabbitmq_vhost: name={{ item }} state=present rabbitmq_vhost: name={{ item }} state=present
with_items: RABBITMQ_VHOSTS with_items: RABBITMQ_VHOSTS
- name: rabbitmq | add admin users - name: add admin users
rabbitmq_user: > rabbitmq_user: >
user='{{item[0].name}}' password='{{item[0].password}}' user='{{item[0].name}}' password='{{item[0].password}}'
read_priv='.*' write_priv='.*' read_priv='.*' write_priv='.*'
...@@ -87,23 +87,23 @@ ...@@ -87,23 +87,23 @@
- RABBITMQ_VHOSTS - RABBITMQ_VHOSTS
when: "'admins' in rabbitmq_auth_config" when: "'admins' in rabbitmq_auth_config"
- name: rabbitmq | make queues mirrored - name: make queues mirrored
shell: "/usr/sbin/rabbitmqctl set_policy HA '^(?!amq\\.).*' '{\"ha-mode\": \"all\"}'" shell: "/usr/sbin/rabbitmqctl set_policy HA '^(?!amq\\.).*' '{\"ha-mode\": \"all\"}'"
when: RABBITMQ_CLUSTERED or rabbitmq_clustered_hosts|length > 1 when: RABBITMQ_CLUSTERED or rabbitmq_clustered_hosts|length > 1
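# the policy regex '^(?!amq\.).*' matches every queue except the built-in amq.* ones, so all application queues are mirrored across the cluster nodes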
# #
# Depends upon the management plugin # Depends upon the management plugin
# #
- name: rabbitmq | install admin tools - name: install admin tools
get_url: > get_url: >
url=http://localhost:{{ rabbitmq_management_port }}/cli/rabbitmqadmin url=http://localhost:{{ rabbitmq_management_port }}/cli/rabbitmqadmin
dest=/usr/local/bin/rabbitmqadmin dest=/usr/local/bin/rabbitmqadmin
- name: rabbitmq | ensure rabbitmqadmin attributes - name: ensure rabbitmqadmin attributes
file: > file: >
path=/usr/local/bin/rabbitmqadmin owner=root path=/usr/local/bin/rabbitmqadmin owner=root
group=root mode=0655 group=root mode=0655
- name: rabbitmq | stop rabbit nodes - name: stop rabbit nodes
service: name=rabbitmq-server state=restarted service: name=rabbitmq-server state=restarted
when: not start_services when: not start_services
...@@ -34,95 +34,95 @@ ...@@ -34,95 +34,95 @@
- fail: msg="rbenv_ruby_version required for role" - fail: msg="rbenv_ruby_version required for role"
when: rbenv_ruby_version is not defined when: rbenv_ruby_version is not defined
- name: rbenv | create rbenv user {{ rbenv_user }} - name: create rbenv user {{ rbenv_user }}
user: > user: >
name={{ rbenv_user }} home={{ rbenv_dir }} name={{ rbenv_user }} home={{ rbenv_dir }}
shell=/bin/false createhome=no shell=/bin/false createhome=no
when: rbenv_user != common_web_user when: rbenv_user != common_web_user
- name: rbenv | create rbenv dir if it does not exist - name: create rbenv dir if it does not exist
file: > file: >
path="{{ rbenv_dir }}" owner="{{ rbenv_user }}" path="{{ rbenv_dir }}" owner="{{ rbenv_user }}"
state=directory state=directory
- name: rbenv | install build depends - name: install build depends
apt: pkg={{ ",".join(rbenv_debian_pkgs) }} state=present install_recommends=no apt: pkg={{ ",".join(rbenv_debian_pkgs) }} state=present install_recommends=no
with_items: rbenv_debian_pkgs with_items: rbenv_debian_pkgs
- name: rbenv | update rbenv repo - name: update rbenv repo
git: > git: >
repo=https://github.com/sstephenson/rbenv.git repo=https://github.com/sstephenson/rbenv.git
dest={{ rbenv_dir }}/.rbenv version={{ rbenv_version }} dest={{ rbenv_dir }}/.rbenv version={{ rbenv_version }}
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
- name: rbenv | ensure ruby_env exists - name: ensure ruby_env exists
template: > template: >
src=ruby_env.j2 dest={{ rbenv_dir }}/ruby_env src=ruby_env.j2 dest={{ rbenv_dir }}/ruby_env
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
- name: rbenv | check ruby-build installed - name: check ruby-build installed
command: test -x /usr/local/bin/ruby-build command: test -x /usr/local/bin/ruby-build
register: rbuild_present register: rbuild_present
ignore_errors: yes ignore_errors: yes
- name: rbenv | if ruby-build exists, check which versions we can install - name: if ruby-build exists, check which versions we can install
command: /usr/local/bin/ruby-build --definitions command: /usr/local/bin/ruby-build --definitions
when: rbuild_present|success when: rbuild_present|success
register: installable_ruby_vers register: installable_ruby_vers
ignore_errors: yes ignore_errors: yes
### in this block, we (re)install ruby-build if it doesn't exist or if it can't install the requested version ### in this block, we (re)install ruby-build if it doesn't exist or if it can't install the requested version
- name: rbenv | create temporary directory - name: create temporary directory
command: mktemp -d command: mktemp -d
register: tempdir register: tempdir
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers) when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)
- name: rbenv | clone ruby-build repo - name: clone ruby-build repo
git: repo=https://github.com/sstephenson/ruby-build.git dest={{ tempdir.stdout }}/ruby-build git: repo=https://github.com/sstephenson/ruby-build.git dest={{ tempdir.stdout }}/ruby-build
when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers) when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
- name: rbenv | install ruby-build - name: install ruby-build
command: ./install.sh chdir={{ tempdir.stdout }}/ruby-build command: ./install.sh chdir={{ tempdir.stdout }}/ruby-build
when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers) when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)
- name: rbenv | remove temporary directory - name: remove temporary directory
file: path={{ tempdir.stdout }} state=absent file: path={{ tempdir.stdout }} state=absent
when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers) when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)
- name: rbenv | check ruby {{ rbenv_ruby_version }} installed - name: check ruby {{ rbenv_ruby_version }} installed
shell: "rbenv versions | grep {{ rbenv_ruby_version }}" shell: "rbenv versions | grep {{ rbenv_ruby_version }}"
register: ruby_installed register: ruby_installed
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
environment: "{{ rbenv_environment }}" environment: "{{ rbenv_environment }}"
ignore_errors: yes ignore_errors: yes
- name: rbenv | install ruby {{ rbenv_ruby_version }} - name: install ruby {{ rbenv_ruby_version }}
shell: "rbenv install {{ rbenv_ruby_version }} creates={{ rbenv_dir }}/.rbenv/versions/{{ rbenv_ruby_version }}" shell: "rbenv install {{ rbenv_ruby_version }} creates={{ rbenv_dir }}/.rbenv/versions/{{ rbenv_ruby_version }}"
when: ruby_installed|failed when: ruby_installed|failed
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
environment: "{{ rbenv_environment }}" environment: "{{ rbenv_environment }}"
- name: rbenv | set global ruby {{ rbenv_ruby_version }} - name: set global ruby {{ rbenv_ruby_version }}
shell: "rbenv global {{ rbenv_ruby_version }}" shell: "rbenv global {{ rbenv_ruby_version }}"
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
environment: "{{ rbenv_environment }}" environment: "{{ rbenv_environment }}"
- name: rbenv | install bundler - name: install bundler
shell: "gem install bundler -v {{ rbenv_bundler_version }}" shell: "gem install bundler -v {{ rbenv_bundler_version }}"
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
environment: "{{ rbenv_environment }}" environment: "{{ rbenv_environment }}"
- name: rbenv | remove rbenv version of rake - name: remove rbenv version of rake
file: path="{{ rbenv_dir }}/.rbenv/versions/{{ rbenv_ruby_version }}/bin/rake" state=absent file: path="{{ rbenv_dir }}/.rbenv/versions/{{ rbenv_ruby_version }}/bin/rake" state=absent
- name: rbenv | install rake gem - name: install rake gem
shell: "gem install rake -v {{ rbenv_rake_version }}" shell: "gem install rake -v {{ rbenv_rake_version }}"
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
environment: "{{ rbenv_environment }}" environment: "{{ rbenv_environment }}"
- name: rbenv | rehash - name: rehash
shell: "rbenv rehash" shell: "rbenv rehash"
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
environment: "{{ rbenv_environment }}" environment: "{{ rbenv_environment }}"
...@@ -25,17 +25,17 @@ ...@@ -25,17 +25,17 @@
# #
# The role would need to include tasks like the following # The role would need to include tasks like the following
# #
# - name: my_role | create s3fs mount points # - name: create s3fs mount points
# file: # file:
# path={{ item.mount_point }} owner={{ item.owner }} # path={{ item.mount_point }} owner={{ item.owner }}
# group={{ item.group }} mode={{ item.mode }} state="directory" # group={{ item.group }} mode={{ item.mode }} state="directory"
# with_items: "{{ my_role_s3fs_mounts }}" # with_items: my_role_s3fs_mounts
# #
# - name: my_role | mount s3 buckets # - name: mount s3 buckets
# mount: # mount:
# name={{ item.mount_point }} src={{ item.bucket }} fstype=fuse.s3fs # name={{ item.mount_point }} src={{ item.bucket }} fstype=fuse.s3fs
# opts=use_cache=/tmp,iam_role={{ task_iam_role }},allow_other state=mounted # opts=use_cache=/tmp,iam_role={{ task_iam_role }},allow_other state=mounted
# with_items: "{{ my_role_s3fs_mounts }}" # with_items: my_role_s3fs_mounts
# #
# Example play: # Example play:
# #
...@@ -53,37 +53,37 @@ ...@@ -53,37 +53,37 @@
# - s3fs # - s3fs
# #
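The example play referenced above is mostly elided by this hunk; a minimal sketch of how such a play might look, reusing the hypothetical variable names from the comments (bucket and ownership values illustrative):

# - name: deploy my_role with its s3fs mounts
#   hosts: my_hosts
#   sudo: True
#   vars:
#     my_role_s3fs_mounts:
#       - bucket: my-bucket
#         mount_point: /mnt/my-bucket
#         owner: www-data
#         group: www-data
#         mode: "0755"
#   roles:
#     - s3fs
#     - my_role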
- name: s3fs | install system packages - name: install system packages
apt: pkg={{','.join(s3fs_debian_pkgs)}} state=present apt: pkg={{','.join(s3fs_debian_pkgs)}} state=present
tags: tags:
- s3fs - s3fs
- install - install
- update - update
- name: s3fs | fetch package - name: fetch package
get_url: get_url:
url={{ s3fs_download_url }} url={{ s3fs_download_url }}
dest={{ s3fs_temp_dir }} dest={{ s3fs_temp_dir }}
- name: s3fs | extract package - name: extract package
shell: shell:
/bin/tar -xzf {{ s3fs_archive }} /bin/tar -xzf {{ s3fs_archive }}
chdir={{ s3fs_temp_dir }} chdir={{ s3fs_temp_dir }}
creates={{ s3fs_temp_dir }}/{{ s3fs_version }}/configure creates={{ s3fs_temp_dir }}/{{ s3fs_version }}/configure
- name: s3fs | configure - name: configure
shell: shell:
./configure ./configure
chdir={{ s3fs_temp_dir }}/{{ s3fs_version }} chdir={{ s3fs_temp_dir }}/{{ s3fs_version }}
creates={{ s3fs_temp_dir }}/{{ s3fs_version }}/config.status creates={{ s3fs_temp_dir }}/{{ s3fs_version }}/config.status
- name: s3fs | make - name: make
shell: shell:
/usr/bin/make /usr/bin/make
chdir={{ s3fs_temp_dir }}/{{ s3fs_version }} chdir={{ s3fs_temp_dir }}/{{ s3fs_version }}
creates={{ s3fs_temp_dir }}/{{ s3fs_version }}/src/s3fs creates={{ s3fs_temp_dir }}/{{ s3fs_version }}/src/s3fs
- name: s3fs | make install - name: make install
shell: shell:
/usr/bin/make install /usr/bin/make install
chdir={{ s3fs_temp_dir }}/{{ s3fs_version }} chdir={{ s3fs_temp_dir }}/{{ s3fs_version }}
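# the extract, configure, and make steps are each guarded by creates=, so they are skipped when their output already exists and re-runs stay idempotent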
......
--- ---
- name: shibboleth | restart shibd - name: restart shibd
service: name=shibd state=restarted service: name=shibd state=restarted
#Install shibboleth #Install shibboleth
--- ---
- name: shibboleth | Installs shib and dependencies from apt - name: Installs shib and dependencies from apt
apt: pkg={{item}} install_recommends=no state=present update_cache=yes apt: pkg={{item}} install_recommends=no state=present update_cache=yes
with_items: with_items:
- shibboleth-sp2-schemas - shibboleth-sp2-schemas
...@@ -9,46 +9,46 @@ ...@@ -9,46 +9,46 @@
- libshibsp-doc - libshibsp-doc
- libapache2-mod-shib2 - libapache2-mod-shib2
- opensaml2-tools - opensaml2-tools
notify: shibboleth | restart shibd notify: restart shibd
tags: tags:
- shib - shib
- install - install
- name: shibboleth | Creates /etc/shibboleth/metadata directory - name: Creates /etc/shibboleth/metadata directory
file: path=/etc/shibboleth/metadata state=directory mode=2774 group=_shibd owner=_shibd file: path=/etc/shibboleth/metadata state=directory mode=2774 group=_shibd owner=_shibd
tags: tags:
- shib - shib
- install - install
- name: shibboleth | Downloads metadata into metadata directory as backup - name: Downloads metadata into metadata directory as backup
get_url: url=https://idp.stanford.edu/Stanford-metadata.xml dest=/etc/shibboleth/metadata/idp-metadata.xml mode=0640 group=_shibd owner=_shibd get_url: url=https://idp.stanford.edu/Stanford-metadata.xml dest=/etc/shibboleth/metadata/idp-metadata.xml mode=0640 group=_shibd owner=_shibd
tags: tags:
- shib - shib
- install - install
- name: shibboleth | writes out key and pem file - name: writes out key and pem file
template: src=sp.{{item}}.j2 dest=/etc/shibboleth/sp.{{item}} group=_shibd owner=_shibd mode=0600 template: src=sp.{{item}}.j2 dest=/etc/shibboleth/sp.{{item}} group=_shibd owner=_shibd mode=0600
with_items: with_items:
- key - key
- pem - pem
notify: shibboleth | restart shibd notify: restart shibd
tags: tags:
- shib - shib
- install - install
- name: shibboleth | writes out configuration files - name: writes out configuration files
template: src={{item}}.j2 dest=/etc/shibboleth/{{item}} group=_shibd owner=_shibd mode=0644 template: src={{item}}.j2 dest=/etc/shibboleth/{{item}} group=_shibd owner=_shibd mode=0644
with_items: with_items:
- attribute-map.xml - attribute-map.xml
- shibboleth2.xml - shibboleth2.xml
notify: shibboleth | restart shibd notify: restart shibd
tags: tags:
- shib - shib
- install - install
- name: shibboleth | enables shib - name: enables shib
command: a2enmod shib2 command: a2enmod shib2
notify: shibboleth | restart shibd notify: restart shibd
tags: tags:
- shib - shib
- install - install
......
...@@ -16,5 +16,5 @@ ...@@ -16,5 +16,5 @@
# #
# Restart Splunk # Restart Splunk
- name: splunkforwarder | restart splunkforwarder - name: restart splunkforwarder
service: name=splunk state=restarted service: name=splunk state=restarted
...@@ -22,83 +22,83 @@ ...@@ -22,83 +22,83 @@
# #
# Install Splunk Forwarder # Install Splunk Forwarder
- name: splunkforwarder | install splunkforwarder specific system packages - name: install splunkforwarder specific system packages
apt: pkg={{','.join(splunk_debian_pkgs)}} state=present apt: pkg={{','.join(splunk_debian_pkgs)}} state=present
tags: tags:
- splunk - splunk
- install - install
- update - update
- name: splunkforwarder | download the splunk deb - name: download the splunk deb
get_url: > get_url: >
dest="/tmp/{{SPLUNKFORWARDER_DEB}}" dest="/tmp/{{SPLUNKFORWARDER_DEB}}"
url="{{SPLUNKFORWARDER_PACKAGE_LOCATION}}{{SPLUNKFORWARDER_DEB}}" url="{{SPLUNKFORWARDER_PACKAGE_LOCATION}}{{SPLUNKFORWARDER_DEB}}"
register: download_deb register: download_deb
- name: splunkforwarder | install splunk forwarder - name: install splunk forwarder
shell: gdebi -nq /tmp/{{SPLUNKFORWARDER_DEB}} shell: gdebi -nq /tmp/{{SPLUNKFORWARDER_DEB}}
when: download_deb.changed when: download_deb.changed
# Create splunk user # Create splunk user
- name: splunkforwarder | create splunk user - name: create splunk user
user: name=splunk createhome=no state=present append=yes groups=syslog user: name=splunk createhome=no state=present append=yes groups=syslog
when: download_deb.changed when: download_deb.changed
# Need to start splunk manually so that it can create various files # Need to start splunk manually so that it can create various files
# and directories that aren't created till the first run and are needed # and directories that aren't created till the first run and are needed
# to run some of the below commands. # to run some of the below commands.
- name: splunkforwarder | start splunk manually - name: start splunk manually
shell: > shell: >
{{splunkforwarder_output_dir}}/bin/splunk start --accept-license --answer-yes --no-prompt {{splunkforwarder_output_dir}}/bin/splunk start --accept-license --answer-yes --no-prompt
creates={{splunkforwarder_output_dir}}/var/lib/splunk creates={{splunkforwarder_output_dir}}/var/lib/splunk
when: download_deb.changed when: download_deb.changed
register: started_manually register: started_manually
- name: splunkforwarder | stop splunk manually - name: stop splunk manually
shell: > shell: >
{{splunkforwarder_output_dir}}/bin/splunk stop --accept-license --answer-yes --no-prompt {{splunkforwarder_output_dir}}/bin/splunk stop --accept-license --answer-yes --no-prompt
when: download_deb.changed and started_manually.changed when: download_deb.changed and started_manually.changed
- name: splunkforwarder | create boot script - name: create boot script
shell: > shell: >
{{splunkforwarder_output_dir}}/bin/splunk enable boot-start -user splunk --accept-license --answer-yes --no-prompt {{splunkforwarder_output_dir}}/bin/splunk enable boot-start -user splunk --accept-license --answer-yes --no-prompt
creates=/etc/init.d/splunk creates=/etc/init.d/splunk
register: create_boot_script register: create_boot_script
when: download_deb.changed when: download_deb.changed
notify: splunkforwarder | restart splunkforwarder notify: restart splunkforwarder
# Update credentials # Update credentials
- name: splunkforwarder | update admin password - name: update admin password
shell: "{{splunkforwarder_output_dir}}/bin/splunk edit user admin -password {{SPLUNKFORWARDER_PASSWORD}} -auth admin:changeme --accept-license --answer-yes --no-prompt" shell: "{{splunkforwarder_output_dir}}/bin/splunk edit user admin -password {{SPLUNKFORWARDER_PASSWORD}} -auth admin:changeme --accept-license --answer-yes --no-prompt"
when: download_deb.changed when: download_deb.changed
notify: splunkforwarder | restart splunkforwarder notify: restart splunkforwarder
- name: splunkforwarder | add chkconfig to init script - name: add chkconfig to init script
shell: 'sed -i -e "s/\/bin\/sh/\/bin\/sh\n# chkconfig: 235 98 55/" /etc/init.d/splunk' shell: 'sed -i -e "s/\/bin\/sh/\/bin\/sh\n# chkconfig: 235 98 55/" /etc/init.d/splunk'
when: download_deb.changed and create_boot_script.changed when: download_deb.changed and create_boot_script.changed
notify: splunkforwarder | restart splunkforwarder notify: restart splunkforwarder
# Ensure permissions on splunk content # Ensure permissions on splunk content
- name: splunkforwarder | ensure splunk folder permissions - name: ensure splunk folder permissions
file: path={{splunkforwarder_output_dir}} state=directory recurse=yes owner=splunk group=splunk file: path={{splunkforwarder_output_dir}} state=directory recurse=yes owner=splunk group=splunk
when: download_deb.changed when: download_deb.changed
notify: splunkforwarder | restart splunkforwarder notify: restart splunkforwarder
# Drop template files. # Drop template files.
- name: splunkforwarder | drop input configuration - name: drop input configuration
template: template:
src=opt/splunkforwarder/etc/system/local/inputs.conf.j2 src=opt/splunkforwarder/etc/system/local/inputs.conf.j2
dest=/opt/splunkforwarder/etc/system/local/inputs.conf dest=/opt/splunkforwarder/etc/system/local/inputs.conf
owner=splunk owner=splunk
group=splunk group=splunk
mode=644 mode=644
notify: splunkforwarder | restart splunkforwarder notify: restart splunkforwarder
- name: splunkforwarder | create outputs config file - name: create outputs config file
template: template:
src=opt/splunkforwarder/etc/system/local/outputs.conf.j2 src=opt/splunkforwarder/etc/system/local/outputs.conf.j2
dest=/opt/splunkforwarder/etc/system/local/outputs.conf dest=/opt/splunkforwarder/etc/system/local/outputs.conf
owner=splunk owner=splunk
group=splunk group=splunk
mode=644 mode=644
notify: splunkforwarder | restart splunkforwarder notify: restart splunkforwarder
...@@ -50,19 +50,19 @@ ...@@ -50,19 +50,19 @@
# supervisor_service: upstart-service-name # supervisor_service: upstart-service-name
# #
--- ---
- name: supervisor | create application user - name: create application user
user: > user: >
name="{{ supervisor_user }}" name="{{ supervisor_user }}"
createhome=no createhome=no
shell=/bin/false shell=/bin/false
- name: supervisor | create supervisor service user - name: create supervisor service user
user: > user: >
name="{{ supervisor_service_user }}" name="{{ supervisor_service_user }}"
createhome=no createhome=no
shell=/bin/false shell=/bin/false
- name: supervisor | create supervisor directories - name: create supervisor directories
file: > file: >
name={{ item }} name={{ item }}
state=directory state=directory
...@@ -73,7 +73,7 @@ ...@@ -73,7 +73,7 @@
- "{{ supervisor_venv_dir }}" - "{{ supervisor_venv_dir }}"
- "{{ supervisor_cfg_dir }}" - "{{ supervisor_cfg_dir }}"
- name: supervisor | create supervisor directories - name: create supervisor directories
file: > file: >
name={{ item }} name={{ item }}
state=directory state=directory
...@@ -84,29 +84,29 @@ ...@@ -84,29 +84,29 @@
- "{{ supervisor_log_dir }}" - "{{ supervisor_log_dir }}"
- name: supervisor | install supervisor in its venv - name: install supervisor in its venv
pip: name=supervisor virtualenv="{{supervisor_venv_dir}}" state=present pip: name=supervisor virtualenv="{{supervisor_venv_dir}}" state=present
sudo_user: "{{ supervisor_user }}" sudo_user: "{{ supervisor_user }}"
- name: supervisor | create supervisor upstart job - name: create supervisor upstart job
template: > template: >
src=supervisor-upstart.conf.j2 dest=/etc/init/{{ supervisor_service }}.conf src=supervisor-upstart.conf.j2 dest=/etc/init/{{ supervisor_service }}.conf
owner=root group=root owner=root group=root
- name: supervisor | create supervisor master config - name: create supervisor master config
template: > template: >
src=supervisord.conf.j2 dest={{ supervisor_cfg }} src=supervisord.conf.j2 dest={{ supervisor_cfg }}
owner={{ supervisor_user }} group={{ supervisor_service_user }} owner={{ supervisor_user }} group={{ supervisor_service_user }}
mode=0644 mode=0644
- name: supervisor | create a symlink for supervisorctl - name: create a symlink for supervisorctl
file: > file: >
src={{ supervisor_ctl }} src={{ supervisor_ctl }}
dest={{ COMMON_BIN_DIR }}/{{ supervisor_ctl|basename }} dest={{ COMMON_BIN_DIR }}/{{ supervisor_ctl|basename }}
state=link state=link
when: supervisor_service == "supervisor" when: supervisor_service == "supervisor"
- name: supervisor | create a symlink for supervisor cfg - name: create a symlink for supervisor cfg
file: > file: >
src={{ item }} src={{ item }}
dest={{ COMMON_CFG_DIR }}/{{ item|basename }} dest={{ COMMON_CFG_DIR }}/{{ item|basename }}
...@@ -116,7 +116,7 @@ ...@@ -116,7 +116,7 @@
- "{{ supervisor_cfg }}" - "{{ supervisor_cfg }}"
- "{{ supervisor_cfg_dir }}" - "{{ supervisor_cfg_dir }}"
- name: supervisor | start supervisor - name: start supervisor
service: > service: >
name={{supervisor_service}} name={{supervisor_service}}
state=started state=started
...@@ -124,7 +124,7 @@ ...@@ -124,7 +124,7 @@
# calling update on supervisor too soon after it # calling update on supervisor too soon after it
# starts will result in an error. # starts will result in an error.
- name: supervisor | wait for web port to be available - name: wait for web port to be available
wait_for: port={{ supervisor_http_bind_port }} timeout=5 wait_for: port={{ supervisor_http_bind_port }} timeout=5
when: start_supervisor.changed when: start_supervisor.changed
...@@ -134,7 +134,7 @@ ...@@ -134,7 +134,7 @@
# we don't use notifications for supervisor because # we don't use notifications for supervisor because
# they don't work well with parameterized roles. # they don't work well with parameterized roles.
# See https://github.com/ansible/ansible/issues/4853 # See https://github.com/ansible/ansible/issues/4853
- name: supervisor | update supervisor configuration - name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update register: supervisor_update
changed_when: supervisor_update.stdout != "" changed_when: supervisor_update.stdout != ""
...@@ -33,8 +33,8 @@ XQUEUE_AWS_ACCESS_KEY_ID : '' ...@@ -33,8 +33,8 @@ XQUEUE_AWS_ACCESS_KEY_ID : ''
XQUEUE_AWS_SECRET_ACCESS_KEY : '' XQUEUE_AWS_SECRET_ACCESS_KEY : ''
XQUEUE_BASIC_AUTH_USER: 'edx' XQUEUE_BASIC_AUTH_USER: 'edx'
XQUEUE_BASIC_AUTH_PASSWORD: 'edx' XQUEUE_BASIC_AUTH_PASSWORD: 'edx'
XQUEUE_DJANGO_USER: 'lms' XQUEUE_DJANGO_USERS:
XQUEUE_DJANGO_PASSWORD: 'password' lms: 'password'
XQUEUE_RABBITMQ_USER: 'edx' XQUEUE_RABBITMQ_USER: 'edx'
XQUEUE_RABBITMQ_PASS: 'edx' XQUEUE_RABBITMQ_PASS: 'edx'
XQUEUE_RABBITMQ_HOSTNAME: 'localhost' XQUEUE_RABBITMQ_HOSTNAME: 'localhost'
...@@ -61,7 +61,7 @@ xqueue_auth_config: ...@@ -61,7 +61,7 @@ xqueue_auth_config:
AWS_ACCESS_KEY_ID: $XQUEUE_AWS_ACCESS_KEY_ID AWS_ACCESS_KEY_ID: $XQUEUE_AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY: $XQUEUE_AWS_SECRET_ACCESS_KEY AWS_SECRET_ACCESS_KEY: $XQUEUE_AWS_SECRET_ACCESS_KEY
REQUESTS_BASIC_AUTH: [$XQUEUE_BASIC_AUTH_USER, $XQUEUE_BASIC_AUTH_PASSWORD] REQUESTS_BASIC_AUTH: [$XQUEUE_BASIC_AUTH_USER, $XQUEUE_BASIC_AUTH_PASSWORD]
USERS: { '{{XQUEUE_DJANGO_USER}}' : $XQUEUE_DJANGO_PASSWORD } USERS: "{{ XQUEUE_DJANGO_USERS }}"
DATABASES: DATABASES:
default: default:
ENGINE: "django.db.backends.mysql" ENGINE: "django.db.backends.mysql"
......
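The single user/password pair becomes a mapping, so extra xqueue Django users can be added without introducing new variables; a minimal sketch using the default shown above (values illustrative):

XQUEUE_DJANGO_USERS:
  lms: 'password'

which xqueue_auth_config then renders as:

USERS:
  lms: 'password'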
- name: xqueue | restart xqueue - name: restart xqueue
supervisorctl_local: > supervisorctl_local: >
name={{ item }} name={{ item }}
supervisorctl_path={{ supervisor_ctl }} supervisorctl_path={{ supervisor_ctl }}
......
- name: "xqueue | writing supervisor scripts - xqueue, xqueue consumer" - name: "writing supervisor scripts - xqueue, xqueue consumer"
template: > template: >
src={{ item }}.conf.j2 dest={{ supervisor_cfg_dir }}/{{ item }}.conf src={{ item }}.conf.j2 dest={{ supervisor_cfg_dir }}/{{ item }}.conf
owner={{ supervisor_user }} group={{ common_web_user }} mode=0644 owner={{ supervisor_user }} group={{ common_web_user }} mode=0644
with_items: ['xqueue', 'xqueue_consumer'] with_items: ['xqueue', 'xqueue_consumer']
- name: xqueue | create xqueue application config - name: create xqueue application config
template: src=xqueue.env.json.j2 dest={{ xqueue_app_dir }}/xqueue.env.json mode=0644 template: src=xqueue.env.json.j2 dest={{ xqueue_app_dir }}/xqueue.env.json mode=0644
sudo_user: "{{ xqueue_user }}" sudo_user: "{{ xqueue_user }}"
notify: notify:
- xqueue | restart xqueue - restart xqueue
- name: xqueue | create xqueue auth file - name: create xqueue auth file
template: src=xqueue.auth.json.j2 dest={{ xqueue_app_dir }}/xqueue.auth.json mode=0644 template: src=xqueue.auth.json.j2 dest={{ xqueue_app_dir }}/xqueue.auth.json mode=0644
sudo_user: "{{ xqueue_user }}" sudo_user: "{{ xqueue_user }}"
notify: notify:
- xqueue | restart xqueue - restart xqueue
# Do A Checkout # Do A Checkout
- name: xqueue | git checkout xqueue repo into xqueue_code_dir - name: git checkout xqueue repo into xqueue_code_dir
git: dest={{ xqueue_code_dir }} repo={{ xqueue_source_repo }} version={{ xqueue_version }} git: dest={{ xqueue_code_dir }} repo={{ xqueue_source_repo }} version={{ xqueue_version }}
sudo_user: "{{ xqueue_user }}" sudo_user: "{{ xqueue_user }}"
notify: notify:
- xqueue | restart xqueue - restart xqueue
# Install the python pre requirements into {{ xqueue_venv_dir }} # Install the python pre requirements into {{ xqueue_venv_dir }}
- name: xqueue | install python pre-requirements - name: install python pre-requirements
pip: requirements="{{ xqueue_pre_requirements_file }}" virtualenv="{{ xqueue_venv_dir }}" state=present pip: requirements="{{ xqueue_pre_requirements_file }}" virtualenv="{{ xqueue_venv_dir }}" state=present
sudo_user: "{{ xqueue_user }}" sudo_user: "{{ xqueue_user }}"
notify: notify:
- xqueue | restart xqueue - restart xqueue
# Install the python post requirements into {{ xqueue_venv_dir }} # Install the python post requirements into {{ xqueue_venv_dir }}
- name: xqueue | install python post-requirements - name: install python post-requirements
pip: requirements="{{ xqueue_post_requirements_file }}" virtualenv="{{ xqueue_venv_dir }}" state=present pip: requirements="{{ xqueue_post_requirements_file }}" virtualenv="{{ xqueue_venv_dir }}" state=present
sudo_user: "{{ xqueue_user }}" sudo_user: "{{ xqueue_user }}"
notify: notify:
- xqueue | restart xqueue - restart xqueue
- name: xqueue | syncdb and migrate - name: syncdb and migrate
shell: > shell: >
SERVICE_VARIANT=xqueue {{ xqueue_venv_bin }}/django-admin.py syncdb --migrate --noinput --settings=xqueue.aws_settings --pythonpath={{ xqueue_code_dir }} SERVICE_VARIANT=xqueue {{ xqueue_venv_bin }}/django-admin.py syncdb --migrate --noinput --settings=xqueue.aws_settings --pythonpath={{ xqueue_code_dir }}
when: migrate_db is defined and migrate_db|lower == "yes" when: migrate_db is defined and migrate_db|lower == "yes"
sudo_user: "{{ xqueue_user }}" sudo_user: "{{ xqueue_user }}"
notify: notify:
- xqueue | restart xqueue - restart xqueue
- name: xqueue | create users - name: create users
shell: > shell: >
SERVICE_VARIANT=xqueue {{ xqueue_venv_bin }}/django-admin.py update_users --settings=xqueue.aws_settings --pythonpath={{ xqueue_code_dir }} SERVICE_VARIANT=xqueue {{ xqueue_venv_bin }}/django-admin.py update_users --settings=xqueue.aws_settings --pythonpath={{ xqueue_code_dir }}
sudo_user: "{{ xqueue_user }}" sudo_user: "{{ xqueue_user }}"
notify: notify:
- xqueue | restart xqueue - restart xqueue
# call supervisorctl update. this reloads # call supervisorctl update. this reloads
# the supervisorctl config and restarts # the supervisorctl config and restarts
# the services if any of the configurations # the services if any of the configurations
# have changed. # have changed.
# #
- name: xqueue | update supervisor configuration - name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update register: supervisor_update
changed_when: supervisor_update.stdout != "" changed_when: supervisor_update.stdout != ""
when: start_services when: start_services
- name: xqueue | ensure xqueue and xqueue consumer are running - name: ensure xqueue and xqueue consumer are running
supervisorctl_local: > supervisorctl_local: >
name={{ item }} name={{ item }}
supervisorctl_path={{ supervisor_ctl }} supervisorctl_path={{ supervisor_ctl }}
......
...@@ -6,33 +6,33 @@ ...@@ -6,33 +6,33 @@
# #
# #
- name: xqueue | create application user - name: create application user
user: > user: >
name="{{ xqueue_user }}" name="{{ xqueue_user }}"
home="{{ xqueue_app_dir }}" home="{{ xqueue_app_dir }}"
createhome=no createhome=no
shell=/bin/false shell=/bin/false
notify: notify:
- xqueue | restart xqueue - restart xqueue
- name: xqueue | create xqueue app and venv dir - name: create xqueue app and venv dir
file: > file: >
path="{{ item }}" path="{{ item }}"
state=directory state=directory
owner="{{ xqueue_user }}" owner="{{ xqueue_user }}"
group="{{ common_web_group }}" group="{{ common_web_group }}"
notify: notify:
- xqueue | restart xqueue - restart xqueue
with_items: with_items:
- "{{ xqueue_app_dir }}" - "{{ xqueue_app_dir }}"
- "{{ xqueue_venvs_dir }}" - "{{ xqueue_venvs_dir }}"
- name: xqueue | install a bunch of system packages on which xqueue relies - name: install a bunch of system packages on which xqueue relies
apt: pkg={{','.join(xqueue_debian_pkgs)}} state=present apt: pkg={{','.join(xqueue_debian_pkgs)}} state=present
notify: notify:
- xqueue | restart xqueue - restart xqueue
- name: xqueue | create xqueue db - name: create xqueue db
mysql_db: > mysql_db: >
name={{xqueue_auth_config.DATABASES.default.NAME}} name={{xqueue_auth_config.DATABASES.default.NAME}}
login_host={{xqueue_auth_config.DATABASES.default.HOST}} login_host={{xqueue_auth_config.DATABASES.default.HOST}}
...@@ -41,7 +41,7 @@ ...@@ -41,7 +41,7 @@
state=present state=present
encoding=utf8 encoding=utf8
notify: notify:
- xqueue | restart xqueue - restart xqueue
when: xqueue_create_db is defined and xqueue_create_db|lower == "yes" when: xqueue_create_db is defined and xqueue_create_db|lower == "yes"
- include: deploy.yml tags=deploy - include: deploy.yml tags=deploy
......
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
# Overview: # Overview:
# #
- name: xserver | restart xserver - name: restart xserver
supervisorctl_local: > supervisorctl_local: >
name=xserver name=xserver
supervisorctl_path={{ supervisor_ctl }} supervisorctl_path={{ supervisor_ctl }}
......
- name: "xserver | writing supervisor script" - name: "writing supervisor script"
template: > template: >
src=xserver.conf.j2 dest={{ supervisor_cfg_dir }}/xserver.conf src=xserver.conf.j2 dest={{ supervisor_cfg_dir }}/xserver.conf
owner={{ supervisor_user }} group={{ common_web_user }} mode=0644 owner={{ supervisor_user }} group={{ common_web_user }} mode=0644
- name: xserver | checkout code - name: checkout code
git: dest={{xserver_code_dir}} repo={{xserver_source_repo}} version={{xserver_version}} git: dest={{xserver_code_dir}} repo={{xserver_source_repo}} version={{xserver_version}}
sudo_user: "{{ xserver_user }}" sudo_user: "{{ xserver_user }}"
notify: xserver | restart xserver notify: restart xserver
- name: xserver | install requirements - name: install requirements
pip: requirements="{{xserver_requirements_file}}" virtualenv="{{ xserver_venv_dir }}" state=present pip: requirements="{{xserver_requirements_file}}" virtualenv="{{ xserver_venv_dir }}" state=present
sudo_user: "{{ xserver_user }}" sudo_user: "{{ xserver_user }}"
notify: xserver | restart xserver notify: restart xserver
- name: xserver | install sandbox requirements - name: install sandbox requirements
pip: requirements="{{xserver_requirements_file}}" virtualenv="{{xserver_venv_sandbox_dir}}" state=present pip: requirements="{{xserver_requirements_file}}" virtualenv="{{xserver_venv_sandbox_dir}}" state=present
sudo_user: "{{ xserver_user }}" sudo_user: "{{ xserver_user }}"
notify: xserver | restart xserver notify: restart xserver
- name: xserver | create xserver application config - name: create xserver application config
template: src=xserver.env.json.j2 dest={{ xserver_app_dir }}/env.json template: src=xserver.env.json.j2 dest={{ xserver_app_dir }}/env.json
sudo_user: "{{ xserver_user }}" sudo_user: "{{ xserver_user }}"
notify: xserver | restart xserver notify: restart xserver
- name: xserver | install read-only ssh key for the content repo that is required for grading - name: install read-only ssh key for the content repo that is required for grading
copy: > copy: >
src={{ XSERVER_LOCAL_GIT_IDENTITY }} dest={{ xserver_git_identity }} src={{ XSERVER_LOCAL_GIT_IDENTITY }} dest={{ xserver_git_identity }}
owner={{ xserver_user }} group={{ xserver_user }} mode=0600 owner={{ xserver_user }} group={{ xserver_user }} mode=0600
notify: xserver | restart xserver notify: restart xserver
- name: xserver | upload ssh script - name: upload ssh script
template: > template: >
src=git_ssh.sh.j2 dest=/tmp/git_ssh.sh src=git_ssh.sh.j2 dest=/tmp/git_ssh.sh
owner={{ xserver_user }} mode=750 owner={{ xserver_user }} mode=750
notify: xserver | restart xserver notify: restart xserver
- name: xserver | checkout grader code - name: checkout grader code
git: dest={{ XSERVER_GRADER_DIR }} repo={{ XSERVER_GRADER_SOURCE }} version={{ xserver_grader_version }} git: dest={{ XSERVER_GRADER_DIR }} repo={{ XSERVER_GRADER_SOURCE }} version={{ xserver_grader_version }}
environment: environment:
GIT_SSH: /tmp/git_ssh.sh GIT_SSH: /tmp/git_ssh.sh
notify: xserver | restart xserver notify: restart xserver
sudo_user: "{{ xserver_user }}" sudo_user: "{{ xserver_user }}"
- name: xserver | remove read-only ssh key for the content repo - name: remove read-only ssh key for the content repo
file: path={{ xserver_git_identity }} state=absent file: path={{ xserver_git_identity }} state=absent
notify: xserver | restart xserver notify: restart xserver
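# pattern: the read-only deploy key is written out, GIT_SSH points git at the wrapper script so the private grader repo can be cloned, and the key is removed once the checkout completes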
# call supervisorctl update. this reloads # call supervisorctl update. this reloads
# the supervisorctl config and restarts # the supervisorctl config and restarts
# the services if any of the configurations # the services if any of the configurations
# have changed. # have changed.
# #
- name: xserver | update supervisor configuration - name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update register: supervisor_update
when: start_services when: start_services
changed_when: supervisor_update.stdout != "" changed_when: supervisor_update.stdout != ""
- name: xserver | ensure xserver is started - name: ensure xserver is started
supervisorctl_local: > supervisorctl_local: >
name=xserver name=xserver
supervisorctl_path={{ supervisor_ctl }} supervisorctl_path={{ supervisor_ctl }}
...@@ -65,7 +65,7 @@ ...@@ -65,7 +65,7 @@
state=started state=started
when: start_services when: start_services
- name: xserver | create a symlink for venv python - name: create a symlink for venv python
file: > file: >
src="{{ xserver_venv_bin }}/{{ item }}" src="{{ xserver_venv_bin }}/{{ item }}"
dest={{ COMMON_BIN_DIR }}/{{ item }}.xserver dest={{ COMMON_BIN_DIR }}/{{ item }}.xserver
...@@ -74,5 +74,5 @@ ...@@ -74,5 +74,5 @@
- python - python
- pip - pip
- name: xserver | enforce apparmor rules - name: enforce apparmor rules
command: aa-enforce {{ xserver_venv_sandbox_dir }} command: aa-enforce {{ xserver_venv_sandbox_dir }}
...@@ -3,28 +3,28 @@ ...@@ -3,28 +3,28 @@
# access to the edX 6.00x repo which is not public # access to the edX 6.00x repo which is not public
--- ---
- name: xserver | checking for grader info - name: checking for grader info
fail: msg="You must define XSERVER_GRADER_DIR and XSERVER_GRADER_SOURCE to use this role!" fail: msg="You must define XSERVER_GRADER_DIR and XSERVER_GRADER_SOURCE to use this role!"
when: not XSERVER_GRADER_DIR or not XSERVER_GRADER_SOURCE when: not XSERVER_GRADER_DIR or not XSERVER_GRADER_SOURCE
- name: xserver | checking for git identity - name: checking for git identity
fail: msg="You must define XSERVER_LOCAL_GIT_IDENTITY to use this role" fail: msg="You must define XSERVER_LOCAL_GIT_IDENTITY to use this role"
when: not XSERVER_LOCAL_GIT_IDENTITY when: not XSERVER_LOCAL_GIT_IDENTITY
- name: xserver | create application user - name: create application user
user: > user: >
name="{{ xserver_user }}" name="{{ xserver_user }}"
home="{{ xserver_app_dir }}" home="{{ xserver_app_dir }}"
createhome=no createhome=no
shell=/bin/false shell=/bin/false
- name: xserver | create application sandbox user - name: create application sandbox user
user: > user: >
name="{{ xserver_sandbox_user }}" name="{{ xserver_sandbox_user }}"
createhome=no createhome=no
shell=/bin/false shell=/bin/false
- name: xserver | create xserver app and data dirs - name: create xserver app and data dirs
file: > file: >
path="{{ item }}" path="{{ item }}"
state=directory state=directory
...@@ -36,27 +36,27 @@ ...@@ -36,27 +36,27 @@
- "{{ xserver_data_dir }}" - "{{ xserver_data_dir }}"
- "{{ xserver_data_dir }}/data" - "{{ xserver_data_dir }}/data"
- name: xserver | create sandbox sudoers file - name: create sandbox sudoers file
template: src=99-sandbox.j2 dest=/etc/sudoers.d/99-sandbox owner=root group=root mode=0440 template: src=99-sandbox.j2 dest=/etc/sudoers.d/99-sandbox owner=root group=root mode=0440
# Make sure this line is in the common-session file. # Make sure this line is in the common-session file.
- name: xserver | ensure pam-limits module is loaded - name: ensure pam-limits module is loaded
lineinfile: lineinfile:
dest=/etc/pam.d/common-session dest=/etc/pam.d/common-session
regexp="session required pam_limits.so" regexp="session required pam_limits.so"
line="session required pam_limits.so" line="session required pam_limits.so"
- name: xserver | set sandbox limits - name: set sandbox limits
template: src={{ item }} dest=/etc/security/limits.d/sandbox.conf template: src={{ item }} dest=/etc/security/limits.d/sandbox.conf
first_available_file: first_available_file:
- "{{ secure_dir }}/sandbox.conf.j2" - "{{ secure_dir }}/sandbox.conf.j2"
- "sandbox.conf.j2" - "sandbox.conf.j2"
- name: xserver | install system dependencies of xserver - name: install system dependencies of xserver
apt: pkg={{ item }} state=present apt: pkg={{ item }} state=present
with_items: xserver_debian_pkgs with_items: xserver_debian_pkgs
- name: xserver | load python-sandbox apparmor profile - name: load python-sandbox apparmor profile
template: src={{ item }} dest=/etc/apparmor.d/edx_apparmor_sandbox template: src={{ item }} dest=/etc/apparmor.d/edx_apparmor_sandbox
first_available_file: first_available_file:
- "{{ secure_dir }}/files/edx_apparmor_sandbox.j2" - "{{ secure_dir }}/files/edx_apparmor_sandbox.j2"
......
Jinja2==2.7.1 ansible==1.4.4
MarkupSafe==0.18
PyYAML==3.10 PyYAML==3.10
ansible==1.3.2 Jinja2==2.7.2
MarkupSafe==0.18
argparse==1.2.1 argparse==1.2.1
boto==2.10.0 boto==2.23.0
ecdsa==0.10 ecdsa==0.10
paramiko==1.12.0 paramiko==1.12.0
pycrypto==2.6.1 pycrypto==2.6.1
......
...@@ -21,6 +21,16 @@ ...@@ -21,6 +21,16 @@
export PYTHONUNBUFFERED=1 export PYTHONUNBUFFERED=1
export BOTO_CONFIG=/var/lib/jenkins/${aws_account}.boto export BOTO_CONFIG=/var/lib/jenkins/${aws_account}.boto
if [[ -n $WORKSPACE ]]; then
# setup a virtualenv in jenkins
if [[ ! -d ".venv" ]]; then
virtualenv .venv
fi
source .venv/bin/activate
pip install -r requirements.txt
fi
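# WORKSPACE is set by Jenkins, so the virtualenv bootstrap above only runs inside CI; outside Jenkins the script falls through to the branch below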
if [[ -z $WORKSPACE ]]; then if [[ -z $WORKSPACE ]]; then
dir=$(dirname $0) dir=$(dirname $0)
source "$dir/ascii-convert.sh" source "$dir/ascii-convert.sh"
...@@ -146,7 +156,12 @@ security_group: $security_group ...@@ -146,7 +156,12 @@ security_group: $security_group
ami: $ami ami: $ami
region: $region region: $region
zone: $zone zone: $zone
instance_tags: '{"environment": "$environment", "github_username": "$github_username", "Name": "$name_tag", "source": "jenkins", "owner": "$BUILD_USER"}' instance_tags:
environment: $environment
github_username: $github_username
Name: $name_tag
source: jenkins
owner: $BUILD_USER
root_ebs_size: $root_ebs_size root_ebs_size: $root_ebs_size
name_tag: $name_tag name_tag: $name_tag
gh_users: gh_users:
...@@ -170,15 +185,10 @@ EOF ...@@ -170,15 +185,10 @@ EOF
fi fi
declare -A deploy declare -A deploy
roles="edxapp forum xqueue xserver ora discern certs"
deploy[edxapp]=$edxapp for role in $roles; do
deploy[forum]=$forum deploy[$role]=${!role}
deploy[xqueue]=$xqueue done
deploy[xserver]=$xserver
deploy[ora]=$ora
deploy[discern]=$discern
deploy[certs]=$certs
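# ${!role} is bash indirect expansion: it expands to the value of the variable named by $role (e.g. $edxapp), filling the deploy map without one assignment per role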
# If reconfigure was selected or if starting from an ubuntu 12.04 AMI # If reconfigure was selected or if starting from an ubuntu 12.04 AMI
# run non-deploy tasks for all roles # run non-deploy tasks for all roles
...@@ -188,7 +198,7 @@ if [[ $reconfigure == "true" || $server_type == "ubuntu_12.04" ]]; then ...@@ -188,7 +198,7 @@ if [[ $reconfigure == "true" || $server_type == "ubuntu_12.04" ]]; then
fi fi
# Run deploy tasks for the roles selected # Run deploy tasks for the roles selected
for i in "${!deploy[@]}"; do for i in $roles; do
if [[ ${deploy[$i]} == "true" ]]; then if [[ ${deploy[$i]} == "true" ]]; then
cat $extra_vars cat $extra_vars
ansible-playbook ${i}.yml -i "${deploy_host}," -e "@${extra_vars}" --user ubuntu --tags deploy ansible-playbook ${i}.yml -i "${deploy_host}," -e "@${extra_vars}" --user ubuntu --tags deploy
......