Commit 8946de03 by John Jarvis

removing role identifiers for ansible 1.4

parent aa0c2188
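The pattern applied throughout is mechanical: every task and handler name drops its "<role> | " prefix, presumably because Ansible 1.4 reports the owning role in task output on its own, so repeating it in each name is redundant. A minimal before/after sketch, taken from the analytics-server role below (only the name changes; the module line is untouched):

    # before
    - name: analytics-server | stop the analytics service
      service: name=analytics state=stopped

    # after
    - name: stop the analytics service
      service: name=analytics state=stopped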
@@ -15,8 +15,8 @@
#
#
-- name: analytics-server | stop the analytics service
+- name: stop the analytics service
service: name=analytics state=stopped
-- name: analytics-server | start the analytics service
+- name: start the analytics service
service: name=analytics state=started
@@ -37,14 +37,14 @@
# - common
# - analytics-server
#
-- name: analytics-server | install system packages
+- name: install system packages
apt: pkg={{','.join(as_debian_pkgs)}} state=present
tags:
- analytics-server
- install
- update
-- name: analytics-server | create analytics-server user {{ as_user }}
+- name: create analytics-server user {{ as_user }}
user:
name={{ as_user }} state=present shell=/bin/bash
home={{ as_home }} createhome=yes
@@ -53,7 +53,7 @@
- install
- update
-- name: analytics-server | setup the analytics-server env
+- name: setup the analytics-server env
template:
src=opt/wwc/analytics-server/{{ as_env }}.j2
dest={{ as_home }}/{{ as_env }}
@@ -63,7 +63,7 @@
- install
- update
-- name: analytics-server | drop a bash_profile
+- name: drop a bash_profile
copy: >
src=../../common/files/bash_profile
dest={{ as_home }}/.bash_profile
@@ -80,7 +80,7 @@
# - install
# - update
-- name: analytics-server | ensure .bashrc exists
+- name: ensure .bashrc exists
shell: touch {{ as_home }}/.bashrc
sudo: true
sudo_user: "{{ as_user }}"
@@ -89,7 +89,7 @@
- install
- update
-- name: analytics-server | add source of analytics-server_env to .bashrc
+- name: add source of analytics-server_env to .bashrc
lineinfile:
dest={{ as_home }}/.bashrc
regexp='. {{ as_home }}/analytics-server_env'
@@ -99,7 +99,7 @@
- install
- update
-- name: analytics-server | add source venv to .bashrc
+- name: add source venv to .bashrc
lineinfile:
dest={{ as_home }}/.bashrc
regexp='. {{ as_venv_dir }}/bin/activate'
@@ -109,7 +109,7 @@
- install
- update
-- name: analytics-server | install global python requirements
+- name: install global python requirements
pip: name={{ item }}
with_items: as_pip_pkgs
tags:
@@ -117,7 +117,7 @@
- install
- update
-- name: analytics-server | create config
+- name: create config
template:
src=opt/wwc/analytics.auth.json.j2
dest=/opt/wwc/analytics.auth.json
@@ -128,7 +128,7 @@
- install
- update
-- name: analytics-server | install service
+- name: install service
template:
src=etc/init/analytics.conf.j2 dest=/etc/init/analytics.conf
owner=root group=root
...
@@ -15,8 +15,8 @@
#
#
-- name: analytics | stop the analytics service
+- name: stop the analytics service
service: name=analytics state=stopped
-- name: analytics | start the analytics service
+- name: start the analytics service
service: name=analytics state=started
@@ -37,14 +37,14 @@
# - common
# - analytics
#
-- name: analytics | install system packages
+- name: install system packages
apt: pkg={{','.join(analytics_debian_pkgs)}} state=present
tags:
- analytics
- install
- update
-- name: analytics | create analytics user {{ analytics_user }}
+- name: create analytics user {{ analytics_user }}
user:
name={{ analytics_user }} state=present shell=/bin/bash
home={{ analytics_home }} createhome=yes
@@ -53,7 +53,7 @@
- install
- update
-- name: analytics | setup the analytics env
+- name: setup the analytics env
template:
src=opt/wwc/analytics/{{ analytics_env }}.j2
dest={{ analytics_home }}/{{ analytics_env }}
@@ -63,7 +63,7 @@
- install
- update
-- name: analytics | drop a bash_profile
+- name: drop a bash_profile
copy: >
src=../../common/files/bash_profile
dest={{ analytics_home }}/.bash_profile
@@ -80,7 +80,7 @@
# - install
# - update
-- name: analytics | ensure .bashrc exists
+- name: ensure .bashrc exists
shell: touch {{ analytics_home }}/.bashrc
sudo: true
sudo_user: "{{ analytics_user }}"
@@ -89,7 +89,7 @@
- install
- update
-- name: analytics | add source of analytics_env to .bashrc
+- name: add source of analytics_env to .bashrc
lineinfile:
dest={{ analytics_home }}/.bashrc
regexp='. {{ analytics_home }}/analytics_env'
@@ -99,7 +99,7 @@
- install
- update
-- name: analytics | add source venv to .bashrc
+- name: add source venv to .bashrc
lineinfile:
dest={{ analytics_home }}/.bashrc
regexp='. {{ analytics_venv_dir }}/bin/activate'
@@ -109,7 +109,7 @@
- install
- update
-- name: analytics | install global python requirements
+- name: install global python requirements
pip: name={{ item }}
with_items: analytics_pip_pkgs
tags:
@@ -117,7 +117,7 @@
- install
- update
-- name: analytics | create config
+- name: create config
template:
src=opt/wwc/analytics.auth.json.j2
dest=/opt/wwc/analytics.auth.json
@@ -128,7 +128,7 @@
- install
- update
-- name: analytics | install service
+- name: install service
template:
src=etc/init/analytics.conf.j2 dest=/etc/init/analytics.conf
owner=root group=root
...
---
-- name: ansible-role | check if the role exists
+- name: check if the role exists
command: test -d roles/{{ role_name }}
register: role_exists
ignore_errors: yes
-- name: ansible-role | prompt for overwrite
+- name: prompt for overwrite
pause: prompt="Role {{ role_name }} exists. Overwrite? Touch any key to continue or <CTRL>-c, then a, to abort."
when: role_exists | success
-- name: ansible-role | create role directories
+- name: create role directories
file: path=roles/{{role_name}}/{{ item }} state=directory
with_items:
- tasks
@@ -19,7 +19,7 @@
- templates
- files
-- name: ansible-role | make an ansible role
+- name: make an ansible role
template: src={{ item }}/main.yml.j2 dest=roles/{{ role_name }}/{{ item }}/main.yml
with_items:
- tasks
...
---
-- name: apache | restart apache
+- name: restart apache
service: name=apache2 state=restarted
#Installs apache and runs the lms wsgi
---
-- name: apache | Installs apache and mod_wsgi from apt
+- name: Installs apache and mod_wsgi from apt
apt: pkg={{item}} install_recommends=no state=present update_cache=yes
with_items:
- apache2
@@ -11,21 +11,21 @@
- apache
- install
-- name: apache | disables default site
+- name: disables default site
command: a2dissite 000-default
notify: apache | restart apache
tags:
- apache
- install
-- name: apache | rewrite apache ports conf
+- name: rewrite apache ports conf
template: dest=/etc/apache2/ports.conf src=ports.conf.j2 owner=root group=root
notify: apache | restart apache
tags:
- apache
- install
-- name: apache | Register the fact that apache role has run
+- name: Register the fact that apache role has run
command: echo True
register: apache_role_run
tags:
...
@@ -57,7 +57,7 @@
- fail: automated_sudoers_dest required for role
when: automated_sudoers_dest is not defined
-- name: automated | create automated user
+- name: create automated user
user:
name={{ automated_user }} state=present shell=/bin/rbash
home={{ automated_home }} createhome=yes
@@ -66,7 +66,7 @@
- install
- update
-- name: automated | create sudoers file from file
+- name: create sudoers file from file
copy:
dest=/etc/sudoers.d/{{ automated_sudoers_dest }}
src={{ automated_sudoers_file }} owner="root"
@@ -77,7 +77,7 @@
- install
- update
-- name: automated | create sudoers file from template
+- name: create sudoers file from template
template:
dest=/etc/sudoers.d/{{ automated_sudoers_dest }}
src={{ automated_sudoers_template }} owner="root"
@@ -92,7 +92,7 @@
# Prevent user from updating their PATH and
# environment.
#
-- name: automated | update shell file mode
+- name: update shell file mode
file:
path={{ automated_home }}/{{ item }} mode=0640
state=file owner="root" group={{ automated_user }}
@@ -105,7 +105,7 @@
- .profile
- .bash_logout
-- name: automated | change ~automated ownership
+- name: change ~automated ownership
file:
path={{ automated_home }} mode=0750 state=directory
owner="root" group={{ automated_user }}
@@ -119,7 +119,7 @@
# and that links that were remove from the role are
# removed.
#
-- name: automated | remove ~automated/bin directory
+- name: remove ~automated/bin directory
file:
path={{ automated_home }}/bin state=absent
ignore_errors: yes
@@ -128,7 +128,7 @@
- install
- update
-- name: automated | create ~automated/bin directory
+- name: create ~automated/bin directory
file:
path={{ automated_home }}/bin state=directory mode=0750
owner="root" group={{ automated_user }}
@@ -137,7 +137,7 @@
- install
- update
-- name: automated | re-write .profile
+- name: re-write .profile
copy:
src=home/automator/.profile
dest={{ automated_home }}/.profile
@@ -149,7 +149,7 @@
- install
- update
-- name: automated | re-write .bashrc
+- name: re-write .bashrc
copy:
src=home/automator/.bashrc
dest={{ automated_home }}/.bashrc
@@ -161,7 +161,7 @@
- install
- update
-- name: automated | create .ssh directory
+- name: create .ssh directory
file:
path={{ automated_home }}/.ssh state=directory mode=0700
owner={{ automated_user }} group={{ automated_user }}
@@ -170,7 +170,7 @@
- install
- update
-- name: automated | copy key to .ssh/authorized_keys
+- name: copy key to .ssh/authorized_keys
copy:
src=home/automator/.ssh/authorized_keys
dest={{ automated_home }}/.ssh/authorized_keys mode=0600
@@ -180,7 +180,7 @@
- install
- update
-- name: automated | create allowed command links
+- name: create allowed command links
file:
src={{ item }} dest={{ automated_home }}/bin/{{ item.split('/').pop() }}
state=link
...
# Install browsers required to run the JavaScript
# and acceptance test suite locally without a display
---
-- name: browsers | install system packages
+- name: install system packages
apt: pkg={{','.join(browser_deb_pkgs)}}
state=present update_cache=yes
-- name: browsers | download browser debian packages from S3
+- name: download browser debian packages from S3
get_url: dest="/tmp/{{ item.name }}" url="{{ item.url }}"
register: download_deb
with_items: "{{ browser_s3_deb_pkgs }}"
-- name: browsers | install browser debian packages
+- name: install browser debian packages
shell: gdebi -nq /tmp/{{ item.name }}
when: download_deb.changed
with_items: "{{ browser_s3_deb_pkgs }}"
-- name: browsers | Install ChromeDriver
+- name: Install ChromeDriver
get_url:
url={{ chromedriver_url }}
dest=/var/tmp/chromedriver_{{ chromedriver_version }}.zip
-- name: browsers | Install ChromeDriver 2
+- name: Install ChromeDriver 2
shell: unzip /var/tmp/chromedriver_{{ chromedriver_version }}.zip
chdir=/var/tmp
-- name: browsers | Install ChromeDriver 3
+- name: Install ChromeDriver 3
shell: mv /var/tmp/chromedriver /usr/local/bin/chromedriver
-- name: browsers | Install Chromedriver 4
+- name: Install Chromedriver 4
file: path=/usr/local/bin/chromedriver mode=0755
-- name: browsers | create xvfb upstart script
+- name: create xvfb upstart script
template: src=xvfb.conf.j2 dest=/etc/init/xvfb.conf owner=root group=root
-- name: browsers | start xvfb
+- name: start xvfb
shell: start xvfb
ignore_errors: yes
@@ -14,7 +14,7 @@
# Overview:
#
-- name: certs | restart certs
+- name: restart certs
supervisorctl_local: >
name=certs
supervisorctl_path={{ supervisor_ctl }}
...
@@ -35,7 +35,7 @@
fail: msg="You must set CERTS_LOCAL_GIT_IDENTITY var for this role!"
when: not CERTS_LOCAL_GIT_IDENTITY
-- name: certs | create application user
+- name: create application user
user: >
name="{{ certs_user }}"
home="{{ certs_app_dir }}"
@@ -43,7 +43,7 @@
shell=/bin/false
notify: certs | restart certs
-- name: certs | create certs app and data dirs
+- name: create certs app and data dirs
file: >
path="{{ item }}"
state=directory
@@ -54,14 +54,14 @@
- "{{ certs_app_dir }}"
- "{{ certs_venvs_dir }}"
-- name: certs | create certs gpg dir
+- name: create certs gpg dir
file: >
path="{{ certs_gpg_dir }}" state=directory
owner="{{ common_web_user }}"
mode=0700
notify: certs | restart certs
-- name: certs | copy the private gpg signing key
+- name: copy the private gpg signing key
copy: >
src={{ CERTS_LOCAL_PRIVATE_KEY }}
dest={{ certs_app_dir }}/{{ CERTS_LOCAL_PRIVATE_KEY|basename }}
@@ -70,7 +70,7 @@
register: certs_gpg_key
-- name: certs | load the gpg key
+- name: load the gpg key
shell: >
/usr/bin/gpg --homedir {{ certs_gpg_dir }} --import {{ certs_app_dir }}/{{ CERTS_LOCAL_PRIVATE_KEY|basename }}
sudo_user: "{{ common_web_user }}"
...
---
-- name: common | restart rsyslogd
+- name: restart rsyslogd
service: name=rsyslog state=restarted
sudo: True
---
-- name: common | Add user www-data
+- name: Add user www-data
# This is the default user for nginx
user: >
name="{{ common_web_user }}"
shell=/bin/false
-- name: common | Create common directories
+- name: Create common directories
file: >
path={{ item }} state=directory owner=root
group=root mode=0755
@@ -16,57 +16,57 @@
- "{{ COMMON_CFG_DIR }}"
# Need to install python-pycurl to use Ansible's apt_repository module
-- name: common | Install python-pycurl
+- name: Install python-pycurl
apt: pkg=python-pycurl state=present update_cache=yes
# Ensure that we get a current version of Git
# GitHub requires version 1.7.10 or later
# https://help.github.com/articles/https-cloning-errors
-- name: common | Add git apt repository
+- name: Add git apt repository
apt_repository: repo="{{ common_git_ppa }}"
-- name: common | Install role-independent useful system packages
+- name: Install role-independent useful system packages
# do this before log dir setup; rsyslog package guarantees syslog user present
apt: >
pkg={{','.join(common_debian_pkgs)}} install_recommends=yes
state=present update_cache=yes
-- name: common | Create common log directory
+- name: Create common log directory
file: >
path={{ COMMON_LOG_DIR }} state=directory owner=syslog
group=syslog mode=0755
-- name: common | upload sudo config for key forwarding as root
+- name: upload sudo config for key forwarding as root
copy: >
src=ssh_key_forward dest=/etc/sudoers.d/ssh_key_forward
validate='visudo -c -f %s' owner=root group=root mode=0440
-- name: common | pip install virtualenv
+- name: pip install virtualenv
pip: >
name="{{ item }}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
with_items: common_pip_pkgs
-- name: common | Install rsyslog configuration for edX
+- name: Install rsyslog configuration for edX
template: dest=/etc/rsyslog.d/99-edx.conf src=edx_rsyslog.j2 owner=root group=root mode=644
notify: common | restart rsyslogd
-- name: common | Install logrotate configuration for edX
+- name: Install logrotate configuration for edX
template: dest=/etc/logrotate.d/edx-services src=edx_logrotate.j2 owner=root group=root mode=644
-- name: common | update /etc/hosts
+- name: update /etc/hosts
template: src=hosts.j2 dest=/etc/hosts
when: COMMON_HOSTNAME
register: etc_hosts
-- name: common | update /etc/hostname
+- name: update /etc/hostname
template: src=hostname.j2 dest=/etc/hostname
when: COMMON_HOSTNAME
register: etc_hostname
-- name: common | run hostname
+- name: run hostname
shell: >
hostname -F /etc/hostname
when: COMMON_HOSTNAME and (etc_hosts.changed or etc_hostname.changed)
---
-- name: datadog | restart the datadog service
+- name: restart the datadog service
service: name=datadog-agent state=restarted
@@ -15,33 +15,33 @@
# - datadog
#
-- name: datadog | install debian needed pkgs
+- name: install debian needed pkgs
apt: pkg={{ item }}
with_items: datadog_debian_pkgs
tags:
- datadog
-- name: datadog | add apt key
+- name: add apt key
apt_key: id=C7A7DA52 url={{datadog_apt_key}} state=present
tags:
- datadog
-- name: datadog | install apt repository
+- name: install apt repository
apt_repository: repo='deb http://apt.datadoghq.com/ unstable main' update_cache=yes
tags:
- datadog
-- name: datadog | install datadog agent
+- name: install datadog agent
apt: pkg="datadog-agent"
tags:
- datadog
-- name: datadog | bootstrap config
+- name: bootstrap config
shell: cp /etc/dd-agent/datadog.conf.example /etc/dd-agent/datadog.conf creates=/etc/dd-agent/datadog.conf
tags:
- datadog
-- name: datadog | update api-key
+- name: update api-key
lineinfile: >
dest="/etc/dd-agent/datadog.conf"
regexp="^api_key:.*"
@@ -51,7 +51,7 @@
tags:
- datadog
-- name: datadog | ensure started and enabled
+- name: ensure started and enabled
service: name=datadog-agent state=started enabled=yes
tags:
- datadog
@@ -30,7 +30,7 @@
# - edxapp
# - demo
-- name: demo | create demo app and data dirs
+- name: create demo app and data dirs
file: >
path="{{ demo_app_dir }}" state=directory
owner="{{ edxapp_user }}" group="{{ common_web_group }}"
...
@@ -11,7 +11,7 @@
# Defaults for role devpi
#
---
-- name: devpi | restart devpi
+- name: restart devpi
supervisorctl_local: >
state=restarted
supervisorctl_path={{ devpi_supervisor_ctl }}
...
@@ -30,13 +30,13 @@
# - devpi
---
-- name: devpi | create devpi user
+- name: create devpi user
user: >
name={{ devpi_user }}
shell=/bin/false createhome=no
notify: devpi | restart devpi
-- name: devpi | create devpi application directories
+- name: create devpi application directories
file: >
path={{ item }}
state=directory
@@ -47,7 +47,7 @@
- "{{ devpi_venv_dir }}"
notify: devpi | restart devpi
-- name: devpi | create the devpi data directory, needs write access by the service user
+- name: create the devpi data directory, needs write access by the service user
file: >
path={{ item }}
state=directory
@@ -58,7 +58,7 @@
- "{{ devpi_mirror_dir }}"
notify: devpi | restart devpi
-- name: devpi | install devpi pip pkgs
+- name: install devpi pip pkgs
pip: >
name={{ item }}
state=present
@@ -67,13 +67,13 @@
with_items: devpi_pip_pkgs
notify: devpi | restart devpi
-- name: devpi | writing supervisor script
+- name: writing supervisor script
template: >
src=devpi.conf.j2 dest={{ devpi_supervisor_cfg_dir }}/devpi.conf
owner={{ devpi_user }} group={{ devpi_user }} mode=0644
notify: devpi | restart devpi
-- name: devpi | create a symlink for venv python, pip
+- name: create a symlink for venv python, pip
file: >
src="{{ devpi_venv_bin }}/{{ item }}"
dest={{ COMMON_BIN_DIR }}/{{ item }}.devpi
@@ -83,13 +83,13 @@
- python
- pip
-- name: devpi | create a symlink for venv supervisor
+- name: create a symlink for venv supervisor
file: >
src="{{ devpi_supervisor_venv_bin }}/supervisorctl"
dest={{ COMMON_BIN_DIR }}/{{ item }}.devpi
state=link
-- name: devpi | create a symlink for supervisor config
+- name: create a symlink for supervisor config
file: >
src="{{ devpi_supervisor_app_dir }}/supervisord.conf"
dest={{ COMMON_CFG_DIR }}/supervisord.conf.devpi
@@ -100,12 +100,12 @@
# the services if any of the configurations
# have changed.
#
-- name: devpi | update devpi supervisor configuration
+- name: update devpi supervisor configuration
shell: "{{ devpi_supervisor_ctl }} -c {{ devpi_supervisor_cfg }} update"
register: supervisor_update
changed_when: supervisor_update.stdout != ""
-- name: devpi | ensure devpi is started
+- name: ensure devpi is started
supervisorctl_local: >
state=started
supervisorctl_path={{ devpi_supervisor_ctl }}
...
---
-- name: discern | restart discern
+- name: restart discern
supervisorctl_local: >
name=discern
supervisorctl_path={{ supervisor_ctl }}
...
---
-- name: discern | create application user
+- name: create application user
user: >
name="{{ discern_user }}"
home="{{ discern_app_dir }}"
@@ -8,7 +8,7 @@
notify:
- discern | restart discern
-- name: discern | create discern app dirs owned by discern
+- name: create discern app dirs owned by discern
file: >
path="{{ item }}"
state=directory
@@ -20,7 +20,7 @@
- "{{ discern_app_dir }}"
- "{{ discern_venvs_dir }}"
-- name: discern | create discern data dir, owned by {{ common_web_user }}
+- name: create discern data dir, owned by {{ common_web_user }}
file: >
path="{{ discern_data_dir }}" state=directory
owner="{{ common_web_user }}" group="{{ discern_user }}"
@@ -28,19 +28,19 @@
notify:
- discern | restart discern
-- name: discern | install debian packages that discern needs
+- name: install debian packages that discern needs
apt: pkg={{ item }} state=present
notify:
- discern | restart discern
with_items: discern_debian_pkgs
-- name: discern | install debian packages for ease that discern needs
+- name: install debian packages for ease that discern needs
apt: pkg={{ item }} state=present
notify:
- discern | restart discern
with_items: discern_ease_debian_pkgs
-- name: discern | copy sudoers file for discern
+- name: copy sudoers file for discern
copy: >
src=sudoers-discern dest=/etc/sudoers.d/discern
mode=0440 validate='visudo -cf %s' owner=root group=root
@@ -48,7 +48,7 @@
- discern | restart discern
#Needed if using redis to prevent memory issues
-- name: discern | change memory commit settings -- needed for redis
+- name: change memory commit settings -- needed for redis
command: sysctl vm.overcommit_memory=1
notify:
- discern | restart discern
...
@@ -23,14 +23,14 @@
#
#
#
-- name: edx_ansible | create application user
+- name: create application user
user: >
name="{{ edx_ansible_user }}"
home="{{ edx_ansible_app_dir }}"
createhome=no
shell=/bin/false
-- name: edx_ansible | create edx_ansible app and venv dir
+- name: create edx_ansible app and venv dir
file: >
path="{{ item }}"
state=directory
@@ -41,7 +41,7 @@
- "{{ edx_ansible_data_dir }}"
- "{{ edx_ansible_venvs_dir }}"
-- name: edx_ansible | install a bunch of system packages on which edx_ansible relies
+- name: install a bunch of system packages on which edx_ansible relies
apt: pkg={{','.join(edx_ansible_debian_pkgs)}} state=present
- include: deploy.yml tags=deploy
---
-- name: edxapp | restart edxapp
+- name: restart edxapp
supervisorctl_local: >
state=restarted
supervisorctl_path={{ supervisor_ctl }}
@@ -9,7 +9,7 @@
sudo_user: "{{ supervisor_service_user }}"
with_items: service_variants_enabled
-- name: edxapp | restart edxapp_workers
+- name: restart edxapp_workers
supervisorctl_local: >
name="edxapp_worker:{{ item.service_variant }}_{{ item.queue }}_{{ item.concurrency }}"
supervisorctl_path={{ supervisor_ctl }}
...
@@ -4,13 +4,13 @@
---
-- name: edxapp | Install logrotate configuration for tracking file
+- name: Install logrotate configuration for tracking file
template: dest=/etc/logrotate.d/tracking.log src=edx_logrotate_tracking_log.j2 owner=root group=root mode=644
notify:
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
-- name: edxapp | create application user
+- name: create application user
user: >
name="{{ edxapp_user }}" home="{{ edxapp_app_dir }}"
createhome=no shell=/bin/false
@@ -18,7 +18,7 @@
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
-- name: edxapp | create edxapp user dirs
+- name: create edxapp user dirs
file: >
path="{{ item }}" state=directory
owner="{{ edxapp_user }}" group="{{ common_web_group }}"
@@ -32,7 +32,7 @@
- "{{ edxapp_theme_dir }}"
- "{{ edxapp_staticfile_dir }}"
-- name: edxapp | create edxapp log dir
+- name: create edxapp log dir
file: >
path="{{ edxapp_log_dir }}" state=directory
owner="{{ common_log_user }}" group="{{ common_log_user }}"
@@ -40,7 +40,7 @@
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
-- name: edxapp | create web-writable edxapp data dirs
+- name: create web-writable edxapp data dirs
file: >
path="{{ item }}" state=directory
owner="{{ common_web_user }}" group="{{ edxapp_user }}"
@@ -52,13 +52,13 @@
- "{{ edxapp_course_data_dir }}"
- "{{ edxapp_upload_dir }}"
-- name: edxapp | install system packages on which LMS and CMS rely
+- name: install system packages on which LMS and CMS rely
apt: pkg={{','.join(edxapp_debian_pkgs)}} state=present
notify:
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
-- name: edxapp | create log directories for service variants
+- name: create log directories for service variants
notify:
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
...
@@ -10,33 +10,33 @@
# http://downloads.mysql.com/archives/mysql-5.1/mysql-5.1.62.tar.gz
#
---
-- name: edxlocal| install packages needed for single server
+- name: install packages needed for single server
apt: pkg={{','.join(edxlocal_debian_pkgs)}} install_recommends=yes state=present
-- name: edxlocal | create a database for edxapp
+- name: create a database for edxapp
mysql_db: >
db=edxapp
state=present
encoding=utf8
-- name: edxlocal | create a database for xqueue
+- name: create a database for xqueue
mysql_db: >
db=xqueue
state=present
encoding=utf8
-- name: edxlocal | create a database for ora
+- name: create a database for ora
mysql_db: >
db=ora
state=present
encoding=utf8
-- name: edxlocal | create a database for discern
+- name: create a database for discern
mysql_db: >
db=discern
state=present
encoding=utf8
-- name: edxlocal | install memcached
+- name: install memcached
apt: pkg=memcached state=present
@@ -14,13 +14,13 @@
# - oraclejdk
# - elasticsearch
-- name: elasticsearch | download elasticsearch
+- name: download elasticsearch
get_url: >
url={{ elasticsearch_url }}
dest=/var/tmp/{{ elasticsearch_file }}
force=no
-- name: elasticsearch | install elasticsearch from local package
+- name: install elasticsearch from local package
shell: >
dpkg -i /var/tmp/elasticsearch-{{ elasticsearch_version }}.deb
executable=/bin/bash
@@ -29,7 +29,7 @@
- elasticsearch
- install
-- name: elasticsearch | Ensure elasticsearch is enabled and started
+- name: Ensure elasticsearch is enabled and started
service: name=elasticsearch state=started enabled=yes
tags:
- elasticsearch
...
---
-- name: forum | restart the forum service
+- name: restart the forum service
supervisorctl_local: >
name=forum
supervisorctl_path={{ supervisor_ctl }}
...
@@ -21,20 +21,20 @@
# rbenv_ruby_version: "{{ forum_ruby_version }}"
# - forum
-- name: forum | create application user
+- name: create application user
user: >
name="{{ forum_user }}" home="{{ forum_app_dir }}"
createhome=no
shell=/bin/false
notify: forum | restart the forum service
-- name: forum | create forum app dir
+- name: create forum app dir
file: >
path="{{ forum_app_dir }}" state=directory
owner="{{ forum_user }}" group="{{ common_web_group }}"
notify: forum | restart the forum service
-- name: forum | setup the forum env
+- name: setup the forum env
template: >
src=forum_env.j2 dest={{ forum_app_dir }}/forum_env
owner={{ forum_user }} group={{ common_web_user }}
...
@@ -28,34 +28,34 @@
---
-- name: gh_mirror | install pip packages
+- name: install pip packages
pip: name={{ item }} state=present
with_items: gh_mirror_pip_pkgs
-- name: gh_mirror | install debian packages
+- name: install debian packages
apt: >
pkg={{ ",".join(gh_mirror_debian_pkgs) }}
state=present
update_cache=yes
-- name: gh_mirror | create gh_mirror user
+- name: create gh_mirror user
user: >
name={{ gh_mirror_user }}
state=present
-- name: gh_mirror | create the gh_mirror data directory
+- name: create the gh_mirror data directory
file: >
path={{ gh_mirror_data_dir }}
state=directory
owner={{ gh_mirror_user }}
group={{ gh_mirror_group }}
-- name: gh_mirror | create the gh_mirror app directory
+- name: create the gh_mirror app directory
file: >
path={{ gh_mirror_app_dir }}
state=directory
-- name: gh_mirror | create org config
+- name: create org config
template: src=orgs.yml.j2 dest={{ gh_mirror_app_dir }}/orgs.yml
- name: copying sync scripts
...
@@ -12,34 +12,34 @@
# - mark
-- name: gh_users | creating default .bashrc
+- name: creating default .bashrc
template: >
src=default.bashrc.j2 dest=/etc/skel/.bashrc
mode=0644 owner=root group=root
-- name: gh_users | create gh group
+- name: create gh group
group: name=gh state=present
# TODO: give limited sudo access to this group
-- name: gh_users | grant full sudo access to gh group
+- name: grant full sudo access to gh group
copy: >
content="%gh ALL=(ALL) NOPASSWD:ALL"
dest=/etc/sudoers.d/gh owner=root group=root
mode=0440 validate='visudo -cf %s'
-- name: gh_users | create github users
+- name: create github users
user:
name={{ item }} groups=gh
shell=/bin/bash
with_items: gh_users
-- name: gh_users | create .ssh directory
+- name: create .ssh directory
file:
path=/home/{{ item }}/.ssh state=directory mode=0700
owner={{ item }}
with_items: gh_users
-- name: gh_users | copy github key[s] to .ssh/authorized_keys
+- name: copy github key[s] to .ssh/authorized_keys
get_url:
url=https://github.com/{{ item }}.keys
dest=/home/{{ item }}/.ssh/authorized_keys mode=0600
...
---
# Install and configure simple glusterFS shared storage
-- name: gluster | all | Install common packages
+- name: Install common packages
apt: name={{ item }} state=present
with_items:
- glusterfs-client
@@ -9,20 +9,20 @@
- nfs-common
tags: gluster
-- name: gluster | all | Install server packages
+- name: Install server packages
apt: name=glusterfs-server state=present
when: >
"{{ ansible_default_ipv4.address }}" "{{ gluster_peers|join(' ') }}"
tags: gluster
-- name: gluster | all | enable server
+- name: enable server
service: name=glusterfs-server state=started enabled=yes
when: >
"{{ ansible_default_ipv4.address }}" in "{{ gluster_peers|join(' ') }}"
tags: gluster
# Ignoring error below so that we can move the data folder and have it be a link
-- name: gluster | all | create folders
+- name: create folders
file: path={{ item.path }} state=directory
with_items: gluster_volumes
when: >
@@ -30,39 +30,39 @@
ignore_errors: yes
tags: gluster
-- name: gluster | primary | create peers
+- name: create peers
command: gluster peer probe {{ item }}
with_items: gluster_peers
when: ansible_default_ipv4.address == gluster_primary_ip
tags: gluster
-- name: gluster | primary | create volumes
+- name: create volumes
command: gluster volume create {{ item.name }} replica {{ item.replicas }} transport tcp {% for server in gluster_peers %}{{ server }}:{{ item.path }} {% endfor %}
with_items: gluster_volumes
when: ansible_default_ipv4.address == gluster_primary_ip
ignore_errors: yes # There should be better error checking here
tags: gluster
-- name: gluster | primary | start volumes
+- name: start volumes
command: gluster volume start {{ item.name }}
with_items: gluster_volumes
when: ansible_default_ipv4.address == gluster_primary_ip
ignore_errors: yes # There should be better error checking here
tags: gluster
-- name: gluster | primary | set security
+- name: set security
command: gluster volume set {{ item.name }} auth.allow {{ item.security }}
with_items: gluster_volumes
when: ansible_default_ipv4.address == gluster_primary_ip
tags: gluster
-- name: gluster | primary | set performance cache
+- name: set performance cache
command: gluster volume set {{ item.name }} performance.cache-size {{ item.cache_size }}
with_items: gluster_volumes
when: ansible_default_ipv4.address == gluster_primary_ip
tags: gluster
-- name: gluster | all | mount volume
+- name: mount volume
mount: >
name={{ item.mount_location }}
src={{ gluster_primary_ip }}:{{ item.name }}
@@ -74,7 +74,7 @@
# This required due to an annoying bug in Ubuntu and gluster where it tries to mount the system
# before the network stack is up and can't lookup 127.0.0.1
-- name: gluster | all | sleep mount
+- name: sleep mount
lineinfile: >
dest=/etc/rc.local
line='sleep 5; /bin/mount -a'
...
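For illustration, the Jinja loop in the "create volumes" command above emits one brick argument per peer. With hypothetical values (not taken from this repo) of gluster_peers set to 10.0.0.1 and 10.0.0.2 and a volume item of name=sharedvol, replicas=2, path=/glusterfs/sharedvol, the task would render roughly to:

    gluster volume create sharedvol replica 2 transport tcp 10.0.0.1:/glusterfs/sharedvol 10.0.0.2:/glusterfs/sharedvol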
@@ -14,11 +14,11 @@
# Overview:
#
#
-- name: haproxy | restart haproxy
+- name: restart haproxy
service: name=haproxy state=restarted
-- name: haproxy | reload haproxy
+- name: reload haproxy
service: name=haproxy state=reloaded
-- name: haproxy | restart rsyslog
+- name: restart rsyslog
service: name=rsyslog state=restarted
@@ -17,26 +17,26 @@
# so it allows for a configuration template to be overriden
# with a variable
-- name: haproxy | Install haproxy
+- name: Install haproxy
apt: pkg=haproxy state={{ pkgs.haproxy.state }}
notify: haproxy | restart haproxy
-- name: haproxy | Server configuration file
+- name: Server configuration file
template: >
src={{ haproxy_template_dir }}/haproxy.cfg.j2 dest=/etc/haproxy/haproxy.cfg
owner=root group=root mode=0644
notify: haproxy | reload haproxy
-- name: haproxy | Enabled in default
+- name: Enabled in default
lineinfile: dest=/etc/default/haproxy regexp=^ENABLED=.$ line=ENABLED=1
notify: haproxy | restart haproxy
-- name: haproxy | install logrotate
+- name: install logrotate
template: src=haproxy.logrotate.j2 dest=/etc/logrotate.d/haproxy mode=0644
-- name: haproxy | install rsyslog conf
+- name: install rsyslog conf
template: src=haproxy.rsyslog.j2 dest=/etc/rsyslog.d/haproxy.conf mode=0644
notify: haproxy | restart rsyslog
-- name: haproxy | make sure haproxy has started
+- name: make sure haproxy has started
service: name=haproxy state=started
---
-- name: jenkins_master | restart Jenkins
+- name: restart Jenkins
service: name=jenkins state=restarted
-- name: jenkins_master | start nginx
+- name: start nginx
service: name=nginx state=started
-- name: jenkins_master | reload nginx
+- name: reload nginx
service: name=nginx state=reloaded
--- ---
- name: jenkins_master | install jenkins specific system packages - name: install jenkins specific system packages
apt: apt:
pkg={{','.join(jenkins_debian_pkgs)}} pkg={{','.join(jenkins_debian_pkgs)}}
state=present update_cache=yes state=present update_cache=yes
tags: tags:
- jenkins - jenkins
- name: jenkins_master | install jenkins extra system packages - name: install jenkins extra system packages
apt: apt:
pkg={{','.join(JENKINS_EXTRA_PKGS)}} pkg={{','.join(JENKINS_EXTRA_PKGS)}}
state=present update_cache=yes state=present update_cache=yes
tags: tags:
- jenkins - jenkins
- name: jenkins_master | create jenkins group - name: create jenkins group
group: name={{ jenkins_group }} state=present group: name={{ jenkins_group }} state=present
- name: jenkins_master | add the jenkins user to the group - name: add the jenkins user to the group
user: name={{ jenkins_user }} append=yes groups={{ jenkins_group }} user: name={{ jenkins_user }} append=yes groups={{ jenkins_group }}
# Should be resolved in the next release, but until then we need to do this # Should be resolved in the next release, but until then we need to do this
# https://issues.jenkins-ci.org/browse/JENKINS-20407 # https://issues.jenkins-ci.org/browse/JENKINS-20407
- name: jenkins_master | workaround for JENKINS-20407 - name: workaround for JENKINS-20407
command: "mkdir -p /var/run/jenkins" command: "mkdir -p /var/run/jenkins"
- name: jenkins_master | download Jenkins package - name: download Jenkins package
get_url: url="{{ jenkins_deb_url }}" dest="/tmp/{{ jenkins_deb }}" get_url: url="{{ jenkins_deb_url }}" dest="/tmp/{{ jenkins_deb }}"
- name: jenkins_master | install Jenkins package - name: install Jenkins package
command: dpkg -i --force-depends "/tmp/{{ jenkins_deb }}" command: dpkg -i --force-depends "/tmp/{{ jenkins_deb }}"
- name: jenkins_master | stop Jenkins - name: stop Jenkins
service: name=jenkins state=stopped service: name=jenkins state=stopped
# Move /var/lib/jenkins to Jenkins home (on the EBS) # Move /var/lib/jenkins to Jenkins home (on the EBS)
- name: jenkins_master | move /var/lib/jenkins - name: move /var/lib/jenkins
command: mv /var/lib/jenkins {{ jenkins_home }} command: mv /var/lib/jenkins {{ jenkins_home }}
creates={{ jenkins_home }} creates={{ jenkins_home }}
- name: jenkins_master | set owner for Jenkins home - name: set owner for Jenkins home
file: path={{ jenkins_home }} recurse=yes state=directory file: path={{ jenkins_home }} recurse=yes state=directory
owner={{ jenkins_user }} group={{ jenkins_group }} owner={{ jenkins_user }} group={{ jenkins_group }}
# Symlink /var/lib/jenkins to {{ COMMON_DATA_DIR }}/jenkins # Symlink /var/lib/jenkins to {{ COMMON_DATA_DIR }}/jenkins
# since Jenkins will expect its files to be in /var/lib/jenkins # since Jenkins will expect its files to be in /var/lib/jenkins
- name: jenkins_master | symlink /var/lib/jenkins - name: symlink /var/lib/jenkins
file: src={{ jenkins_home }} dest=/var/lib/jenkins state=link file: src={{ jenkins_home }} dest=/var/lib/jenkins state=link
owner={{ jenkins_user }} group={{ jenkins_group }} owner={{ jenkins_user }} group={{ jenkins_group }}
notify: notify:
- jenkins_master | restart Jenkins - jenkins_master | restart Jenkins
- name: jenkins_master | make plugins directory - name: make plugins directory
sudo_user: jenkins sudo_user: jenkins
shell: mkdir -p {{ jenkins_home }}/plugins shell: mkdir -p {{ jenkins_home }}/plugins
# We first download the plugins to a temp directory and include # We first download the plugins to a temp directory and include
# the version in the file name. That way, if we increment # the version in the file name. That way, if we increment
# the version, the plugin will be updated in Jenkins # the version, the plugin will be updated in Jenkins
- name: jenkins_master | download Jenkins plugins - name: download Jenkins plugins
get_url: url=http://updates.jenkins-ci.org/download/plugins/{{ item.name }}/{{ item.version }}/{{ item.name }}.hpi get_url: url=http://updates.jenkins-ci.org/download/plugins/{{ item.name }}/{{ item.version }}/{{ item.name }}.hpi
dest=/tmp/{{ item.name }}_{{ item.version }} dest=/tmp/{{ item.name }}_{{ item.version }}
with_items: "{{ jenkins_plugins }}" with_items: "{{ jenkins_plugins }}"
- name: jenkins_master | install Jenkins plugins - name: install Jenkins plugins
command: cp /tmp/{{ item.name }}_{{ item.version }} {{ jenkins_home }}/plugins/{{ item.name }}.hpi command: cp /tmp/{{ item.name }}_{{ item.version }} {{ jenkins_home }}/plugins/{{ item.name }}.hpi
with_items: "{{ jenkins_plugins }}" with_items: "{{ jenkins_plugins }}"
- name: jenkins_master | set Jenkins plugin permissions - name: set Jenkins plugin permissions
file: path={{ jenkins_home }}/plugins/{{ item.name }}.hpi file: path={{ jenkins_home }}/plugins/{{ item.name }}.hpi
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700 owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
with_items: "{{ jenkins_plugins }}" with_items: "{{ jenkins_plugins }}"
...@@ -78,22 +78,22 @@ ...@@ -78,22 +78,22 @@
# certain issues. If these changes get merged # certain issues. If these changes get merged
# upstream, we may be able to use the regular plugin install process. # upstream, we may be able to use the regular plugin install process.
# Until then, we compile and install the forks ourselves. # Until then, we compile and install the forks ourselves.
- name: jenkins_master | checkout custom plugin repo - name: checkout custom plugin repo
git: repo={{ item.repo_url }} dest=/tmp/{{ item.repo_name }} version={{ item.version }} git: repo={{ item.repo_url }} dest=/tmp/{{ item.repo_name }} version={{ item.version }}
with_items: "{{ jenkins_custom_plugins }}" with_items: "{{ jenkins_custom_plugins }}"
- name: jenkins_master | compile custom plugins - name: compile custom plugins
command: mvn -Dmaven.test.skip=true install chdir=/tmp/{{ item.repo_name }} command: mvn -Dmaven.test.skip=true install chdir=/tmp/{{ item.repo_name }}
with_items: "{{ jenkins_custom_plugins }}" with_items: "{{ jenkins_custom_plugins }}"
- name: jenkins_master | install custom plugins - name: install custom plugins
command: mv /tmp/{{ item.repo_name }}/target/{{ item.package }} command: mv /tmp/{{ item.repo_name }}/target/{{ item.package }}
{{ jenkins_home }}/plugins/{{ item.package }} {{ jenkins_home }}/plugins/{{ item.package }}
with_items: "{{ jenkins_custom_plugins }}" with_items: "{{ jenkins_custom_plugins }}"
notify: notify:
- jenkins_master | restart Jenkins - jenkins_master | restart Jenkins
- name: jenkins_master | set custom plugin permissions - name: set custom plugin permissions
file: path={{ jenkins_home }}/plugins/{{ item.package }} file: path={{ jenkins_home }}/plugins/{{ item.package }}
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700 owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
with_items: "{{ jenkins_custom_plugins }}" with_items: "{{ jenkins_custom_plugins }}"
...@@ -103,17 +103,17 @@ ...@@ -103,17 +103,17 @@
# Jenkins will overwrite updated plugins with its built-in version # Jenkins will overwrite updated plugins with its built-in version
# unless we create a ".pinned" file for the plugin. # unless we create a ".pinned" file for the plugin.
# See https://issues.jenkins-ci.org/browse/JENKINS-13129 # See https://issues.jenkins-ci.org/browse/JENKINS-13129
- name: jenkins_master | create plugin pin files - name: create plugin pin files
command: touch {{ jenkins_home }}/plugins/{{ item }}.jpi.pinned command: touch {{ jenkins_home }}/plugins/{{ item }}.jpi.pinned
creates={{ jenkins_home }}/plugins/{{ item }}.jpi.pinned creates={{ jenkins_home }}/plugins/{{ item }}.jpi.pinned
with_items: "{{ jenkins_bundled_plugins }}" with_items: "{{ jenkins_bundled_plugins }}"
- name: jenkins_master | setup nginx vhost - name: setup nginx vhost
template: template:
src=etc/nginx/sites-available/jenkins.j2 src=etc/nginx/sites-available/jenkins.j2
dest=/etc/nginx/sites-available/jenkins dest=/etc/nginx/sites-available/jenkins
- name: jenkins_master | enable jenkins vhost - name: enable jenkins vhost
file: file:
src=/etc/nginx/sites-available/jenkins src=/etc/nginx/sites-available/jenkins
dest=/etc/nginx/sites-enabled/jenkins dest=/etc/nginx/sites-enabled/jenkins
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
# Will terminate an instance if one and only one already exists # Will terminate an instance if one and only one already exists
# with the same name # with the same name
- name: launch_ec2 | lookup tags for terminating existing instance - name: lookup tags for terminating existing instance
local_action: local_action:
module: ec2_lookup module: ec2_lookup
region: "{{ region }}" region: "{{ region }}"
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
register: tag_lookup register: tag_lookup
when: terminate_instance == true when: terminate_instance == true
- name: launch_ec2 | checking for other instances - name: checking for other instances
debug: msg="Too many results returned, not terminating!" debug: msg="Too many results returned, not terminating!"
when: terminate_instance == true and tag_lookup.instance_ids|length > 1 when: terminate_instance == true and tag_lookup.instance_ids|length > 1
...@@ -34,7 +34,7 @@ ...@@ -34,7 +34,7 @@
state: absent state: absent
when: terminate_instance == true and elb and tag_lookup.instance_ids|length == 1 when: terminate_instance == true and elb and tag_lookup.instance_ids|length == 1
- name: launch_ec2 | Launch ec2 instance - name: Launch ec2 instance
local_action: local_action:
module: ec2_local module: ec2_local
keypair: "{{ keypair }}" keypair: "{{ keypair }}"
...@@ -49,7 +49,7 @@ ...@@ -49,7 +49,7 @@
instance_profile_name: "{{ instance_profile_name }}" instance_profile_name: "{{ instance_profile_name }}"
register: ec2 register: ec2
- name: launch_ec2 | Add DNS name - name: Add DNS name
local_action: local_action:
module: route53 module: route53
overwrite: yes overwrite: yes
...@@ -61,7 +61,7 @@ ...@@ -61,7 +61,7 @@
value: "{{ item.public_dns_name }}" value: "{{ item.public_dns_name }}"
with_items: "{{ ec2.instances }}" with_items: "{{ ec2.instances }}"
- name: launch_ec2 | Add DNS name studio - name: Add DNS name studio
local_action: local_action:
module: route53 module: route53
overwrite: yes overwrite: yes
...@@ -73,7 +73,7 @@ ...@@ -73,7 +73,7 @@
value: "{{ item.public_dns_name }}" value: "{{ item.public_dns_name }}"
with_items: "{{ ec2.instances }}" with_items: "{{ ec2.instances }}"
- name: launch_ec2 | Add DNS name preview - name: Add DNS name preview
local_action: local_action:
module: route53 module: route53
overwrite: yes overwrite: yes
...@@ -86,14 +86,14 @@ ...@@ -86,14 +86,14 @@
with_items: "{{ ec2.instances }}" with_items: "{{ ec2.instances }}"
- name: launch_ec2 | Add new instance to host group - name: Add new instance to host group
local_action: > local_action: >
add_host add_host
hostname={{ item.public_ip }} hostname={{ item.public_ip }}
groupname=launched groupname=launched
with_items: "{{ ec2.instances }}" with_items: "{{ ec2.instances }}"
- name: launch_ec2 | Wait for SSH to come up - name: Wait for SSH to come up
local_action: > local_action: >
wait_for wait_for
host={{ item.public_dns_name }} host={{ item.public_dns_name }}
......
...@@ -16,14 +16,14 @@ ...@@ -16,14 +16,14 @@
- fail: msg="secure_dir not defined. This is a path to the secure ora config file." - fail: msg="secure_dir not defined. This is a path to the secure ora config file."
when: secure_dir is not defined when: secure_dir is not defined
- name: legacy_ora | create ora application config - name: create ora application config
copy: copy:
src={{secure_dir}}/files/{{COMMON_ENV_TYPE}}/legacy_ora/ora.env.json src={{secure_dir}}/files/{{COMMON_ENV_TYPE}}/legacy_ora/ora.env.json
dest={{ora_app_dir}}/env.json dest={{ora_app_dir}}/env.json
sudo_user: "{{ ora_user }}" sudo_user: "{{ ora_user }}"
register: env_state register: env_state
- name: legacy_ora | create ora auth file - name: create ora auth file
copy: copy:
src={{secure_dir}}/files/{{COMMON_ENV_TYPE}}/legacy_ora/ora.auth.json src={{secure_dir}}/files/{{COMMON_ENV_TYPE}}/legacy_ora/ora.auth.json
dest={{ora_app_dir}}/auth.json dest={{ora_app_dir}}/auth.json
...@@ -31,13 +31,13 @@ ...@@ -31,13 +31,13 @@
register: auth_state register: auth_state
# Restart ORA Services # Restart ORA Services
- name: legacy_ora | restart edx-ora - name: restart edx-ora
service: service:
name=edx-ora name=edx-ora
state=restarted state=restarted
when: env_state.changed or auth_state.changed when: env_state.changed or auth_state.changed
- name: legacy_ora | restart edx-ora-celery - name: restart edx-ora-celery
service: service:
name=edx-ora-celery name=edx-ora-celery
state=restarted state=restarted
......
--- ---
- name: local_dev | install useful system packages - name: install useful system packages
apt: apt:
pkg={{','.join(local_dev_pkgs)}} install_recommends=yes pkg={{','.join(local_dev_pkgs)}} install_recommends=yes
state=present update_cache=yes state=present update_cache=yes
- name: local_dev | set login shell for app accounts - name: set login shell for app accounts
user: name={{ item.user }} shell="/bin/bash" user: name={{ item.user }} shell="/bin/bash"
with_items: "{{ localdev_accounts }}" with_items: "{{ localdev_accounts }}"
# Ensure forum user has permissions to access .gem and .rbenv # Ensure forum user has permissions to access .gem and .rbenv
# This is a little twisty: the forum role sets the owner and group to www-data # This is a little twisty: the forum role sets the owner and group to www-data
# So we add the forum user to the www-data group and give group write permissions # So we add the forum user to the www-data group and give group write permissions
- name: local_dev | add forum user to www-data group - name: add forum user to www-data group
user: name={{ forum_user }} groups={{ common_web_group }} append=yes user: name={{ forum_user }} groups={{ common_web_group }} append=yes
- name: local_dev | set forum rbenv and gem permissions - name: set forum rbenv and gem permissions
file: file:
path={{ item }} state=directory mode=770 path={{ item }} state=directory mode=770
with_items: with_items:
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
- "{{ forum_app_dir }}/.rbenv" - "{{ forum_app_dir }}/.rbenv"
# Create scripts to configure environment # Create scripts to configure environment
- name: local_dev | create login scripts - name: create login scripts
template: template:
src=app_bashrc.j2 dest={{ item.home }}/.bashrc src=app_bashrc.j2 dest={{ item.home }}/.bashrc
owner={{ item.user }} mode=755 owner={{ item.user }} mode=755
...@@ -30,24 +30,24 @@ ...@@ -30,24 +30,24 @@
# Default to the correct git config # Default to the correct git config
# No more accidentally force pushing to master! :) # No more accidentally force pushing to master! :)
- name: local_dev | configure git - name: configure git
copy: copy:
src=gitconfig dest={{ item.home }}/.gitconfig src=gitconfig dest={{ item.home }}/.gitconfig
owner={{ item.user }} mode=700 owner={{ item.user }} mode=700
with_items: "{{ localdev_accounts }}" with_items: "{{ localdev_accounts }}"
# Configure X11 for application users # Configure X11 for application users
- name: local_dev | preserve DISPLAY for sudo - name: preserve DISPLAY for sudo
copy: copy:
src=x11_display dest=/etc/sudoers.d/x11_display src=x11_display dest=/etc/sudoers.d/x11_display
owner=root group=root mode=0440 owner=root group=root mode=0440
- name: local_dev | share X11 auth with app users at login - name: share X11 auth with app users at login
template: template:
src=share_x11.j2 dest={{ localdev_home }}/share_x11 src=share_x11.j2 dest={{ localdev_home }}/share_x11
owner={{ localdev_user }} mode=0700 owner={{ localdev_user }} mode=0700
- name: local_dev | update bashrc with X11 share script - name: update bashrc with X11 share script
lineinfile: lineinfile:
dest={{ localdev_home }}/.bashrc dest={{ localdev_home }}/.bashrc
regexp=". {{ localdev_home }}/share_x11" regexp=". {{ localdev_home }}/share_x11"
......
--- ---
- name: mongo | install python pymongo for mongo_user ansible module - name: install python pymongo for mongo_user ansible module
pip: > pip: >
name=pymongo state=present name=pymongo state=present
version=2.6.3 extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}" version=2.6.3 extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
- name: mongo | add the mongodb signing key - name: add the mongodb signing key
apt_key: > apt_key: >
id=7F0CEB10 id=7F0CEB10
url=http://docs.mongodb.org/10gen-gpg-key.asc url=http://docs.mongodb.org/10gen-gpg-key.asc
state=present state=present
- name: mongo | add the mongodb repo to the sources list - name: add the mongodb repo to the sources list
apt_repository: > apt_repository: >
repo='deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' repo='deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen'
state=present state=present
- name: mongo | install mongo server and recommends - name: install mongo server and recommends
apt: > apt: >
pkg=mongodb-10gen={{ mongo_version }} pkg=mongodb-10gen={{ mongo_version }}
state=present install_recommends=yes state=present install_recommends=yes
update_cache=yes update_cache=yes
- name: mongo | create mongo dirs - name: create mongo dirs
file: > file: >
path="{{ item }}" state=directory path="{{ item }}" state=directory
owner="{{ mongo_user }}" owner="{{ mongo_user }}"
...@@ -32,14 +32,14 @@ ...@@ -32,14 +32,14 @@
- "{{ mongo_dbpath }}" - "{{ mongo_dbpath }}"
- "{{ mongo_log_dir }}" - "{{ mongo_log_dir }}"
- name: mongo | stop mongo service - name: stop mongo service
service: name=mongodb state=stopped service: name=mongodb state=stopped
- name: mongo | move mongodb to {{ mongo_data_dir }} - name: move mongodb to {{ mongo_data_dir }}
command: mv /var/lib/mongodb {{ mongo_data_dir}}/. creates={{ mongo_data_dir }}/mongodb command: mv /var/lib/mongodb {{ mongo_data_dir}}/. creates={{ mongo_data_dir }}/mongodb
- name: mongo | copy mongodb key file - name: copy mongodb key file
copy: > copy: >
src={{ secure_dir }}/files/mongo_key src={{ secure_dir }}/files/mongo_key
dest={{ mongo_key_file }} dest={{ mongo_key_file }}
...@@ -48,27 +48,27 @@ ...@@ -48,27 +48,27 @@
group=mongodb group=mongodb
when: MONGO_CLUSTERED when: MONGO_CLUSTERED
- name: mongo | copy configuration template - name: copy configuration template
template: src=mongodb.conf.j2 dest=/etc/mongodb.conf backup=yes template: src=mongodb.conf.j2 dest=/etc/mongodb.conf backup=yes
notify: restart mongo notify: restart mongo
- name: mongo | start mongo service - name: start mongo service
service: name=mongodb state=started service: name=mongodb state=started
- name: mongo | wait for mongo server to start - name: wait for mongo server to start
wait_for: port=27017 delay=2 wait_for: port=27017 delay=2
- name: mongo | Create the file to initialize the mongod replica set - name: Create the file to initialize the mongod replica set
template: src=repset_init.j2 dest=/tmp/repset_init.js template: src=repset_init.j2 dest=/tmp/repset_init.js
when: MONGO_CLUSTERED when: MONGO_CLUSTERED
- name: mongo | Initialize the replication set - name: Initialize the replication set
shell: /usr/bin/mongo /tmp/repset_init.js shell: /usr/bin/mongo /tmp/repset_init.js
when: MONGO_CLUSTERED when: MONGO_CLUSTERED
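# A hedged sketch of the kind of call the repset_init.js template issues when
# MONGO_CLUSTERED is set; the actual template ships with the role and may differ.
#
# rs.initiate()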
# Ignore errors doesn't work because the module throws an exception # Ignore errors doesn't work because the module throws an exception
# it doesn't catch. # it doesn't catch.
- name: mongo | create a mongodb user - name: create a mongodb user
mongodb_user: > mongodb_user: >
database={{ item.database }} database={{ item.database }}
name={{ item.user }} name={{ item.user }}
......
--- ---
- name: nginx | restart nginx - name: restart nginx
service: name=nginx state=restarted service: name=nginx state=restarted
- name: nginx | reload nginx - name: reload nginx
service: name=nginx state=reloaded service: name=nginx state=reloaded
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
# - common/tasks/main.yml # - common/tasks/main.yml
--- ---
- name: nginx | create nginx app dirs - name: create nginx app dirs
file: > file: >
path="{{ item }}" path="{{ item }}"
state=directory state=directory
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
- "{{ nginx_sites_enabled_dir }}" - "{{ nginx_sites_enabled_dir }}"
notify: nginx | restart nginx notify: nginx | restart nginx
- name: nginx | create nginx data dirs - name: create nginx data dirs
file: > file: >
path="{{ item }}" path="{{ item }}"
state=directory state=directory
...@@ -25,37 +25,37 @@ ...@@ -25,37 +25,37 @@
- "{{ nginx_log_dir }}" - "{{ nginx_log_dir }}"
notify: nginx | restart nginx notify: nginx | restart nginx
- name: nginx | Install nginx packages - name: Install nginx packages
apt: pkg={{','.join(nginx_debian_pkgs)}} state=present apt: pkg={{','.join(nginx_debian_pkgs)}} state=present
notify: nginx | restart nginx notify: nginx | restart nginx
- name: nginx | Server configuration file - name: Server configuration file
template: > template: >
src=nginx.conf.j2 dest=/etc/nginx/nginx.conf src=nginx.conf.j2 dest=/etc/nginx/nginx.conf
owner=root group={{ common_web_user }} mode=0644 owner=root group={{ common_web_user }} mode=0644
notify: nginx | reload nginx notify: nginx | reload nginx
- name: nginx | Creating common nginx configuration - name: Creating common nginx configuration
template: > template: >
src=edx-release.j2 dest={{ nginx_sites_available_dir }}/edx-release src=edx-release.j2 dest={{ nginx_sites_available_dir }}/edx-release
owner=root group=root mode=0600 owner=root group=root mode=0600
notify: nginx | reload nginx notify: nginx | reload nginx
- name: nginx | Creating link for common nginx configuration - name: Creating link for common nginx configuration
file: > file: >
src={{ nginx_sites_available_dir }}/edx-release src={{ nginx_sites_available_dir }}/edx-release
dest={{ nginx_sites_enabled_dir }}/edx-release dest={{ nginx_sites_enabled_dir }}/edx-release
state=link owner=root group=root state=link owner=root group=root
notify: nginx | reload nginx notify: nginx | reload nginx
- name: nginx | Copying nginx configs for {{ nginx_sites }} - name: Copying nginx configs for {{ nginx_sites }}
template: > template: >
src={{ item }}.j2 dest={{ nginx_sites_available_dir }}/{{ item }} src={{ item }}.j2 dest={{ nginx_sites_available_dir }}/{{ item }}
owner=root group={{ common_web_user }} mode=0640 owner=root group={{ common_web_user }} mode=0640
notify: nginx | reload nginx notify: nginx | reload nginx
with_items: nginx_sites with_items: nginx_sites
- name: nginx | Creating nginx config links for {{ nginx_sites }} - name: Creating nginx config links for {{ nginx_sites }}
file: > file: >
src={{ nginx_sites_available_dir }}/{{ item }} src={{ nginx_sites_available_dir }}/{{ item }}
dest={{ nginx_sites_enabled_dir }}/{{ item }} dest={{ nginx_sites_enabled_dir }}/{{ item }}
...@@ -63,26 +63,26 @@ ...@@ -63,26 +63,26 @@
notify: nginx | reload nginx notify: nginx | reload nginx
with_items: nginx_sites with_items: nginx_sites
- name: nginx | Write out htpasswd file - name: Write out htpasswd file
htpasswd: > htpasswd: >
name={{ NGINX_HTPASSWD_USER }} name={{ NGINX_HTPASSWD_USER }}
password={{ NGINX_HTPASSWD_PASS }} password={{ NGINX_HTPASSWD_PASS }}
path={{ nginx_htpasswd_file }} path={{ nginx_htpasswd_file }}
when: NGINX_HTPASSWD_USER and NGINX_HTPASSWD_PASS when: NGINX_HTPASSWD_USER and NGINX_HTPASSWD_PASS
- name: nginx | Create nginx log file location (just in case) - name: Create nginx log file location (just in case)
file: > file: >
path={{ nginx_log_dir}} state=directory path={{ nginx_log_dir}} state=directory
owner={{ common_web_user }} group={{ common_web_user }} owner={{ common_web_user }} group={{ common_web_user }}
- name: nginx | copy ssl cert - name: copy ssl cert
copy: > copy: >
src={{ NGINX_SSL_CERTIFICATE }} src={{ NGINX_SSL_CERTIFICATE }}
dest=/etc/ssl/certs/{{ item|basename }} dest=/etc/ssl/certs/{{ item|basename }}
owner=root group=root mode=0644 owner=root group=root mode=0644
when: NGINX_ENABLE_SSL and NGINX_SSL_CERTIFICATE != 'ssl-cert-snakeoil.pem' when: NGINX_ENABLE_SSL and NGINX_SSL_CERTIFICATE != 'ssl-cert-snakeoil.pem'
- name: nginx | copy ssl key - name: copy ssl key
copy: > copy: >
src={{ NGINX_SSL_KEY }} src={{ NGINX_SSL_KEY }}
dest=/etc/ssl/private/{{ item|basename }} dest=/etc/ssl/private/{{ item|basename }}
...@@ -91,18 +91,18 @@ ...@@ -91,18 +91,18 @@
# removing default link # removing default link
- name: nginx | Removing default nginx config and reloading (sites-enabled) - name: Removing default nginx config and reloading (sites-enabled)
file: path={{ nginx_sites_enabled_dir }}/default state=absent file: path={{ nginx_sites_enabled_dir }}/default state=absent
notify: nginx | reload nginx notify: nginx | reload nginx
# Note that nginx logs to /var/log until it reads its configuration, so /etc/logrotate.d/nginx is still good # Note that nginx logs to /var/log until it reads its configuration, so /etc/logrotate.d/nginx is still good
- name: nginx | Set up nginx access log rotation - name: Set up nginx access log rotation
template: > template: >
dest=/etc/logrotate.d/nginx-access src=edx_logrotate_nginx_access.j2 dest=/etc/logrotate.d/nginx-access src=edx_logrotate_nginx_access.j2
owner=root group=root mode=644 owner=root group=root mode=644
- name: nginx | Set up nginx error log rotation - name: Set up nginx error log rotation
template: > template: >
dest=/etc/logrotate.d/nginx-error src=edx_logrotate_nginx_error.j2 dest=/etc/logrotate.d/nginx-error src=edx_logrotate_nginx_error.j2
owner=root group=root mode=644 owner=root group=root mode=644
...@@ -110,10 +110,10 @@ ...@@ -110,10 +110,10 @@
# If tasks that notify restart nginx don't change the state of the remote system # If tasks that notify restart nginx don't change the state of the remote system
# their corresponding notifications don't get run. If nginx has been stopped for # their corresponding notifications don't get run. If nginx has been stopped for
# any reason, this will ensure that it is started up again. # any reason, this will ensure that it is started up again.
- name: nginx | make sure nginx has started - name: make sure nginx has started
service: name=nginx state=started service: name=nginx state=started
when: start_services when: start_services
- name: nginx | make sure nginx has stopped - name: make sure nginx has stopped
service: name=nginx state=stopped service: name=nginx state=stopped
when: not start_services when: not start_services
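# A hedged usage sketch for the start_services flag referenced above; the
# inventory and playbook names are examples only.
#
# ansible-playbook -i inventory.ini site.yml -e "start_services=false"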
--- ---
- name: notifier | restart notifier-scheduler - name: restart notifier-scheduler
supervisorctl_local: > supervisorctl_local: >
name=notifier-scheduler name=notifier-scheduler
state=restarted state=restarted
config={{ supervisor_cfg }} config={{ supervisor_cfg }}
supervisorctl_path={{ supervisor_ctl }} supervisorctl_path={{ supervisor_ctl }}
- name: notifier | restart notifier-celery-workers - name: restart notifier-celery-workers
supervisorctl_local: > supervisorctl_local: >
name=notifier-celery-workers name=notifier-celery-workers
state=restarted state=restarted
......
...@@ -17,82 +17,82 @@ ...@@ -17,82 +17,82 @@
# - common # - common
# - notifier # - notifier
# #
- name: notifier | install notifier specific system packages - name: install notifier specific system packages
apt: pkg={{','.join(notifier_debian_pkgs)}} state=present apt: pkg={{','.join(notifier_debian_pkgs)}} state=present
- name: notifier | check if incommon ca is installed - name: check if incommon ca is installed
command: test -e /usr/share/ca-certificates/incommon/InCommonServerCA.crt command: test -e /usr/share/ca-certificates/incommon/InCommonServerCA.crt
register: incommon_present register: incommon_present
ignore_errors: yes ignore_errors: yes
- name: common | create incommon ca directory - name: create incommon ca directory
file: file:
path="/usr/share/ca-certificates/incommon" mode=2775 state=directory path="/usr/share/ca-certificates/incommon" mode=2775 state=directory
when: incommon_present|failed when: incommon_present|failed
- name: common | retrieve incommon server CA - name: retrieve incommon server CA
shell: curl https://www.incommon.org/cert/repository/InCommonServerCA.txt -o /usr/share/ca-certificates/incommon/InCommonServerCA.crt shell: curl https://www.incommon.org/cert/repository/InCommonServerCA.txt -o /usr/share/ca-certificates/incommon/InCommonServerCA.crt
when: incommon_present|failed when: incommon_present|failed
- name: common | add InCommon ca cert - name: add InCommon ca cert
lineinfile: lineinfile:
dest=/etc/ca-certificates.conf dest=/etc/ca-certificates.conf
regexp='incommon/InCommonServerCA.crt' regexp='incommon/InCommonServerCA.crt'
line='incommon/InCommonServerCA.crt' line='incommon/InCommonServerCA.crt'
- name: common | update ca certs globally - name: update ca certs globally
shell: update-ca-certificates shell: update-ca-certificates
- name: notifier | create notifier user {{ NOTIFIER_USER }} - name: create notifier user {{ NOTIFIER_USER }}
user: user:
name={{ NOTIFIER_USER }} state=present shell=/bin/bash name={{ NOTIFIER_USER }} state=present shell=/bin/bash
home={{ NOTIFIER_HOME }} createhome=yes home={{ NOTIFIER_HOME }} createhome=yes
- name: notifier | setup the notifier env - name: setup the notifier env
template: template:
src=notifier_env.j2 dest={{ NOTIFIER_HOME }}/notifier_env src=notifier_env.j2 dest={{ NOTIFIER_HOME }}/notifier_env
owner="{{ NOTIFIER_USER }}" group="{{ NOTIFIER_USER }}" owner="{{ NOTIFIER_USER }}" group="{{ NOTIFIER_USER }}"
- name: notifier | drop a bash_profile - name: drop a bash_profile
copy: > copy: >
src=../../common/files/bash_profile src=../../common/files/bash_profile
dest={{ NOTIFIER_HOME }}/.bash_profile dest={{ NOTIFIER_HOME }}/.bash_profile
owner={{ NOTIFIER_USER }} owner={{ NOTIFIER_USER }}
group={{ NOTIFIER_USER }} group={{ NOTIFIER_USER }}
- name: notifier | ensure .bashrc exists - name: ensure .bashrc exists
shell: touch {{ NOTIFIER_HOME }}/.bashrc shell: touch {{ NOTIFIER_HOME }}/.bashrc
sudo: true sudo: true
sudo_user: "{{ NOTIFIER_USER }}" sudo_user: "{{ NOTIFIER_USER }}"
- name: notifier | add source of notifier_env to .bashrc - name: add source of notifier_env to .bashrc
lineinfile: lineinfile:
dest={{ NOTIFIER_HOME }}/.bashrc dest={{ NOTIFIER_HOME }}/.bashrc
regexp='. {{ NOTIFIER_HOME }}/notifier_env' regexp='. {{ NOTIFIER_HOME }}/notifier_env'
line='. {{ NOTIFIER_HOME }}/notifier_env' line='. {{ NOTIFIER_HOME }}/notifier_env'
- name: notifier | add source venv to .bashrc - name: add source venv to .bashrc
lineinfile: lineinfile:
dest={{ NOTIFIER_HOME }}/.bashrc dest={{ NOTIFIER_HOME }}/.bashrc
regexp='. {{ NOTIFIER_VENV_DIR }}/bin/activate' regexp='. {{ NOTIFIER_VENV_DIR }}/bin/activate'
line='. {{ NOTIFIER_VENV_DIR }}/bin/activate' line='. {{ NOTIFIER_VENV_DIR }}/bin/activate'
- name: notifier | create notifier DB directory - name: create notifier DB directory
file: file:
path="{{ NOTIFIER_DB_DIR }}" mode=2775 state=directory owner={{ NOTIFIER_USER }} group={{ NOTIFIER_WEB_USER }} path="{{ NOTIFIER_DB_DIR }}" mode=2775 state=directory owner={{ NOTIFIER_USER }} group={{ NOTIFIER_WEB_USER }}
- name: notifier | create notifier/bin directory - name: create notifier/bin directory
file: file:
path="{{ NOTIFIER_HOME }}/bin" mode=2775 state=directory owner={{ NOTIFIER_USER }} group={{ NOTIFIER_USER }} path="{{ NOTIFIER_HOME }}/bin" mode=2775 state=directory owner={{ NOTIFIER_USER }} group={{ NOTIFIER_USER }}
- name: notifier | supervisord config for celery workers - name: supervisord config for celery workers
template: > template: >
src=edx/app/supervisor/conf.d/notifier-celery-workers.conf.j2 src=edx/app/supervisor/conf.d/notifier-celery-workers.conf.j2
dest="{{ supervisor_cfg_dir }}/notifier-celery-workers.conf" dest="{{ supervisor_cfg_dir }}/notifier-celery-workers.conf"
sudo_user: "{{ supervisor_user }}" sudo_user: "{{ supervisor_user }}"
notify: notifier | restart notifier-celery-workers notify: notifier | restart notifier-celery-workers
- name: notifier | supervisord config for scheduler - name: supervisord config for scheduler
template: > template: >
src=edx/app/supervisor/conf.d/notifier-scheduler.conf.j2 src=edx/app/supervisor/conf.d/notifier-scheduler.conf.j2
dest="{{ supervisor_cfg_dir }}/notifier-scheduler.conf" dest="{{ supervisor_cfg_dir }}/notifier-scheduler.conf"
......
--- ---
- name: ora | restart ora - name: restart ora
supervisorctl_local: > supervisorctl_local: >
name=ora name=ora
supervisorctl_path={{ supervisor_ctl }} supervisorctl_path={{ supervisor_ctl }}
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
state=restarted state=restarted
when: start_services and ora_installed is defined and not devstack when: start_services and ora_installed is defined and not devstack
- name: ora | restart ora_celery - name: restart ora_celery
supervisorctl_local: > supervisorctl_local: >
name=ora_celery name=ora_celery
supervisorctl_path={{ supervisor_ctl }} supervisorctl_path={{ supervisor_ctl }}
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
# - common/tasks/main.yml # - common/tasks/main.yml
--- ---
- name: ora | create application user - name: create application user
user: > user: >
name="{{ ora_user }}" home="{{ ora_app_dir }}" name="{{ ora_user }}" home="{{ ora_app_dir }}"
createhome=no shell=/bin/false createhome=no shell=/bin/false
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
- ora | restart ora - ora | restart ora
- ora | restart ora_celery - ora | restart ora_celery
- name: ora | create ora app dir - name: create ora app dir
file: > file: >
path="{{ item }}" state=directory path="{{ item }}" state=directory
owner="{{ ora_user }}" group="{{ common_web_group }}" owner="{{ ora_user }}" group="{{ common_web_group }}"
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
- "{{ ora_venvs_dir }}" - "{{ ora_venvs_dir }}"
- "{{ ora_app_dir }}" - "{{ ora_app_dir }}"
- name: ora | create ora data dir, owned by {{ common_web_user }} - name: create ora data dir, owned by {{ common_web_user }}
file: > file: >
path="{{ item }}" state=directory path="{{ item }}" state=directory
owner="{{ common_web_user }}" group="{{ common_web_group }}" owner="{{ common_web_user }}" group="{{ common_web_group }}"
...@@ -34,14 +34,14 @@ ...@@ -34,14 +34,14 @@
- "{{ ora_data_course_dir }}" - "{{ ora_data_course_dir }}"
- "{{ ora_app_dir }}/ml_models" - "{{ ora_app_dir }}/ml_models"
- name: ora | install debian packages that ora needs - name: install debian packages that ora needs
apt: pkg={{item}} state=present apt: pkg={{item}} state=present
notify: notify:
- ora | restart ora - ora | restart ora
- ora | restart ora_celery - ora | restart ora_celery
with_items: ora_debian_pkgs with_items: ora_debian_pkgs
- name: ora | install debian packages for ease that ora needs - name: install debian packages for ease that ora needs
apt: pkg={{item}} state=present apt: pkg={{item}} state=present
notify: notify:
- ora | restart ora - ora | restart ora
......
...@@ -12,12 +12,12 @@ ...@@ -12,12 +12,12 @@
# - common # - common
# - oraclejdk # - oraclejdk
- name: oraclejdk | check for Oracle Java version {{ oraclejdk_base }} - name: check for Oracle Java version {{ oraclejdk_base }}
command: test -d /usr/lib/jvm/{{ oraclejdk_base }} command: test -d /usr/lib/jvm/{{ oraclejdk_base }}
ignore_errors: true ignore_errors: true
register: oraclejdk_present register: oraclejdk_present
- name: oraclejdk | download Oracle Java - name: download Oracle Java
shell: > shell: >
curl -b gpw_e24=http%3A%2F%2Fwww.oracle.com -O -L {{ oraclejdk_url }} curl -b gpw_e24=http%3A%2F%2Fwww.oracle.com -O -L {{ oraclejdk_url }}
executable=/bin/bash executable=/bin/bash
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
creates=/var/tmp/{{ oraclejdk_file }} creates=/var/tmp/{{ oraclejdk_file }}
when: oraclejdk_present|failed when: oraclejdk_present|failed
- name: oraclejdk | install Oracle Java - name: install Oracle Java
shell: > shell: >
mkdir -p /usr/lib/jvm && tar -C /usr/lib/jvm -zxvf /var/tmp/{{ oraclejdk_file }} mkdir -p /usr/lib/jvm && tar -C /usr/lib/jvm -zxvf /var/tmp/{{ oraclejdk_file }}
creates=/usr/lib/jvm/{{ oraclejdk_base }} creates=/usr/lib/jvm/{{ oraclejdk_base }}
...@@ -34,10 +34,10 @@ ...@@ -34,10 +34,10 @@
sudo: true sudo: true
when: oraclejdk_present|failed when: oraclejdk_present|failed
- name: oraclejdk | create symlink expected by elasticsearch - name: create symlink expected by elasticsearch
file: src=/usr/lib/jvm/{{ oraclejdk_base }} dest={{ oraclejdk_link }} state=link file: src=/usr/lib/jvm/{{ oraclejdk_base }} dest={{ oraclejdk_link }} state=link
when: oraclejdk_present|failed when: oraclejdk_present|failed
- name: oraclejdk | add JAVA_HOME for Oracle Java - name: add JAVA_HOME for Oracle Java
template: src=java.sh.j2 dest=/etc/profile.d/java.sh owner=root group=root mode=0755 template: src=java.sh.j2 dest=/etc/profile.d/java.sh owner=root group=root mode=0755
when: oraclejdk_present|failed when: oraclejdk_present|failed
...@@ -3,80 +3,80 @@ ...@@ -3,80 +3,80 @@
# There is a bug with initializing multiple nodes in the HA cluster at once # There is a bug with initializing multiple nodes in the HA cluster at once
# http://rabbitmq.1065348.n5.nabble.com/Rabbitmq-boot-failure-with-quot-tables-not-present-quot-td24494.html # http://rabbitmq.1065348.n5.nabble.com/Rabbitmq-boot-failure-with-quot-tables-not-present-quot-td24494.html
- name: rabbitmq | trust rabbit repository - name: trust rabbit repository
apt_key: url={{rabbitmq_apt_key}} state=present apt_key: url={{rabbitmq_apt_key}} state=present
- name: rabbitmq | install python-software-properties if debian - name: install python-software-properties if debian
apt: pkg={{",".join(rabbitmq_debian_pkgs)}} state=present apt: pkg={{",".join(rabbitmq_debian_pkgs)}} state=present
- name: rabbitmq | add rabbit repository - name: add rabbit repository
apt_repository: repo="{{rabbitmq_repository}}" state=present apt_repository: repo="{{rabbitmq_repository}}" state=present
- name: rabbitmq | install rabbitmq - name: install rabbitmq
apt: pkg={{rabbitmq_pkg}} state=present update_cache=yes apt: pkg={{rabbitmq_pkg}} state=present update_cache=yes
- name: rabbitmq | stop rabbit cluster - name: stop rabbit cluster
service: name=rabbitmq-server state=stopped service: name=rabbitmq-server state=stopped
# in case there are lingering processes, ignore errors # in case there are lingering processes, ignore errors
# silently # silently
- name: rabbitmq | send sigterm to any running rabbitmq processes - name: send sigterm to any running rabbitmq processes
shell: pkill -u rabbitmq || true shell: pkill -u rabbitmq || true
# Defaulting to /var/lib/rabbitmq # Defaulting to /var/lib/rabbitmq
- name: rabbitmq | create cookie directory - name: create cookie directory
file: > file: >
path={{rabbitmq_cookie_dir}} path={{rabbitmq_cookie_dir}}
owner=rabbitmq group=rabbitmq mode=0755 state=directory owner=rabbitmq group=rabbitmq mode=0755 state=directory
- name: rabbitmq | add rabbitmq erlang cookie - name: add rabbitmq erlang cookie
template: > template: >
src=erlang.cookie.j2 dest={{rabbitmq_cookie_location}} src=erlang.cookie.j2 dest={{rabbitmq_cookie_location}}
owner=rabbitmq group=rabbitmq mode=0400 owner=rabbitmq group=rabbitmq mode=0400
register: erlang_cookie register: erlang_cookie
# Defaulting to /etc/rabbitmq # Defaulting to /etc/rabbitmq
- name: rabbitmq | create rabbitmq config directory - name: create rabbitmq config directory
file: > file: >
path={{rabbitmq_config_dir}} path={{rabbitmq_config_dir}}
owner=root group=root mode=0755 state=directory owner=root group=root mode=0755 state=directory
- name: rabbitmq | add rabbitmq environment configuration - name: add rabbitmq environment configuration
template: > template: >
src=rabbitmq-env.conf.j2 dest={{rabbitmq_config_dir}}/rabbitmq-env.conf src=rabbitmq-env.conf.j2 dest={{rabbitmq_config_dir}}/rabbitmq-env.conf
owner=root group=root mode=0644 owner=root group=root mode=0644
- name: rabbitmq | add rabbitmq cluster configuration - name: add rabbitmq cluster configuration
template: > template: >
src=rabbitmq.config.j2 dest={{rabbitmq_config_dir}}/rabbitmq.config src=rabbitmq.config.j2 dest={{rabbitmq_config_dir}}/rabbitmq.config
owner=root group=root mode=0644 owner=root group=root mode=0644
register: cluster_configuration register: cluster_configuration
- name: rabbitmq | install plugins - name: install plugins
rabbitmq_plugin: rabbitmq_plugin:
names={{",".join(rabbitmq_plugins)}} state=enabled names={{",".join(rabbitmq_plugins)}} state=enabled
# When rabbitmq starts up it creates a folder of metadata at '/var/lib/rabbitmq/mnesia'. # When rabbitmq starts up it creates a folder of metadata at '/var/lib/rabbitmq/mnesia'.
# This folder should be deleted before clustering is setup because it retains data # This folder should be deleted before clustering is setup because it retains data
# that can conflict with the clustering information. # that can conflict with the clustering information.
- name: rabbitmq | remove mnesia configuration - name: remove mnesia configuration
file: path={{rabbitmq_mnesia_folder}} state=absent file: path={{rabbitmq_mnesia_folder}} state=absent
when: erlang_cookie.changed or cluster_configuration.changed or rabbitmq_refresh when: erlang_cookie.changed or cluster_configuration.changed or rabbitmq_refresh
- name: rabbitmq | start rabbit nodes - name: start rabbit nodes
service: name=rabbitmq-server state=restarted service: name=rabbitmq-server state=restarted
- name: rabbitmq | wait for rabbit to start - name: wait for rabbit to start
wait_for: port={{ rabbitmq_management_port }} delay=2 wait_for: port={{ rabbitmq_management_port }} delay=2
- name: rabbitmq | remove guest user - name: remove guest user
rabbitmq_user: user="guest" state=absent rabbitmq_user: user="guest" state=absent
- name: rabbitmq | add vhosts - name: add vhosts
rabbitmq_vhost: name={{ item }} state=present rabbitmq_vhost: name={{ item }} state=present
with_items: RABBITMQ_VHOSTS with_items: RABBITMQ_VHOSTS
- name: rabbitmq | add admin users - name: add admin users
rabbitmq_user: > rabbitmq_user: >
user='{{item[0].name}}' password='{{item[0].password}}' user='{{item[0].name}}' password='{{item[0].password}}'
read_priv='.*' write_priv='.*' read_priv='.*' write_priv='.*'
...@@ -87,23 +87,23 @@ ...@@ -87,23 +87,23 @@
- RABBITMQ_VHOSTS - RABBITMQ_VHOSTS
when: "'admins' in rabbitmq_auth_config" when: "'admins' in rabbitmq_auth_config"
- name: rabbitmq | make queues mirrored - name: make queues mirrored
shell: "/usr/sbin/rabbitmqctl set_policy HA '^(?!amq\\.).*' '{\"ha-mode\": \"all\"}'" shell: "/usr/sbin/rabbitmqctl set_policy HA '^(?!amq\\.).*' '{\"ha-mode\": \"all\"}'"
when: RABBITMQ_CLUSTERED or rabbitmq_clustered_hosts|length > 1 when: RABBITMQ_CLUSTERED or rabbitmq_clustered_hosts|length > 1
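# A hedged illustration of the policy set above: every queue whose name does not
# start with "amq." is mirrored across all nodes. One way to confirm it took
# effect (example command, run on a broker node):
#
# /usr/sbin/rabbitmqctl list_policies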
# #
# Depends upon the management plugin # Depends upon the management plugin
# #
- name: rabbitmq | install admin tools - name: install admin tools
get_url: > get_url: >
url=http://localhost:{{ rabbitmq_management_port }}/cli/rabbitmqadmin url=http://localhost:{{ rabbitmq_management_port }}/cli/rabbitmqadmin
dest=/usr/local/bin/rabbitmqadmin dest=/usr/local/bin/rabbitmqadmin
- name: rabbitmq | ensure rabbitmqadmin attributes - name: ensure rabbitmqadmin attributes
file: > file: >
path=/usr/local/bin/rabbitmqadmin owner=root path=/usr/local/bin/rabbitmqadmin owner=root
group=root mode=0655 group=root mode=0655
- name: rabbitmq | stop rabbit nodes - name: stop rabbit nodes
service: name=rabbitmq-server state=restarted service: name=rabbitmq-server state=restarted
when: not start_services when: not start_services
...@@ -34,95 +34,95 @@ ...@@ -34,95 +34,95 @@
- fail: msg="rbenv_ruby_version required for role" - fail: msg="rbenv_ruby_version required for role"
when: rbenv_ruby_version is not defined when: rbenv_ruby_version is not defined
- name: rbenv | create rbenv user {{ rbenv_user }} - name: create rbenv user {{ rbenv_user }}
user: > user: >
name={{ rbenv_user }} home={{ rbenv_dir }} name={{ rbenv_user }} home={{ rbenv_dir }}
shell=/bin/false createhome=no shell=/bin/false createhome=no
when: rbenv_user != common_web_user when: rbenv_user != common_web_user
- name: rbenv | create rbenv dir if it does not exist - name: create rbenv dir if it does not exist
file: > file: >
path="{{ rbenv_dir }}" owner="{{ rbenv_user }}" path="{{ rbenv_dir }}" owner="{{ rbenv_user }}"
state=directory state=directory
- name: rbenv | install build depends - name: install build depends
apt: pkg={{ ",".join(rbenv_debian_pkgs) }} state=present install_recommends=no apt: pkg={{ ",".join(rbenv_debian_pkgs) }} state=present install_recommends=no
with_items: rbenv_debian_pkgs with_items: rbenv_debian_pkgs
- name: rbenv | update rbenv repo - name: update rbenv repo
git: > git: >
repo=https://github.com/sstephenson/rbenv.git repo=https://github.com/sstephenson/rbenv.git
dest={{ rbenv_dir }}/.rbenv version={{ rbenv_version }} dest={{ rbenv_dir }}/.rbenv version={{ rbenv_version }}
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
- name: rbenv | ensure ruby_env exists - name: ensure ruby_env exists
template: > template: >
src=ruby_env.j2 dest={{ rbenv_dir }}/ruby_env src=ruby_env.j2 dest={{ rbenv_dir }}/ruby_env
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
- name: rbenv | check ruby-build installed - name: check ruby-build installed
command: test -x /usr/local/bin/ruby-build command: test -x /usr/local/bin/ruby-build
register: rbuild_present register: rbuild_present
ignore_errors: yes ignore_errors: yes
- name: rbenv | if ruby-build exists, check which versions we can install - name: if ruby-build exists, check which versions we can install
command: /usr/local/bin/ruby-build --definitions command: /usr/local/bin/ruby-build --definitions
when: rbuild_present|success when: rbuild_present|success
register: installable_ruby_vers register: installable_ruby_vers
ignore_errors: yes ignore_errors: yes
### in this block, we (re)install ruby-build if it doesn't exist or if it can't install the requested version ### in this block, we (re)install ruby-build if it doesn't exist or if it can't install the requested version
- name: rbenv | create temporary directory - name: create temporary directory
command: mktemp -d command: mktemp -d
register: tempdir register: tempdir
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers) when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)
- name: rbenv | clone ruby-build repo - name: clone ruby-build repo
git: repo=https://github.com/sstephenson/ruby-build.git dest={{ tempdir.stdout }}/ruby-build git: repo=https://github.com/sstephenson/ruby-build.git dest={{ tempdir.stdout }}/ruby-build
when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers) when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
- name: rbenv | install ruby-build - name: install ruby-build
command: ./install.sh chdir={{ tempdir.stdout }}/ruby-build command: ./install.sh chdir={{ tempdir.stdout }}/ruby-build
when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers) when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)
- name: rbenv | remove temporary directory - name: remove temporary directory
file: path={{ tempdir.stdout }} state=absent file: path={{ tempdir.stdout }} state=absent
when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers) when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)
- name: rbenv | check ruby {{ rbenv_ruby_version }} installed - name: check ruby {{ rbenv_ruby_version }} installed
shell: "rbenv versions | grep {{ rbenv_ruby_version }}" shell: "rbenv versions | grep {{ rbenv_ruby_version }}"
register: ruby_installed register: ruby_installed
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
environment: "{{ rbenv_environment }}" environment: "{{ rbenv_environment }}"
ignore_errors: yes ignore_errors: yes
- name: rbenv | install ruby {{ rbenv_ruby_version }} - name: install ruby {{ rbenv_ruby_version }}
shell: "rbenv install {{ rbenv_ruby_version }} creates={{ rbenv_dir }}/.rbenv/versions/{{ rbenv_ruby_version }}" shell: "rbenv install {{ rbenv_ruby_version }} creates={{ rbenv_dir }}/.rbenv/versions/{{ rbenv_ruby_version }}"
when: ruby_installed|failed when: ruby_installed|failed
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
environment: "{{ rbenv_environment }}" environment: "{{ rbenv_environment }}"
- name: rbenv | set global ruby {{ rbenv_ruby_version }} - name: set global ruby {{ rbenv_ruby_version }}
shell: "rbenv global {{ rbenv_ruby_version }}" shell: "rbenv global {{ rbenv_ruby_version }}"
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
environment: "{{ rbenv_environment }}" environment: "{{ rbenv_environment }}"
- name: rbenv | install bundler - name: install bundler
shell: "gem install bundler -v {{ rbenv_bundler_version }}" shell: "gem install bundler -v {{ rbenv_bundler_version }}"
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
environment: "{{ rbenv_environment }}" environment: "{{ rbenv_environment }}"
- name: rbenv | remove rbenv version of rake - name: remove rbenv version of rake
file: path="{{ rbenv_dir }}/.rbenv/versions/{{ rbenv_ruby_version }}/bin/rake" state=absent file: path="{{ rbenv_dir }}/.rbenv/versions/{{ rbenv_ruby_version }}/bin/rake" state=absent
- name: rbenv | install rake gem - name: install rake gem
shell: "gem install rake -v {{ rbenv_rake_version }}" shell: "gem install rake -v {{ rbenv_rake_version }}"
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
environment: "{{ rbenv_environment }}" environment: "{{ rbenv_environment }}"
- name: rbenv | rehash - name: rehash
shell: "rbenv rehash" shell: "rbenv rehash"
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
environment: "{{ rbenv_environment }}" environment: "{{ rbenv_environment }}"
...@@ -53,37 +53,37 @@ ...@@ -53,37 +53,37 @@
# - s3fs # - s3fs
# #
- name: s3fs | install system packages - name: install system packages
apt: pkg={{','.join(s3fs_debian_pkgs)}} state=present apt: pkg={{','.join(s3fs_debian_pkgs)}} state=present
tags: tags:
- s3fs - s3fs
- install - install
- update - update
- name: s3fs | fetch package - name: fetch package
get_url: get_url:
url={{ s3fs_download_url }} url={{ s3fs_download_url }}
dest={{ s3fs_temp_dir }} dest={{ s3fs_temp_dir }}
- name: s3fs | extract package - name: extract package
shell: shell:
/bin/tar -xzf {{ s3fs_archive }} /bin/tar -xzf {{ s3fs_archive }}
chdir={{ s3fs_temp_dir }} chdir={{ s3fs_temp_dir }}
creates={{ s3fs_temp_dir }}/{{ s3fs_version }}/configure creates={{ s3fs_temp_dir }}/{{ s3fs_version }}/configure
- name: s3fs | configure - name: configure
shell: shell:
./configure ./configure
chdir={{ s3fs_temp_dir }}/{{ s3fs_version }} chdir={{ s3fs_temp_dir }}/{{ s3fs_version }}
creates={{ s3fs_temp_dir }}/{{ s3fs_version }}/config.status creates={{ s3fs_temp_dir }}/{{ s3fs_version }}/config.status
- name: s3fs | make - name: make
shell: shell:
/usr/bin/make /usr/bin/make
chdir={{ s3fs_temp_dir }}/{{ s3fs_version }} chdir={{ s3fs_temp_dir }}/{{ s3fs_version }}
creates={{ s3fs_temp_dir }}/{{ s3fs_version }}/src/s3cmd creates={{ s3fs_temp_dir }}/{{ s3fs_version }}/src/s3cmd
- name: s3fs | make install - name: make install
shell: shell:
/usr/bin/make install /usr/bin/make install
chdir={{ s3fs_temp_dir }}/{{ s3fs_version }} chdir={{ s3fs_temp_dir }}/{{ s3fs_version }}
......
--- ---
- name: shibboleth | restart shibd - name: restart shibd
service: name=shibd state=restarted service: name=shibd state=restarted
#Install shibboleth #Install shibboleth
--- ---
- name: shibboleth | Installs shib and dependencies from apt - name: Installs shib and dependencies from apt
apt: pkg={{item}} install_recommends=no state=present update_cache=yes apt: pkg={{item}} install_recommends=no state=present update_cache=yes
with_items: with_items:
- shibboleth-sp2-schemas - shibboleth-sp2-schemas
...@@ -14,19 +14,19 @@ ...@@ -14,19 +14,19 @@
- shib - shib
- install - install
- name: shibboleth | Creates /etc/shibboleth/metadata directory - name: Creates /etc/shibboleth/metadata directory
file: path=/etc/shibboleth/metadata state=directory mode=2774 group=_shibd owner=_shibd file: path=/etc/shibboleth/metadata state=directory mode=2774 group=_shibd owner=_shibd
tags: tags:
- shib - shib
- install - install
- name: shibboleth | Downloads metadata into metadata directory as backup - name: Downloads metadata into metadata directory as backup
get_url: url=https://idp.stanford.edu/Stanford-metadata.xml dest=/etc/shibboleth/metadata/idp-metadata.xml mode=0640 group=_shibd owner=_shibd get_url: url=https://idp.stanford.edu/Stanford-metadata.xml dest=/etc/shibboleth/metadata/idp-metadata.xml mode=0640 group=_shibd owner=_shibd
tags: tags:
- shib - shib
- install - install
- name: shibboleth | writes out key and pem file - name: writes out key and pem file
template: src=sp.{{item}}.j2 dest=/etc/shibboleth/sp.{{item}} group=_shibd owner=_shibd mode=0600 template: src=sp.{{item}}.j2 dest=/etc/shibboleth/sp.{{item}} group=_shibd owner=_shibd mode=0600
with_items: with_items:
- key - key
...@@ -36,7 +36,7 @@ ...@@ -36,7 +36,7 @@
- shib - shib
- install - install
- name: shibboleth | writes out configuration files - name: writes out configuration files
template: src={{item}}.j2 dest=/etc/shibboleth/{{item}} group=_shibd owner=_shibd mode=0644 template: src={{item}}.j2 dest=/etc/shibboleth/{{item}} group=_shibd owner=_shibd mode=0644
with_items: with_items:
- attribute-map.xml - attribute-map.xml
...@@ -46,7 +46,7 @@ ...@@ -46,7 +46,7 @@
- shib - shib
- install - install
- name: shibboleth | enables shib - name: enables shib
command: a2enmod shib2 command: a2enmod shib2
notify: shibboleth | restart shibd notify: shibboleth | restart shibd
tags: tags:
......
...@@ -16,5 +16,5 @@ ...@@ -16,5 +16,5 @@
# #
# Restart Splunk # Restart Splunk
- name: splunkforwarder | restart splunkforwarder - name: restart splunkforwarder
service: name=splunk state=restarted service: name=splunk state=restarted
...@@ -22,44 +22,44 @@ ...@@ -22,44 +22,44 @@
# #
# Install Splunk Forwarder # Install Splunk Forwarder
- name: splunkforwarder | install splunkforwarder specific system packages - name: install splunkforwarder specific system packages
apt: pkg={{','.join(splunk_debian_pkgs)}} state=present apt: pkg={{','.join(splunk_debian_pkgs)}} state=present
tags: tags:
- splunk - splunk
- install - install
- update - update
- name: splunkforwarder | download the splunk deb - name: download the splunk deb
get_url: > get_url: >
dest="/tmp/{{SPLUNKFORWARDER_DEB}}" dest="/tmp/{{SPLUNKFORWARDER_DEB}}"
url="{{SPLUNKFORWARDER_PACKAGE_LOCATION}}{{SPLUNKFORWARDER_DEB}}" url="{{SPLUNKFORWARDER_PACKAGE_LOCATION}}{{SPLUNKFORWARDER_DEB}}"
register: download_deb register: download_deb
- name: splunkforwarder | install splunk forwarder - name: install splunk forwarder
shell: gdebi -nq /tmp/{{SPLUNKFORWARDER_DEB}} shell: gdebi -nq /tmp/{{SPLUNKFORWARDER_DEB}}
when: download_deb.changed when: download_deb.changed
# Create splunk user # Create splunk user
- name: splunkforwarder | create splunk user - name: create splunk user
user: name=splunk createhome=no state=present append=yes groups=syslog user: name=splunk createhome=no state=present append=yes groups=syslog
when: download_deb.changed when: download_deb.changed
# Need to start splunk manually so that it can create various files # Need to start splunk manually so that it can create various files
# and directories that aren't created till the first run and are needed # and directories that aren't created till the first run and are needed
# to run some of the below commands. # to run some of the below commands.
- name: splunkforwarder | start splunk manually - name: start splunk manually
shell: > shell: >
{{splunkforwarder_output_dir}}/bin/splunk start --accept-license --answer-yes --no-prompt {{splunkforwarder_output_dir}}/bin/splunk start --accept-license --answer-yes --no-prompt
creates={{splunkforwarder_output_dir}}/var/lib/splunk creates={{splunkforwarder_output_dir}}/var/lib/splunk
when: download_deb.changed when: download_deb.changed
register: started_manually register: started_manually
- name: splunkforwarder | stop splunk manually - name: stop splunk manually
shell: > shell: >
{{splunkforwarder_output_dir}}/bin/splunk stop --accept-license --answer-yes --no-prompt {{splunkforwarder_output_dir}}/bin/splunk stop --accept-license --answer-yes --no-prompt
when: download_deb.changed and started_manually.changed when: download_deb.changed and started_manually.changed
- name: splunkforwarder | create boot script - name: create boot script
shell: > shell: >
{{splunkforwarder_output_dir}}/bin/splunk enable boot-start -user splunk --accept-license --answer-yes --no-prompt {{splunkforwarder_output_dir}}/bin/splunk enable boot-start -user splunk --accept-license --answer-yes --no-prompt
creates=/etc/init.d/splunk creates=/etc/init.d/splunk
...@@ -68,24 +68,24 @@ ...@@ -68,24 +68,24 @@
notify: splunkforwarder | restart splunkforwarder notify: splunkforwarder | restart splunkforwarder
# Update credentials # Update credentials
- name: splunkforwarder | update admin password - name: update admin password
shell: "{{splunkforwarder_output_dir}}/bin/splunk edit user admin -password {{SPLUNKFORWARDER_PASSWORD}} -auth admin:changeme --accept-license --answer-yes --no-prompt" shell: "{{splunkforwarder_output_dir}}/bin/splunk edit user admin -password {{SPLUNKFORWARDER_PASSWORD}} -auth admin:changeme --accept-license --answer-yes --no-prompt"
when: download_deb.changed when: download_deb.changed
notify: splunkforwarder | restart splunkforwarder notify: splunkforwarder | restart splunkforwarder
- name: splunkforwarder | add chkconfig to init script - name: add chkconfig to init script
shell: 'sed -i -e "s/\/bin\/sh/\/bin\/sh\n# chkconfig: 235 98 55/" /etc/init.d/splunk' shell: 'sed -i -e "s/\/bin\/sh/\/bin\/sh\n# chkconfig: 235 98 55/" /etc/init.d/splunk'
when: download_deb.changed and create_boot_script.changed when: download_deb.changed and create_boot_script.changed
notify: splunkforwarder | restart splunkforwarder notify: splunkforwarder | restart splunkforwarder
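# A hedged illustration of what the sed above leaves at the top of
# /etc/init.d/splunk so that chkconfig-style tooling can order the service:
#
# #!/bin/sh
# # chkconfig: 235 98 55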
# Ensure permissions on splunk content # Ensure permissions on splunk content
- name: splunkforwarder | ensure splunk folder permissions - name: ensure splunk folder permissions
file: path={{splunkforwarder_output_dir}} state=directory recurse=yes owner=splunk group=splunk file: path={{splunkforwarder_output_dir}} state=directory recurse=yes owner=splunk group=splunk
when: download_deb.changed when: download_deb.changed
notify: splunkforwarder | restart splunkforwarder notify: splunkforwarder | restart splunkforwarder
# Drop template files. # Drop template files.
- name: splunkforwarder | drop input configuration - name: drop input configuration
template: template:
src=opt/splunkforwarder/etc/system/local/inputs.conf.j2 src=opt/splunkforwarder/etc/system/local/inputs.conf.j2
dest=/opt/splunkforwarder/etc/system/local/inputs.conf dest=/opt/splunkforwarder/etc/system/local/inputs.conf
...@@ -94,7 +94,7 @@ ...@@ -94,7 +94,7 @@
mode=644 mode=644
notify: splunkforwarder | restart splunkforwarder notify: splunkforwarder | restart splunkforwarder
- name: splunkforwarder | create outputs config file - name: create outputs config file
template: template:
src=opt/splunkforwarder/etc/system/local/outputs.conf.j2 src=opt/splunkforwarder/etc/system/local/outputs.conf.j2
dest=/opt/splunkforwarder/etc/system/local/outputs.conf dest=/opt/splunkforwarder/etc/system/local/outputs.conf
......
...@@ -50,19 +50,19 @@ ...@@ -50,19 +50,19 @@
# supervisor_service: upstart-service-name # supervisor_service: upstart-service-name
# #
--- ---
- name: supervisor | create application user - name: create application user
user: > user: >
name="{{ supervisor_user }}" name="{{ supervisor_user }}"
createhome=no createhome=no
shell=/bin/false shell=/bin/false
- name: supervisor | create supervisor service user - name: create supervisor service user
user: > user: >
name="{{ supervisor_service_user }}" name="{{ supervisor_service_user }}"
createhome=no createhome=no
shell=/bin/false shell=/bin/false
- name: supervisor | create supervisor directories - name: create supervisor directories
file: > file: >
name={{ item }} name={{ item }}
state=directory state=directory
...@@ -73,7 +73,7 @@ ...@@ -73,7 +73,7 @@
- "{{ supervisor_venv_dir }}" - "{{ supervisor_venv_dir }}"
- "{{ supervisor_cfg_dir }}" - "{{ supervisor_cfg_dir }}"
- name: supervisor | create supervisor directories - name: create supervisor directories
file: > file: >
name={{ item }} name={{ item }}
state=directory state=directory
...@@ -84,29 +84,29 @@ ...@@ -84,29 +84,29 @@
- "{{ supervisor_log_dir }}" - "{{ supervisor_log_dir }}"
- name: supervisor | install supervisor in its venv - name: install supervisor in its venv
pip: name=supervisor virtualenv="{{supervisor_venv_dir}}" state=present pip: name=supervisor virtualenv="{{supervisor_venv_dir}}" state=present
sudo_user: "{{ supervisor_user }}" sudo_user: "{{ supervisor_user }}"
- name: supervisor | create supervisor upstart job - name: create supervisor upstart job
template: > template: >
src=supervisor-upstart.conf.j2 dest=/etc/init/{{ supervisor_service }}.conf src=supervisor-upstart.conf.j2 dest=/etc/init/{{ supervisor_service }}.conf
owner=root group=root owner=root group=root
- name: supervisor | create supervisor master config - name: create supervisor master config
template: > template: >
src=supervisord.conf.j2 dest={{ supervisor_cfg }} src=supervisord.conf.j2 dest={{ supervisor_cfg }}
owner={{ supervisor_user }} group={{ supervisor_service_user }} owner={{ supervisor_user }} group={{ supervisor_service_user }}
mode=0644 mode=0644
- name: supervisor | create a symlink for supervisortctl - name: create a symlink for supervisortctl
file: > file: >
src={{ supervisor_ctl }} src={{ supervisor_ctl }}
dest={{ COMMON_BIN_DIR }}/{{ supervisor_ctl|basename }} dest={{ COMMON_BIN_DIR }}/{{ supervisor_ctl|basename }}
state=link state=link
when: supervisor_service == "supervisor" when: supervisor_service == "supervisor"
- name: supervisor | create a symlink for supervisor cfg - name: create a symlink for supervisor cfg
file: > file: >
src={{ item }} src={{ item }}
dest={{ COMMON_CFG_DIR }}/{{ item|basename }} dest={{ COMMON_CFG_DIR }}/{{ item|basename }}
...@@ -116,7 +116,7 @@ ...@@ -116,7 +116,7 @@
- "{{ supervisor_cfg }}" - "{{ supervisor_cfg }}"
- "{{ supervisor_cfg_dir }}" - "{{ supervisor_cfg_dir }}"
- name: supervisor | start supervisor - name: start supervisor
service: > service: >
name={{supervisor_service}} name={{supervisor_service}}
state=started state=started
...@@ -124,7 +124,7 @@ ...@@ -124,7 +124,7 @@
# calling update on supervisor too soon after it # calling update on supervisor too soon after it
# starts will result in an errror. # starts will result in an errror.
- name: supervisor | wait for web port to be available - name: wait for web port to be available
wait_for: port={{ supervisor_http_bind_port }} timeout=5 wait_for: port={{ supervisor_http_bind_port }} timeout=5
when: start_supervisor.changed when: start_supervisor.changed
...@@ -134,7 +134,7 @@ ...@@ -134,7 +134,7 @@
# we don't use notifications for supervisor because # we don't use notifications for supervisor because
# they don't work well with parameterized roles. # they don't work well with parameterized roles.
# See https://github.com/ansible/ansible/issues/4853 # See https://github.com/ansible/ansible/issues/4853
- name: supervisor | update supervisor configuration - name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update register: supervisor_update
changed_when: supervisor_update.stdout != "" changed_when: supervisor_update.stdout != ""
- name: xqueue | restart xqueue - name: restart xqueue
supervisorctl_local: > supervisorctl_local: >
name={{ item }} name={{ item }}
supervisorctl_path={{ supervisor_ctl }} supervisorctl_path={{ supervisor_ctl }}
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
# #
# #
- name: xqueue | create application user - name: create application user
user: > user: >
name="{{ xqueue_user }}" name="{{ xqueue_user }}"
home="{{ xqueue_app_dir }}" home="{{ xqueue_app_dir }}"
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
notify: notify:
- xqueue | restart xqueue - xqueue | restart xqueue
- name: xqueue | create xqueue app and venv dir - name: create xqueue app and venv dir
file: > file: >
path="{{ item }}" path="{{ item }}"
state=directory state=directory
...@@ -27,12 +27,12 @@ ...@@ -27,12 +27,12 @@
- "{{ xqueue_app_dir }}" - "{{ xqueue_app_dir }}"
- "{{ xqueue_venvs_dir }}" - "{{ xqueue_venvs_dir }}"
- name: xqueue | install a bunch of system packages on which xqueue relies - name: install a bunch of system packages on which xqueue relies
apt: pkg={{','.join(xqueue_debian_pkgs)}} state=present apt: pkg={{','.join(xqueue_debian_pkgs)}} state=present
notify: notify:
- xqueue | restart xqueue - xqueue | restart xqueue
- name: xqueue | create xqueue db - name: create xqueue db
mysql_db: > mysql_db: >
name={{xqueue_auth_config.DATABASES.default.NAME}} name={{xqueue_auth_config.DATABASES.default.NAME}}
login_host={{xqueue_auth_config.DATABASES.default.HOST}} login_host={{xqueue_auth_config.DATABASES.default.HOST}}
......
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
# Overview: # Overview:
# #
- name: xserver | restart xserver - name: restart xserver
supervisorctl_local: > supervisorctl_local: >
name=xserver name=xserver
supervisorctl_path={{ supervisor_ctl }} supervisorctl_path={{ supervisor_ctl }}
......
...@@ -3,28 +3,28 @@ ...@@ -3,28 +3,28 @@
# access to the edX 6.00x repo which is not public # access to the edX 6.00x repo which is not public
--- ---
- name: xserver | checking for grader info - name: checking for grader info
fail: msg="You must define XSERVER_GRADER_DIR and XSERVER_GRADER_SOURCE to use this role!" fail: msg="You must define XSERVER_GRADER_DIR and XSERVER_GRADER_SOURCE to use this role!"
when: not XSERVER_GRADER_DIR or not XSERVER_GRADER_SOURCE when: not XSERVER_GRADER_DIR or not XSERVER_GRADER_SOURCE
- name: xserver | checking for git identity - name: checking for git identity
fail: msg="You must define XSERVER_LOCAL_GIT_IDENTITY to use this role" fail: msg="You must define XSERVER_LOCAL_GIT_IDENTITY to use this role"
when: not XSERVER_LOCAL_GIT_IDENTITY when: not XSERVER_LOCAL_GIT_IDENTITY
- name: xserver | create application user - name: create application user
user: > user: >
name="{{ xserver_user }}" name="{{ xserver_user }}"
home="{{ xserver_app_dir }}" home="{{ xserver_app_dir }}"
createhome=no createhome=no
shell=/bin/false shell=/bin/false
- name: xserver | create application sandbox user - name: create application sandbox user
user: > user: >
name="{{ xserver_sandbox_user }}" name="{{ xserver_sandbox_user }}"
createhome=no createhome=no
shell=/bin/false shell=/bin/false
- name: xserver | create xserver app and data dirs - name: create xserver app and data dirs
file: > file: >
path="{{ item }}" path="{{ item }}"
state=directory state=directory
...@@ -36,27 +36,27 @@ ...@@ -36,27 +36,27 @@
- "{{ xserver_data_dir }}" - "{{ xserver_data_dir }}"
- "{{ xserver_data_dir }}/data" - "{{ xserver_data_dir }}/data"
- name: xserver | create sandbox sudoers file - name: create sandbox sudoers file
template: src=99-sandbox.j2 dest=/etc/sudoers.d/99-sandbox owner=root group=root mode=0440 template: src=99-sandbox.j2 dest=/etc/sudoers.d/99-sandbox owner=root group=root mode=0440
# Make sure this line is in the common-session file. # Make sure this line is in the common-session file.
- name: xserver | ensure pam-limits module is loaded - name: ensure pam-limits module is loaded
lineinfile: lineinfile:
dest=/etc/pam.d/common-session dest=/etc/pam.d/common-session
regexp="session required pam_limits.so" regexp="session required pam_limits.so"
line="session required pam_limits.so" line="session required pam_limits.so"
- name: xserver | set sandbox limits - name: set sandbox limits
template: src={{ item }} dest=/etc/security/limits.d/sandbox.conf template: src={{ item }} dest=/etc/security/limits.d/sandbox.conf
first_available_file: first_available_file:
- "{{ secure_dir }}/sandbox.conf.j2" - "{{ secure_dir }}/sandbox.conf.j2"
- "sandbox.conf.j2" - "sandbox.conf.j2"
- name: xserver | install system dependencies of xserver - name: install system dependencies of xserver
apt: pkg={{ item }} state=present apt: pkg={{ item }} state=present
with_items: xserver_debian_pkgs with_items: xserver_debian_pkgs
- name: xserver | load python-sandbox apparmor profile - name: load python-sandbox apparmor profile
template: src={{ item }} dest=/etc/apparmor.d/edx_apparmor_sandbox template: src={{ item }} dest=/etc/apparmor.d/edx_apparmor_sandbox
first_available_file: first_available_file:
- "{{ secure_dir }}/files/edx_apparmor_sandbox.j2" - "{{ secure_dir }}/files/edx_apparmor_sandbox.j2"
......
Jinja2==2.7.1 ansible==1.4.4
MarkupSafe==0.18
PyYAML==3.10 PyYAML==3.10
ansible==1.3.2 Jinja2==2.7.2
MarkupSafe==0.18
argparse==1.2.1 argparse==1.2.1
boto==2.10.0 boto==2.23.0
ecdsa==0.10 ecdsa==0.10
paramiko==1.12.0 paramiko==1.12.0
pycrypto==2.6.1 pycrypto==2.6.1
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment