Commit 8946de03 by John Jarvis

removing role identifiers for Ansible 1.4

parent aa0c2188
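
The change itself is mechanical: every task and handler name drops its "role | " prefix, presumably because Ansible 1.4 began reporting the owning role in task output on its own, so the hand-maintained prefixes had become redundant and the role name displayed twice. A minimal before/after sketch (illustrative, not lifted from the diff):

# before: the prefix duplicates what Ansible now prints itself
- name: apache | restart apache
  service: name=apache2 state=restarted

# after: Ansible supplies the role context in its output
- name: restart apache
  service: name=apache2 state=restarted

Because notify: strings are matched against handler names verbatim, a sweep like this is complete only once every notify: that referenced an old prefixed handler name is updated to match.
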
......@@ -15,8 +15,8 @@
#
#
- name: analytics-server | stop the analytics service
- name: stop the analytics service
service: name=analytics state=stopped
- name: analytics-server | start the analytics service
- name: start the analytics service
service: name=analytics state=started
......@@ -37,14 +37,14 @@
# - common
# - analytics-server
#
- name: analytics-server | install system packages
- name: install system packages
apt: pkg={{','.join(as_debian_pkgs)}} state=present
tags:
- analytics-server
- install
- update
- name: analytics-server | create analytics-server user {{ as_user }}
- name: create analytics-server user {{ as_user }}
user:
name={{ as_user }} state=present shell=/bin/bash
home={{ as_home }} createhome=yes
......@@ -53,7 +53,7 @@
- install
- update
- name: analytics-server | setup the analytics-server env
- name: setup the analytics-server env
template:
src=opt/wwc/analytics-server/{{ as_env }}.j2
dest={{ as_home }}/{{ as_env }}
......@@ -63,7 +63,7 @@
- install
- update
- name: analytics-server | drop a bash_profile
- name: drop a bash_profile
copy: >
src=../../common/files/bash_profile
dest={{ as_home }}/.bash_profile
......@@ -80,7 +80,7 @@
# - install
# - update
- name: analytics-server | ensure .bashrc exists
- name: ensure .bashrc exists
shell: touch {{ as_home }}/.bashrc
sudo: true
sudo_user: "{{ as_user }}"
......@@ -89,7 +89,7 @@
- install
- update
- name: analytics-server | add source of analytics-server_env to .bashrc
- name: add source of analytics-server_env to .bashrc
lineinfile:
dest={{ as_home }}/.bashrc
regexp='. {{ as_home }}/analytics-server_env'
......@@ -99,7 +99,7 @@
- install
- update
- name: analytics-server | add source venv to .bashrc
- name: add source venv to .bashrc
lineinfile:
dest={{ as_home }}/.bashrc
regexp='. {{ as_venv_dir }}/bin/activate'
......@@ -109,7 +109,7 @@
- install
- update
- name: analytics-server | install global python requirements
- name: install global python requirements
pip: name={{ item }}
with_items: as_pip_pkgs
tags:
......@@ -117,7 +117,7 @@
- install
- update
- name: analytics-server | create config
- name: create config
template:
src=opt/wwc/analytics.auth.json.j2
dest=/opt/wwc/analytics.auth.json
......@@ -128,7 +128,7 @@
- install
- update
- name: analytics-server | install service
- name: install service
template:
src=etc/init/analytics.conf.j2 dest=/etc/init/analytics.conf
owner=root group=root
......
......@@ -15,8 +15,8 @@
#
#
- name: analytics | stop the analytics service
- name: stop the analytics service
service: name=analytics state=stopped
- name: analytics | start the analytics service
- name: start the analytics service
service: name=analytics state=started
......@@ -37,14 +37,14 @@
# - common
# - analytics
#
- name: analytics | install system packages
- name: install system packages
apt: pkg={{','.join(analytics_debian_pkgs)}} state=present
tags:
- analytics
- install
- update
- name: analytics | create analytics user {{ analytics_user }}
- name: create analytics user {{ analytics_user }}
user:
name={{ analytics_user }} state=present shell=/bin/bash
home={{ analytics_home }} createhome=yes
......@@ -53,7 +53,7 @@
- install
- update
- name: analytics | setup the analytics env
- name: setup the analytics env
template:
src=opt/wwc/analytics/{{ analytics_env }}.j2
dest={{ analytics_home }}/{{ analytics_env }}
......@@ -63,7 +63,7 @@
- install
- update
- name: analytics | drop a bash_profile
- name: drop a bash_profile
copy: >
src=../../common/files/bash_profile
dest={{ analytics_home }}/.bash_profile
......@@ -80,7 +80,7 @@
# - install
# - update
- name: analytics | ensure .bashrc exists
- name: ensure .bashrc exists
shell: touch {{ analytics_home }}/.bashrc
sudo: true
sudo_user: "{{ analytics_user }}"
......@@ -89,7 +89,7 @@
- install
- update
- name: analytics | add source of analytics_env to .bashrc
- name: add source of analytics_env to .bashrc
lineinfile:
dest={{ analytics_home }}/.bashrc
regexp='. {{ analytics_home }}/analytics_env'
......@@ -99,7 +99,7 @@
- install
- update
- name: analytics | add source venv to .bashrc
- name: add source venv to .bashrc
lineinfile:
dest={{ analytics_home }}/.bashrc
regexp='. {{ analytics_venv_dir }}/bin/activate'
......@@ -109,7 +109,7 @@
- install
- update
- name: analytics | install global python requirements
- name: install global python requirements
pip: name={{ item }}
with_items: analytics_pip_pkgs
tags:
......@@ -117,7 +117,7 @@
- install
- update
- name: analytics | create config
- name: create config
template:
src=opt/wwc/analytics.auth.json.j2
dest=/opt/wwc/analytics.auth.json
......@@ -128,7 +128,7 @@
- install
- update
- name: analytics | install service
- name: install service
template:
src=etc/init/analytics.conf.j2 dest=/etc/init/analytics.conf
owner=root group=root
......
---
- name: ansible-role | check if the role exists
- name: check if the role exists
command: test -d roles/{{ role_name }}
register: role_exists
ignore_errors: yes
- name: ansible-role | prompt for overwrite
- name: prompt for overwrite
pause: prompt="Role {{ role_name }} exists. Overwrite? Touch any key to continue or <CTRL>-c, then a, to abort."
when: role_exists | success
- name: ansible-role | create role directories
- name: create role directories
file: path=roles/{{role_name}}/{{ item }} state=directory
with_items:
- tasks
......@@ -19,7 +19,7 @@
- templates
- files
- name: ansible-role | make an ansible role
- name: make an ansible role
template: src={{ item }}/main.yml.j2 dest=roles/{{ role_name }}/{{ item }}/main.yml
with_items:
- tasks
......
---
- name: apache | restart apache
- name: restart apache
service: name=apache2 state=restarted
# Installs apache and runs the LMS WSGI app
---
- name: apache | Installs apache and mod_wsgi from apt
- name: Installs apache and mod_wsgi from apt
apt: pkg={{item}} install_recommends=no state=present update_cache=yes
with_items:
- apache2
......@@ -11,21 +11,21 @@
- apache
- install
- name: apache | disables default site
- name: disables default site
command: a2dissite 000-default
notify: apache | restart apache
tags:
- apache
- install
- name: apache | rewrite apache ports conf
- name: rewrite apache ports conf
template: dest=/etc/apache2/ports.conf src=ports.conf.j2 owner=root group=root
notify: apache | restart apache
tags:
- apache
- install
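# notify: strings are matched against handler names verbatim, so renames like
# the ones in this commit only take effect once notify: entries such as
# "apache | restart apache" above are updated to the new unprefixed names.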
- name: apache | Register the fact that apache role has run
- name: Register the fact that apache role has run
command: echo True
register: apache_role_run
tags:
......
......@@ -57,7 +57,7 @@
- fail: msg="automated_sudoers_dest required for role"
when: automated_sudoers_dest is not defined
- name: automated | create automated user
- name: create automated user
user:
name={{ automated_user }} state=present shell=/bin/rbash
home={{ automated_home }} createhome=yes
......@@ -66,7 +66,7 @@
- install
- update
- name: automated | create sudoers file from file
- name: create sudoers file from file
copy:
dest=/etc/sudoers.d/{{ automated_sudoers_dest }}
src={{ automated_sudoers_file }} owner="root"
......@@ -77,7 +77,7 @@
- install
- update
- name: automated | create sudoers file from template
- name: create sudoers file from template
template:
dest=/etc/sudoers.d/{{ automated_sudoers_dest }}
src={{ automated_sudoers_template }} owner="root"
......@@ -92,7 +92,7 @@
# Prevent user from updating their PATH and
# environment.
#
- name: automated | update shell file mode
- name: update shell file mode
file:
path={{ automated_home }}/{{ item }} mode=0640
state=file owner="root" group={{ automated_user }}
......@@ -105,7 +105,7 @@
- .profile
- .bash_logout
- name: automated | change ~automated ownership
- name: change ~automated ownership
file:
path={{ automated_home }} mode=0750 state=directory
owner="root" group={{ automated_user }}
......@@ -119,7 +119,7 @@
# and that links that were remove from the role are
# removed.
#
- name: automated | remove ~automated/bin directory
- name: remove ~automated/bin directory
file:
path={{ automated_home }}/bin state=absent
ignore_errors: yes
......@@ -128,7 +128,7 @@
- install
- update
- name: automated | create ~automated/bin directory
- name: create ~automated/bin directory
file:
path={{ automated_home }}/bin state=directory mode=0750
owner="root" group={{ automated_user }}
......@@ -137,7 +137,7 @@
- install
- update
- name: automated | re-write .profile
- name: re-write .profile
copy:
src=home/automator/.profile
dest={{ automated_home }}/.profile
......@@ -149,7 +149,7 @@
- install
- update
- name: automated | re-write .bashrc
- name: re-write .bashrc
copy:
src=home/automator/.bashrc
dest={{ automated_home }}/.bashrc
......@@ -161,7 +161,7 @@
- install
- update
- name: automated | create .ssh directory
- name: create .ssh directory
file:
path={{ automated_home }}/.ssh state=directory mode=0700
owner={{ automated_user }} group={{ automated_user }}
......@@ -170,7 +170,7 @@
- install
- update
- name: automated | copy key to .ssh/authorized_keys
- name: copy key to .ssh/authorized_keys
copy:
src=home/automator/.ssh/authorized_keys
dest={{ automated_home }}/.ssh/authorized_keys mode=0600
......@@ -180,7 +180,7 @@
- install
- update
- name: automated | create allowed command links
- name: create allowed command links
file:
src={{ item }} dest={{ automated_home }}/bin/{{ item.split('/').pop() }}
state=link
......
# Install browsers required to run the JavaScript
# and acceptance test suite locally without a display
---
- name: browsers | install system packages
- name: install system packages
apt: pkg={{','.join(browser_deb_pkgs)}}
state=present update_cache=yes
- name: browsers | download browser debian packages from S3
- name: download browser debian packages from S3
get_url: dest="/tmp/{{ item.name }}" url="{{ item.url }}"
register: download_deb
with_items: "{{ browser_s3_deb_pkgs }}"
- name: browsers | install browser debian packages
- name: install browser debian packages
shell: gdebi -nq /tmp/{{ item.name }}
when: download_deb.changed
with_items: "{{ browser_s3_deb_pkgs }}"
- name: browsers | Install ChromeDriver
- name: Install ChromeDriver
get_url:
url={{ chromedriver_url }}
dest=/var/tmp/chromedriver_{{ chromedriver_version }}.zip
- name: browsers | Install ChromeDriver 2
- name: Install ChromeDriver 2
shell: unzip /var/tmp/chromedriver_{{ chromedriver_version }}.zip
chdir=/var/tmp
- name: browsers | Install ChromeDriver 3
- name: Install ChromeDriver 3
shell: mv /var/tmp/chromedriver /usr/local/bin/chromedriver
- name: browsers | Install Chromedriver 4
- name: Install Chromedriver 4
file: path=/usr/local/bin/chromedriver mode=0755
- name: browsers | create xvfb upstart script
- name: create xvfb upstart script
template: src=xvfb.conf.j2 dest=/etc/init/xvfb.conf owner=root group=root
- name: browsers | start xvfb
- name: start xvfb
shell: start xvfb
ignore_errors: yes
......@@ -14,7 +14,7 @@
# Overview:
#
- name: certs | restart certs
- name: restart certs
supervisorctl_local: >
name=certs
supervisorctl_path={{ supervisor_ctl }}
......
......@@ -35,7 +35,7 @@
fail: msg="You must set CERTS_LOCAL_GIT_IDENTITY var for this role!"
when: not CERTS_LOCAL_GIT_IDENTITY
- name: certs | create application user
- name: create application user
user: >
name="{{ certs_user }}"
home="{{ certs_app_dir }}"
......@@ -43,7 +43,7 @@
shell=/bin/false
notify: certs | restart certs
- name: certs | create certs app and data dirs
- name: create certs app and data dirs
file: >
path="{{ item }}"
state=directory
......@@ -54,14 +54,14 @@
- "{{ certs_app_dir }}"
- "{{ certs_venvs_dir }}"
- name: certs | create certs gpg dir
- name: create certs gpg dir
file: >
path="{{ certs_gpg_dir }}" state=directory
owner="{{ common_web_user }}"
mode=0700
notify: certs | restart certs
- name: certs | copy the private gpg signing key
- name: copy the private gpg signing key
copy: >
src={{ CERTS_LOCAL_PRIVATE_KEY }}
dest={{ certs_app_dir }}/{{ CERTS_LOCAL_PRIVATE_KEY|basename }}
......@@ -70,7 +70,7 @@
register: certs_gpg_key
- name: certs | load the gpg key
- name: load the gpg key
shell: >
/usr/bin/gpg --homedir {{ certs_gpg_dir }} --import {{ certs_app_dir }}/{{ CERTS_LOCAL_PRIVATE_KEY|basename }}
sudo_user: "{{ common_web_user }}"
......
---
- name: common | restart rsyslogd
- name: restart rsyslogd
service: name=rsyslog state=restarted
sudo: True
---
- name: common | Add user www-data
- name: Add user www-data
# This is the default user for nginx
user: >
name="{{ common_web_user }}"
shell=/bin/false
- name: common | Create common directories
- name: Create common directories
file: >
path={{ item }} state=directory owner=root
group=root mode=0755
......@@ -16,57 +16,57 @@
- "{{ COMMON_CFG_DIR }}"
# Need to install python-pycurl to use Ansible's apt_repository module
- name: common | Install python-pycurl
- name: Install python-pycurl
apt: pkg=python-pycurl state=present update_cache=yes
# Ensure that we get a current version of Git
# GitHub requires version 1.7.10 or later
# https://help.github.com/articles/https-cloning-errors
- name: common | Add git apt repository
- name: Add git apt repository
apt_repository: repo="{{ common_git_ppa }}"
- name: common | Install role-independent useful system packages
- name: Install role-independent useful system packages
# do this before log dir setup; rsyslog package guarantees syslog user present
apt: >
pkg={{','.join(common_debian_pkgs)}} install_recommends=yes
state=present update_cache=yes
- name: common | Create common log directory
- name: Create common log directory
file: >
path={{ COMMON_LOG_DIR }} state=directory owner=syslog
group=syslog mode=0755
- name: common | upload sudo config for key forwarding as root
- name: upload sudo config for key forwarding as root
copy: >
src=ssh_key_forward dest=/etc/sudoers.d/ssh_key_forward
validate='visudo -c -f %s' owner=root group=root mode=0440
- name: common | pip install virtualenv
- name: pip install virtualenv
pip: >
name="{{ item }}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
with_items: common_pip_pkgs
- name: common | Install rsyslog configuration for edX
- name: Install rsyslog configuration for edX
template: dest=/etc/rsyslog.d/99-edx.conf src=edx_rsyslog.j2 owner=root group=root mode=644
notify: common | restart rsyslogd
- name: common | Install logrotate configuration for edX
- name: Install logrotate configuration for edX
template: dest=/etc/logrotate.d/edx-services src=edx_logrotate.j2 owner=root group=root mode=644
- name: common | update /etc/hosts
- name: update /etc/hosts
template: src=hosts.j2 dest=/etc/hosts
when: COMMON_HOSTNAME
register: etc_hosts
- name: common | update /etc/hostname
- name: update /etc/hostname
template: src=hostname.j2 dest=/etc/hostname
when: COMMON_HOSTNAME
register: etc_hostname
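# Both template tasks register their results so that the hostname command
# below runs only when /etc/hosts or /etc/hostname actually changed.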
- name: common | run hostname
- name: run hostname
shell: >
hostname -F /etc/hostname
when: COMMON_HOSTNAME and (etc_hosts.changed or etc_hostname.changed)
---
- name: datadog | restart the datadog service
- name: restart the datadog service
service: name=datadog-agent state=restarted
......@@ -15,33 +15,33 @@
# - datadog
#
- name: datadog | install debian needed pkgs
- name: install debian needed pkgs
apt: pkg={{ item }}
with_items: datadog_debian_pkgs
tags:
- datadog
- name: datadog | add apt key
- name: add apt key
apt_key: id=C7A7DA52 url={{datadog_apt_key}} state=present
tags:
- datadog
- name: datadog | install apt repository
- name: install apt repository
apt_repository: repo='deb http://apt.datadoghq.com/ unstable main' update_cache=yes
tags:
- datadog
- name: datadog | install datadog agent
- name: install datadog agent
apt: pkg="datadog-agent"
tags:
- datadog
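# creates= makes the shell task below idempotent: the example config is
# copied only while /etc/dd-agent/datadog.conf does not yet exist.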
- name: datadog | bootstrap config
- name: bootstrap config
shell: cp /etc/dd-agent/datadog.conf.example /etc/dd-agent/datadog.conf creates=/etc/dd-agent/datadog.conf
tags:
- datadog
- name: datadog | update api-key
- name: update api-key
lineinfile: >
dest="/etc/dd-agent/datadog.conf"
regexp="^api_key:.*"
......@@ -51,7 +51,7 @@
tags:
- datadog
- name: datadog | ensure started and enabled
- name: ensure started and enabled
service: name=datadog-agent state=started enabled=yes
tags:
- datadog
......@@ -30,7 +30,7 @@
# - edxapp
# - demo
- name: demo | create demo app and data dirs
- name: create demo app and data dirs
file: >
path="{{ demo_app_dir }}" state=directory
owner="{{ edxapp_user }}" group="{{ common_web_group }}"
......
......@@ -11,7 +11,7 @@
# Defaults for role devpi
#
---
- name: devpi | restart devpi
- name: restart devpi
supervisorctl_local: >
state=restarted
supervisorctl_path={{ devpi_supervisor_ctl }}
......
......@@ -30,13 +30,13 @@
# - devpi
---
- name: devpi | create devpi user
- name: create devpi user
user: >
name={{ devpi_user }}
shell=/bin/false createhome=no
notify: devpi | restart devpi
- name: devpi | create devpi application directories
- name: create devpi application directories
file: >
path={{ item }}
state=directory
......@@ -47,7 +47,7 @@
- "{{ devpi_venv_dir }}"
notify: devpi | restart devpi
- name: devpi | create the devpi data directory, needs write access by the service user
- name: create the devpi data directory, needs write access by the service user
file: >
path={{ item }}
state=directory
......@@ -58,7 +58,7 @@
- "{{ devpi_mirror_dir }}"
notify: devpi | restart devpi
- name: devpi | install devpi pip pkgs
- name: install devpi pip pkgs
pip: >
name={{ item }}
state=present
......@@ -67,13 +67,13 @@
with_items: devpi_pip_pkgs
notify: devpi | restart devpi
- name: devpi | writing supervisor script
- name: writing supervisor script
template: >
src=devpi.conf.j2 dest={{ devpi_supervisor_cfg_dir }}/devpi.conf
owner={{ devpi_user }} group={{ devpi_user }} mode=0644
notify: devpi | restart devpi
- name: devpi | create a symlink for venv python, pip
- name: create a symlink for venv python, pip
file: >
src="{{ devpi_venv_bin }}/{{ item }}"
dest={{ COMMON_BIN_DIR }}/{{ item }}.devpi
......@@ -83,13 +83,13 @@
- python
- pip
- name: devpi | create a symlink for venv supervisor
- name: create a symlink for venv supervisor
file: >
src="{{ devpi_supervisor_venv_bin }}/supervisorctl"
dest={{ COMMON_BIN_DIR }}/supervisorctl.devpi
state=link
- name: devpi | create a symlink for supervisor config
- name: create a symlink for supervisor config
file: >
src="{{ devpi_supervisor_app_dir }}/supervisord.conf"
dest={{ COMMON_CFG_DIR }}/supervisord.conf.devpi
......@@ -100,12 +100,12 @@
# the services if any of the configurations
# have changed.
#
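# supervisorctl update prints output only when it changes something, so
# changed_when keys off stdout to report an accurate changed status.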
- name: devpi | update devpi supervisor configuration
- name: update devpi supervisor configuration
shell: "{{ devpi_supervisor_ctl }} -c {{ devpi_supervisor_cfg }} update"
register: supervisor_update
changed_when: supervisor_update.stdout != ""
- name: devpi | ensure devpi is started
- name: ensure devpi is started
supervisorctl_local: >
state=started
supervisorctl_path={{ devpi_supervisor_ctl }}
......
---
- name: discern | restart discern
- name: restart discern
supervisorctl_local: >
name=discern
supervisorctl_path={{ supervisor_ctl }}
......
---
- name: discern | create application user
- name: create application user
user: >
name="{{ discern_user }}"
home="{{ discern_app_dir }}"
......@@ -8,7 +8,7 @@
notify:
- discern | restart discern
- name: discern | create discern app dirs owned by discern
- name: create discern app dirs owned by discern
file: >
path="{{ item }}"
state=directory
......@@ -20,7 +20,7 @@
- "{{ discern_app_dir }}"
- "{{ discern_venvs_dir }}"
- name: discern | create discern data dir, owned by {{ common_web_user }}
- name: create discern data dir, owned by {{ common_web_user }}
file: >
path="{{ discern_data_dir }}" state=directory
owner="{{ common_web_user }}" group="{{ discern_user }}"
......@@ -28,19 +28,19 @@
notify:
- discern | restart discern
- name: discern | install debian packages that discern needs
- name: install debian packages that discern needs
apt: pkg={{ item }} state=present
notify:
- discern | restart discern
with_items: discern_debian_pkgs
- name: discern | install debian packages for ease that discern needs
- name: install debian packages for ease that discern needs
apt: pkg={{ item }} state=present
notify:
- discern | restart discern
with_items: discern_ease_debian_pkgs
- name: discern | copy sudoers file for discern
- name: copy sudoers file for discern
copy: >
src=sudoers-discern dest=/etc/sudoers.d/discern
mode=0440 validate='visudo -cf %s' owner=root group=root
......@@ -48,7 +48,7 @@
- discern | restart discern
# Needed to prevent memory issues when using Redis
- name: discern | change memory commit settings -- needed for redis
- name: change memory commit settings -- needed for redis
command: sysctl vm.overcommit_memory=1
notify:
- discern | restart discern
......
......@@ -23,14 +23,14 @@
#
#
#
- name: edx_ansible | create application user
- name: create application user
user: >
name="{{ edx_ansible_user }}"
home="{{ edx_ansible_app_dir }}"
createhome=no
shell=/bin/false
- name: edx_ansible | create edx_ansible app and venv dir
- name: create edx_ansible app and venv dir
file: >
path="{{ item }}"
state=directory
......@@ -41,7 +41,7 @@
- "{{ edx_ansible_data_dir }}"
- "{{ edx_ansible_venvs_dir }}"
- name: edx_ansible | install a bunch of system packages on which edx_ansible relies
- name: install a bunch of system packages on which edx_ansible relies
apt: pkg={{','.join(edx_ansible_debian_pkgs)}} state=present
- include: deploy.yml tags=deploy
---
- name: edxapp | restart edxapp
- name: restart edxapp
supervisorctl_local: >
state=restarted
supervisorctl_path={{ supervisor_ctl }}
......@@ -9,7 +9,7 @@
sudo_user: "{{ supervisor_service_user }}"
with_items: service_variants_enabled
- name: edxapp | restart edxapp_workers
- name: restart edxapp_workers
supervisorctl_local: >
name="edxapp_worker:{{ item.service_variant }}_{{ item.queue }}_{{ item.concurrency }}"
supervisorctl_path={{ supervisor_ctl }}
......
......@@ -4,13 +4,13 @@
---
- name: edxapp | Install logrotate configuration for tracking file
- name: Install logrotate configuration for tracking file
template: dest=/etc/logrotate.d/tracking.log src=edx_logrotate_tracking_log.j2 owner=root group=root mode=644
notify:
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
- name: edxapp | create application user
- name: create application user
user: >
name="{{ edxapp_user }}" home="{{ edxapp_app_dir }}"
createhome=no shell=/bin/false
......@@ -18,7 +18,7 @@
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
- name: edxapp | create edxapp user dirs
- name: create edxapp user dirs
file: >
path="{{ item }}" state=directory
owner="{{ edxapp_user }}" group="{{ common_web_group }}"
......@@ -32,7 +32,7 @@
- "{{ edxapp_theme_dir }}"
- "{{ edxapp_staticfile_dir }}"
- name: edxapp | create edxapp log dir
- name: create edxapp log dir
file: >
path="{{ edxapp_log_dir }}" state=directory
owner="{{ common_log_user }}" group="{{ common_log_user }}"
......@@ -40,7 +40,7 @@
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
- name: edxapp | create web-writable edxapp data dirs
- name: create web-writable edxapp data dirs
file: >
path="{{ item }}" state=directory
owner="{{ common_web_user }}" group="{{ edxapp_user }}"
......@@ -52,13 +52,13 @@
- "{{ edxapp_course_data_dir }}"
- "{{ edxapp_upload_dir }}"
- name: edxapp | install system packages on which LMS and CMS rely
- name: install system packages on which LMS and CMS rely
apt: pkg={{','.join(edxapp_debian_pkgs)}} state=present
notify:
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
- name: edxapp | create log directories for service variants
- name: create log directories for service variants
notify:
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
......
......@@ -10,33 +10,33 @@
# http://downloads.mysql.com/archives/mysql-5.1/mysql-5.1.62.tar.gz
#
---
- name: edxlocal | install packages needed for single server
- name: install packages needed for single server
apt: pkg={{','.join(edxlocal_debian_pkgs)}} install_recommends=yes state=present
- name: edxlocal | create a database for edxapp
- name: create a database for edxapp
mysql_db: >
db=edxapp
state=present
encoding=utf8
- name: edxlocal | create a database for xqueue
- name: create a database for xqueue
mysql_db: >
db=xqueue
state=present
encoding=utf8
- name: edxlocal | create a database for ora
- name: create a database for ora
mysql_db: >
db=ora
state=present
encoding=utf8
- name: edxlocal | create a database for discern
- name: create a database for discern
mysql_db: >
db=discern
state=present
encoding=utf8
- name: edxlocal | install memcached
- name: install memcached
apt: pkg=memcached state=present
......@@ -14,13 +14,13 @@
# - oraclejdk
# - elasticsearch
- name: elasticsearch | download elasticsearch
- name: download elasticsearch
get_url: >
url={{ elasticsearch_url }}
dest=/var/tmp/{{ elasticsearch_file }}
force=no
- name: elasticsearch | install elasticsearch from local package
- name: install elasticsearch from local package
shell: >
dpkg -i /var/tmp/elasticsearch-{{ elasticsearch_version }}.deb
executable=/bin/bash
......@@ -29,7 +29,7 @@
- elasticsearch
- install
- name: elasticsearch | Ensure elasticsearch is enabled and started
- name: Ensure elasticsearch is enabled and started
service: name=elasticsearch state=started enabled=yes
tags:
- elasticsearch
......
---
- name: forum | restart the forum service
- name: restart the forum service
supervisorctl_local: >
name=forum
supervisorctl_path={{ supervisor_ctl }}
......
......@@ -21,20 +21,20 @@
# rbenv_ruby_version: "{{ forum_ruby_version }}"
# - forum
- name: forum | create application user
- name: create application user
user: >
name="{{ forum_user }}" home="{{ forum_app_dir }}"
createhome=no
shell=/bin/false
notify: forum | restart the forum service
- name: forum | create forum app dir
- name: create forum app dir
file: >
path="{{ forum_app_dir }}" state=directory
owner="{{ forum_user }}" group="{{ common_web_group }}"
notify: forum | restart the forum service
- name: forum | setup the forum env
- name: setup the forum env
template: >
src=forum_env.j2 dest={{ forum_app_dir }}/forum_env
owner={{ forum_user }} group={{ common_web_user }}
......
......@@ -28,34 +28,34 @@
---
- name: gh_mirror | install pip packages
- name: install pip packages
pip: name={{ item }} state=present
with_items: gh_mirror_pip_pkgs
- name: gh_mirror | install debian packages
- name: install debian packages
apt: >
pkg={{ ",".join(gh_mirror_debian_pkgs) }}
state=present
update_cache=yes
- name: gh_mirror | create gh_mirror user
- name: create gh_mirror user
user: >
name={{ gh_mirror_user }}
state=present
- name: gh_mirror | create the gh_mirror data directory
- name: create the gh_mirror data directory
file: >
path={{ gh_mirror_data_dir }}
state=directory
owner={{ gh_mirror_user }}
group={{ gh_mirror_group }}
- name: gh_mirror | create the gh_mirror app directory
- name: create the gh_mirror app directory
file: >
path={{ gh_mirror_app_dir }}
state=directory
- name: gh_mirror | create org config
- name: create org config
template: src=orgs.yml.j2 dest={{ gh_mirror_app_dir }}/orgs.yml
- name: copying sync scripts
......
......@@ -12,34 +12,34 @@
# - mark
- name: gh_users | creating default .bashrc
- name: creating default .bashrc
template: >
src=default.bashrc.j2 dest=/etc/skel/.bashrc
mode=0644 owner=root group=root
- name: gh_users | create gh group
- name: create gh group
group: name=gh state=present
# TODO: give limited sudo access to this group
- name: gh_users | grant full sudo access to gh group
- name: grant full sudo access to gh group
copy: >
content="%gh ALL=(ALL) NOPASSWD:ALL"
dest=/etc/sudoers.d/gh owner=root group=root
mode=0440 validate='visudo -cf %s'
- name: gh_users | create github users
- name: create github users
user:
name={{ item }} groups=gh
shell=/bin/bash
with_items: gh_users
- name: gh_users | create .ssh directory
- name: create .ssh directory
file:
path=/home/{{ item }}/.ssh state=directory mode=0700
owner={{ item }}
with_items: gh_users
- name: gh_users | copy github key[s] to .ssh/authorized_keys
- name: copy github key[s] to .ssh/authorized_keys
get_url:
url=https://github.com/{{ item }}.keys
dest=/home/{{ item }}/.ssh/authorized_keys mode=0600
......
---
# Install and configure simple glusterFS shared storage
- name: gluster | all | Install common packages
- name: Install common packages
apt: name={{ item }} state=present
with_items:
- glusterfs-client
......@@ -9,20 +9,20 @@
- nfs-common
tags: gluster
- name: gluster | all | Install server packages
- name: Install server packages
apt: name=glusterfs-server state=present
when: >
  "{{ ansible_default_ipv4.address }}" in "{{ gluster_peers|join(' ') }}"
tags: gluster
- name: gluster | all | enable server
- name: enable server
service: name=glusterfs-server state=started enabled=yes
when: >
"{{ ansible_default_ipv4.address }}" in "{{ gluster_peers|join(' ') }}"
tags: gluster
# Ignoring error below so that we can move the data folder and have it be a link
- name: gluster | all | create folders
- name: create folders
file: path={{ item.path }} state=directory
with_items: gluster_volumes
when: >
......@@ -30,39 +30,39 @@
ignore_errors: yes
tags: gluster
- name: gluster | primary | create peers
- name: create peers
command: gluster peer probe {{ item }}
with_items: gluster_peers
when: ansible_default_ipv4.address == gluster_primary_ip
tags: gluster
- name: gluster | primary | create volumes
- name: create volumes
command: gluster volume create {{ item.name }} replica {{ item.replicas }} transport tcp {% for server in gluster_peers %}{{ server }}:{{ item.path }} {% endfor %}
with_items: gluster_volumes
when: ansible_default_ipv4.address == gluster_primary_ip
ignore_errors: yes # There should be better error checking here
tags: gluster
- name: gluster | primary | start volumes
- name: start volumes
command: gluster volume start {{ item.name }}
with_items: gluster_volumes
when: ansible_default_ipv4.address == gluster_primary_ip
ignore_errors: yes # There should be better error checking here
tags: gluster
- name: gluster | primary | set security
- name: set security
command: gluster volume set {{ item.name }} auth.allow {{ item.security }}
with_items: gluster_volumes
when: ansible_default_ipv4.address == gluster_primary_ip
tags: gluster
- name: gluster | primary | set performance cache
- name: set performance cache
command: gluster volume set {{ item.name }} performance.cache-size {{ item.cache_size }}
with_items: gluster_volumes
when: ansible_default_ipv4.address == gluster_primary_ip
tags: gluster
- name: gluster | all | mount volume
- name: mount volume
mount: >
name={{ item.mount_location }}
src={{ gluster_primary_ip }}:{{ item.name }}
......@@ -74,7 +74,7 @@
# This is required due to an annoying bug in Ubuntu and gluster where it tries to mount the system
# before the network stack is up and can't look up 127.0.0.1
- name: gluster | all | sleep mount
- name: sleep mount
lineinfile: >
dest=/etc/rc.local
line='sleep 5; /bin/mount -a'
......
......@@ -14,11 +14,11 @@
# Overview:
#
#
- name: haproxy | restart haproxy
- name: restart haproxy
service: name=haproxy state=restarted
- name: haproxy | reload haproxy
- name: reload haproxy
service: name=haproxy state=reloaded
- name: haproxy | restart rsyslog
- name: restart rsyslog
service: name=rsyslog state=restarted
......@@ -17,26 +17,26 @@
# so it allows for a configuration template to be overridden
# with a variable
- name: haproxy | Install haproxy
- name: Install haproxy
apt: pkg=haproxy state={{ pkgs.haproxy.state }}
notify: haproxy | restart haproxy
- name: haproxy | Server configuration file
- name: Server configuration file
template: >
src={{ haproxy_template_dir }}/haproxy.cfg.j2 dest=/etc/haproxy/haproxy.cfg
owner=root group=root mode=0644
notify: haproxy | reload haproxy
- name: haproxy | Enabled in default
- name: Enabled in default
lineinfile: dest=/etc/default/haproxy regexp=^ENABLED=.$ line=ENABLED=1
notify: haproxy | restart haproxy
- name: haproxy | install logrotate
- name: install logrotate
template: src=haproxy.logrotate.j2 dest=/etc/logrotate.d/haproxy mode=0644
- name: haproxy | install rsyslog conf
- name: install rsyslog conf
template: src=haproxy.rsyslog.j2 dest=/etc/rsyslog.d/haproxy.conf mode=0644
notify: haproxy | restart rsyslog
- name: haproxy | make sure haproxy has started
- name: make sure haproxy has started
service: name=haproxy state=started
---
- name: jenkins_master | restart Jenkins
- name: restart Jenkins
service: name=jenkins state=restarted
- name: jenkins_master | start nginx
- name: start nginx
service: name=nginx state=started
- name: jenkins_master | reload nginx
- name: reload nginx
service: name=nginx state=reloaded
---
- name: jenkins_master | install jenkins specific system packages
- name: install jenkins specific system packages
apt:
pkg={{','.join(jenkins_debian_pkgs)}}
state=present update_cache=yes
tags:
- jenkins
- name: jenkins_master | install jenkins extra system packages
- name: install jenkins extra system packages
apt:
pkg={{','.join(JENKINS_EXTRA_PKGS)}}
state=present update_cache=yes
tags:
- jenkins
- name: jenkins_master | create jenkins group
- name: create jenkins group
group: name={{ jenkins_group }} state=present
- name: jenkins_master | add the jenkins user to the group
- name: add the jenkins user to the group
user: name={{ jenkins_user }} append=yes groups={{ jenkins_group }}
# Should be resolved in the next release, but until then we need to do this
# https://issues.jenkins-ci.org/browse/JENKINS-20407
- name: jenkins_master | workaround for JENKINS-20407
- name: workaround for JENKINS-20407
command: "mkdir -p /var/run/jenkins"
- name: jenkins_master | download Jenkins package
- name: download Jenkins package
get_url: url="{{ jenkins_deb_url }}" dest="/tmp/{{ jenkins_deb }}"
- name: jenkins_master | install Jenkins package
- name: install Jenkins package
command: dpkg -i --force-depends "/tmp/{{ jenkins_deb }}"
- name: jenkins_master | stop Jenkins
- name: stop Jenkins
service: name=jenkins state=stopped
# Move /var/lib/jenkins to Jenkins home (on the EBS)
- name: jenkins_master | move /var/lib/jenkins
- name: move /var/lib/jenkins
command: mv /var/lib/jenkins {{ jenkins_home }}
creates={{ jenkins_home }}
- name: jenkins_master | set owner for Jenkins home
- name: set owner for Jenkins home
file: path={{ jenkins_home }} recurse=yes state=directory
owner={{ jenkins_user }} group={{ jenkins_group }}
# Symlink /var/lib/jenkins to {{ COMMON_DATA_DIR }}/jenkins
# since Jenkins will expect its files to be in /var/lib/jenkins
- name: jenkins_master | symlink /var/lib/jenkins
- name: symlink /var/lib/jenkins
file: src={{ jenkins_home }} dest=/var/lib/jenkins state=link
owner={{ jenkins_user }} group={{ jenkins_group }}
notify:
- jenkins_master | restart Jenkins
- name: jenkins_master | make plugins directory
- name: make plugins directory
sudo_user: jenkins
shell: mkdir -p {{ jenkins_home }}/plugins
# We first download the plugins to a temp directory and include
# the version in the file name. That way, if we increment
# the version, the plugin will be updated in Jenkins
- name: jenkins_master | download Jenkins plugins
- name: download Jenkins plugins
get_url: url=http://updates.jenkins-ci.org/download/plugins/{{ item.name }}/{{ item.version }}/{{ item.name }}.hpi
dest=/tmp/{{ item.name }}_{{ item.version }}
with_items: "{{ jenkins_plugins }}"
- name: jenkins_master | install Jenkins plugins
- name: install Jenkins plugins
command: cp /tmp/{{ item.name }}_{{ item.version }} {{ jenkins_home }}/plugins/{{ item.name }}.hpi
with_items: "{{ jenkins_plugins }}"
- name: jenkins_master | set Jenkins plugin permissions
- name: set Jenkins plugin permissions
file: path={{ jenkins_home }}/plugins/{{ item.name }}.hpi
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
with_items: "{{ jenkins_plugins }}"
......@@ -78,22 +78,22 @@
# certain issues. If these changes get merged
# upstream, we may be able to use the regular plugin install process.
# Until then, we compile and install the forks ourselves.
- name: jenkins_master | checkout custom plugin repo
- name: checkout custom plugin repo
git: repo={{ item.repo_url }} dest=/tmp/{{ item.repo_name }} version={{ item.version }}
with_items: "{{ jenkins_custom_plugins }}"
- name: jenkins_master | compile custom plugins
- name: compile custom plugins
command: mvn -Dmaven.test.skip=true install chdir=/tmp/{{ item.repo_name }}
with_items: "{{ jenkins_custom_plugins }}"
- name: jenkins_master | install custom plugins
- name: install custom plugins
command: mv /tmp/{{ item.repo_name }}/target/{{ item.package }}
{{ jenkins_home }}/plugins/{{ item.package }}
with_items: "{{ jenkins_custom_plugins }}"
notify:
- jenkins_master | restart Jenkins
- name: jenkins_master | set custom plugin permissions
- name: set custom plugin permissions
file: path={{ jenkins_home }}/plugins/{{ item.package }}
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
with_items: "{{ jenkins_custom_plugins }}"
......@@ -103,17 +103,17 @@
# Jenkins will overwrite updated plugins with its built-in version
# unless we create a ".pinned" file for the plugin.
# See https://issues.jenkins-ci.org/browse/JENKINS-13129
- name: jenkins_master | create plugin pin files
- name: create plugin pin files
command: touch {{ jenkins_home }}/plugins/{{ item }}.jpi.pinned
creates={{ jenkins_home }}/plugins/{{ item }}.jpi.pinned
with_items: "{{ jenkins_bundled_plugins }}"
- name: jenkins_master | setup nginx vhost
- name: setup nginx vhost
template:
src=etc/nginx/sites-available/jenkins.j2
dest=/etc/nginx/sites-available/jenkins
- name: jenkins_master | enable jenkins vhost
- name: enable jenkins vhost
file:
src=/etc/nginx/sites-available/jenkins
dest=/etc/nginx/sites-enabled/jenkins
......
......@@ -3,7 +3,7 @@
# Will terminate an instance if one and only one already exists
# with the same name
- name: launch_ec2 | lookup tags for terminating existing instance
- name: lookup tags for terminating existing instance
local_action:
module: ec2_lookup
region: "{{ region }}"
......@@ -12,7 +12,7 @@
register: tag_lookup
when: terminate_instance == true
- name: launch_ec2 | checking for other instances
- name: checking for other instances
debug: msg="Too many results returned, not terminating!"
when: terminate_instance == true and tag_lookup.instance_ids|length > 1
......@@ -34,7 +34,7 @@
state: absent
when: terminate_instance == true and elb and tag_lookup.instance_ids|length == 1
- name: launch_ec2 | Launch ec2 instance
- name: Launch ec2 instance
local_action:
module: ec2_local
keypair: "{{ keypair }}"
......@@ -49,7 +49,7 @@
instance_profile_name: "{{ instance_profile_name }}"
register: ec2
- name: launch_ec2 | Add DNS name
- name: Add DNS name
local_action:
module: route53
overwrite: yes
......@@ -61,7 +61,7 @@
value: "{{ item.public_dns_name }}"
with_items: "{{ ec2.instances }}"
- name: launch_ec2 | Add DNS name studio
- name: Add DNS name studio
local_action:
module: route53
overwrite: yes
......@@ -73,7 +73,7 @@
value: "{{ item.public_dns_name }}"
with_items: "{{ ec2.instances }}"
- name: launch_ec2 | Add DNS name preview
- name: Add DNS name preview
local_action:
module: route53
overwrite: yes
......@@ -86,14 +86,14 @@
with_items: "{{ ec2.instances }}"
- name: launch_ec2 | Add new instance to host group
- name: Add new instance to host group
local_action: >
add_host
hostname={{ item.public_ip }}
groupname=launched
with_items: "{{ ec2.instances }}"
- name: launch_ec2 | Wait for SSH to come up
- name: Wait for SSH to come up
local_action: >
wait_for
host={{ item.public_dns_name }}
......
......@@ -16,14 +16,14 @@
- fail: msg="secure_dir not defined. This is a path to the secure ora config file."
when: secure_dir is not defined
- name: legacy_ora | create ora application config
- name: create ora application config
copy:
src={{secure_dir}}/files/{{COMMON_ENV_TYPE}}/legacy_ora/ora.env.json
dest={{ora_app_dir}}/env.json
sudo_user: "{{ ora_user }}"
register: env_state
- name: legacy_ora | create ora auth file
- name: create ora auth file
copy:
src={{secure_dir}}/files/{{COMMON_ENV_TYPE}}/legacy_ora/ora.auth.json
dest={{ora_app_dir}}/auth.json
......@@ -31,13 +31,13 @@
register: auth_state
# Restart ORA Services
- name: legacy_ora | restart edx-ora
- name: restart edx-ora
service:
name=edx-ora
state=restarted
when: env_state.changed or auth_state.changed
- name: legacy_ora | restart edx-ora-celery
- name: restart edx-ora-celery
service:
name=edx-ora-celery
state=restarted
......
---
- name: local_dev | install useful system packages
- name: install useful system packages
apt:
pkg={{','.join(local_dev_pkgs)}} install_recommends=yes
state=present update_cache=yes
- name: local_dev | set login shell for app accounts
- name: set login shell for app accounts
user: name={{ item.user }} shell="/bin/bash"
with_items: "{{ localdev_accounts }}"
# Ensure forum user has permissions to access .gem and .rbenv
# This is a little twisty: the forum role sets the owner and group to www-data
# So we add the forum user to the www-data group and give group write permissions
- name: local_dev | add forum user to www-data group
- name: add forum user to www-data group
user: name={{ forum_user }} groups={{ common_web_group }} append=yes
- name: local_dev | set forum rbenv and gem permissions
- name: set forum rbenv and gem permissions
file:
path={{ item }} state=directory mode=770
with_items:
......@@ -22,7 +22,7 @@
- "{{ forum_app_dir }}/.rbenv"
# Create scripts to configure environment
- name: local_dev | create login scripts
- name: create login scripts
template:
src=app_bashrc.j2 dest={{ item.home }}/.bashrc
owner={{ item.user }} mode=755
......@@ -30,24 +30,24 @@
# Default to the correct git config
# No more accidentally force pushing to master! :)
- name: local_dev | configure git
- name: configure git
copy:
src=gitconfig dest={{ item.home }}/.gitconfig
owner={{ item.user }} mode=700
with_items: "{{ localdev_accounts }}"
# Configure X11 for application users
- name: local_dev | preserve DISPLAY for sudo
- name: preserve DISPLAY for sudo
copy:
src=x11_display dest=/etc/sudoers.d/x11_display
owner=root group=root mode=0440
- name: local_dev | login share X11 auth to app users
- name: login share X11 auth to app users
template:
src=share_x11.j2 dest={{ localdev_home }}/share_x11
owner={{ localdev_user }} mode=0700
- name: local_dev | update bashrc with X11 share script
- name: update bashrc with X11 share script
lineinfile:
dest={{ localdev_home }}/.bashrc
regexp=". {{ localdev_home }}/share_x11"
......
---
- name: mongo | install python pymongo for mongo_user ansible module
- name: install python pymongo for mongo_user ansible module
pip: >
name=pymongo state=present
version=2.6.3 extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
- name: mongo | add the mongodb signing key
- name: add the mongodb signing key
apt_key: >
id=7F0CEB10
url=http://docs.mongodb.org/10gen-gpg-key.asc
state=present
- name: mongo | add the mongodb repo to the sources list
- name: add the mongodb repo to the sources list
apt_repository: >
repo='deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen'
state=present
- name: mongo | install mongo server and recommends
- name: install mongo server and recommends
apt: >
pkg=mongodb-10gen={{ mongo_version }}
state=present install_recommends=yes
update_cache=yes
- name: mongo | create mongo dirs
- name: create mongo dirs
file: >
path="{{ item }}" state=directory
owner="{{ mongo_user }}"
......@@ -32,14 +32,14 @@
- "{{ mongo_dbpath }}"
- "{{ mongo_log_dir }}"
- name: mongo | stop mongo service
- name: stop mongo service
service: name=mongodb state=stopped
- name: mongo | move mongodb to {{ mongo_data_dir }}
- name: move mongodb to {{ mongo_data_dir }}
command: mv /var/lib/mongodb {{ mongo_data_dir }}/. creates={{ mongo_data_dir }}/mongodb
- name: mongo | copy mongodb key file
- name: copy mongodb key file
copy: >
src={{ secure_dir }}/files/mongo_key
dest={{ mongo_key_file }}
......@@ -48,27 +48,27 @@
group=mongodb
when: MONGO_CLUSTERED
- name: mongo | copy configuration template
- name: copy configuration template
template: src=mongodb.conf.j2 dest=/etc/mongodb.conf backup=yes
notify: restart mongo
- name: mongo | start mongo service
- name: start mongo service
service: name=mongodb state=started
- name: mongo | wait for mongo server to start
- name: wait for mongo server to start
wait_for: port=27017 delay=2
- name: mongo | Create the file to initialize the mongod replica set
- name: Create the file to initialize the mongod replica set
template: src=repset_init.j2 dest=/tmp/repset_init.js
when: MONGO_CLUSTERED
- name: mongo | Initialize the replication set
- name: Initialize the replication set
shell: /usr/bin/mongo /tmp/repset_init.js
when: MONGO_CLUSTERED
# ignore_errors doesn't work because the module throws an exception
# that it doesn't catch.
- name: mongo | create a mongodb user
- name: create a mongodb user
mongodb_user: >
database={{ item.database }}
name={{ item.user }}
......
---
- name: nginx | restart nginx
- name: restart nginx
service: name=nginx state=restarted
- name: nginx | reload nginx
- name: reload nginx
service: name=nginx state=reloaded
......@@ -2,7 +2,7 @@
# - common/tasks/main.yml
---
- name: nginx | create nginx app dirs
- name: create nginx app dirs
file: >
path="{{ item }}"
state=directory
......@@ -14,7 +14,7 @@
- "{{ nginx_sites_enabled_dir }}"
notify: nginx | restart nginx
- name: nginx | create nginx data dirs
- name: create nginx data dirs
file: >
path="{{ item }}"
state=directory
......@@ -25,37 +25,37 @@
- "{{ nginx_log_dir }}"
notify: nginx | restart nginx
- name: nginx | Install nginx packages
- name: Install nginx packages
apt: pkg={{','.join(nginx_debian_pkgs)}} state=present
notify: nginx | restart nginx
- name: nginx | Server configuration file
- name: Server configuration file
template: >
src=nginx.conf.j2 dest=/etc/nginx/nginx.conf
owner=root group={{ common_web_user }} mode=0644
notify: nginx | reload nginx
- name: nginx | Creating common nginx configuration
- name: Creating common nginx configuration
template: >
src=edx-release.j2 dest={{ nginx_sites_available_dir }}/edx-release
owner=root group=root mode=0600
notify: nginx | reload nginx
- name: nginx | Creating link for common nginx configuration
- name: Creating link for common nginx configuration
file: >
src={{ nginx_sites_available_dir }}/edx-release
dest={{ nginx_sites_enabled_dir }}/edx-release
state=link owner=root group=root
notify: nginx | reload nginx
- name: nginx | Copying nginx configs for {{ nginx_sites }}
- name: Copying nginx configs for {{ nginx_sites }}
template: >
src={{ item }}.j2 dest={{ nginx_sites_available_dir }}/{{ item }}
owner=root group={{ common_web_user }} mode=0640
notify: nginx | reload nginx
with_items: nginx_sites
- name: nginx | Creating nginx config links for {{ nginx_sites }}
- name: Creating nginx config links for {{ nginx_sites }}
file: >
src={{ nginx_sites_available_dir }}/{{ item }}
dest={{ nginx_sites_enabled_dir }}/{{ item }}
......@@ -63,26 +63,26 @@
notify: nginx | reload nginx
with_items: nginx_sites
- name: nginx | Write out htpasswd file
- name: Write out htpasswd file
htpasswd: >
name={{ NGINX_HTPASSWD_USER }}
password={{ NGINX_HTPASSWD_PASS }}
path={{ nginx_htpasswd_file }}
when: NGINX_HTPASSWD_USER and NGINX_HTPASSWD_PASS
- name: nginx | Create nginx log file location (just in case)
- name: Create nginx log file location (just in case)
file: >
path={{ nginx_log_dir}} state=directory
owner={{ common_web_user }} group={{ common_web_user }}
- name: nginx | copy ssl cert
- name: copy ssl cert
copy: >
src={{ NGINX_SSL_CERTIFICATE }}
dest=/etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }}
owner=root group=root mode=0644
when: NGINX_ENABLE_SSL and NGINX_SSL_CERTIFICATE != 'ssl-cert-snakeoil.pem'
- name: nginx | copy ssl key
- name: copy ssl key
copy: >
src={{ NGINX_SSL_KEY }}
dest=/etc/ssl/private/{{ NGINX_SSL_KEY|basename }}
......@@ -91,18 +91,18 @@
# removing default link
- name: nginx | Removing default nginx config and restart (enabled)
- name: Removing default nginx config and restart (enabled)
file: path={{ nginx_sites_enabled_dir }}/default state=absent
notify: nginx | reload nginx
# Note that nginx logs to /var/log until it reads its configuration, so /etc/logrotate.d/nginx is still good
- name: nginx | Set up nginx access log rotation
- name: Set up nginx access log rotation
template: >
dest=/etc/logrotate.d/nginx-access src=edx_logrotate_nginx_access.j2
owner=root group=root mode=644
- name: nginx | Set up nginx error log rotation
- name: Set up nginx error log rotation
template: >
dest=/etc/logrotate.d/nginx-error src=edx_logrotate_nginx_error.j2
owner=root group=root mode=644
......@@ -110,10 +110,10 @@
# If tasks that notify restart nginx don't change the state of the remote system,
# their corresponding notifications don't get run. If nginx has been stopped for
# any reason, this will ensure that it is started up again.
- name: nginx | make sure nginx has started
- name: make sure nginx has started
service: name=nginx state=started
when: start_services
- name: nginx | make sure nginx has stopped
- name: make sure nginx has stopped
service: name=nginx state=stopped
when: not start_services
---
- name: notifier | restart notifier-scheduler
- name: restart notifier-scheduler
supervisorctl_local: >
name=notifier-scheduler
state=restarted
config={{ supervisor_cfg }}
supervisorctl_path={{ supervisor_ctl }}
- name: notifier | restart notifier-celery-workers
- name: restart notifier-celery-workers
supervisorctl_local: >
name=notifier-celery-workers
state=restarted
......
......@@ -17,82 +17,82 @@
# - common
# - notifier
#
- name: notifier | install notifier specific system packages
- name: install notifier specific system packages
apt: pkg={{','.join(notifier_debian_pkgs)}} state=present
- name: notifier | check if incommon ca is installed
- name: check if incommon ca is installed
command: test -e /usr/share/ca-certificates/incommon/InCommonServerCA.crt
register: incommon_present
ignore_errors: yes
- name: common | create incommon ca directory
- name: create incommon ca directory
file:
path="/usr/share/ca-certificates/incommon" mode=2775 state=directory
when: incommon_present|failed
- name: common | retrieve incommon server CA
- name: retrieve incommon server CA
shell: curl https://www.incommon.org/cert/repository/InCommonServerCA.txt -o /usr/share/ca-certificates/incommon/InCommonServerCA.crt
when: incommon_present|failed
- name: common | add InCommon ca cert
- name: add InCommon ca cert
lineinfile:
dest=/etc/ca-certificates.conf
regexp='incommon/InCommonServerCA.crt'
line='incommon/InCommonServerCA.crt'
- name: common | update ca certs globally
- name: update ca certs globally
shell: update-ca-certificates
- name: notifier | create notifier user {{ NOTIFIER_USER }}
- name: create notifier user {{ NOTIFIER_USER }}
user:
name={{ NOTIFIER_USER }} state=present shell=/bin/bash
home={{ NOTIFIER_HOME }} createhome=yes
- name: notifier | setup the notifier env
- name: setup the notifier env
template:
src=notifier_env.j2 dest={{ NOTIFIER_HOME }}/notifier_env
owner="{{ NOTIFIER_USER }}" group="{{ NOTIFIER_USER }}"
- name: notifier | drop a bash_profile
- name: drop a bash_profile
copy: >
src=../../common/files/bash_profile
dest={{ NOTIFIER_HOME }}/.bash_profile
owner={{ NOTIFIER_USER }}
group={{ NOTIFIER_USER }}
- name: notifier | ensure .bashrc exists
- name: ensure .bashrc exists
shell: touch {{ NOTIFIER_HOME }}/.bashrc
sudo: true
sudo_user: "{{ NOTIFIER_USER }}"
- name: notifier | add source of notifier_env to .bashrc
- name: add source of notifier_env to .bashrc
lineinfile:
dest={{ NOTIFIER_HOME }}/.bashrc
regexp='. {{ NOTIFIER_HOME }}/notifier_env'
line='. {{ NOTIFIER_HOME }}/notifier_env'
- name: notifier | add source venv to .bashrc
- name: add source venv to .bashrc
lineinfile:
dest={{ NOTIFIER_HOME }}/.bashrc
regexp='. {{ NOTIFIER_VENV_DIR }}/bin/activate'
line='. {{ NOTIFIER_VENV_DIR }}/bin/activate'
- name: notifier | create notifier DB directory
- name: create notifier DB directory
file:
path="{{ NOTIFIER_DB_DIR }}" mode=2775 state=directory owner={{ NOTIFIER_USER }} group={{ NOTIFIER_WEB_USER }}
- name: notifier | create notifier/bin directory
- name: create notifier/bin directory
file:
path="{{ NOTIFIER_HOME }}/bin" mode=2775 state=directory owner={{ NOTIFIER_USER }} group={{ NOTIFIER_USER }}
- name: notifier | supervisord config for celery workers
- name: supervisord config for celery workers
template: >
src=edx/app/supervisor/conf.d/notifier-celery-workers.conf.j2
dest="{{ supervisor_cfg_dir }}/notifier-celery-workers.conf"
sudo_user: "{{ supervisor_user }}"
notify: notifier | restart notifier-celery-workers
- name: notifier | supervisord config for scheduler
- name: supervisord config for scheduler
template: >
src=edx/app/supervisor/conf.d/notifier-scheduler.conf.j2
dest="{{ supervisor_cfg_dir }}/notifier-scheduler.conf"
......
---
- name: ora | restart ora
- name: restart ora
supervisorctl_local: >
name=ora
supervisorctl_path={{ supervisor_ctl }}
......@@ -7,7 +7,7 @@
state=restarted
when: start_services and ora_installed is defined and not devstack
- name: ora | restart ora_celery
- name: restart ora_celery
supervisorctl_local: >
name=ora_celery
supervisorctl_path={{ supervisor_ctl }}
......
......@@ -3,7 +3,7 @@
# - common/tasks/main.yml
---
- name: ora | create application user
- name: create application user
user: >
name="{{ ora_user }}" home="{{ ora_app_dir }}"
createhome=no shell=/bin/false
......@@ -11,7 +11,7 @@
- ora | restart ora
- ora | restart ora_celery
- name: ora | create ora app dir
- name: create ora app dir
file: >
path="{{ item }}" state=directory
owner="{{ ora_user }}" group="{{ common_web_group }}"
......@@ -22,7 +22,7 @@
- "{{ ora_venvs_dir }}"
- "{{ ora_app_dir }}"
- name: ora | create ora data dir, owned by {{ common_web_user }}
- name: create ora data dir, owned by {{ common_web_user }}
file: >
path="{{ item }}" state=directory
owner="{{ common_web_user }}" group="{{ common_web_group }}"
......@@ -34,14 +34,14 @@
- "{{ ora_data_course_dir }}"
- "{{ ora_app_dir }}/ml_models"
- name: ora | install debian packages that ora needs
- name: install debian packages that ora needs
apt: pkg={{item}} state=present
notify:
- ora | restart ora
- ora | restart ora_celery
with_items: ora_debian_pkgs
- name: ora | install debian packages for ease that ora needs
- name: install debian packages for ease that ora needs
apt: pkg={{item}} state=present
notify:
- ora | restart ora
......
......@@ -12,12 +12,12 @@
# - common
# - oraclejdk
- name: oraclejdk | check for Oracle Java version {{ oraclejdk_base }}
- name: check for Oracle Java version {{ oraclejdk_base }}
command: test -d /usr/lib/jvm/{{ oraclejdk_base }}
ignore_errors: true
register: oraclejdk_present
- name: oraclejdk | download Oracle Java
- name: download Oracle Java
shell: >
curl -b gpw_e24=http%3A%2F%2Fwww.oracle.com -O -L {{ oraclejdk_url }}
executable=/bin/bash
......@@ -25,7 +25,7 @@
creates=/var/tmp/{{ oraclejdk_file }}
when: oraclejdk_present|failed
- name: oraclejdk | install Oracle Java
- name: install Oracle Java
shell: >
mkdir -p /usr/lib/jvm && tar -C /usr/lib/jvm -zxvf /var/tmp/{{ oraclejdk_file }}
creates=/usr/lib/jvm/{{ oraclejdk_base }}
......@@ -34,10 +34,10 @@
sudo: true
when: oraclejdk_present|failed
- name: oraclejdk | create symlink expected by elasticsearch
- name: create symlink expected by elasticsearch
file: src=/usr/lib/jvm/{{ oraclejdk_base }} dest={{ oraclejdk_link }} state=link
when: oraclejdk_present|failed
- name: oraclejdk | add JAVA_HOME for Oracle Java
- name: add JAVA_HOME for Oracle Java
template: src=java.sh.j2 dest=/etc/profile.d/java.sh owner=root group=root mode=0755
when: oraclejdk_present|failed
......@@ -3,80 +3,80 @@
# There is a bug with initializing multiple nodes in the HA cluster at once
# http://rabbitmq.1065348.n5.nabble.com/Rabbitmq-boot-failure-with-quot-tables-not-present-quot-td24494.html
- name: rabbitmq | trust rabbit repository
- name: trust rabbit repository
apt_key: url={{rabbitmq_apt_key}} state=present
- name: rabbitmq | install python-software-properties if debian
- name: install python-software-properties if debian
apt: pkg={{",".join(rabbitmq_debian_pkgs)}} state=present
- name: rabbitmq | add rabbit repository
- name: add rabbit repository
apt_repository: repo="{{rabbitmq_repository}}" state=present
- name: rabbitmq | install rabbitmq
- name: install rabbitmq
apt: pkg={{rabbitmq_pkg}} state=present update_cache=yes
- name: rabbitmq | stop rabbit cluster
- name: stop rabbit cluster
service: name=rabbitmq-server state=stopped
# in case there are lingering processes, ignore errors
# silently
- name: rabbitmq | send sigterm to any running rabbitmq processes
- name: send sigterm to any running rabbitmq processes
shell: pkill -u rabbitmq || true
# Defaulting to /var/lib/rabbitmq
- name: rabbitmq | create cookie directory
- name: create cookie directory
file: >
path={{rabbitmq_cookie_dir}}
owner=rabbitmq group=rabbitmq mode=0755 state=directory
- name: rabbitmq | add rabbitmq erlang cookie
- name: add rabbitmq erlang cookie
template: >
src=erlang.cookie.j2 dest={{rabbitmq_cookie_location}}
owner=rabbitmq group=rabbitmq mode=0400
register: erlang_cookie
# Defaulting to /etc/rabbitmq
- name: rabbitmq | create rabbitmq config directory
- name: create rabbitmq config directory
file: >
path={{rabbitmq_config_dir}}
owner=root group=root mode=0755 state=directory
- name: rabbitmq | add rabbitmq environment configuration
- name: add rabbitmq environment configuration
template: >
src=rabbitmq-env.conf.j2 dest={{rabbitmq_config_dir}}/rabbitmq-env.conf
owner=root group=root mode=0644
- name: rabbitmq | add rabbitmq cluster configuration
- name: add rabbitmq cluster configuration
template: >
src=rabbitmq.config.j2 dest={{rabbitmq_config_dir}}/rabbitmq.config
owner=root group=root mode=0644
register: cluster_configuration
- name: rabbitmq | install plugins
- name: install plugins
rabbitmq_plugin:
names={{",".join(rabbitmq_plugins)}} state=enabled
# When rabbitmq starts up it creates a folder of metadata at '/var/lib/rabbitmq/mnesia'.
# This folder should be deleted before clustering is set up because it retains data
# that can conflict with the clustering information.
- name: rabbitmq | remove mnesia configuration
- name: remove mnesia configuration
file: path={{rabbitmq_mnesia_folder}} state=absent
when: erlang_cookie.changed or cluster_configuration.changed or rabbitmq_refresh
- name: rabbitmq | start rabbit nodes
- name: start rabbit nodes
service: name=rabbitmq-server state=restarted
- name: rabbitmq | wait for rabbit to start
- name: wait for rabbit to start
wait_for: port={{ rabbitmq_management_port }} delay=2
- name: rabbitmq | remove guest user
- name: remove guest user
rabbitmq_user: user="guest" state=absent
- name: rabbitmq | add vhosts
- name: add vhosts
rabbitmq_vhost: name={{ item }} state=present
with_items: RABBITMQ_VHOSTS
- name: rabbitmq | add admin users
- name: add admin users
rabbitmq_user: >
user='{{item[0].name}}' password='{{item[0].password}}'
read_priv='.*' write_priv='.*'
......@@ -87,23 +87,23 @@
- RABBITMQ_VHOSTS
when: "'admins' in rabbitmq_auth_config"
- name: rabbitmq | make queues mirrored
- name: make queues mirrored
shell: "/usr/sbin/rabbitmqctl set_policy HA '^(?!amq\\.).*' '{\"ha-mode\": \"all\"}'"
when: RABBITMQ_CLUSTERED or rabbitmq_clustered_hosts|length > 1
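Later Ansible releases (1.5+) ship a rabbitmq_policy module that expresses the same HA policy declaratively. A minimal sketch, assuming that module is available, which is not the case for the 1.4 target of this commit:
- name: make queues mirrored (module-based sketch, requires Ansible 1.5+)
  rabbitmq_policy: name=HA pattern='^(?!amq\.).*'
  args:
    tags:
      "ha-mode": all
  when: RABBITMQ_CLUSTERED or rabbitmq_clustered_hosts|length > 1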
#
# Depends upon the management plugin
#
- name: rabbitmq | install admin tools
- name: install admin tools
get_url: >
url=http://localhost:{{ rabbitmq_management_port }}/cli/rabbitmqadmin
dest=/usr/local/bin/rabbitmqadmin
- name: rabbitmq | ensure rabbitmqadmin attributes
- name: ensure rabbitmqadmin attributes
file: >
path=/usr/local/bin/rabbitmqadmin owner=root
group=root mode=0755
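Once fetched, rabbitmqadmin is handy for read-only inspection of the broker. An illustrative usage (not in this commit):
- name: list queues with the admin tool (illustrative, not in this commit)
  shell: /usr/local/bin/rabbitmqadmin list queues
  changed_when: false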
- name: rabbitmq | stop rabbit nodes
- name: stop rabbit nodes
service: name=rabbitmq-server state=restarted
when: not start_services
......@@ -34,95 +34,95 @@
- fail: msg="rbenv_ruby_version required for role"
when: rbenv_ruby_version is not defined
- name: rbenv | create rbenv user {{ rbenv_user }}
- name: create rbenv user {{ rbenv_user }}
user: >
name={{ rbenv_user }} home={{ rbenv_dir }}
shell=/bin/false createhome=no
when: rbenv_user != common_web_user
- name: rbenv | create rbenv dir if it does not exist
- name: create rbenv dir if it does not exist
file: >
path="{{ rbenv_dir }}" owner="{{ rbenv_user }}"
state=directory
- name: rbenv | install build depends
- name: install build depends
apt: pkg={{ ",".join(rbenv_debian_pkgs) }} state=present install_recommends=no
with_items: rbenv_debian_pkgs
- name: rbenv | update rbenv repo
- name: update rbenv repo
git: >
repo=https://github.com/sstephenson/rbenv.git
dest={{ rbenv_dir }}/.rbenv version={{ rbenv_version }}
sudo_user: "{{ rbenv_user }}"
- name: rbenv | ensure ruby_env exists
- name: ensure ruby_env exists
template: >
src=ruby_env.j2 dest={{ rbenv_dir }}/ruby_env
sudo_user: "{{ rbenv_user }}"
- name: rbenv | check ruby-build installed
- name: check ruby-build installed
command: test -x /usr/local/bin/ruby-build
register: rbuild_present
ignore_errors: yes
- name: rbenv | if ruby-build exists, check which versions we can install
- name: if ruby-build exists, check which versions we can install
command: /usr/local/bin/ruby-build --definitions
when: rbuild_present|success
register: installable_ruby_vers
ignore_errors: yes
### in this block, we (re)install ruby-build if it doesn't exist or if it can't install the requested version
- name: rbenv | create temporary directory
- name: create temporary directory
command: mktemp -d
register: tempdir
sudo_user: "{{ rbenv_user }}"
when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)
- name: rbenv | clone ruby-build repo
- name: clone ruby-build repo
git: repo=https://github.com/sstephenson/ruby-build.git dest={{ tempdir.stdout }}/ruby-build
when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)
sudo_user: "{{ rbenv_user }}"
- name: rbenv | install ruby-build
- name: install ruby-build
command: ./install.sh chdir={{ tempdir.stdout }}/ruby-build
when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)
- name: rbenv | remove temporary directory
- name: remove temporary directory
file: path={{ tempdir.stdout }} state=absent
when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)
- name: rbenv | check ruby {{ rbenv_ruby_version }} installed
- name: check ruby {{ rbenv_ruby_version }} installed
shell: "rbenv versions | grep {{ rbenv_ruby_version }}"
register: ruby_installed
sudo_user: "{{ rbenv_user }}"
environment: "{{ rbenv_environment }}"
ignore_errors: yes
- name: rbenv | install ruby {{ rbenv_ruby_version }}
- name: install ruby {{ rbenv_ruby_version }}
shell: "rbenv install {{ rbenv_ruby_version }} creates={{ rbenv_dir }}/.rbenv/versions/{{ rbenv_ruby_version }}"
when: ruby_installed|failed
sudo_user: "{{ rbenv_user }}"
environment: "{{ rbenv_environment }}"
- name: rbenv | set global ruby {{ rbenv_ruby_version }}
- name: set global ruby {{ rbenv_ruby_version }}
shell: "rbenv global {{ rbenv_ruby_version }}"
sudo_user: "{{ rbenv_user }}"
environment: "{{ rbenv_environment }}"
- name: rbenv | install bundler
- name: install bundler
shell: "gem install bundler -v {{ rbenv_bundler_version }}"
sudo_user: "{{ rbenv_user }}"
environment: "{{ rbenv_environment }}"
- name: rbenv | remove rbenv version of rake
- name: remove rbenv version of rake
file: path="{{ rbenv_dir }}/.rbenv/versions/{{ rbenv_ruby_version }}/bin/rake" state=absent
- name: rbenv | install rake gem
- name: install rake gem
shell: "gem install rake -v {{ rbenv_rake_version }}"
sudo_user: "{{ rbenv_user }}"
environment: "{{ rbenv_environment }}"
- name: rbenv | rehash
- name: rehash
shell: "rbenv rehash"
sudo_user: "{{ rbenv_user }}"
environment: "{{ rbenv_environment }}"
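To confirm the toolchain ended up where the role expects, the global interpreter can be read back. An illustrative check (not part of this role):
- name: confirm the active global ruby (illustrative check, not in this role)
  shell: rbenv version
  sudo_user: "{{ rbenv_user }}"
  environment: "{{ rbenv_environment }}"
  changed_when: false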
......@@ -53,37 +53,37 @@
# - s3fs
#
- name: s3fs | install system packages
- name: install system packages
apt: pkg={{','.join(s3fs_debian_pkgs)}} state=present
tags:
- s3fs
- install
- update
- name: s3fs | fetch package
- name: fetch package
get_url:
url={{ s3fs_download_url }}
dest={{ s3fs_temp_dir }}
- name: s3fs | extract package
- name: extract package
shell:
/bin/tar -xzf {{ s3fs_archive }}
chdir={{ s3fs_temp_dir }}
creates={{ s3fs_temp_dir }}/{{ s3fs_version }}/configure
- name: s3fs | configure
- name: configure
shell:
./configure
chdir={{ s3fs_temp_dir }}/{{ s3fs_version }}
creates={{ s3fs_temp_dir }}/{{ s3fs_version }}/config.status
- name: s3fs | make
- name: make
shell:
/usr/bin/make
chdir={{ s3fs_temp_dir }}/{{ s3fs_version }}
creates={{ s3fs_temp_dir }}/{{ s3fs_version }}/src/s3cmd
- name: s3fs | make install
- name: make install
shell:
/usr/bin/make install
chdir={{ s3fs_temp_dir }}/{{ s3fs_version }}
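After make install the s3fs binary is available for mounting buckets. A hedged sketch of a follow-up mount task, where bucket_name, mount_point, and the credentials path are illustrative assumptions rather than variables defined by this role:
- name: mount an S3 bucket with s3fs (illustrative; bucket_name and mount_point are hypothetical vars)
  shell: s3fs {{ bucket_name }} {{ mount_point }} -o passwd_file=/etc/passwd-s3fs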
......
---
- name: shibboleth | restart shibd
- name: restart shibd
service: name=shibd state=restarted
# Install shibboleth
---
- name: shibboleth | Installs shib and dependencies from apt
- name: Installs shib and dependencies from apt
apt: pkg={{item}} install_recommends=no state=present update_cache=yes
with_items:
- shibboleth-sp2-schemas
......@@ -14,19 +14,19 @@
- shib
- install
- name: shibboleth | Creates /etc/shibboleth/metadata directory
- name: Creates /etc/shibboleth/metadata directory
file: path=/etc/shibboleth/metadata state=directory mode=2774 group=_shibd owner=_shibd
tags:
- shib
- install
- name: shibboleth | Downloads metadata into metadata directory as backup
- name: Downloads metadata into metadata directory as backup
get_url: url=https://idp.stanford.edu/Stanford-metadata.xml dest=/etc/shibboleth/metadata/idp-metadata.xml mode=0640 group=_shibd owner=_shibd
tags:
- shib
- install
- name: shibboleth | writes out key and pem file
- name: writes out key and pem file
template: src=sp.{{item}}.j2 dest=/etc/shibboleth/sp.{{item}} group=_shibd owner=_shibd mode=0600
with_items:
- key
......@@ -36,7 +36,7 @@
- shib
- install
- name: shibboleth | writes out configuration files
- name: writes out configuration files
template: src={{item}}.j2 dest=/etc/shibboleth/{{item}} group=_shibd owner=_shibd mode=0644
with_items:
- attribute-map.xml
......@@ -46,7 +46,7 @@
- shib
- install
- name: shibboleth | enables shib
- name: enables shib
command: a2enmod shib2
notify: shibboleth | restart shibd
tags:
......
......@@ -16,5 +16,5 @@
#
# Restart Splunk
- name: splunkforwarder | restart splunkforwarder
- name: restart splunkforwarder
service: name=splunk state=restarted
......@@ -22,44 +22,44 @@
#
# Install Splunk Forwarder
- name: splunkforwarder | install splunkforwarder-specific system packages
- name: install splunkforwarder-specific system packages
apt: pkg={{','.join(splunk_debian_pkgs)}} state=present
tags:
- splunk
- install
- update
- name: splunkforwarder | download the splunk deb
- name: download the splunk deb
get_url: >
dest="/tmp/{{SPLUNKFORWARDER_DEB}}"
url="{{SPLUNKFORWARDER_PACKAGE_LOCATION}}{{SPLUNKFORWARDER_DEB}}"
register: download_deb
- name: splunkforwarder | install splunk forwarder
- name: install splunk forwarder
shell: gdebi -nq /tmp/{{SPLUNKFORWARDER_DEB}}
when: download_deb.changed
# Create splunk user
- name: splunkforwarder | create splunk user
- name: create splunk user
user: name=splunk createhome=no state=present append=yes groups=syslog
when: download_deb.changed
# Need to start splunk manually so that it can create various files
# and directories that aren't created until the first run and are needed
# by some of the commands below.
- name: splunkforwarder | start splunk manually
- name: start splunk manually
shell: >
{{splunkforwarder_output_dir}}/bin/splunk start --accept-license --answer-yes --no-prompt
creates={{splunkforwarder_output_dir}}/var/lib/splunk
when: download_deb.changed
register: started_manually
- name: splunkforwarder | stop splunk manually
- name: stop splunk manually
shell: >
{{splunkforwarder_output_dir}}/bin/splunk stop --accept-license --answer-yes --no-prompt
when: download_deb.changed and started_manually.changed
- name: splunkforwarder | create boot script
- name: create boot script
shell: >
{{splunkforwarder_output_dir}}/bin/splunk enable boot-start -user splunk --accept-license --answer-yes --no-prompt
creates=/etc/init.d/splunk
......@@ -68,24 +68,24 @@
notify: splunkforwarder | restart splunkforwarder
# Update credentials
- name: splunkforwarder | update admin password
- name: update admin password
shell: "{{splunkforwarder_output_dir}}/bin/splunk edit user admin -password {{SPLUNKFORWARDER_PASSWORD}} -auth admin:changeme --accept-license --answer-yes --no-prompt"
when: download_deb.changed
notify: splunkforwarder | restart splunkforwarder
- name: splunkforwarder | add chkconfig to init script
- name: add chkconfig to init script
shell: 'sed -i -e "s/\/bin\/sh/\/bin\/sh\n# chkconfig: 235 98 55/" /etc/init.d/splunk'
when: download_deb.changed and create_boot_script.changed
notify: splunkforwarder | restart splunkforwarder
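The sed above splices a chkconfig header into the init script; lineinfile can make the same edit idempotently. A sketch of that alternative (not what this commit does):
- name: add chkconfig header via lineinfile (alternative sketch, not in this commit)
  lineinfile: >
    dest=/etc/init.d/splunk
    insertafter='^#!/bin/sh'
    line='# chkconfig: 235 98 55'
  when: download_deb.changed and create_boot_script.changed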
# Ensure permissions on splunk content
- name: splunkforwarder | ensure splunk folder permissions
- name: ensure splunk folder permissions
file: path={{splunkforwarder_output_dir}} state=directory recurse=yes owner=splunk group=splunk
when: download_deb.changed
notify: splunkforwarder | restart splunkforwarder
# Drop template files.
- name: splunkforwarder | drop input configuration
- name: drop input configuration
template:
src=opt/splunkforwarder/etc/system/local/inputs.conf.j2
dest=/opt/splunkforwarder/etc/system/local/inputs.conf
......@@ -94,7 +94,7 @@
mode=0644
notify: splunkforwarder | restart splunkforwarder
- name: splunkforwarder | create outputs config file
- name: create outputs config file
template:
src=opt/splunkforwarder/etc/system/local/outputs.conf.j2
dest=/opt/splunkforwarder/etc/system/local/outputs.conf
......
......@@ -50,19 +50,19 @@
# supervisor_service: upstart-service-name
#
---
- name: supervisor | create application user
- name: create application user
user: >
name="{{ supervisor_user }}"
createhome=no
shell=/bin/false
- name: supervisor | create supervisor service user
- name: create supervisor service user
user: >
name="{{ supervisor_service_user }}"
createhome=no
shell=/bin/false
- name: supervisor | create supervisor directories
- name: create supervisor directories
file: >
name={{ item }}
state=directory
......@@ -73,7 +73,7 @@
- "{{ supervisor_venv_dir }}"
- "{{ supervisor_cfg_dir }}"
- name: supervisor | create supervisor directories
- name: create supervisor directories
file: >
name={{ item }}
state=directory
......@@ -84,29 +84,29 @@
- "{{ supervisor_log_dir }}"
- name: supervisor | install supervisor in its venv
- name: install supervisor in its venv
pip: name=supervisor virtualenv="{{supervisor_venv_dir}}" state=present
sudo_user: "{{ supervisor_user }}"
- name: supervisor | create supervisor upstart job
- name: create supervisor upstart job
template: >
src=supervisor-upstart.conf.j2 dest=/etc/init/{{ supervisor_service }}.conf
owner=root group=root
- name: supervisor | create supervisor master config
- name: create supervisor master config
template: >
src=supervisord.conf.j2 dest={{ supervisor_cfg }}
owner={{ supervisor_user }} group={{ supervisor_service_user }}
mode=0644
- name: supervisor | create a symlink for supervisorctl
- name: create a symlink for supervisorctl
file: >
src={{ supervisor_ctl }}
dest={{ COMMON_BIN_DIR }}/{{ supervisor_ctl|basename }}
state=link
when: supervisor_service == "supervisor"
- name: supervisor | create a symlink for supervisor cfg
- name: create a symlink for supervisor cfg
file: >
src={{ item }}
dest={{ COMMON_CFG_DIR }}/{{ item|basename }}
......@@ -116,7 +116,7 @@
- "{{ supervisor_cfg }}"
- "{{ supervisor_cfg_dir }}"
- name: supervisor | start supervisor
- name: start supervisor
service: >
name={{supervisor_service}}
state=started
......@@ -124,7 +124,7 @@
# calling update on supervisor too soon after it
# starts will result in an error.
- name: supervisor | wait for web port to be available
- name: wait for web port to be available
wait_for: port={{ supervisor_http_bind_port }} timeout=5
when: start_supervisor.changed
......@@ -134,7 +134,7 @@
# we don't use notifications for supervisor because
# they don't work well with parameterized roles.
# See https://github.com/ansible/ansible/issues/4853
- name: supervisor | update supervisor configuration
- name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
changed_when: supervisor_update.stdout != ""
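Because notifications are deliberately avoided here, a status read-back makes a useful smoke test after the update. An illustrative follow-up (not in this commit):
- name: report supervisor process status (illustrative, not in this commit)
  shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} status"
  changed_when: false
  ignore_errors: yes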
- name: xqueue | restart xqueue
- name: restart xqueue
supervisorctl_local: >
name={{ item }}
supervisorctl_path={{ supervisor_ctl }}
......
......@@ -6,7 +6,7 @@
#
#
- name: xqueue | create application user
- name: create application user
user: >
name="{{ xqueue_user }}"
home="{{ xqueue_app_dir }}"
......@@ -15,7 +15,7 @@
notify:
- xqueue | restart xqueue
- name: xqueue | create xqueue app and venv dir
- name: create xqueue app and venv dir
file: >
path="{{ item }}"
state=directory
......@@ -27,12 +27,12 @@
- "{{ xqueue_app_dir }}"
- "{{ xqueue_venvs_dir }}"
- name: xqueue | install a bunch of system packages on which xqueue relies
- name: install a bunch of system packages on which xqueue relies
apt: pkg={{','.join(xqueue_debian_pkgs)}} state=present
notify:
- xqueue | restart xqueue
- name: xqueue | create xqueue db
- name: create xqueue db
mysql_db: >
name={{xqueue_auth_config.DATABASES.default.NAME}}
login_host={{xqueue_auth_config.DATABASES.default.HOST}}
......
......@@ -14,7 +14,7 @@
# Overview:
#
- name: xserver | restart xserver
- name: restart xserver
supervisorctl_local: >
name=xserver
supervisorctl_path={{ supervisor_ctl }}
......
......@@ -3,28 +3,28 @@
# access to the edX 6.00x repo which is not public
---
- name: xserver | checking for grader info
- name: checking for grader info
fail: msg="You must define XSERVER_GRADER_DIR and XSERVER_GRADER_SOURCE to use this role!"
when: not XSERVER_GRADER_DIR or not XSERVER_GRADER_SOURCE
- name: xserver | checking for git identity
- name: checking for git identity
fail: msg="You must define XSERVER_LOCAL_GIT_IDENTITY to use this role"
when: not XSERVER_LOCAL_GIT_IDENTITY
- name: xserver | create application user
- name: create application user
user: >
name="{{ xserver_user }}"
home="{{ xserver_app_dir }}"
createhome=no
shell=/bin/false
- name: xserver | create application sandbox user
- name: create application sandbox user
user: >
name="{{ xserver_sandbox_user }}"
createhome=no
shell=/bin/false
- name: xserver | create xserver app and data dirs
- name: create xserver app and data dirs
file: >
path="{{ item }}"
state=directory
......@@ -36,27 +36,27 @@
- "{{ xserver_data_dir }}"
- "{{ xserver_data_dir }}/data"
- name: xserver | create sandbox sudoers file
- name: create sandbox sudoers file
template: src=99-sandbox.j2 dest=/etc/sudoers.d/99-sandbox owner=root group=root mode=0440
# Make sure this line is in the common-session file.
- name: xserver | ensure pam-limits module is loaded
- name: ensure pam-limits module is loaded
lineinfile:
dest=/etc/pam.d/common-session
regexp="session required pam_limits.so"
line="session required pam_limits.so"
- name: xserver | set sandbox limits
- name: set sandbox limits
template: src={{ item }} dest=/etc/security/limits.d/sandbox.conf
first_available_file:
- "{{ secure_dir }}/sandbox.conf.j2"
- "sandbox.conf.j2"
- name: xserver | install system dependencies of xserver
- name: install system dependencies of xserver
apt: pkg={{ item }} state=present
with_items: xserver_debian_pkgs
- name: xserver | load python-sandbox apparmor profile
- name: load python-sandbox apparmor profile
template: src={{ item }} dest=/etc/apparmor.d/edx_apparmor_sandbox
first_available_file:
- "{{ secure_dir }}/files/edx_apparmor_sandbox.j2"
......
Jinja2==2.7.1
MarkupSafe==0.18
ansible==1.4.4
PyYAML==3.10
ansible==1.3.2
Jinja2==2.7.2
MarkupSafe==0.18
argparse==1.2.1
boto==2.10.0
boto==2.23.0
ecdsa==0.10
paramiko==1.12.0
pycrypto==2.6.1
......