Commit 18a3e442 by John Jarvis

removing more role labels

parent 623f20c9
#
# TODO: Needed while this repo is private
#
- name: analytics-server | upload ssh script
- name: upload ssh script
template:
src=tmp/{{ as_role_name }}.git_ssh.sh.j2 dest={{ as_git_ssh }}
force=yes owner=root group=adm mode=750
......@@ -13,7 +13,7 @@
#
# TODO: Needed while this repo is private
#
- name: analytics-server | install read-only ssh key required for checkout
- name: install read-only ssh key required for checkout
copy:
src={{ as_git_identity_path }} dest={{ as_git_identity_dest }}
force=yes owner=ubuntu group=adm mode=0600
......@@ -22,14 +22,14 @@
- install
- update
- name: analytics-server | checkout code
- name: checkout code
git:
dest={{ as_code_dir }} repo={{ as_source_repo }}
version={{ as_version }} force=true
environment:
GIT_SSH: $as_git_ssh
notify: analytics-server | restart the analytics service
notify: analytics-server | start the analytics service
notify: restart the analytics service
notify: start the analytics service
tags:
- analytics-server
- install
......@@ -38,7 +38,7 @@
#
# TODO: Needed while this repo is private
#
- name: analytics-server | update src permissions
- name: update src permissions
file:
path={{ as_code_dir }} state=directory owner={{ as_user }}
group={{ as_web_user }} mode=2750 recurse=yes
......@@ -50,7 +50,7 @@
#
# TODO: Needed while this repo is private
#
- name: analytics-server | remove read-only ssh key for the content repo
- name: remove read-only ssh key for the content repo
file: path={{ as_git_identity_dest }} state=absent
tags:
- analytics-server
......@@ -60,20 +60,20 @@
#
# TODO: Needed while this repo is private
#
- name: analytics-server | remove ssh script
- name: remove ssh script
file: path={{ as_git_ssh }} state=absent
tags:
- analytics-server
- install
- update
- name: analytics-server | install application requirements
- name: install application requirements
pip:
requirements={{ as_requirements_file }}
virtualenv={{ as_venv_dir }} state=present
sudo: true
sudo_user: "{{ as_user }}"
notify: analytics-server | start the analytics service
notify: start the analytics service
tags:
- analytics-server
- install
......
......@@ -71,7 +71,7 @@
group={{ as_user }}
# Awaiting next ansible release.
#- name: analytics-server | ensure .bashrc exists
#- name: ensure .bashrc exists
# file: path={{ as_home }}/.bashrc state=touch
# sudo: true
# sudo_user: "{{ as_user }}"
......
#
# TODO: Needed while this repo is private
#
- name: analytics | upload ssh script
- name: upload ssh script
template:
src=tmp/{{ analytics_role_name }}.git_ssh.sh.j2 dest={{ analytics_git_ssh }}
force=yes owner=root group=adm mode=750
......@@ -13,7 +13,7 @@
#
# TODO: Needed while this repo is private
#
- name: analytics | install read-only ssh key required for checkout
- name: install read-only ssh key required for checkout
copy:
src={{ analytics_git_identity_path }} dest={{ analytics_git_identity_dest }}
force=yes owner=ubuntu group=adm mode=0600
......@@ -22,14 +22,14 @@
- install
- update
- name: analytics | checkout code
- name: checkout code
git:
dest={{ analytics_code_dir }} repo={{ analytics_source_repo }}
version={{ analytics_version }} force=true
environment:
GIT_SSH: $analytics_git_ssh
notify: analytics | restart the analytics service
notify: analytics | start the analytics service
notify: restart the analytics service
notify: start the analytics service
tags:
- analytics
- install
......@@ -38,7 +38,7 @@
#
# TODO: Needed while this repo is private
#
- name: analytics | update src permissions
- name: update src permissions
file:
path={{ analytics_code_dir }} state=directory owner={{ analytics_user }}
group={{ analytics_web_user }} mode=2750 recurse=yes
......@@ -50,7 +50,7 @@
#
# TODO: Needed while this repo is private
#
- name: analytics | remove read-only ssh key for the content repo
- name: remove read-only ssh key for the content repo
file: path={{ analytics_git_identity_dest }} state=absent
tags:
- analytics
......@@ -60,20 +60,20 @@
#
# TODO: Needed while this repo is private
#
- name: analytics | remove ssh script
- name: remove ssh script
file: path={{ analytics_git_ssh }} state=absent
tags:
- analytics
- install
- update
- name: analytics | install application requirements
- name: install application requirements
pip:
requirements={{ analytics_requirements_file }}
virtualenv={{ analytics_venv_dir }} state=present
sudo: true
sudo_user: "{{ analytics_user }}"
notify: analytics | start the analytics service
notify: start the analytics service
tags:
- analytics
- install
......
......@@ -71,7 +71,7 @@
group={{ analytics_user }}
# Awaiting next ansible release.
#- name: analytics | ensure .bashrc exists
#- name: ensure .bashrc exists
# file: path={{ analytics_home }}/.bashrc state=touch
# sudo: true
# sudo_user: "{{ analytics_user }}"
......
# Requires nginx package
---
- name: apache | Copying apache config {{ site_name }}
- name: Copying apache config {{ site_name }}
template: src={{ item }} dest=/etc/apache2/sites-available/{{ site_name }}
first_available_file:
- "{{ local_dir }}/apache/templates/{{ site_name }}.j2"
# seems like paths in first_available_file must be relative to the playbooks dir
- "roles/apache/templates/{{ site_name }}.j2"
notify: apache | restart apache
notify: restart apache
when: apache_role_run is defined
tags:
- apache
- update
- name: apache | Creating apache2 config link {{ site_name }}
- name: Creating apache2 config link {{ site_name }}
file: src=/etc/apache2/sites-available/{{ site_name }} dest=/etc/apache2/sites-enabled/{{ site_name }} state={{ state }} owner=root group=root
notify: apache | restart apache
notify: restart apache
when: apache_role_run is defined
tags:
- apache
......
......@@ -6,25 +6,25 @@
with_items:
- apache2
- libapache2-mod-wsgi
notify: apache | restart apache
notify: restart apache
tags:
- apache
- install
- name: disables default site
command: a2dissite 000-default
notify: apache | restart apache
notify: restart apache
tags:
- apache
- install
- name: rewrite apache ports conf
template: dest=/etc/apache2/ports.conf src=ports.conf.j2 owner=root group=root
notify: apache | restart apache
notify: restart apache
tags:
- apache
- install
- name: Register the fact that apache role has run
command: echo True
register: apache_role_run
......
---
- name: certs | create certificate application config
- name: create certificate application config
template: >
src=certs.env.json.j2
dest={{ certs_app_dir }}/env.json
sudo_user: "{{ certs_user }}"
notify: certs | restart certs
notify: restart certs
- name: certs | create certificate auth file
- name: create certificate auth file
template: >
src=certs.auth.json.j2
dest={{ certs_app_dir }}/auth.json
sudo_user: "{{ certs_user }}"
notify: certs | restart certs
notify: restart certs
- name: certs | writing supervisor script for certificates
- name: writing supervisor script for certificates
template: >
src=certs.conf.j2 dest={{ supervisor_cfg_dir }}/certs.conf
owner={{ supervisor_user }} mode=0644
notify: certs | restart certs
notify: restart certs
- name: certs | create ssh script for git
- name: create ssh script for git
template: >
src={{ certs_git_ssh|basename }}.j2 dest={{ certs_git_ssh }}
owner={{ certs_user }} mode=750
notify: certs | restart certs
notify: restart certs
- name: certs | install read-only ssh key for the certs repo
- name: install read-only ssh key for the certs repo
copy: >
src={{ CERTS_LOCAL_GIT_IDENTITY }} dest={{ certs_git_identity }}
force=yes owner={{ certs_user }} mode=0600
notify: certs | restart certs
notify: restart certs
- name: certs | checkout certificates repo into {{ certs_code_dir }}
- name: checkout certificates repo into {{ certs_code_dir }}
git: dest={{ certs_code_dir }} repo={{ certs_repo }} version={{ certs_version }}
sudo_user: "{{ certs_user }}"
environment:
GIT_SSH: "{{ certs_git_ssh }}"
notify: certs | restart certs
notify: restart certs
- name: certs | remove read-only ssh key for the certs repo
- name: remove read-only ssh key for the certs repo
file: path={{ certs_git_identity }} state=absent
notify: certs | restart certs
notify: restart certs
- name : install python requirements
pip: requirements="{{ certs_requirements_file }}" virtualenv="{{ certs_venv_dir }}" state=present
sudo_user: "{{ certs_user }}"
notify: certs | restart certs
notify: restart certs
# call supervisorctl update. this reloads
# the supervisorctl config and restarts
# the services if any of the configurations
# have changed.
#
- name: certs | update supervisor configuration
- name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
sudo_user: "{{ supervisor_service_user }}"
changed_when: supervisor_update.stdout != ""
when: start_services
- name: certs | ensure certs has started
- name: ensure certs has started
supervisorctl_local: >
name=certs
supervisorctl_path={{ supervisor_ctl }}
......@@ -69,12 +69,12 @@
sudo_user: "{{ supervisor_service_user }}"
when: start_services
- name: certs | create a symlink for venv python
- name: create a symlink for venv python
file: >
src="{{ certs_venv_bin }}/{{ item }}"
dest={{ COMMON_BIN_DIR }}/{{ item }}.certs
state=link
notify: certs | restart certs
notify: restart certs
with_items:
- python
- pip
......
......@@ -41,7 +41,7 @@
home="{{ certs_app_dir }}"
createhome=no
shell=/bin/false
notify: certs | restart certs
notify: restart certs
- name: create certs app and data dirs
file: >
......@@ -49,7 +49,7 @@
state=directory
owner="{{ certs_user }}"
group="{{ common_web_group }}"
notify: certs | restart certs
notify: restart certs
with_items:
- "{{ certs_app_dir }}"
- "{{ certs_venvs_dir }}"
......@@ -59,14 +59,14 @@
path="{{ certs_gpg_dir }}" state=directory
owner="{{ common_web_user }}"
mode=0700
notify: certs | restart certs
notify: restart certs
- name: copy the private gpg signing key
copy: >
src={{ CERTS_LOCAL_PRIVATE_KEY }}
dest={{ certs_app_dir }}/{{ CERTS_LOCAL_PRIVATE_KEY|basename }}
owner={{ common_web_user }} mode=0600
notify: certs | restart certs
notify: restart certs
register: certs_gpg_key
......@@ -75,6 +75,6 @@
/usr/bin/gpg --homedir {{ certs_gpg_dir }} --import {{ certs_app_dir }}/{{ CERTS_LOCAL_PRIVATE_KEY|basename }}
sudo_user: "{{ common_web_user }}"
when: certs_gpg_key.changed
notify: certs | restart certs
notify: restart certs
- include: deploy.yml tags=deploy
......@@ -2,10 +2,10 @@
#
# datadog
#
#
# Overview:
#
# Installs datadog
#
# Installs datadog
##
# Dependencies:
#
......@@ -43,11 +43,11 @@
- name: update api-key
lineinfile: >
dest="/etc/dd-agent/datadog.conf"
dest="/etc/dd-agent/datadog.conf"
regexp="^api_key:.*"
line="api_key:{{ datadog_api_key }}"
notify:
- datadog | restart the datadog service
- restart the datadog service
tags:
- datadog
......
---
- name: demo | check out the demo course
- name: check out the demo course
git: dest={{ demo_code_dir }} repo={{ demo_repo }} version={{ demo_version }}
sudo_user: "{{ edxapp_user }}"
register: demo_checkout
- name: demo | import demo course
- name: import demo course
shell: >
{{ edxapp_venv_bin }}/python ./manage.py cms --settings=aws import {{ edxapp_course_data_dir }} {{ demo_code_dir }}
chdir={{ edxapp_code_dir }}
sudo_user: "{{ common_web_user }}"
when: demo_checkout.changed
- name: demo | create some test users and enroll them in the course
- name: create some test users and enroll them in the course
shell: >
{{ edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms create_user -e {{ item.email }} -p {{ item.password }} -m {{ item.mode }} -c {{ demo_course_id }}
chdir={{ edxapp_code_dir }}
......@@ -20,21 +20,21 @@
with_items: demo_test_users
when: demo_checkout.changed
- name: demo | create staff user
- name: create staff user
shell: >
{{ edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms create_user -e staff@example.com -p edx -s -c {{ demo_course_id }}
chdir={{ edxapp_code_dir }}
sudo_user: "{{ common_web_user }}"
when: demo_checkout.changed
- name: demo | add test users to the certificate whitelist
- name: add test users to the certificate whitelist
shell: >
{{ edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms cert_whitelist -a {{ item.email }} -c {{ demo_course_id }}
chdir={{ edxapp_code_dir }}
with_items: demo_test_users
when: demo_checkout.changed
- name: demo | seed the forums for the demo course
- name: seed the forums for the demo course
shell: >
{{ edxapp_venv_bin }}/python ./manage.py lms --settings=aws seed_permissions_roles {{ demo_course_id }}
chdir={{ edxapp_code_dir }}
......
......@@ -34,7 +34,7 @@
user: >
name={{ devpi_user }}
shell=/bin/false createhome=no
notify: devpi | restart devpi
notify: restart devpi
- name: create devpi application directories
file: >
......@@ -45,7 +45,7 @@
with_items:
- "{{ devpi_app_dir }}"
- "{{ devpi_venv_dir }}"
notify: devpi | restart devpi
notify: restart devpi
- name: create the devpi data directory, needs write access by the service user
file: >
......@@ -56,7 +56,7 @@
with_items:
- "{{ devpi_data_dir }}"
- "{{ devpi_mirror_dir }}"
notify: devpi | restart devpi
notify: restart devpi
- name: install devpi pip pkgs
pip: >
......@@ -65,20 +65,20 @@
virtualenv={{ devpi_venv_dir }}
sudo_user: "{{ devpi_user }}"
with_items: devpi_pip_pkgs
notify: devpi | restart devpi
notify: restart devpi
- name: writing supervisor script
template: >
src=devpi.conf.j2 dest={{ devpi_supervisor_cfg_dir }}/devpi.conf
owner={{ devpi_user }} group={{ devpi_user }} mode=0644
notify: devpi | restart devpi
notify: restart devpi
- name: create a symlink for venv python, pip
file: >
src="{{ devpi_venv_bin }}/{{ item }}"
dest={{ COMMON_BIN_DIR }}/{{ item }}.devpi
state=link
notify: devpi | restart devpi
notify: restart devpi
with_items:
- python
- pip
......
---
- name: discern | create supervisor scripts - discern, discern_celery
- name: create supervisor scripts - discern, discern_celery
template: >
src={{ item }}.conf.j2 dest={{ supervisor_cfg_dir }}/{{ item }}.conf
owner={{ supervisor_user }} mode=0644
......@@ -8,56 +8,56 @@
with_items: ['discern', 'discern_celery']
#Upload config files for django (auth and env)
- name: discern | create discern application config env.json file
- name: create discern application config env.json file
template: src=env.json.j2 dest={{ discern_app_dir }}/env.json
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
- restart discern
- name: discern | create discern auth file auth.json
- name: create discern auth file auth.json
template: src=auth.json.j2 dest={{ discern_app_dir }}/auth.json
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
- restart discern
- name: discern | git checkout discern repo into discern_code_dir
- name: git checkout discern repo into discern_code_dir
git: dest={{ discern_code_dir }} repo={{ discern_source_repo }} version={{ discern_version }}
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
- restart discern
- name: discern | git checkout ease repo into discern_ease_code_dir
- name: git checkout ease repo into discern_ease_code_dir
git: dest={{ discern_ease_code_dir}} repo={{ discern_ease_source_repo }} version={{ discern_ease_version }}
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
- restart discern
#Numpy has to be a pre-requirement in order for scipy to build
- name : discern | install python pre-requirements for discern and ease
- name : install python pre-requirements for discern and ease
pip: requirements={{item}} virtualenv={{ discern_venv_dir }} state=present
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
- restart discern
with_items:
- "{{ discern_pre_requirements_file }}"
- "{{ discern_ease_pre_requirements_file }}"
- name : discern | install python requirements for discern and ease
- name : install python requirements for discern and ease
pip: requirements={{item}} virtualenv={{ discern_venv_dir }} state=present
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
- restart discern
with_items:
- "{{ discern_post_requirements_file }}"
- "{{ discern_ease_post_requirements_file }}"
- name: discern | install ease python package
- name: install ease python package
shell: >
{{ discern_venv_dir }}/bin/activate; cd {{ discern_ease_code_dir }}; python setup.py install
notify:
- discern | restart discern
- restart discern
- name: discern | download and install nltk
- name: download and install nltk
shell: |
set -e
curl -o {{ discern_nltk_tmp_file }} {{ discern_nltk_download_url }}
......@@ -68,30 +68,30 @@
chdir={{ discern_data_dir }}
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
- restart discern
#Run this instead of using the ansible module because the ansible module only support syncdb of these three, and does not
#support virtualenvs as of this comment
- name: discern | django syncdb migrate and collectstatic for discern
- name: django syncdb migrate and collectstatic for discern
shell: >
{{ discern_venv_dir }}/bin/python {{discern_code_dir}}/manage.py {{item}} --noinput --settings={{discern_settings}} --pythonpath={{discern_code_dir}}
chdir={{ discern_code_dir }}
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
- restart discern
with_items:
- syncdb
- migrate
- collectstatic
#Have this separate from the other three because it doesn't take the noinput flag
- name: discern | django update_index for discern
- name: django update_index for discern
shell: >
{{ discern_venv_dir}}/bin/python {{discern_code_dir}}/manage.py update_index --settings={{discern_settings}} --pythonpath={{discern_code_dir}}
chdir={{ discern_code_dir }}
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
- restart discern
# call supervisorctl update. this reloads
......@@ -99,14 +99,14 @@
# the services if any of the configurations
# have changed.
#
- name: discern | update supervisor configuration
- name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
sudo_user: "{{ supervisor_service_user }}"
when: start_services
changed_when: supervisor_update.stdout != ""
- name: discern | ensure discern, discern_celery has started
- name: ensure discern, discern_celery has started
supervisorctl_local: >
name={{ item }}
supervisorctl_path={{ supervisor_ctl }}
......@@ -117,7 +117,7 @@
- discern
- discern_celery
- name: discern | create a symlink for venv python
- name: create a symlink for venv python
file: >
src="{{ discern_venv_bin }}/python"
dest={{ COMMON_BIN_DIR }}/python.discern
......
......@@ -6,7 +6,7 @@
createhome=no
shell=/bin/false
notify:
- discern | restart discern
- restart discern
- name: create discern app dirs owned by discern
file: >
......@@ -15,7 +15,7 @@
owner="{{ discern_user }}"
group="{{ common_web_group }}"
notify:
- discern | restart discern
- restart discern
with_items:
- "{{ discern_app_dir }}"
- "{{ discern_venvs_dir }}"
......@@ -26,18 +26,18 @@
owner="{{ common_web_user }}" group="{{ discern_user }}"
mode=0775
notify:
- discern | restart discern
- restart discern
- name: install debian packages that discern needs
apt: pkg={{ item }} state=present
notify:
- discern | restart discern
- restart discern
with_items: discern_debian_pkgs
- name: install debian packages for ease that discern needs
apt: pkg={{ item }} state=present
notify:
- discern | restart discern
- restart discern
with_items: discern_ease_debian_pkgs
- name: copy sudoers file for discern
......@@ -45,12 +45,12 @@
src=sudoers-discern dest=/etc/sudoers.d/discern
mode=0440 validate='visudo -cf %s' owner=root group=root
notify:
- discern | restart discern
- restart discern
#Needed if using redis to prevent memory issues
- name: change memory commit settings -- needed for redis
command: sysctl vm.overcommit_memory=1
notify:
- discern | restart discern
- restart discern
- include: deploy.yml tags=deploy
---
- name: edx_ansible | git checkout edx_ansible repo into edx_ansible_code_dir
- name: git checkout edx_ansible repo into edx_ansible_code_dir
git: dest={{ edx_ansible_code_dir }} repo={{ edx_ansible_source_repo }} version={{ configuration_version }}
sudo_user: "{{ edx_ansible_user }}"
- name : edx_ansible | install edx_ansible venv requirements
- name : install edx_ansible venv requirements
pip: requirements="{{ edx_ansible_requirements_file }}" virtualenv="{{ edx_ansible_venv_dir }}" state=present
sudo_user: "{{ edx_ansible_user }}"
- name: edx_ansible | create update script
- name: create update script
template: >
dest={{ edx_ansible_app_dir}}/update
src=update.j2 owner={{ edx_ansible_user }} group={{ edx_ansible_user }} mode=755
- name: edx_ansible | create a symlink for update.sh
- name: create a symlink for update.sh
file: >
src={{ edx_ansible_app_dir }}/update
dest={{ COMMON_BIN_DIR }}/update
state=link
- name: edx_ansible | dump all vars to yaml
- name: dump all vars to yaml
template: src=dumpall.yml.j2 dest={{ edx_ansible_var_file }} mode=0600
- name: edx_ansible | clean up var file, removing all version vars
- name: clean up var file, removing all version vars
shell: sed -i -e "/{{item}}/d" {{ edx_ansible_var_file }}
with_items:
- edx_platform_version
......@@ -37,10 +37,10 @@
- ease_version
- certs_version
- name: edx_ansible | remove the special _original_file var
- name: remove the special _original_file var
shell: sed -i -e "/_original_file/d" {{ edx_ansible_var_file }}
- name: edxapp | create a symlink for var file
- name: create a symlink for var file
file: >
src={{ edx_ansible_var_file }}
dest={{ COMMON_CFG_DIR }}/{{ edx_ansible_var_file|basename }}
......
......@@ -7,24 +7,24 @@
- name: Install logrotate configuration for tracking file
template: dest=/etc/logrotate.d/tracking.log src=edx_logrotate_tracking_log.j2 owner=root group=root mode=644
notify:
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
- "restart edxapp"
- "restart edxapp_workers"
- name: create application user
user: >
name="{{ edxapp_user }}" home="{{ edxapp_app_dir }}"
createhome=no shell=/bin/false
notify:
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
- "restart edxapp"
- "restart edxapp_workers"
- name: create edxapp user dirs
file: >
path="{{ item }}" state=directory
owner="{{ edxapp_user }}" group="{{ common_web_group }}"
notify:
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
- "restart edxapp"
- "restart edxapp_workers"
with_items:
- "{{ edxapp_app_dir }}"
- "{{ edxapp_data_dir }}"
......@@ -37,8 +37,8 @@
path="{{ edxapp_log_dir }}" state=directory
owner="{{ common_log_user }}" group="{{ common_log_user }}"
notify:
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
- "restart edxapp"
- "restart edxapp_workers"
- name: create web-writable edxapp data dirs
file: >
......@@ -46,8 +46,8 @@
owner="{{ common_web_user }}" group="{{ edxapp_user }}"
mode="0775"
notify:
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
- "restart edxapp"
- "restart edxapp_workers"
with_items:
- "{{ edxapp_course_data_dir }}"
- "{{ edxapp_upload_dir }}"
......@@ -55,13 +55,13 @@
- name: install system packages on which LMS and CMS rely
apt: pkg={{','.join(edxapp_debian_pkgs)}} state=present
notify:
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
- "restart edxapp"
- "restart edxapp_workers"
- name: create log directories for service variants
notify:
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
- "restart edxapp"
- "restart edxapp_workers"
file: >
path={{ edxapp_log_dir }}/{{ item }} state=directory
owner={{ common_log_user }} group={{ common_log_user }}
......
- name: edxapp | code sandbox | Create edxapp sandbox user
- name: code sandbox | Create edxapp sandbox user
user: name={{ edxapp_sandbox_user }} shell=/bin/false home={{ edxapp_sandbox_venv_dir }}
notify:
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
- "restart edxapp"
- "restart edxapp_workers"
tags:
- edxapp-sandbox
- name: edxapp | code sandbox | Install apparmor utils system pkg
- name: code sandbox | Install apparmor utils system pkg
apt: pkg=apparmor-utils state=present
notify:
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
- "restart edxapp"
- "restart edxapp_workers"
tags:
- edxapp-sandbox
- name: edxapp | code sandbox | write out apparmor code sandbox config
- name: code sandbox | write out apparmor code sandbox config
template: src=code.sandbox.j2 dest=/etc/apparmor.d/code.sandbox mode=0644 owner=root group=root
notify:
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
- "restart edxapp"
- "restart edxapp_workers"
tags:
- edxapp-sandbox
- name: edxapp | code sandbox | write out sandbox user sudoers config
- name: code sandbox | write out sandbox user sudoers config
template: src=95-sandbox-sudoer.j2 dest=/etc/sudoers.d/95-{{ edxapp_sandbox_user }} mode=0440 owner=root group=root validate='visudo -c -f %s'
notify:
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
- "restart edxapp"
- "restart edxapp_workers"
tags:
- edxapp-sandbox
# we bootstrap and enable the apparmor service here. in deploy.yml we disable, deploy, then re-enable
# so we need to enable it in main.yml
- name: edxapp | code sandbox | start apparmor service
- name: code sandbox | start apparmor service
service: name=apparmor state=started
notify:
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
- "restart edxapp"
- "restart edxapp_workers"
tags:
- edxapp-sandbox
- name: edxapp | code sandbox | (bootstrap) load code sandbox profile
- name: code sandbox | (bootstrap) load code sandbox profile
command: apparmor_parser -r /etc/apparmor.d/code.sandbox
notify:
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
- "restart edxapp"
- "restart edxapp_workers"
tags:
- edxapp-sandbox
- name: edxapp | code sandbox | (bootstrap) put code sandbox into aa-enforce or aa-complain mode depending on EDXAPP_SANDBOX_ENFORCE
- name: code sandbox | (bootstrap) put code sandbox into aa-enforce or aa-complain mode depending on EDXAPP_SANDBOX_ENFORCE
command: /usr/sbin/{{ edxapp_aa_command }} /etc/apparmor.d/code.sandbox
notify:
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
- "restart edxapp"
- "restart edxapp_workers"
tags:
- edxapp-sandbox
......@@ -5,8 +5,8 @@
sudo_user: "{{ edxapp_user }}"
with_items: service_variants_enabled
notify:
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
- "restart edxapp"
- "restart edxapp_workers"
- name: "create {{ item }} auth file"
template: >
......@@ -14,8 +14,8 @@
dest={{ edxapp_app_dir }}/{{ item }}.auth.json
sudo_user: "{{ edxapp_user }}"
notify:
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
- "restart edxapp"
- "restart edxapp_workers"
with_items: service_variants_enabled
# write the supervisor scripts for the service variants
......@@ -28,7 +28,7 @@
when: celery_worker is not defined and not devstack
sudo_user: "{{ supervisor_user }}"
- name: edxapp | writing edxapp supervisor script
- name: writing edxapp supervisor script
template: >
src=edxapp.conf.j2 dest={{ supervisor_cfg_dir }}/edxapp.conf
owner={{ supervisor_user }}
......@@ -37,7 +37,7 @@
# write the supervisor script for celery workers
- name: edxapp | writing celery worker supervisor script
- name: writing celery worker supervisor script
template: >
src=workers.conf.j2 dest={{ supervisor_cfg_dir }}/workers.conf
owner={{ supervisor_user }}
......@@ -47,7 +47,7 @@
# Gather assets using rake if possible
- name: edxapp | gather {{ item }} static assets with rake
- name: gather {{ item }} static assets with rake
shell: >
SERVICE_VARIANT={{ item }} rake {{ item }}:gather_assets:aws
executable=/bin/bash
......@@ -56,23 +56,23 @@
when: celery_worker is not defined and not devstack and item != "lms-preview"
with_items: service_variants_enabled
notify:
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
- "restart edxapp"
- "restart edxapp_workers"
environment: "{{ edxapp_environment }}"
- name: edxapp | syncdb and migrate
- name: syncdb and migrate
shell: SERVICE_VARIANT=lms {{ edxapp_venv_bin}}/django-admin.py syncdb --migrate --noinput --settings=lms.envs.aws --pythonpath={{ edxapp_code_dir }}
when: migrate_db is defined and migrate_db|lower == "yes"
sudo_user: "{{ edxapp_user }}"
notify:
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
- "restart edxapp"
- "restart edxapp_workers"
- name: edxapp | db migrate
- name: db migrate
shell: SERVICE_VARIANT=lms {{ edxapp_venv_bin }}/django-admin.py migrate --noinput --settings=lms.envs.aws --pythonpath={{ edxapp_code_dir }}
when: migrate_only is defined and migrate_only|lower == "yes"
notify:
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
- "restart edxapp"
- "restart edxapp_workers"
sudo_user: "{{ edxapp_user }}"
---
- name: forum | create the supervisor config
- name: create the supervisor config
template: >
src=forum.conf.j2 dest={{ supervisor_cfg_dir }}/forum.conf
owner={{ supervisor_user }}
......@@ -9,41 +9,41 @@
when: not devstack
register: forum_supervisor
- name: forum | create the supervisor wrapper
- name: create the supervisor wrapper
template: >
src={{ forum_supervisor_wrapper|basename }}.j2
dest={{ forum_supervisor_wrapper }}
mode=0755
sudo_user: "{{ forum_user }}"
when: not devstack
notify: forum | restart the forum service
notify: restart the forum service
- name: forum | git checkout forum repo into {{ forum_code_dir }}
- name: git checkout forum repo into {{ forum_code_dir }}
git: dest={{ forum_code_dir }} repo={{ forum_source_repo }} version={{ forum_version }}
sudo_user: "{{ forum_user }}"
notify: forum | restart the forum service
notify: restart the forum service
# TODO: This is done as the common_web_user
# since the process owner needs write access
# to the rbenv
- name: forum | install comments service bundle
- name: install comments service bundle
shell: bundle install chdir={{ forum_code_dir }}
sudo_user: "{{ common_web_user }}"
environment: "{{ forum_environment }}"
notify: forum | restart the forum service
notify: restart the forum service
# call supervisorctl update. this reloads
# the supervisorctl config and restarts
# the services if any of the configurations
# have changed.
#
- name: forum | update supervisor configuration
- name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
changed_when: supervisor_update.stdout != ""
when: start_services and not devstack
- name: forum | ensure forum is started
- name: ensure forum is started
supervisorctl_local: >
name=forum
supervisorctl_path={{ supervisor_ctl }}
......
......@@ -26,13 +26,13 @@
name="{{ forum_user }}" home="{{ forum_app_dir }}"
createhome=no
shell=/bin/false
notify: forum | restart the forum service
notify: restart the forum service
- name: create forum app dir
file: >
path="{{ forum_app_dir }}" state=directory
owner="{{ forum_user }}" group="{{ common_web_group }}"
notify: forum | restart the forum service
notify: restart the forum service
- name: setup the forum env
template: >
......@@ -40,7 +40,7 @@
owner={{ forum_user }} group={{ common_web_user }}
mode=0644
notify:
- forum | restart the forum service
- restart the forum service
- include: deploy.yml tags=deploy
---
- name: forum | test that the required service are listening
- name: test that the required service are listening
wait_for: port={{ item.port }} host={{ item.host }} timeout=30
with_items: forum_services
when: not devstack
- name: forum | test that mongo replica set members are listening
- name: test that mongo replica set members are listening
wait_for: port={{ FORUM_MONGO_PORT }} host={{ item }} timeout=30
with_items: FORUM_MONGO_HOSTS
when: not devstack
......@@ -10,7 +10,7 @@
#
#
# Tasks for role haproxy
#
#
# Overview:
# Installs and configures haproxy for load balancing.
# HAProxy doesn't currently support included configuration
......@@ -19,24 +19,24 @@
- name: Install haproxy
apt: pkg=haproxy state={{ pkgs.haproxy.state }}
notify: haproxy | restart haproxy
notify: restart haproxy
- name: Server configuration file
template: >
src={{ haproxy_template_dir }}/haproxy.cfg.j2 dest=/etc/haproxy/haproxy.cfg
owner=root group=root mode=0644
notify: haproxy | reload haproxy
notify: reload haproxy
- name: Enabled in default
lineinfile: dest=/etc/default/haproxy regexp=^ENABLED=.$ line=ENABLED=1
notify: haproxy | restart haproxy
notify: restart haproxy
- name: install logrotate
template: src=haproxy.logrotate.j2 dest=/etc/logrotate.d/haproxy mode=0644
- name: install rsyslog conf
template: src=haproxy.rsyslog.j2 dest=/etc/rsyslog.d/haproxy.conf mode=0644
notify: haproxy | restart rsyslog
notify: restart rsyslog
- name: make sure haproxy has started
service: name=haproxy state=started
......@@ -49,7 +49,7 @@
file: src={{ jenkins_home }} dest=/var/lib/jenkins state=link
owner={{ jenkins_user }} group={{ jenkins_group }}
notify:
- jenkins_master | restart Jenkins
- restart Jenkins
- name: make plugins directory
sudo_user: jenkins
......@@ -72,7 +72,7 @@
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
with_items: jenkins_plugins
notify:
- jenkins_master | restart Jenkins
- restart Jenkins
# We had to fork some plugins to workaround
# certain issues. If these changes get merged
......@@ -91,7 +91,7 @@
{{ jenkins_home }}/plugins/{{ item.package }}
with_items: jenkins_custom_plugins
notify:
- jenkins_master | restart Jenkins
- restart Jenkins
- name: set custom plugin permissions
file: path={{ jenkins_home }}/plugins/{{ item.package }}
......@@ -118,4 +118,4 @@
src=/etc/nginx/sites-available/jenkins
dest=/etc/nginx/sites-enabled/jenkins
state=link
notify: jenkins_master | start nginx
notify: start nginx
---
- name: jenkins_worker | Install Java
- name: Install Java
apt: pkg=openjdk-7-jre-headless state=present
- name: jenkins_worker | Download JSCover
- name: Download JSCover
get_url: url={{ jscover_url }} dest=/var/tmp/jscover.zip
- name: jenkins_worker | Unzip JSCover
- name: Unzip JSCover
shell: unzip /var/tmp/jscover.zip -d /var/tmp/jscover
creates=/var/tmp/jscover
- name: jenkins_worker | Install JSCover JAR
- name: Install JSCover JAR
command: cp /var/tmp/jscover/target/dist/JSCover-all.jar /usr/local/bin/JSCover-all-{{ jscover_version }}.jar
creates=/usr/local/bin/JSCover-all-{{ jscover_version }}.jar
- name: jenkins_worker | Set JSCover permissions
- name: Set JSCover permissions
file: path="/usr/local/bin/JSCover-all-{{ jscover_version }}.jar" state=file
owner=root group=root mode=0755
---
# Install scripts requiring a GitHub OAuth token
- name: jenkins_worker | Install requests Python library
- name: Install requests Python library
pip: name=requests state=present
- fail: jenkins_worker | OAuth token not defined
- fail: OAuth token not defined
when: github_oauth_token is not defined
- name: jenkins_worker | Install Python GitHub PR auth script
- name: Install Python GitHub PR auth script
template: src="github_pr_auth.py.j2" dest="/usr/local/bin/github_pr_auth.py"
owner=root group=root
mode=755
- name: jenkins_worker | Install Python GitHub post status script
- name: Install Python GitHub post status script
template: src="github_post_status.py.j2" dest="/usr/local/bin/github_post_status.py"
owner=root group=root
mode=755
# Create wheelhouse to enable fast virtualenv creation
- name: jenkins_worker | Create wheel virtualenv
- name: Create wheel virtualenv
command: /usr/local/bin/virtualenv {{ jenkins_venv }} creates={{ jenkins_venv }}
sudo_user: "{{ jenkins_user }}"
- name: jenkins_worker | Install wheel
- name: Install wheel
pip: name=wheel virtualenv={{ jenkins_venv }} virtualenv_command=/usr/local/bin/virtualenv
sudo_user: "{{ jenkins_user }}"
- name: jenkins_worker | Create wheelhouse dir
- name: Create wheelhouse dir
file:
path={{ jenkins_wheel_dir }} state=directory
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
# (need to install each one in the venv to satisfy dependencies)
- name: jenkins_worker | Create wheel archives
- name: Create wheel archives
shell:
"{{ jenkins_pip }} wheel --wheel-dir={{ jenkins_wheel_dir }} \"${item.pkg}\" &&
{{ jenkins_pip }} install --use-wheel --no-index --find-links={{ jenkins_wheel_dir }} \"${item.pkg}\"
......@@ -40,7 +40,7 @@
sudo_user: "{{ jenkins_user }}"
with_items: jenkins_wheels
- name: jenkins_worker | Add wheel_venv.sh script
- name: Add wheel_venv.sh script
template:
src=wheel_venv.sh.j2 dest={{ jenkins_home }}/wheel_venv.sh
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
---
- name: jenkins_worker | Create jenkins group
- name: Create jenkins group
group: name={{ jenkins_group }} state=present
# The Jenkins account needs a login shell because Jenkins uses scp
- name: jenkins_worker | Add the jenkins user to the group and configure shell
- name: Add the jenkins user to the group and configure shell
user: name={{ jenkins_user }} append=yes group={{ jenkins_group }} shell=/bin/bash
# Because of a bug in the latest release of the EC2 plugin
# we need to use a key generated by Amazon (not imported)
# To satisfy this, we allow users to log in as Jenkins
# using the same keypair the instance was started with.
- name: jenkins_worker | Create .ssh directory
- name: Create .ssh directory
file:
path={{ jenkins_home }}/.ssh state=directory
owner={{ jenkins_user }} group={{ jenkins_group }}
ignore_errors: yes
- name: jenkins_worker | Copy ssh keys for jenkins
- name: Copy ssh keys for jenkins
command: cp /home/ubuntu/.ssh/authorized_keys /home/{{ jenkins_user }}/.ssh/authorized_keys
ignore_errors: yes
- name: jenkins_worker | Set key permissions
- name: Set key permissions
file:
path={{ jenkins_home }}/.ssh/authorized_keys
owner={{ jenkins_user }} group={{ jenkins_group }} mode=400
ignore_errors: yes
- name: jenkins_worker | Install system packages
- name: Install system packages
apt: pkg={{','.join(jenkins_debian_pkgs)}}
state=present update_cache=yes
- name: jenkins_worker | Add script to set up environment variables
- name: Add script to set up environment variables
template:
src=jenkins_env.j2 dest={{ jenkins_home }}/jenkins_env
owner={{ jenkins_user }} group={{ jenkins_group }} mode=0500
# Need to add Github to known_hosts to avoid
# being prompted when using git through ssh
- name: jenkins_worker | Add github.com to known_hosts if it does not exist
- name: Add github.com to known_hosts if it does not exist
shell: >
ssh-keygen -f {{ jenkins_home }}/.ssh/known_hosts -H -F github.com | grep -q found || ssh-keyscan -H github.com > {{ jenkins_home }}/.ssh/known_hosts
......@@ -12,7 +12,7 @@
- "{{ nginx_app_dir }}"
- "{{ nginx_sites_available_dir }}"
- "{{ nginx_sites_enabled_dir }}"
notify: nginx | restart nginx
notify: restart nginx
- name: create nginx data dirs
file: >
......@@ -23,36 +23,36 @@
with_items:
- "{{ nginx_data_dir }}"
- "{{ nginx_log_dir }}"
notify: nginx | restart nginx
notify: restart nginx
- name: Install nginx packages
apt: pkg={{','.join(nginx_debian_pkgs)}} state=present
notify: nginx | restart nginx
notify: restart nginx
- name: Server configuration file
template: >
src=nginx.conf.j2 dest=/etc/nginx/nginx.conf
owner=root group={{ common_web_user }} mode=0644
notify: nginx | reload nginx
notify: reload nginx
- name: Creating common nginx configuration
template: >
src=edx-release.j2 dest={{ nginx_sites_available_dir }}/edx-release
owner=root group=root mode=0600
notify: nginx | reload nginx
notify: reload nginx
- name: Creating link for common nginx configuration
file: >
src={{ nginx_sites_available_dir }}/edx-release
dest={{ nginx_sites_enabled_dir }}/edx-release
state=link owner=root group=root
notify: nginx | reload nginx
notify: reload nginx
- name: Copying nginx configs for {{ nginx_sites }}
template: >
src={{ item }}.j2 dest={{ nginx_sites_available_dir }}/{{ item }}
owner=root group={{ common_web_user }} mode=0640
notify: nginx | reload nginx
notify: reload nginx
with_items: nginx_sites
- name: Creating nginx config links for {{ nginx_sites }}
......@@ -60,7 +60,7 @@
src={{ nginx_sites_available_dir }}/{{ item }}
dest={{ nginx_sites_enabled_dir }}/{{ item }}
state=link owner=root group=root
notify: nginx | reload nginx
notify: reload nginx
with_items: nginx_sites
- name: Write out htpasswd file
......@@ -93,7 +93,7 @@
# removing default link
- name: Removing default nginx config and restart (enabled)
file: path={{ nginx_sites_enabled_dir }}/default state=absent
notify: nginx | reload nginx
notify: reload nginx
# Note that nginx logs to /var/log until it reads its configuration, so /etc/logrotate.d/nginx is still good
......
---
- name: notifier | checkout code
- name: checkout code
git:
dest={{ NOTIFIER_CODE_DIR }} repo={{ NOTIFIER_SOURCE_REPO }}
version={{ NOTIFIER_VERSION }}
sudo: true
sudo_user: "{{ NOTIFIER_USER }}"
notify:
- notifier | restart notifier-scheduler
- notifier | restart notifier-celery-workers
- restart notifier-scheduler
- restart notifier-celery-workers
- name: notifier | source repo group perms
- name: source repo group perms
file:
path={{ NOTIFIER_SOURCE_REPO }} mode=2775 state=directory
- name: notifier | install application requirements
- name: install application requirements
pip:
requirements="{{ NOTIFIER_REQUIREMENTS_FILE }}"
virtualenv="{{ NOTIFIER_VENV_DIR }}" state=present
sudo: true
sudo_user: "{{ NOTIFIER_USER }}"
notify:
- notifier | restart notifier-scheduler
- notifier | restart notifier-celery-workers
- restart notifier-scheduler
- restart notifier-celery-workers
# Syncdb for whatever reason always creates the file owned by www-data:www-data, and then
# complains it can't write because it's running as notifier. So this is to touch the file into
# place with proper perms first.
- name: notifier | fix permissions on notifer db file
- name: fix permissions on notifer db file
file: >
path={{ NOTIFIER_DB_DIR }}/notifier.db state=touch owner={{ NOTIFIER_USER }} group={{ NOTIFIER_WEB_USER }}
mode=0664
sudo: true
notify:
- notifier | restart notifier-scheduler
- notifier | restart notifier-celery-workers
- restart notifier-scheduler
- restart notifier-celery-workers
tags:
- deploy
- name: notifier | syncdb
- name: syncdb
shell: >
cd {{ NOTIFIER_CODE_DIR }} && {{ NOTIFIER_VENV_DIR }}/bin/python manage.py syncdb
sudo: true
sudo_user: "{{ NOTIFIER_USER }}"
environment: notifier_env_vars
notify:
- notifier | restart notifier-scheduler
- notifier | restart notifier-celery-workers
- restart notifier-scheduler
- restart notifier-celery-workers
......@@ -90,13 +90,13 @@
src=edx/app/supervisor/conf.d/notifier-celery-workers.conf.j2
dest="{{ supervisor_cfg_dir }}/notifier-celery-workers.conf"
sudo_user: "{{ supervisor_user }}"
notify: notifier | restart notifier-celery-workers
notify: restart notifier-celery-workers
- name: supervisord config for scheduler
template: >
src=edx/app/supervisor/conf.d/notifier-scheduler.conf.j2
dest="{{ supervisor_cfg_dir }}/notifier-scheduler.conf"
sudo_user: "{{ supervisor_user }}"
notify: notifier | restart notifier-scheduler
notify: restart notifier-scheduler
- include: deploy.yml tags=deploy
- name: ora | create supervisor scripts - ora, ora_celery
- name: create supervisor scripts - ora, ora_celery
template: >
src={{ item }}.conf.j2 dest={{ supervisor_cfg_dir }}/{{ item }}.conf
owner={{ supervisor_user }} group={{ common_web_user }} mode=0644
notify:
- ora | restart ora
- ora | restart ora_celery
- restart ora
- restart ora_celery
with_items: ['ora', 'ora_celery']
when: not devstack
- include: ease.yml
- name: ora | create ora application config
- name: create ora application config
template: src=ora.env.json.j2 dest={{ora_app_dir}}/ora.env.json
sudo_user: "{{ ora_user }}"
- name: ora | create ora auth file
- name: create ora auth file
template: src=ora.auth.json.j2 dest={{ora_app_dir}}/ora.auth.json
sudo_user: "{{ ora_user }}"
- name: ora | setup the ora env
- name: setup the ora env
notify:
- "ora | restart ora"
- "ora | restart ora_celery"
- "restart ora"
- "restart ora_celery"
template: >
src=ora_env.j2 dest={{ ora_app_dir }}/ora_env
owner={{ ora_user }} group={{ common_web_user }}
mode=0644
# Do A Checkout
- name: ora | git checkout ora repo into {{ ora_app_dir }}
- name: git checkout ora repo into {{ ora_app_dir }}
git: dest={{ ora_code_dir }} repo={{ ora_source_repo }} version={{ ora_version }}
sudo_user: "{{ ora_user }}"
notify:
- ora | restart ora
- ora | restart ora_celery
- restart ora
- restart ora_celery
# TODO: Check git.py _run_if_changed() to see if the logic there to skip running certain
# portions of the deploy needs to be incorporated here.
# Install the python pre requirements into {{ ora_venv_dir }}
- name: ora | install python pre-requirements
- name: install python pre-requirements
pip: requirements="{{ ora_pre_requirements_file }}" virtualenv="{{ ora_venv_dir }}" state=present
sudo_user: "{{ ora_user }}"
notify:
- ora | restart ora
- ora | restart ora_celery
- restart ora
- restart ora_celery
# Install the python post requirements into {{ ora_venv_dir }}
- name: ora | install python post-requirements
- name: install python post-requirements
pip: requirements="{{ ora_post_requirements_file }}" virtualenv="{{ ora_venv_dir }}" state=present
sudo_user: "{{ ora_user }}"
notify:
- ora | restart ora
- ora | restart ora_celery
- restart ora
- restart ora_celery
#Needed if using redis to prevent memory issues
- name: ora | change memory commit settings -- needed for redis
- name: change memory commit settings -- needed for redis
command: sysctl vm.overcommit_memory=1
notify:
- ora | restart ora
- ora | restart ora_celery
- restart ora
- restart ora_celery
- name: ora | syncdb and migrate
- name: syncdb and migrate
shell: SERVICE_VARIANT=ora {{ora_venv_dir}}/bin/django-admin.py syncdb --migrate --noinput --settings=edx_ora.aws --pythonpath={{ora_code_dir}}
when: migrate_db is defined and migrate_db|lower == "yes"
sudo_user: "{{ ora_user }}"
notify:
- ora | restart ora
- ora | restart ora_celery
- restart ora
- restart ora_celery
- name: ora | create users
- name: create users
shell: SERVICE_VARIANT=ora {{ora_venv_dir}}/bin/django-admin.py update_users --settings=edx_ora.aws --pythonpath={{ora_code_dir}}
sudo_user: "{{ ora_user }}"
notify:
- ora | restart ora
- ora | restart ora_celery
- restart ora
- restart ora_celery
# call supervisorctl update. this reloads
......@@ -83,13 +83,13 @@
# the services if any of the configurations
# have changed.
#
- name: ora | update supervisor configuration
- name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
when: start_services and not devstack
changed_when: supervisor_update.stdout != ""
- name: ora | ensure ora is started
- name: ensure ora is started
supervisorctl_local: >
name=ora
supervisorctl_path={{ supervisor_ctl }}
......@@ -97,7 +97,7 @@
state=started
when: start_services and not devstack
- name: ora | ensure ora_celery is started
- name: ensure ora_celery is started
supervisorctl_local: >
name=ora_celery
supervisorctl_path={{ supervisor_ctl }}
......
# Do A Checkout
- name: ora | git checkout ease repo into its base dir
- name: git checkout ease repo into its base dir
git: dest={{ora_ease_code_dir}} repo={{ora_ease_source_repo}} version={{ora_ease_version}}
sudo_user: "{{ ora_user }}"
notify:
- ora | restart ora
- ora | restart ora_celery
- restart ora
- restart ora_celery
- name: ora | install ease system packages
- name: install ease system packages
apt: pkg={{item}} state=present
with_items: ora_ease_debian_pkgs
notify:
- ora | restart ora
- ora | restart ora_celery
- restart ora
- restart ora_celery
# Install the python pre requirements into {{ ora_ease_venv_dir }}
- name: ora | install ease python pre-requirements
- name: install ease python pre-requirements
pip: requirements="{{ora_ease_pre_requirements_file}}" virtualenv="{{ora_ease_venv_dir}}" state=present
sudo_user: "{{ ora_user }}"
notify:
- ora | restart ora
- ora | restart ora_celery
- restart ora
- restart ora_celery
# Install the python post requirements into {{ ora_ease_venv_dir }}
- name: ora | install ease python post-requirements
- name: install ease python post-requirements
pip: requirements="{{ora_ease_post_requirements_file}}" virtualenv="{{ora_ease_venv_dir}}" state=present
sudo_user: "{{ ora_user }}"
notify:
- ora | restart ora
- ora | restart ora_celery
- restart ora
- restart ora_celery
- name: ora | install ease python package
- name: install ease python package
shell: >
. {{ ora_ease_venv_dir }}/bin/activate; cd {{ ora_ease_code_dir }}; python setup.py install
sudo_user: "{{ ora_user }}"
notify:
- ora | restart ora
- ora | restart ora_celery
- restart ora
- restart ora_celery
- name: ora | download and install nltk
- name: download and install nltk
shell: |
set -e
curl -o {{ ora_nltk_tmp_file }} {{ ora_nltk_download_url }}
......@@ -49,5 +49,5 @@
chdir={{ ora_data_dir }}
sudo_user: "{{ common_web_user }}"
notify:
- ora | restart ora
- ora | restart ora_celery
- restart ora
- restart ora_celery
......@@ -8,16 +8,16 @@
name="{{ ora_user }}" home="{{ ora_app_dir }}"
createhome=no shell=/bin/false
notify:
- ora | restart ora
- ora | restart ora_celery
- restart ora
- restart ora_celery
- name: create ora app dir
file: >
path="{{ item }}" state=directory
owner="{{ ora_user }}" group="{{ common_web_group }}"
notify:
- ora | restart ora
- ora | restart ora_celery
- restart ora
- restart ora_celery
with_items:
- "{{ ora_venvs_dir }}"
- "{{ ora_app_dir }}"
......@@ -27,8 +27,8 @@
path="{{ item }}" state=directory
owner="{{ common_web_user }}" group="{{ common_web_group }}"
notify:
- ora | restart ora
- ora | restart ora_celery
- restart ora
- restart ora_celery
with_items:
- "{{ ora_data_dir }}"
- "{{ ora_data_course_dir }}"
......@@ -37,15 +37,15 @@
- name: install debian packages that ora needs
apt: pkg={{item}} state=present
notify:
- ora | restart ora
- ora | restart ora_celery
- restart ora
- restart ora_celery
with_items: ora_debian_pkgs
- name: install debian packages for ease that ora needs
apt: pkg={{item}} state=present
notify:
- ora | restart ora
- ora | restart ora_celery
- restart ora
- restart ora_celery
with_items: ora_ease_debian_pkgs
- include: deploy.yml tags=deploy
......
......@@ -25,13 +25,13 @@
#
# The role would need to include tasks like the following
#
# - name: my_role | create s3fs mount points
# - name: create s3fs mount points
# file:
# path={{ item.mount_point }} owner={{ item.owner }}
# group={{ item.group }} mode={{ item.mode }} state="directory"
# with_items: my_role_s3fs_mounts
#
# - name: my_role | mount s3 buckets
# - name: mount s3 buckets
# mount:
# name={{ item.mount_point }} src={{ item.bucket }} fstype=fuse.s3fs
# opts=use_cache=/tmp,iam_role={{ task_iam_role }},allow_other state=mounted
......
......@@ -9,7 +9,7 @@
- libshibsp-doc
- libapache2-mod-shib2
- opensaml2-tools
notify: shibboleth | restart shibd
notify: restart shibd
tags:
- shib
- install
......@@ -31,7 +31,7 @@
with_items:
- key
- pem
notify: shibboleth | restart shibd
notify: restart shibd
tags:
- shib
- install
......@@ -41,15 +41,15 @@
with_items:
- attribute-map.xml
- shibboleth2.xml
notify: shibboleth | restart shibd
notify: restart shibd
tags:
- shib
- install
- name: enables shib
command: a2enmod shib2
notify: shibboleth | restart shibd
notify: restart shibd
tags:
- shib
- install
......@@ -10,13 +10,13 @@
#
#
# Tasks for role splunk
#
#
# Overview:
#
#
#
# Dependencies:
#
#
#
# Example play:
#
#
......@@ -62,27 +62,27 @@
- name: create boot script
shell: >
{{splunkforwarder_output_dir}}/bin/splunk enable boot-start -user splunk --accept-license --answer-yes --no-prompt
creates=/etc/init.d/splunk
creates=/etc/init.d/splunk
register: create_boot_script
when: download_deb.changed
notify: splunkforwarder | restart splunkforwarder
notify: restart splunkforwarder
# Update credentials
- name: update admin pasword
shell: "{{splunkforwarder_output_dir}}/bin/splunk edit user admin -password {{SPLUNKFORWARDER_PASSWORD}} -auth admin:changeme --accept-license --answer-yes --no-prompt"
when: download_deb.changed
notify: splunkforwarder | restart splunkforwarder
notify: restart splunkforwarder
- name: add chkconfig to init script
shell: 'sed -i -e "s/\/bin\/sh/\/bin\/sh\n# chkconfig: 235 98 55/" /etc/init.d/splunk'
when: download_deb.changed and create_boot_script.changed
notify: splunkforwarder | restart splunkforwarder
notify: restart splunkforwarder
# Ensure permissions on splunk content
- name: ensure splunk forder permissions
file: path={{splunkforwarder_output_dir}} state=directory recurse=yes owner=splunk group=splunk
when: download_deb.changed
notify: splunkforwarder | restart splunkforwarder
notify: restart splunkforwarder
# Drop template files.
- name: drop input configuration
......@@ -92,7 +92,7 @@
owner=splunk
group=splunk
mode=644
notify: splunkforwarder | restart splunkforwarder
notify: restart splunkforwarder
- name: create outputs config file
template:
......@@ -101,4 +101,4 @@
owner=splunk
group=splunk
mode=644
notify: splunkforwarder | restart splunkforwarder
notify: restart splunkforwarder
- name: "xqueue | writing supervisor scripts - xqueue, xqueue consumer"
- name: "writing supervisor scripts - xqueue, xqueue consumer"
template: >
src={{ item }}.conf.j2 dest={{ supervisor_cfg_dir }}/{{ item }}.conf
owner={{ supervisor_user }} group={{ common_web_user }} mode=0644
with_items: ['xqueue', 'xqueue_consumer']
- name: xqueue | create xqueue application config
- name: create xqueue application config
template: src=xqueue.env.json.j2 dest={{ xqueue_app_dir }}/xqueue.env.json mode=0644
sudo_user: "{{ xqueue_user }}"
notify:
- xqueue | restart xqueue
- restart xqueue
- name: xqueue | create xqueue auth file
- name: create xqueue auth file
template: src=xqueue.auth.json.j2 dest={{ xqueue_app_dir }}/xqueue.auth.json mode=0644
sudo_user: "{{ xqueue_user }}"
notify:
- xqueue | restart xqueue
- restart xqueue
# Do A Checkout
- name: xqueue | git checkout xqueue repo into xqueue_code_dir
- name: git checkout xqueue repo into xqueue_code_dir
git: dest={{ xqueue_code_dir }} repo={{ xqueue_source_repo }} version={{ xqueue_version }}
sudo_user: "{{ xqueue_user }}"
notify:
- xqueue | restart xqueue
- restart xqueue
# Install the python pre requirements into {{ xqueue_venv_dir }}
- name : xqueue | install python pre-requirements
- name : install python pre-requirements
pip: requirements="{{ xqueue_pre_requirements_file }}" virtualenv="{{ xqueue_venv_dir }}" state=present
sudo_user: "{{ xqueue_user }}"
notify:
- xqueue | restart xqueue
- restart xqueue
# Install the python post requirements into {{ xqueue_venv_dir }}
- name : xqueue | install python post-requirements
- name : install python post-requirements
pip: requirements="{{ xqueue_post_requirements_file }}" virtualenv="{{ xqueue_venv_dir }}" state=present
sudo_user: "{{ xqueue_user }}"
notify:
- xqueue | restart xqueue
- restart xqueue
- name: xqueue | syncdb and migrate
- name: syncdb and migrate
shell: >
SERVICE_VARIANT=xqueue {{ xqueue_venv_bin }}/django-admin.py syncdb --migrate --noinput --settings=xqueue.aws_settings --pythonpath={{ xqueue_code_dir }}
when: migrate_db is defined and migrate_db|lower == "yes"
sudo_user: "{{ xqueue_user }}"
notify:
- xqueue | restart xqueue
- restart xqueue
- name: xqueue | create users
- name: create users
shell: >
SERVICE_VARIANT=xqueue {{ xqueue_venv_bin }}/django-admin.py update_users --settings=xqueue.aws_settings --pythonpath={{ xqueue_code_dir }}
sudo_user: "{{ xqueue_user }}"
notify:
- xqueue | restart xqueue
- restart xqueue
# call supervisorctl update. this reloads
# the supervisorctl config and restarts
# the services if any of the configurations
# have changed.
#
- name: xqueue | update supervisor configuration
- name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
changed_when: supervisor_update.stdout != ""
when: start_services
- name: xqueue | ensure xqueue, consumer is running
- name: ensure xqueue, consumer is running
supervisorctl_local: >
name={{ item }}
supervisorctl_path={{ supervisor_ctl }}
......
......@@ -13,7 +13,7 @@
createhome=no
shell=/bin/false
notify:
- xqueue | restart xqueue
- restart xqueue
- name: create xqueue app and venv dir
file: >
......@@ -22,7 +22,7 @@
owner="{{ xqueue_user }}"
group="{{ common_web_group }}"
notify:
- xqueue | restart xqueue
- restart xqueue
with_items:
- "{{ xqueue_app_dir }}"
- "{{ xqueue_venvs_dir }}"
......@@ -30,7 +30,7 @@
- name: install a bunch of system packages on which xqueue relies
apt: pkg={{','.join(xqueue_debian_pkgs)}} state=present
notify:
- xqueue | restart xqueue
- restart xqueue
- name: create xqueue db
mysql_db: >
......@@ -41,7 +41,7 @@
state=present
encoding=utf8
notify:
- xqueue | restart xqueue
- restart xqueue
when: xqueue_create_db is defined and xqueue_create_db|lower == "yes"
- include: deploy.yml tags=deploy
......
- name: "xserver | writing supervisor script"
- name: "writing supervisor script"
template: >
src=xserver.conf.j2 dest={{ supervisor_cfg_dir }}/xserver.conf
owner={{ supervisor_user }} group={{ common_web_user }} mode=0644
- name: xserver | checkout code
- name: checkout code
git: dest={{xserver_code_dir}} repo={{xserver_source_repo}} version={{xserver_version}}
sudo_user: "{{ xserver_user }}"
notify: xserver | restart xserver
notify: restart xserver
- name: xserver | install requirements
- name: install requirements
pip: requirements="{{xserver_requirements_file}}" virtualenv="{{ xserver_venv_dir }}" state=present
sudo_user: "{{ xserver_user }}"
notify: xserver | restart xserver
notify: restart xserver
- name: xserver | install sandbox requirements
- name: install sandbox requirements
pip: requirements="{{xserver_requirements_file}}" virtualenv="{{xserver_venv_sandbox_dir}}" state=present
sudo_user: "{{ xserver_user }}"
notify: xserver | restart xserver
notify: restart xserver
- name: xserver | create xserver application config
- name: create xserver application config
template: src=xserver.env.json.j2 dest={{ xserver_app_dir }}/env.json
sudo_user: "{{ xserver_user }}"
notify: xserver | restart xserver
notify: restart xserver
- name: xserver | install read-only ssh key for the content repo that is required for grading
- name: install read-only ssh key for the content repo that is required for grading
copy: >
src={{ XSERVER_LOCAL_GIT_IDENTITY }} dest={{ xserver_git_identity }}
owner={{ xserver_user }} group={{ xserver_user }} mode=0600
notify: xserver | restart xserver
notify: restart xserver
- name: xserver | upload ssh script
- name: upload ssh script
template: >
src=git_ssh.sh.j2 dest=/tmp/git_ssh.sh
owner={{ xserver_user }} mode=750
notify: xserver | restart xserver
notify: restart xserver
- name: xserver | checkout grader code
- name: checkout grader code
git: dest={{ XSERVER_GRADER_DIR }} repo={{ XSERVER_GRADER_SOURCE }} version={{ xserver_grader_version }}
environment:
GIT_SSH: /tmp/git_ssh.sh
notify: xserver | restart xserver
notify: restart xserver
sudo_user: "{{ xserver_user }}"
- name: xserver | remove read-only ssh key for the content repo
- name: remove read-only ssh key for the content repo
file: path={{ xserver_git_identity }} state=absent
notify: xserver | restart xserver
notify: restart xserver
# call supervisorctl update. this reloads
# the supervisorctl config and restarts
# the services if any of the configurations
# have changed.
#
- name: xserver | update supervisor configuration
- name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
when: start_services
changed_when: supervisor_update.stdout != ""
- name: xserver | ensure xserver is started
- name: ensure xserver is started
supervisorctl_local: >
name=xserver
supervisorctl_path={{ supervisor_ctl }}
......@@ -65,7 +65,7 @@
state=started
when: start_services
- name: xserver | create a symlink for venv python
- name: create a symlink for venv python
file: >
src="{{ xserver_venv_bin }}/{{ item }}"
dest={{ COMMON_BIN_DIR }}/{{ item }}.xserver
......@@ -74,5 +74,5 @@
- python
- pip
- name: xserver | enforce app-armor rules
- name: enforce app-armor rules
command: aa-enforce {{ xserver_venv_sandbox_dir }}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment