Commit 1a5f7364 by Brian Beggs Committed by Feanil Patel

Update playbooks to new format

parent 0326e11f
......@@ -13,25 +13,27 @@
keyfile: "/home/{{ owner }}/.ssh/authorized_keys"
serial: "{{ serial_count }}"
tasks:
- fail: msg="You must pass in a public_key"
- fail:
msg: "You must pass in a public_key"
when: public_key is not defined
- fail: msg="public does not exist in secrets"
- fail:
msg: "public does not exist in secrets"
when: ubuntu_public_keys[public_key] is not defined
- command: mktemp
register: mktemp
- name: Validate the public key before we add it to authorized_keys
copy: >
content="{{ ubuntu_public_keys[public_key] }}"
dest={{ mktemp.stdout }}
copy:
content: "{{ ubuntu_public_keys[public_key] }}"
dest: "{{ mktemp.stdout }}"
# This tests the public key and will not continue if it does not look valid
- command: ssh-keygen -l -f {{ mktemp.stdout }}
- file: >
path={{ mktemp.stdout }}
state=absent
- lineinfile: >
dest={{ keyfile }}
line="{{ ubuntu_public_keys[public_key] }}"
- file: >
path={{ keyfile }}
owner={{ owner }}
mode=0600
- file:
path: "{{ mktemp.stdout }}"
state: absent
- lineinfile:
dest: "{{ keyfile }}"
line: "{{ ubuntu_public_keys[public_key] }}"
- file:
path: "{{ keyfile }}"
owner: "{{ owner }}"
mode: 0600
......@@ -13,9 +13,9 @@
# is called it will use the new MYSQL connection
# info.
- name: Update RDS to point to the sandbox clone
lineinfile: >
dest=/edx/app/edx_ansible/server-vars.yml
line="{{ item }}"
lineinfile:
dest: /edx/app/edx_ansible/server-vars.yml
line: "{{ item }}"
with_items:
- "EDXAPP_MYSQL_HOST: {{ EDXAPP_MYSQL_HOST }}"
- "EDXAPP_MYSQL_DB_NAME: {{ EDXAPP_MYSQL_DB_NAME }}"
......@@ -24,9 +24,9 @@
tags: update_edxapp_mysql_host
- name: Update mongo to point to the sandbox mongo clone
lineinfile: >
dest=/edx/app/edx_ansible/server-vars.yml
line="{{ item }}"
lineinfile:
dest: /edx/app/edx_ansible/server-vars.yml
line: "{{ item }}"
with_items:
- "EDXAPP_MONGO_HOSTS: {{ EDXAPP_MONGO_HOSTS }}"
- "EDXAPP_MONGO_DB_NAME: {{ EDXAPP_MONGO_DB_NAME }}"
......@@ -35,6 +35,5 @@
tags: update_edxapp_mysql_host
- name: call update on edx-platform
shell: >
/edx/bin/update edx-platform {{ edxapp_version }}
shell: "/edx/bin/update edx-platform {{ edxapp_version }}"
tags: update_edxapp_mysql_host
......@@ -53,26 +53,26 @@
- MySQL-python
- name: create mysql databases
mysql_db: >
db={{ item.name}}
state={{ item.state }}
encoding={{ item.encoding }}
login_host={{ item.login_host }}
login_user={{ item.login_user }}
login_password={{ item.login_password }}
mysql_db:
db: "{{ item.name}}"
state: "{{ item.state }}"
encoding: "{{ item.encoding }}"
login_host: "{{ item.login_host }}"
login_user: "{{ item.login_user }}"
login_password: "{{ item.login_password }}"
with_items: databases
tags:
- dbs
- name: create mysql users and assign privileges
mysql_user: >
name="{{ item.name }}"
priv="{{ '/'.join(item.privileges) }}"
password="{{ item.password }}"
host={{ item.host }}
login_host={{ item.login_host }}
login_user={{ item.login_user }}
login_password={{ item.login_password }}
mysql_user:
name: "{{ item.name }}"
priv: "{{ '/'.join(item.privileges) }}"
password: "{{ item.password }}"
host: "{{ item.host }}"
login_host: "{{ item.login_host }}"
login_user: "{{ item.login_user }}"
login_password: "{{ item.login_password }}"
append_privs: yes
with_items: database_users
tags:
......
......@@ -47,10 +47,10 @@
elb: false
pre_tasks:
- name: Wait for cloud-init to finish
wait_for: >
path=/var/log/cloud-init.log
timeout=15
search_regex="final-message"
wait_for:
path: /var/log/cloud-init.log
timeout: 15
search_regex: "final-message"
vars_files:
- roles/edxapp/defaults/main.yml
- roles/xqueue/defaults/main.yml
......
......@@ -8,9 +8,9 @@
- edxapp
tasks:
- name: migrate lms
shell: >
chdir={{ edxapp_code_dir }}
python manage.py lms migrate --database {{ item }} --noinput {{ db_dry_run }} --settings=aws
shell: "python manage.py lms migrate --database {{ item }} --noinput {{ db_dry_run }} --settings=aws"
args:
chdir: "{{ edxapp_code_dir }}"
environment:
DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}"
DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
......@@ -21,9 +21,9 @@
tags:
- always
- name: migrate cms
shell: >
chdir={{ edxapp_code_dir }}
python manage.py cms migrate --database {{ item }} --noinput {{ db_dry_run }} --settings=aws
shell: "python manage.py cms migrate --database {{ item }} --noinput {{ db_dry_run }} --settings=aws"
args:
chdir: "{{ edxapp_code_dir }}"
environment:
DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}"
DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
......
......@@ -14,11 +14,11 @@
- name: stop certs service
service: name="certificates" state="stopped"
- name: checkout code
git_2_0_1: >
repo="{{ repo_url }}"
dest="{{ repo_path }}"
version="{{ certificates_version }}"
accept_hostkey=yes
git_2_0_1:
repo: "{{ repo_url }}"
dest: "{{ repo_path }}"
version: "{{ certificates_version }}"
accept_hostkey: yes
environment:
GIT_SSH: "{{ git_ssh_script }}"
- name: install requirements
......@@ -29,11 +29,11 @@
# Need to do this because the www-data user is not properly setup
# and can't run ssh.
- name: change owner to www-data
file: >
path="{{ repo_path }}"
owner="www-data"
group="www-data"
recurse=yes
state="directory"
file:
path: "{{ repo_path }}"
owner: "www-data"
group: "www-data"
recurse: yes
state: "directory"
- name: start certs service
service: name="certificates" state="started"
......@@ -46,9 +46,7 @@
dest: "{{ xblock_config_temp_directory.stdout }}/{{ file | basename }}"
register: xblock_config_file
- name: Manage xblock configurations
shell: >
{{ python_path }} {{ manage_path }} lms --settings=aws
populate_model -f {{ xblock_config_file.dest | quote }} -u {{ user }}
shell: "{{ python_path }} {{ manage_path }} lms --settings=aws populate_model -f {{ xblock_config_file.dest | quote }} -u {{ user }}"
register: command_result
changed_when: "'Import complete, 0 new entries created' not in command_result.stdout"
- debug: msg="{{ command_result.stdout }}"
......
......@@ -17,22 +17,21 @@
register: mktemp
# This command will fail if this returns zero lines which will prevent
# the last key from being removed
- shell: >
grep -Fv '{{ ubuntu_public_keys[public_key] }}' {{ keyfile }} > {{ mktemp.stdout }}
- shell: >
while read line; do ssh-keygen -lf /dev/stdin <<<$line; done <{{ mktemp.stdout }}
executable=/bin/bash
- shell: "grep -Fv '{{ ubuntu_public_keys[public_key] }}' {{ keyfile }} > {{ mktemp.stdout }}"
- shell: "while read line; do ssh-keygen -lf /dev/stdin <<<$line; done <{{ mktemp.stdout }}"
args:
executable: /bin/bash
register: keycheck
- fail: msg="public key check failed!"
when: keycheck.stderr != ""
- command: cp {{ mktemp.stdout }} {{ keyfile }}
- file: >
path={{ keyfile }}
owner={{ owner }}
mode=0600
- file: >
path={{ mktemp.stdout }}
state=absent
- file:
path: "{{ keyfile }}"
owner: "{{ owner }}"
mode: 0600
- file:
path: "{{ mktemp.stdout }}"
state: absent
- shell: wc -l < {{ keyfile }}
register: line_count
- fail: msg="There should only be one line in ubuntu's authorized_keys"
......
......@@ -7,6 +7,6 @@
- roles/supervisor/defaults/main.yml
tasks:
- name: supervisor | restart supervisor
service: >
name={{ supervisor_service }}
state=restarted
service:
name: "{{ supervisor_service }}"
state: restarted
......@@ -12,8 +12,8 @@
- name: Set hostname
hostname: name={{ hostname_fqdn.split('.')[0] }}
- name: Update /etc/hosts
lineinfile: >
dest=/etc/hosts
regexp="^127\.0\.1\.1"
line="127.0.1.1{{'\t'}}{{ hostname_fqdn.split('.')[0] }}{{'\t'}}{{ hostname_fqdn }}{{'\t'}}localhost"
state=present
lineinfile:
dest: /etc/hosts
regexp: '^127\.0\.1\.1'
line: "127.0.1.1{{'\t'}}{{ hostname_fqdn.split('.')[0] }}{{'\t'}}{{ hostname_fqdn }}{{'\t'}}localhost"
state: present
......@@ -33,30 +33,30 @@
#
- name: setup the analytics_api env file
template: >
src="edx/app/analytics_api/analytics_api_env.j2"
dest="{{ analytics_api_home }}/analytics_api_env"
owner={{ analytics_api_user }}
group={{ analytics_api_user }}
mode=0644
template:
src: "edx/app/analytics_api/analytics_api_env.j2"
dest: "{{ analytics_api_home }}/analytics_api_env"
owner: "{{ analytics_api_user }}"
group: "{{ analytics_api_user }}"
mode: 0644
tags:
- install
- install:configuration
- name: "add gunicorn configuration file"
template: >
src=edx/app/analytics_api/analytics_api_gunicorn.py.j2
dest={{ analytics_api_home }}/analytics_api_gunicorn.py
template:
src: edx/app/analytics_api/analytics_api_gunicorn.py.j2
dest: "{{ analytics_api_home }}/analytics_api_gunicorn.py"
become_user: "{{ analytics_api_user }}"
tags:
- install
- install:configuration
- name: install application requirements
pip: >
requirements="{{ analytics_api_requirements_base }}/{{ item }}"
virtualenv="{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}"
state=present
pip:
requirements: "{{ analytics_api_requirements_base }}/{{ item }}"
virtualenv: "{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}"
state: present
become_user: "{{ analytics_api_user }}"
with_items: analytics_api_requirements
tags:
......@@ -64,11 +64,9 @@
- install:app-requirements
- name: migrate
shell: >
chdir={{ analytics_api_code_dir }}
DB_MIGRATION_USER='{{ COMMON_MYSQL_MIGRATE_USER }}'
DB_MIGRATION_PASS='{{ COMMON_MYSQL_MIGRATE_PASS }}'
{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/python ./manage.py migrate --noinput
shell: "DB_MIGRATION_USER='{{ COMMON_MYSQL_MIGRATE_USER }}' DB_MIGRATION_PASS='{{ COMMON_MYSQL_MIGRATE_PASS }}' {{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/python ./manage.py migrate --noinput"
args:
chdir: "{{ analytics_api_code_dir }}"
become_user: "{{ analytics_api_user }}"
environment: "{{ analytics_api_environment }}"
when: migrate_db is defined and migrate_db|lower == "yes"
......@@ -77,9 +75,9 @@
- migrate:db
- name: run collectstatic
shell: >
chdir={{ analytics_api_code_dir }}
{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/python manage.py collectstatic --noinput
shell: "{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/python manage.py collectstatic --noinput"
args:
chdir: "{{ analytics_api_code_dir }}"
become_user: "{{ analytics_api_user }}"
environment: "{{ analytics_api_environment }}"
tags:
......@@ -87,9 +85,9 @@
- assets:gather
- name: create api users
shell: >
chdir={{ analytics_api_code_dir }}
{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/python manage.py set_api_key {{ item.key }} {{ item.value }}
shell: "{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/python manage.py set_api_key {{ item.key }} {{ item.value }}"
args:
chdir: "{{ analytics_api_code_dir }}"
become_user: "{{ analytics_api_user }}"
environment: "{{ analytics_api_environment }}"
with_dict: ANALYTICS_API_USERS
......@@ -98,29 +96,32 @@
- manage:app-users
- name: write out the supervisor wrapper
template: >
src=edx/app/analytics_api/analytics_api.sh.j2
dest={{ analytics_api_home }}/{{ analytics_api_service_name }}.sh
mode=0650 owner={{ supervisor_user }} group={{ common_web_user }}
template:
src: edx/app/analytics_api/analytics_api.sh.j2
dest: "{{ analytics_api_home }}/{{ analytics_api_service_name }}.sh"
mode: 0650
owner: "{{ supervisor_user }}"
group: "{{ common_web_user }}"
tags:
- install
- install:configuration
- name: write supervisord config
template: >
src=edx/app/supervisor/conf.d.available/analytics_api.conf.j2
dest="{{ supervisor_available_dir }}/{{ analytics_api_service_name }}.conf"
owner={{ supervisor_user }} group={{ common_web_user }} mode=0644
template:
src: edx/app/supervisor/conf.d.available/analytics_api.conf.j2
dest: "{{ supervisor_available_dir }}/{{ analytics_api_service_name }}.conf"
owner: "{{ supervisor_user }}"
group: "{{ common_web_user }}"
mode: 0644
tags:
- install
- install:configuration
- name: enable supervisor script
file: >
src={{ supervisor_available_dir }}/{{ analytics_api_service_name }}.conf
dest={{ supervisor_cfg_dir }}/{{ analytics_api_service_name }}.conf
state=link
force=yes
file:
src: "{{ supervisor_available_dir }}/{{ analytics_api_service_name }}.conf"
dest: "{{ supervisor_cfg_dir }}/{{ analytics_api_service_name }}.conf"
state: link
force: yes
when: not disable_edx_services
tags:
- install
......@@ -134,10 +135,10 @@
- manage:start
- name: create symlinks from the venv bin dir
file: >
src="{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/{{ item }}"
dest="{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.analytics_api"
state=link
file:
src: "{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/{{ item }}"
dest: "{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.analytics_api"
state: link
with_items:
- python
- pip
......@@ -147,10 +148,10 @@
- install:base
- name: create symlinks from the repo dir
file: >
src="{{ analytics_api_code_dir }}/{{ item }}"
dest="{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.analytics_api"
state=link
file:
src: "{{ analytics_api_code_dir }}/{{ item }}"
dest: "{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.analytics_api"
state: link
with_items:
- manage.py
tags:
......@@ -158,11 +159,11 @@
- install:base
- name: restart analytics_api
supervisorctl: >
state=restarted
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
name={{ analytics_api_service_name }}
supervisorctl:
state: restarted
supervisorctl_path: "{{ supervisor_ctl }}"
config: "{{ supervisor_cfg }}"
name: "{{ analytics_api_service_name }}"
when: not disable_edx_services
become_user: "{{ supervisor_service_user }}"
tags:
......
......@@ -173,20 +173,24 @@
- manage:start
- name: Copying nginx configs for {{ role_name }}
template: >
src=edx/app/nginx/sites-available/{{ role_name }}.j2
dest={{ '{{' }} nginx_sites_available_dir }}/{{ role_name }}
owner=root group={{ '{{' }} common_web_user }} mode=0640
template:
src: edx/app/nginx/sites-available/{{ role_name }}.j2
dest: "{{ '{{' }} nginx_sites_available_dir }}/{{ role_name }}"
owner: root
group: "{{ '{{' }} common_web_user }}"
mode: 0640
notify: reload nginx
tags:
- install
- install:vhosts
- name: Creating nginx config links for {{ role_name }}
file: >
src={{ '{{' }} nginx_sites_available_dir }}/{{ role_name }}
dest={{ '{{' }} nginx_sites_enabled_dir }}/{{ role_name }}
state=link owner=root group=root
file:
src: "{{ '{{' }} nginx_sites_available_dir }}/{{ role_name }}"
dest: "{{ '{{' }} nginx_sites_enabled_dir }}/{{ role_name }}"
state: link
owner: root
group: root
notify: reload nginx
tags:
- install
......
......@@ -26,38 +26,38 @@
with_items: antivirus_debian_pkgs
- name: create antivirus scanner user
user: >
name="{{ antivirus_user }}"
home="{{ antivirus_app_dir }}"
createhome=no
shell=/bin/false
user:
name: "{{ antivirus_user }}"
home: "{{ antivirus_app_dir }}"
createhome: no
shell: /bin/false
- name: create antivirus app and data dirs
file: >
path="{{ item }}"
state=directory
owner="{{ antivirus_user }}"
group="{{ antivirus_user }}"
file:
path: "{{ item }}"
state: directory
owner: "{{ antivirus_user }}"
group: "{{ antivirus_user }}"
with_items:
- "{{ antivirus_app_dir }}"
- "{{ antivirus_app_dir }}/data"
- name: install antivirus s3 scanner script
template: >
src=s3_bucket_virus_scan.sh.j2
dest={{ antivirus_app_dir }}/s3_bucket_virus_scan.sh
template:
src: s3_bucket_virus_scan.sh.j2
dest: "{{ antivirus_app_dir }}/s3_bucket_virus_scan.sh"
mode: 0555
owner={{ antivirus_user }}
group={{ antivirus_user }}
owner: "{{ antivirus_user }}"
group: "{{ antivirus_user }}"
- name: install antivirus s3 scanner cronjob
cron: >
name="antivirus-{{ item }}"
job="{{ antivirus_app_dir }}/s3_bucket_virus_scan.sh -b '{{ item }}' -m '{{ ANTIVIRUS_MAILTO }}' -f '{{ ANTIVIRUS_MAILFROM }}'"
backup=yes
cron_file=antivirus-{{ item }}
user={{ antivirus_user }}
hour="*"
minute="0"
day="*"
cron:
name: "antivirus-{{ item }}"
job: "{{ antivirus_app_dir }}/s3_bucket_virus_scan.sh -b '{{ item }}' -m '{{ ANTIVIRUS_MAILTO }}' -f '{{ ANTIVIRUS_MAILFROM }}'"
backup: yes
cron_file: "antivirus-{{ item }}"
user: "{{ antivirus_user }}"
hour: "*"
minute: "0"
day: "*"
with_items: ANTIVIRUS_BUCKETS
......@@ -102,7 +102,7 @@
file:
path: "{{ item.item }}"
mode: "0644"
when: >
when:
vagrant_home_dir.stat.exists == false and
ansible_distribution in common_debian_variants and
item.stat.exists
......
# Install browsermob-proxy, which is used for page performance testing with bok-choy
---
- name: get zip file
get_url: >
url={{ browsermob_proxy_url }}
dest=/var/tmp/browsermob-proxy-{{ browsermob_proxy_version }}.zip
get_url:
url: "{{ browsermob_proxy_url }}"
dest: "/var/tmp/browsermob-proxy-{{ browsermob_proxy_version }}.zip"
register: download_browsermob_proxy
- name: unzip into /var/tmp/
shell: >
unzip /var/tmp/browsermob-proxy-{{ browsermob_proxy_version }}.zip
shell: "unzip /var/tmp/browsermob-proxy-{{ browsermob_proxy_version }}.zip"
args:
chdir: /var/tmp
when: download_browsermob_proxy.changed
- name: move to /etc/browsermob-proxy/
shell: >
mv /var/tmp/browsermob-proxy-{{ browsermob_proxy_version }} /etc/browsermob-proxy
shell: "mv /var/tmp/browsermob-proxy-{{ browsermob_proxy_version }} /etc/browsermob-proxy"
when: download_browsermob_proxy.changed
- name: change permissions of main script
file: >
path=/etc/browsermob-proxy/bin/browsermob-proxy
mode=0755
file:
path: "/etc/browsermob-proxy/bin/browsermob-proxy"
mode: 0755
when: download_browsermob_proxy.changed
- name: add wrapper script /usr/local/bin/browsermob-proxy
copy: >
src=browsermob-proxy
dest=/usr/local/bin/browsermob-proxy
copy:
src: browsermob-proxy
dest: /usr/local/bin/browsermob-proxy
when: download_browsermob_proxy.changed
- name: change permissions of wrapper script
file: >
path=/usr/local/bin/browsermob-proxy
mode=0755
file:
path: /usr/local/bin/browsermob-proxy
mode: 0755
when: download_browsermob_proxy.changed
......@@ -50,15 +50,15 @@
- "chromedriver.stat.mode == '0755'"
- name: download PhantomJS
get_url: >
url={{ phantomjs_url }}
dest=/var/tmp/{{ phantomjs_tarfile }}
get_url:
url: "{{ phantomjs_url }}"
dest: "/var/tmp/{{ phantomjs_tarfile }}"
register: download_phantom_js
- name: unpack the PhantomJS tarfile
shell: >
tar -xjf /var/tmp/{{ phantomjs_tarfile }}
chdir=/var/tmp
shell: "tar -xjf /var/tmp/{{ phantomjs_tarfile }}"
args:
chdir: "/var/tmp"
when: download_phantom_js.changed
- name: move PhantomJS binary to /usr/local
......
......@@ -43,9 +43,9 @@
- install:app-requirements
- name: create nodeenv
shell: >
creates={{ credentials_nodeenv_dir }}
{{ credentials_venv_dir }}/bin/nodeenv {{ credentials_nodeenv_dir }} --prebuilt
shell: "{{ credentials_venv_dir }}/bin/nodeenv {{ credentials_nodeenv_dir }} --prebuilt"
args:
creates: "{{ credentials_nodeenv_dir }}"
become_user: "{{ credentials_user }}"
tags:
- install
......@@ -74,9 +74,12 @@
# var should have more permissive permissions than the rest
- name: create credentials var dirs
file: >
path="{{ item }}" state=directory mode=0775
owner="{{ credentials_user }}" group="{{ common_web_group }}"
file:
path: "{{ item }}"
state: directory
mode: 0775
owner: "{{ credentials_user }}"
group: "{{ common_web_group }}"
with_items:
- "{{ CREDENTIALS_MEDIA_ROOT }}"
tags:
......@@ -192,20 +195,23 @@
- manage:start
- name: Copying nginx configs for credentials
template: >
src=edx/app/nginx/sites-available/credentials.j2
dest={{ nginx_sites_available_dir }}/credentials
owner=root group={{ common_web_user }} mode=0640
template:
src: edx/app/nginx/sites-available/credentials.j2
dest: "{{ nginx_sites_available_dir }}/credentials"
owner: root
group: "{{ common_web_user }}"
mode: 0640
notify: reload nginx
tags:
- install
- install:vhosts
- name: Creating nginx config links for credentials
file: >
src={{ nginx_sites_available_dir }}/credentials
dest={{ nginx_sites_enabled_dir }}/credentials
state=link owner=root group=root
file:
src: "{{ nginx_sites_available_dir }}/credentials"
dest: "{{ nginx_sites_enabled_dir }}/credentials"
state: link
owner: root
group: root
notify: reload nginx
tags:
- install
......
---
- name: check out the demo course
git_2_0_1: >
dest={{ demo_code_dir }} repo={{ demo_repo }} version={{ demo_version }}
accept_hostkey=yes
git_2_0_1:
dest: "{{ demo_code_dir }}"
repo: "{{ demo_repo }}"
version: "{{ demo_version }}"
accept_hostkey: yes
become_user: "{{ demo_edxapp_user }}"
register: demo_checkout
- name: import demo course
shell: >
{{ demo_edxapp_venv_bin }}/python ./manage.py cms --settings=aws import {{ demo_edxapp_course_data_dir }} {{ demo_code_dir }}
chdir={{ demo_edxapp_code_dir }}
shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py cms --settings=aws import {{ demo_edxapp_course_data_dir }} {{ demo_code_dir }}"
args:
chdir: "{{ demo_edxapp_code_dir }}"
become_user: "{{ common_web_user }}"
when: demo_checkout.changed
- name: create some test users
shell: >
{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms manage_user {{ item.username}} {{ item.email }} --initial-password-hash {{ item.hashed_password | quote }}
chdir={{ demo_edxapp_code_dir }}
shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms manage_user {{ item.username}} {{ item.email }} --initial-password-hash {{ item.hashed_password | quote }}"
args:
chdir: "{{ demo_edxapp_code_dir }}"
become_user: "{{ common_web_user }}"
with_items: demo_test_users
when: demo_checkout.changed
- name: create staff user
shell: >
{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms manage_user staff staff@example.com --initial-password-hash {{ demo_hashed_password | quote }} --staff
chdir={{ demo_edxapp_code_dir }}
shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms manage_user staff staff@example.com --initial-password-hash {{ demo_hashed_password | quote }} --staff"
args:
chdir: "{{ demo_edxapp_code_dir }}"
become_user: "{{ common_web_user }}"
when:
- demo_checkout.changed
- DEMO_CREATE_STAFF_USER
- name: enroll test users in the demo course
shell: >
{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms enroll_user_in_course -e {{ item.email }} -c {{ demo_course_id }}
chdir={{ demo_edxapp_code_dir }}
shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms enroll_user_in_course -e {{ item.email }} -c {{ demo_course_id }}"
args:
chdir: "{{ demo_edxapp_code_dir }}"
become_user: "{{ common_web_user }}"
with_items:
- "{{ demo_test_users }}"
......@@ -43,15 +45,15 @@
- name: add test users to the certificate whitelist
shell: >
{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms cert_whitelist -a {{ item.email }} -c {{ demo_course_id }}
chdir={{ demo_edxapp_code_dir }}
shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms cert_whitelist -a {{ item.email }} -c {{ demo_course_id }}"
args:
chdir: "{{ demo_edxapp_code_dir }}"
with_items: demo_test_users
when: demo_checkout.changed
- name: seed the forums for the demo course
shell: >
{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws seed_permissions_roles {{ demo_course_id }}
chdir={{ demo_edxapp_code_dir }}
shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws seed_permissions_roles {{ demo_course_id }}"
args:
chdir: "{{ demo_edxapp_code_dir }}"
with_items: demo_test_users
when: demo_checkout.changed
......@@ -31,8 +31,10 @@
# - demo
- name: create demo app and data dirs
file: >
path="{{ demo_app_dir }}" state=directory
owner="{{ demo_edxapp_user }}" group="{{ common_web_group }}"
file:
path: "{{ demo_app_dir }}"
state: directory
owner: "{{ demo_edxapp_user }}"
group: "{{ common_web_group }}"
- include: deploy.yml tags=deploy
......@@ -77,9 +77,9 @@
- devstack:install
- name: create nodeenv
shell: >
creates={{ discovery_nodeenv_dir }}
{{ discovery_venv_dir }}/bin/nodeenv {{ discovery_nodeenv_dir }} --node={{ discovery_node_version }} --prebuilt
shell: "{{ discovery_venv_dir }}/bin/nodeenv {{ discovery_nodeenv_dir }} --node={{ discovery_node_version }} --prebuilt"
args:
creates: "{{ discovery_nodeenv_dir }}"
become_user: "{{ discovery_user }}"
tags:
- install
......@@ -94,9 +94,9 @@
- install:app-requirements
- name: install bower dependencies
shell: >
chdir={{ discovery_code_dir }}
. {{ discovery_nodeenv_bin }}/activate && {{ discovery_node_bin }}/bower install --production --config.interactive=false
shell: ". {{ discovery_nodeenv_bin }}/activate && {{ discovery_node_bin }}/bower install --production --config.interactive=false"
args:
chdir: "{{ discovery_code_dir }}"
become_user: "{{ discovery_user }}"
tags:
- install
......
......@@ -84,11 +84,9 @@
- migrate:db
- name: Populate countries
shell: >
chdir={{ ecommerce_code_dir }}
DB_MIGRATION_USER={{ COMMON_MYSQL_MIGRATE_USER }}
DB_MIGRATION_PASS={{ COMMON_MYSQL_MIGRATE_PASS }}
{{ ecommerce_venv_dir }}/bin/python ./manage.py oscar_populate_countries
shell: "DB_MIGRATION_USER={{ COMMON_MYSQL_MIGRATE_USER }} DB_MIGRATION_PASS={{ COMMON_MYSQL_MIGRATE_PASS }} {{ ecommerce_venv_dir }}/bin/python ./manage.py oscar_populate_countries"
args:
chdir: "{{ ecommerce_code_dir }}"
become_user: "{{ ecommerce_user }}"
environment: "{{ ecommerce_environment }}"
when: migrate_db is defined and migrate_db|lower == "yes"
......
......@@ -110,10 +110,10 @@
- install:app-requirements
- name: Create the virtualenv to install the Python requirements
command: >
virtualenv {{ edxapp_venv_dir }}
chdir={{ edxapp_code_dir }}
creates={{ edxapp_venv_dir }}/bin/pip
command: "virtualenv {{ edxapp_venv_dir }}"
args:
chdir: "{{ edxapp_code_dir }}"
creates: "{{ edxapp_venv_dir }}/bin/pip"
become_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}"
tags:
......@@ -134,9 +134,9 @@
# Need to use command rather than pip so that we can maintain the context of our current working directory; some
# requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
# installs everything into that virtual environment.
command: >
{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item.item }}
chdir={{ edxapp_code_dir }}
command: "{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item.item }}"
args:
chdir: "{{ edxapp_code_dir }}"
become_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}"
when: item.stat.exists
......@@ -151,9 +151,9 @@
# Need to use shell rather than pip so that we can maintain the context of our current working directory; some
# requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
# installs everything into that virtual environment.
shell: >
{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item }}
chdir={{ edxapp_code_dir }}
shell: "{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item }}"
args:
chdir: "{{ edxapp_code_dir }}"
with_items:
- "{{ private_requirements_file }}"
become_user: "{{ edxapp_user }}"
......@@ -197,9 +197,9 @@
# Need to use shell rather than pip so that we can maintain the context of our current working directory; some
# requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
# installs everything into that virtual environment.
shell: >
{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item }}
chdir={{ edxapp_code_dir }}
shell: "{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item }}"
args:
chdir: "{{ edxapp_code_dir }}"
with_items:
- "{{ sandbox_base_requirements }}"
- "{{ sandbox_local_requirements }}"
......@@ -211,8 +211,7 @@
- install:app-requirements
- name: create nodeenv
shell: >
{{ edxapp_venv_dir }}/bin/nodeenv {{ edxapp_nodeenv_dir }} --node={{ edxapp_node_version }} --prebuilt
shell: "{{ edxapp_venv_dir }}/bin/nodeenv {{ edxapp_nodeenv_dir }} --node={{ edxapp_node_version }} --prebuilt"
args:
creates: "{{ edxapp_nodeenv_dir }}"
tags:
......@@ -223,8 +222,7 @@
# This needs to be done as root since npm is weird about
# chown - https://github.com/npm/npm/issues/3565
- name: Set the npm registry
shell: >
npm config set registry '{{ COMMON_NPM_MIRROR_URL }}'
shell: "npm config set registry '{{ COMMON_NPM_MIRROR_URL }}'"
args:
creates: "{{ edxapp_app_dir }}/.npmrc"
environment: "{{ edxapp_environment }}"
......@@ -279,9 +277,9 @@
- install:app-requirements
- name: code sandbox | Install sandbox requirements into sandbox venv
shell: >
{{ edxapp_sandbox_venv_dir }}/bin/pip install -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item }}
chdir={{ edxapp_code_dir }}
shell: "{{ edxapp_sandbox_venv_dir }}/bin/pip install -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item }}"
args:
chdir: "{{ edxapp_code_dir }}"
with_items:
- "{{ sandbox_local_requirements }}"
- "{{ sandbox_post_requirements }}"
......
......@@ -21,31 +21,28 @@
#
#
- name: download elasticsearch plugin
shell: >
./npi fetch {{ ELASTICSEARCH_MONITOR_PLUGIN }} -y
shell: "./npi fetch {{ ELASTICSEARCH_MONITOR_PLUGIN }} -y"
args:
chdir: "{{ NEWRELIC_NPI_PREFIX }}"
creates: "{{ NEWRELIC_NPI_PREFIX }}/plugins/{{ ELASTICSEARCH_MONITOR_PLUGIN }}.compressed"
become_user: "{{ NEWRELIC_USER }}"
- name: prepare elasticsearch plugin
shell: >
./npi prepare {{ ELASTICSEARCH_MONITOR_PLUGIN }} -n
shell: "./npi prepare {{ ELASTICSEARCH_MONITOR_PLUGIN }} -n"
args:
chdir: "{{ NEWRELIC_NPI_PREFIX }}"
become_user: "{{ NEWRELIC_USER }}"
- name: configure elasticsearch plugin
template: >
src=plugins/me.snov.newrelic-elasticsearch/newrelic-elasticsearch-plugin-1.4.1/config/plugin.json.j2
dest={{ NEWRELIC_NPI_PREFIX }}/plugins/{{ ELASTICSEARCH_MONITOR_PLUGIN }}/newrelic-elasticsearch-plugin-1.4.1/config/plugin.json
owner={{ NEWRELIC_USER }}
mode=0644
template:
src: "plugins/me.snov.newrelic-elasticsearch/newrelic-elasticsearch-plugin-1.4.1/config/plugin.json.j2"
dest: "{{ NEWRELIC_NPI_PREFIX }}/plugins/{{ ELASTICSEARCH_MONITOR_PLUGIN }}/newrelic-elasticsearch-plugin-1.4.1/config/plugin.json"
owner: "{{ NEWRELIC_USER }}"
mode: 0644
- name: register/start elasticsearch plugin
shell: >
./npi add-service {{ ELASTICSEARCH_MONITOR_PLUGIN }} --start --user={{ NEWRELIC_USER }}
args:
chdir: "{{ NEWRELIC_NPI_PREFIX }}"
shell: "./npi add-service {{ ELASTICSEARCH_MONITOR_PLUGIN }} --start --user={{ NEWRELIC_USER }}"
args:
chdir: "{{ NEWRELIC_NPI_PREFIX }}"
become_user: "root"
......@@ -33,33 +33,35 @@
with_items: gh_mirror_pip_pkgs
- name: install debian packages
apt: >
pkg={{ ",".join(gh_mirror_debian_pkgs) }}
state=present
update_cache=yes
apt:
pkg: "{{ ','.join(gh_mirror_debian_pkgs) }}"
state: present
update_cache: yes
- name: create gh_mirror user
user: >
name={{ gh_mirror_user }}
state=present
user:
name: "{{ gh_mirror_user }}"
state: present
- name: create the gh_mirror data directory
file: >
path={{ gh_mirror_data_dir }}
state=directory
owner={{ gh_mirror_user }}
group={{ gh_mirror_group }}
file:
path: "{{ gh_mirror_data_dir }}"
state: directory
owner: "{{ gh_mirror_user }}"
group: "{{ gh_mirror_group }}"
- name: create the gh_mirror app directory
file: >
path={{ gh_mirror_app_dir }}
state=directory
file:
path: "{{ gh_mirror_app_dir }}"
state: directory
- name: create org config
template: src=orgs.yml.j2 dest={{ gh_mirror_app_dir }}/orgs.yml
- name: copying sync scripts
copy: src={{ item }} dest={{ gh_mirror_app_dir }}/{{ item }}
copy:
src: "{{ item }}"
dest: "{{ gh_mirror_app_dir }}/{{ item }}"
with_items: gh_mirror_app_files
- name: creating cron job to update repos
......
......@@ -15,9 +15,9 @@
#
#
- name: restart gitreload
supervisorctl: >
name=gitreload
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
supervisorctl:
name: gitreload
supervisorctl_path: "{{ supervisor_ctl }}"
config: "{{ supervisor_cfg }}"
state: restarted
when: not disable_edx_services
......@@ -6,29 +6,29 @@
with_items: GITRELOAD_REPOS
# Import each configured course repo into the LMS via git_add_course.
# The env assignment (SERVICE_VARIANT=lms) is part of the shell command line;
# executable and chdir are module parameters and belong under args.
- name: do import of courses
  shell: "SERVICE_VARIANT=lms {{ edxapp_venv_bin }}/python manage.py lms --settings=aws git_add_course {{ item.url }} {{ GITRELOAD_REPODIR }}/{{ item.name }}"
  args:
    executable: /bin/bash
    chdir: "{{ edxapp_code_dir }}"
  become_user: "{{ common_web_user }}"
  with_items: GITRELOAD_REPOS
# Make the repo tree owned by the web user and web group.
# The original had a duplicate `owner:` key (last-wins in most YAML parsers),
# which silently dropped the user ownership; the second key must be `group:`.
- name: change ownership on repos for access by edxapp and www-data
  file:
    path: "{{ GITRELOAD_REPODIR }}"
    state: directory
    owner: "{{ common_web_user }}"
    group: "{{ common_web_group }}"
    recurse: yes
- name: change group on repos if using devstack
file: >
path={{ GITRELOAD_REPODIR }}
state=directory
group={{ edxapp_user }}
recurse=yes
file:
path: "{{ GITRELOAD_REPODIR }}"
state: directory
group: "{{ edxapp_user }}"
recurse: yes
when: devstack
- name: change mode on repos with using devstack
command: chmod -R o=rwX,g=srwX,o=rX {{ GITRELOAD_REPODIR }}
command: "chmod -R o=rwX,g=srwX,o=rX {{ GITRELOAD_REPODIR }}"
when: devstack
......@@ -11,10 +11,10 @@
tags: course_pull
- name: install gitreload
pip: >
name=git+{{ gitreload_repo }}@{{ gitreload_version }}#egg=gitreload
virtualenv={{ gitreload_venv }}
extra_args="--exists-action w"
pip:
name: "git+{{ gitreload_repo }}@{{ gitreload_version }}#egg=gitreload"
virtualenv: "{{ gitreload_venv }}"
extra_args: "--exists-action w"
become_user: "{{ gitreload_user }}"
notify: restart gitreload
......@@ -24,22 +24,22 @@
notify: restart gitreload
# Render the gunicorn config for gitreload; src and dest are separate
# module parameters (they had been merged into one src string).
- name: "add gunicorn configuration file"
  template:
    src: edx/app/gitreload/gitreload_gunicorn.py.j2
    dest: "{{ gitreload_dir }}/gitreload_gunicorn.py"
  become_user: "{{ gitreload_user }}"
  notify: restart gitreload
# Install the supervisor config into the "available" directory.
# dest/owner/group/mode are each their own parameter — they had been
# concatenated into the src and owner strings during the format conversion.
- name: "writing supervisor script"
  template:
    src: edx/app/supervisor/conf.available.d/gitreload.conf.j2
    dest: "{{ supervisor_available_dir }}/gitreload.conf"
    owner: "{{ supervisor_user }}"
    group: "{{ common_web_user }}"
    mode: 0644
# Symlink the available config into the active supervisor config dir.
# group/mode and force were merged into the owner and state values during
# the conversion; each must be a separate module parameter.
- name: "enable supervisor script"
  file:
    src: "{{ supervisor_available_dir }}/gitreload.conf"
    dest: "{{ supervisor_cfg_dir }}/gitreload.conf"
    owner: "{{ supervisor_user }}"
    group: "{{ common_web_user }}"
    mode: 0644
    state: link
    force: yes
when: not disable_edx_services
# call supervisorctl update. this reloads
......@@ -54,9 +54,9 @@
when: not disable_edx_services
- name: ensure gitreload is started
supervisorctl: >
name=gitreload
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=started
supervisorctl:
name: gitreload
supervisorctl_path: "{{ supervisor_ctl }}"
config: "{{ supervisor_cfg }}"
state: started
when: not disable_edx_services
......@@ -38,25 +38,25 @@
- deploy
- name: create gitreload user
user: >
name="{{ gitreload_user }}"
home="{{ gitreload_dir }}"
createhome=no
shell=/bin/false
user:
name: "{{ gitreload_user }}"
home: "{{ gitreload_dir }}"
createhome: no
shell: /bin/false
- name: ensure home folder exists
file: >
path={{ gitreload_dir }}
state=directory
owner={{ gitreload_user }}
group={{ gitreload_user }}
file:
path: "{{ gitreload_dir }}"
state: directory
owner: "{{ gitreload_user }}"
group: "{{ gitreload_user }}"
- name: ensure repo dir exists
file: >
path={{ GITRELOAD_REPODIR }}
state=directory
owner={{ common_web_user }}
group={{ common_web_group }}
file:
path: "{{ GITRELOAD_REPODIR }}"
state: directory
owner: "{{ common_web_user }}"
group: "{{ common_web_group }}"
- name: grab ssh host keys
shell: ssh-keyscan {{ item }}
......@@ -65,17 +65,17 @@
register: gitreload_repo_host_keys
- name: add host keys if needed to known_hosts
lineinfile: >
create=yes
dest=~/.ssh/known_hosts
line="{{ item.stdout }}"
lineinfile:
create: yes
dest: ~/.ssh/known_hosts
line: "{{ item.stdout }}"
become_user: "{{ common_web_user }}"
with_items: gitreload_repo_host_keys.results
# Expose selected venv binaries in the common bin dir with a .gitreload suffix.
# `state=link` was left in the old inline format, which breaks the dict form.
- name: create a symlink for venv python
  file:
    src: "{{ gitreload_venv_bin }}/{{ item }}"
    dest: "{{ COMMON_BIN_DIR }}/{{ item }}.gitreload"
    state: link
with_items:
- python
......
......@@ -63,21 +63,21 @@
tags: gluster
- name: all | mount volume
mount: >
name={{ item.mount_location }}
src={{ gluster_primary_ip }}:{{ item.name }}
fstype=glusterfs
state=mounted
opts=defaults,_netdev
mount:
name: "{{ item.mount_location }}"
src: "{{ gluster_primary_ip }}:{{ item.name }}"
fstype: glusterfs
state: mounted
opts: defaults,_netdev
with_items: gluster_volumes
tags: gluster
# This required due to an annoying bug in Ubuntu and gluster where it tries to mount the system
# before the network stack is up and can't lookup 127.0.0.1
- name: all | sleep mount
lineinfile: >
dest=/etc/rc.local
line='sleep 5; /bin/mount -a'
regexp='sleep 5; /bin/mount -a'
insertbefore='exit 0'
lineinfile:
dest: /etc/rc.local
line: 'sleep 5; /bin/mount -a'
regexp: 'sleep 5; /bin/mount -a'
insertbefore: 'exit 0'
tags: gluster
......@@ -76,20 +76,17 @@
- { url: "{{ GO_SERVER_GITHUB_PR_PLUGIN_JAR_URL }}", md5: "{{ GO_SERVER_GITHUB_PR_PLUGIN_MD5 }}" }
- name: generate line for go-server password file for admin user
command: >
/usr/bin/htpasswd -nbs "{{ GO_SERVER_ADMIN_USERNAME }}" "{{ GO_SERVER_ADMIN_PASSWORD }}"
command: "/usr/bin/htpasswd -nbs \"{{ GO_SERVER_ADMIN_USERNAME }}\" \"{{ GO_SERVER_ADMIN_PASSWORD }}\""
register: admin_user_password_line
when: GO_SERVER_ADMIN_USERNAME and GO_SERVER_ADMIN_PASSWORD
- name: generate line for go-server password file for backup user
command: >
/usr/bin/htpasswd -nbs "{{ GO_SERVER_BACKUP_USERNAME }}" "{{ GO_SERVER_BACKUP_PASSWORD }}"
command: "/usr/bin/htpasswd -nbs \"{{ GO_SERVER_BACKUP_USERNAME }}\" \"{{ GO_SERVER_BACKUP_PASSWORD }}\""
register: backup_user_password_line
when: GO_SERVER_BACKUP_USERNAME and GO_SERVER_BACKUP_PASSWORD
- name: generate line for go-server password file for gomatic user
command: >
/usr/bin/htpasswd -nbs "{{ GO_SERVER_GOMATIC_USERNAME }}" "{{ GO_SERVER_GOMATIC_PASSWORD }}"
command: "/usr/bin/htpasswd -nbs \"{{ GO_SERVER_GOMATIC_USERNAME }}\" \"{{ GO_SERVER_GOMATIC_PASSWORD }}\""
register: gomatic_user_password_line
when: GO_SERVER_GOMATIC_USERNAME and GO_SERVER_GOMATIC_PASSWORD
......
......@@ -23,68 +23,83 @@
#
- name: install system packages
apt: >
pkg={{ item }}
state=present
apt:
pkg: "{{ item }}"
state: present
with_items: hadoop_common_debian_pkgs
- name: ensure group exists
group: name={{ hadoop_common_group }} system=yes state=present
group:
name: "{{ hadoop_common_group }}"
system: yes
state: present
- name: ensure user exists
user: >
name={{ hadoop_common_user }}
group={{ hadoop_common_group }}
home={{ HADOOP_COMMON_USER_HOME }} createhome=yes
shell=/bin/bash system=yes generate_ssh_key=yes
state=present
user:
name: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
home: "{{ HADOOP_COMMON_USER_HOME }}"
createhome: yes
shell: /bin/bash
system: yes
generate_ssh_key: yes
state: present
- name: own key authorized
file: >
src={{ HADOOP_COMMON_USER_HOME }}/.ssh/id_rsa.pub
dest={{ HADOOP_COMMON_USER_HOME }}/.ssh/authorized_keys
owner={{ hadoop_common_user }} group={{ hadoop_common_group }} state=link
file:
src: "{{ HADOOP_COMMON_USER_HOME }}/.ssh/id_rsa.pub"
dest: "{{ HADOOP_COMMON_USER_HOME }}/.ssh/authorized_keys"
owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
state: link
- name: ssh configured
template: >
src=hadoop_user_ssh_config.j2
dest={{ HADOOP_COMMON_USER_HOME }}/.ssh/config
mode=0600 owner={{ hadoop_common_user }} group={{ hadoop_common_group }}
template:
src: hadoop_user_ssh_config.j2
dest: "{{ HADOOP_COMMON_USER_HOME }}/.ssh/config"
mode: 0600
owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
# Grant the hadoop group passwordless sudo, validating with visudo first.
# The conversion had collapsed regexp and line into one scalar
# ("'^%hadoop ALL\=' line='...'"), which would never match and also
# contained an invalid \= escape inside double quotes; single quotes keep
# the backslash literal.
- name: ensure user is in sudoers
  lineinfile:
    dest: /etc/sudoers
    state: present
    regexp: '^%hadoop ALL\='
    line: '%hadoop ALL=(ALL) NOPASSWD:ALL'
    validate: 'visudo -cf %s'
- name: check if downloaded and extracted
stat: path={{ HADOOP_COMMON_HOME }}
register: extracted_hadoop_dir
- name: distribution downloaded
get_url: >
url={{ hadoop_common_dist.url }}
sha256sum={{ hadoop_common_dist.sha256sum }}
dest={{ hadoop_common_temporary_dir }}
get_url:
url: "{{ hadoop_common_dist.url }}"
sha256sum: "{{ hadoop_common_dist.sha256sum }}"
dest: "{{ hadoop_common_temporary_dir }}"
when: not extracted_hadoop_dir.stat.exists
- name: distribution extracted
shell: >
chdir={{ HADOOP_COMMON_USER_HOME }}
tar -xzf {{ hadoop_common_temporary_dir }}/{{ hadoop_common_dist.filename }} && chown -R {{ hadoop_common_user }}:{{ hadoop_common_group }} hadoop-{{ HADOOP_COMMON_VERSION }}
shell: "tar -xzf {{ hadoop_common_temporary_dir }}/{{ hadoop_common_dist.filename }} && chown -R {{ hadoop_common_user }}:{{ hadoop_common_group }} hadoop-{{ HADOOP_COMMON_VERSION }}"
args:
chdir: "{{ HADOOP_COMMON_USER_HOME }}"
when: not extracted_hadoop_dir.stat.exists
- name: versioned directory symlink created
file: >
src={{ HADOOP_COMMON_USER_HOME }}/hadoop-{{ HADOOP_COMMON_VERSION }}
dest={{ HADOOP_COMMON_HOME }}
owner={{ hadoop_common_user }} group={{ hadoop_common_group }} state=link
file:
src: "{{ HADOOP_COMMON_USER_HOME }}/hadoop-{{ HADOOP_COMMON_VERSION }}"
dest: "{{ HADOOP_COMMON_HOME }}"
owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
state: link
- name: configuration installed
template: >
src={{ item }}.j2
dest={{ HADOOP_COMMON_CONF_DIR }}/{{ item }}
mode=0640 owner={{ hadoop_common_user }} group={{ hadoop_common_group }}
template:
src: "{{ item }}.j2"
dest: "{{ HADOOP_COMMON_CONF_DIR }}/{{ item }}"
mode: 0640
owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
with_items:
- hadoop-env.sh
- mapred-site.xml
......@@ -93,79 +108,84 @@
- yarn-site.xml
- name: upstart scripts installed
template: >
src={{ item }}.j2
dest=/etc/init/{{ item }}
mode=0640 owner=root group=root
template:
src: "{{ item }}.j2"
dest: "/etc/init/{{ item }}"
mode: 0640
owner: root
group: root
with_items:
- hdfs.conf
- yarn.conf
- name: hadoop env file exists
file: >
path={{ hadoop_common_env }} state=touch
owner={{ hadoop_common_user }} group={{ hadoop_common_group }}
file:
path: "{{ hadoop_common_env }}"
state: touch
owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
- name: env vars sourced in bashrc
lineinfile: >
dest={{ HADOOP_COMMON_USER_HOME }}/.bashrc
state=present
regexp="^. {{ hadoop_common_env }}"
line=". {{ hadoop_common_env }}"
insertbefore=BOF
lineinfile:
dest: "{{ HADOOP_COMMON_USER_HOME }}/.bashrc"
state: present
regexp: "^. {{ hadoop_common_env }}"
line: ". {{ hadoop_common_env }}"
insertbefore: BOF
- name: env vars sourced in hadoop env
lineinfile: >
dest={{ hadoop_common_env }} state=present
regexp="^. {{ HADOOP_COMMON_CONF_DIR }}/hadoop-env.sh" line=". {{ HADOOP_COMMON_CONF_DIR }}/hadoop-env.sh"
lineinfile:
dest: "{{ hadoop_common_env }}"
state: present
regexp: "^. {{ HADOOP_COMMON_CONF_DIR }}/hadoop-env.sh"
line: ". {{ HADOOP_COMMON_CONF_DIR }}/hadoop-env.sh"
- name: check if native libraries need to be built
stat: path={{ HADOOP_COMMON_USER_HOME }}/.native_libs_built
register: native_libs_built
- name: protobuf downloaded
get_url: >
url={{ hadoop_common_protobuf_dist.url }}
sha256sum={{ hadoop_common_protobuf_dist.sha256sum }}
dest={{ hadoop_common_temporary_dir }}
get_url:
url: "{{ hadoop_common_protobuf_dist.url }}"
sha256sum: "{{ hadoop_common_protobuf_dist.sha256sum }}"
dest: "{{ hadoop_common_temporary_dir }}"
when: not native_libs_built.stat.exists
- name: protobuf extracted
shell: >
chdir={{ hadoop_common_temporary_dir }}
tar -xzf {{ hadoop_common_protobuf_dist.filename }}
shell: "tar -xzf {{ hadoop_common_protobuf_dist.filename }}"
args:
chdir: "{{ hadoop_common_temporary_dir }}"
when: not native_libs_built.stat.exists
- name: protobuf installed
shell: >
chdir={{ hadoop_common_temporary_dir }}/protobuf-{{ HADOOP_COMMON_PROTOBUF_VERSION }}
./configure --prefix=/usr/local && make && make install
shell: "./configure --prefix=/usr/local && make && make install"
args:
chdir: "{{ hadoop_common_temporary_dir }}/protobuf-{{ HADOOP_COMMON_PROTOBUF_VERSION }}"
when: not native_libs_built.stat.exists
- name: native lib source downloaded
get_url: >
url={{ hadoop_common_native_dist.url }}
sha256sum={{ hadoop_common_native_dist.sha256sum }}
dest={{ hadoop_common_temporary_dir }}/{{ hadoop_common_native_dist.filename }}
get_url:
url: "{{ hadoop_common_native_dist.url }}"
sha256sum: "{{ hadoop_common_native_dist.sha256sum }}"
dest: "{{ hadoop_common_temporary_dir }}/{{ hadoop_common_native_dist.filename }}"
when: not native_libs_built.stat.exists
- name: native lib source extracted
shell: >
chdir={{ hadoop_common_temporary_dir }}
tar -xzf {{ hadoop_common_native_dist.filename }}
shell: "tar -xzf {{ hadoop_common_native_dist.filename }}"
args:
chdir: "{{ hadoop_common_temporary_dir }}"
when: not native_libs_built.stat.exists
- name: native lib built
shell: >
chdir={{ hadoop_common_temporary_dir }}/hadoop-common-release-{{ HADOOP_COMMON_VERSION }}/hadoop-common-project
mvn package -X -Pnative -DskipTests
shell: "mvn package -X -Pnative -DskipTests"
args:
chdir: "{{ hadoop_common_temporary_dir }}/hadoop-common-release-{{ HADOOP_COMMON_VERSION }}/hadoop-common-project"
environment:
LD_LIBRARY_PATH: /usr/local/lib
when: not native_libs_built.stat.exists
- name: old native libs renamed
shell: >
mv {{ HADOOP_COMMON_HOME }}/lib/native/{{ item.name }} {{ HADOOP_COMMON_HOME }}/lib/native/{{ item.new_name }}
shell: "mv {{ HADOOP_COMMON_HOME }}/lib/native/{{ item.name }} {{ HADOOP_COMMON_HOME }}/lib/native/{{ item.new_name }}"
with_items:
- { name: libhadoop.a, new_name: libhadoop32.a }
- { name: libhadoop.so, new_name: libhadoop32.so }
......@@ -173,9 +193,9 @@
when: not native_libs_built.stat.exists
# Chown and copy each freshly built native lib into the hadoop lib dir.
# The command string goes on `shell:`; only chdir belongs under args
# (the old `chdir=...` line and the command had been stranded inside args).
- name: new native libs installed
  shell: "chown {{ hadoop_common_user }}:{{ hadoop_common_group }} {{ item }} && cp {{ item }} {{ HADOOP_COMMON_HOME }}/lib/native/{{ item }}"
  args:
    chdir: "{{ hadoop_common_temporary_dir }}/hadoop-common-release-{{ HADOOP_COMMON_VERSION }}/hadoop-common-project/hadoop-common/target/native/target/usr/local/lib"
with_items:
- libhadoop.a
- libhadoop.so
......@@ -183,13 +203,17 @@
when: not native_libs_built.stat.exists
- name: native lib marker touched
file: >
path={{ HADOOP_COMMON_USER_HOME }}/.native_libs_built
owner={{ hadoop_common_user }} group={{ hadoop_common_group }} state=touch
file:
path: "{{ HADOOP_COMMON_USER_HOME }}/.native_libs_built"
owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
state: touch
when: not native_libs_built.stat.exists
# Ensure the hadoop services directory exists with restricted permissions.
# `mode=0750` had been left in the old inline format inside the dict form.
- name: service directory exists
  file:
    path: "{{ HADOOP_COMMON_SERVICES_DIR }}"
    mode: 0750
    owner: "{{ hadoop_common_user }}"
    group: "{{ hadoop_common_group }}"
    state: directory
......@@ -22,9 +22,11 @@
notify: restart haproxy
# Render the main haproxy config; dest had been merged into the src string.
- name: Server configuration file
  template:
    src: "{{ haproxy_template_dir }}/haproxy.cfg.j2"
    dest: /etc/haproxy/haproxy.cfg
    owner: root
    group: root
    mode: 0644
  notify: reload haproxy
notify: reload haproxy
- name: Enabled in default
......
---
# Installs the harprofiler
- name: create harprofiler user
user: >
name="{{ harprofiler_user }}"
createhome=no
home={{ harprofiler_dir }}
shell=/bin/bash
user:
name: "{{ harprofiler_user }}"
createhome: no
home: "{{ harprofiler_dir }}"
shell: /bin/bash
- name: create harprofiler repo
file: >
path={{ harprofiler_dir }} state=directory
owner="{{ harprofiler_user }}" group="{{ common_web_group }}"
mode=0755
file:
path: "{{ harprofiler_dir }}"
state: directory
owner: "{{ harprofiler_user }}"
group: "{{ common_web_group }}"
mode: 0755
- name: check out the harprofiler
git_2_0_1: >
dest={{ harprofiler_dir }}
repo={{ harprofiler_github_url }} version={{ harprofiler_version }}
accept_hostkey=yes
git_2_0_1:
dest: "{{ harprofiler_dir }}"
repo: "{{ harprofiler_github_url }}"
version: "{{ harprofiler_version }}"
accept_hostkey: yes
become_user: "{{ harprofiler_user }}"
- name: set bashrc for harprofiler user
template: >
src=bashrc.j2 dest="{{ harprofiler_dir }}/.bashrc" owner="{{ harprofiler_user }}"
mode=0755
template:
src: bashrc.j2
dest: "{{ harprofiler_dir }}/.bashrc"
owner: "{{ harprofiler_user }}"
mode: 0755
- name: install requirements
pip: >
requirements="{{ harprofiler_dir }}/requirements.txt" virtualenv="{{ harprofiler_venv_dir }}"
pip:
requirements: "{{ harprofiler_dir }}/requirements.txt"
virtualenv: "{{ harprofiler_venv_dir }}"
become_user: "{{ harprofiler_user }}"
- name: update config file
# harprofiler ships with a default config file. Doing a line-replace for the default
# configuration that does not match what this machine will have
lineinfile: >
dest={{ harprofiler_dir }}/config.yaml
regexp="browsermob_dir"
line="browsermob_dir: /usr/local"
state=present
lineinfile:
dest: "{{ harprofiler_dir }}/config.yaml"
regexp: "browsermob_dir"
line: "browsermob_dir: /usr/local"
state: present
- name: create validation shell script
template:
......@@ -49,6 +55,5 @@
# Smoke-test the harprofiler install by running the validation script.
# `chdir=` must be a module arg, not a literal argument appended to the
# command string.
- name: test install
  shell: "./{{ harprofiler_validation_script }}"
  args:
    chdir: "{{ harprofiler_dir }}"
  become_user: "{{ harprofiler_user }}"
......@@ -25,59 +25,66 @@
register: extracted_dir
- name: distribution downloaded
get_url: >
url={{ hive_dist.url }}
sha256sum={{ hive_dist.sha256sum }}
dest={{ hive_temporary_dir }}
get_url:
url: "{{ hive_dist.url }}"
sha256sum: "{{ hive_dist.sha256sum }}"
dest: "{{ hive_temporary_dir }}"
when: not extracted_dir.stat.exists
# Unpack the hive distribution into the hadoop user's home and fix ownership.
- name: distribution extracted
  shell: "tar -xzf {{ hive_temporary_dir }}/{{ hive_dist.filename }} && chown -R {{ hadoop_common_user }}:{{ hadoop_common_group }} hive-{{ HIVE_VERSION }}-bin"
  args:
    chdir: "{{ HADOOP_COMMON_USER_HOME }}"
  when: not extracted_dir.stat.exists
- name: versioned directory symlink created
file: >
src={{ HADOOP_COMMON_USER_HOME }}/hive-{{ HIVE_VERSION }}-bin
dest={{ HIVE_HOME }}
owner={{ hadoop_common_user }} group={{ hadoop_common_group }} state=link
file:
src: "{{ HADOOP_COMMON_USER_HOME }}/hive-{{ HIVE_VERSION }}-bin"
dest: "{{ HIVE_HOME }}"
owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
state: link
- name: hive mysql connector distribution downloaded
get_url: >
url={{ hive_mysql_connector_dist.url }}
sha256sum={{ hive_mysql_connector_dist.sha256sum }}
dest={{ hive_temporary_dir }}
get_url:
url: "{{ hive_mysql_connector_dist.url }}"
sha256sum: "{{ hive_mysql_connector_dist.sha256sum }}"
dest: "{{ hive_temporary_dir }}"
when: not extracted_dir.stat.exists
# Unpack the MySQL connector tarball inside the temporary directory.
- name: hive mysql connector distribution extracted
  shell: "tar -xzf {{ hive_temporary_dir }}/{{ hive_mysql_connector_dist.filename }}"
  args:
    chdir: "{{ hive_temporary_dir }}"
  when: not extracted_dir.stat.exists
- name: hive lib exists
file: >
path={{ HIVE_LIB }}
owner={{ hadoop_common_user }} group={{ hadoop_common_group }} state=directory
file:
path: "{{ HIVE_LIB }}"
owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
state: directory
- name: hive mysql connector installed
shell: >
chdir=/{{ hive_temporary_dir }}/mysql-connector-java-{{ HIVE_MYSQL_CONNECTOR_VERSION }}
cp mysql-connector-java-{{ HIVE_MYSQL_CONNECTOR_VERSION }}-bin.jar {{ HIVE_LIB }} &&
chown {{ hadoop_common_user }}:{{ hadoop_common_group }} {{ HIVE_LIB }}/mysql-connector-java-{{ HIVE_MYSQL_CONNECTOR_VERSION }}-bin.jar
shell: "cp mysql-connector-java-{{ HIVE_MYSQL_CONNECTOR_VERSION }}-bin.jar {{ HIVE_LIB }} && chown {{ hadoop_common_user }}:{{ hadoop_common_group }} {{ HIVE_LIB }}/mysql-connector-java-{{ HIVE_MYSQL_CONNECTOR_VERSION }}-bin.jar"
args:
chdir: "/{{ hive_temporary_dir }}/mysql-connector-java-{{ HIVE_MYSQL_CONNECTOR_VERSION }}"
when: not extracted_dir.stat.exists
- name: configuration installed
template: >
src={{ item }}.j2
dest={{ HIVE_CONF }}/{{ item }}
mode=0640 owner={{ hadoop_common_user }} group={{ hadoop_common_group }}
template:
src: "{{ item }}.j2"
dest: "{{ HIVE_CONF }}/{{ item }}"
mode: 0640
owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
with_items:
- hive-env.sh
- hive-site.xml
- name: env vars sourced in hadoop env
lineinfile: >
dest={{ hadoop_common_env }} state=present
regexp="^. {{ HIVE_CONF }}/hive-env.sh" line=". {{ HIVE_CONF }}/hive-env.sh"
lineinfile:
dest: "{{ hadoop_common_env }}"
state: present
regexp: "^. {{ HIVE_CONF }}/hive-env.sh"
line: ". {{ HIVE_CONF }}/hive-env.sh"
......@@ -22,21 +22,22 @@
#
- name: setup the insights env file
template: >
src="edx/app/insights/insights_env.j2"
dest="{{ insights_app_dir }}/insights_env"
owner={{ insights_user }}
group={{ insights_user }}
mode=0644
template:
src: "edx/app/insights/insights_env.j2"
dest: "{{ insights_app_dir }}/insights_env"
owner: "{{ insights_user }}"
group: "{{ insights_user }}"
mode: 0644
tags:
- install
- install:configuration
- name: install application requirements
pip: >
requirements="{{ insights_requirements_base }}/{{ item }}"
virtualenv="{{ insights_venv_dir }}"
state=present extra_args="--exists-action w"
pip:
requirements: "{{ insights_requirements_base }}/{{ item }}"
virtualenv: "{{ insights_venv_dir }}"
state: present
extra_args: "--exists-action w"
become_user: "{{ insights_user }}"
with_items: insights_requirements
tags:
......@@ -44,9 +45,9 @@
- install:app-requirements
- name: create nodeenv
shell: >
creates={{ insights_nodeenv_dir }}
{{ insights_venv_dir }}/bin/nodeenv {{ insights_nodeenv_dir }} --prebuilt
shell: "{{ insights_venv_dir }}/bin/nodeenv {{ insights_nodeenv_dir }} --prebuilt"
args:
creates: "{{ insights_nodeenv_dir }}"
become_user: "{{ insights_user }}"
tags:
- install
......@@ -61,21 +62,19 @@
environment: "{{ insights_environment }}"
- name: install bower dependencies
shell: >
chdir={{ insights_code_dir }}
. {{ insights_venv_dir }}/bin/activate &&
. {{ insights_nodeenv_bin }}/activate && {{ insights_node_bin }}/bower install --production --config.interactive=false
shell: ". {{ insights_venv_dir }}/bin/activate && . {{ insights_nodeenv_bin }}/activate && {{ insights_node_bin }}/bower install --production --config.interactive=false"
args:
chdir: "{{ insights_code_dir }}"
become_user: "{{ insights_user }}"
tags:
- install
- install:app-requirements
# Run Django migrations with the migration DB credentials in the environment.
# A space is required between the two env-var assignments; the converted
# string had them fused ("...USER }}'DB_MIGRATION_PASS..."), which the shell
# would treat as one malformed assignment.
- name: migrate
  shell: "DB_MIGRATION_USER='{{ COMMON_MYSQL_MIGRATE_USER }}' DB_MIGRATION_PASS='{{ COMMON_MYSQL_MIGRATE_PASS }}' {{ insights_venv_dir }}/bin/python {{ insights_manage }} migrate --noinput"
  args:
    chdir: "{{ insights_code_dir }}"
become_user: "{{ insights_user }}"
environment: "{{ insights_environment }}"
when: migrate_db is defined and migrate_db|lower == "yes"
......@@ -84,18 +83,18 @@
- migrate:db
- name: run r.js optimizer
shell: >
chdir={{ insights_code_dir }}
. {{ insights_nodeenv_bin }}/activate && {{ insights_node_bin }}/r.js -o build.js
shell: ". {{ insights_nodeenv_bin }}/activate && {{ insights_node_bin }}/r.js -o build.js"
args:
chdir: "{{ insights_code_dir }}"
become_user: "{{ insights_user }}"
tags:
- assets
- assets:gather
- name: run collectstatic
shell: >
chdir={{ insights_code_dir }}
{{ insights_venv_dir }}/bin/python {{ insights_manage }} {{ item }}
shell: "{{ insights_venv_dir }}/bin/python {{ insights_manage }} {{ item }}"
args:
chdir: "{{ insights_code_dir }}"
become_user: "{{ insights_user }}"
environment: "{{ insights_environment }}"
with_items:
......@@ -106,38 +105,40 @@
- assets:gather
# Generate compiled translation files inside the analytics_dashboard dir.
# A value starting with an unquoted {{ is parsed by YAML as a flow mapping
# before Jinja ever sees it, so chdir must be quoted.
- name: compile translations
  shell: ". {{ insights_venv_dir }}/bin/activate && i18n_tool generate -v"
  args:
    chdir: "{{ insights_code_dir }}/analytics_dashboard"
become_user: "{{ insights_user }}"
tags:
- assets
- assets:gather
# Install the supervisor wrapper script for the insights service.
# owner and group had been concatenated into the mode string; each is a
# separate template parameter.
- name: write out the supervisior wrapper
  template:
    src: edx/app/insights/insights.sh.j2
    dest: "{{ insights_app_dir }}/{{ insights_service_name }}.sh"
    mode: 0650
    owner: "{{ supervisor_user }}"
    group: "{{ common_web_user }}"
tags:
- install
- install:configuration
- name: write supervisord config
template: >
src=edx/app/supervisor/conf.d.available/insights.conf.j2
dest="{{ supervisor_available_dir }}/{{ insights_service_name }}.conf"
owner={{ supervisor_user }} group={{ common_web_user }} mode=0644
template:
src: edx/app/supervisor/conf.d.available/insights.conf.j2
dest: "{{ supervisor_available_dir }}/{{ insights_service_name }}.conf"
owner: "{{ supervisor_user }}"
group: "{{ common_web_user }}"
mode: 0644
tags:
- install
- install:configuration
- name: enable supervisor script
file: >
src={{ supervisor_available_dir }}/{{ insights_service_name }}.conf
dest={{ supervisor_cfg_dir }}/{{ insights_service_name }}.conf
state=link
force=yes
file:
src: "{{ supervisor_available_dir }}/{{ insights_service_name }}.conf"
dest: "{{ supervisor_cfg_dir }}/{{ insights_service_name }}.conf"
state: link
force: yes
when: not disable_edx_services
tags:
- install
......@@ -151,10 +152,10 @@
- manage:start
- name: create symlinks from the venv bin dir
file: >
src="{{ insights_venv_dir }}/bin/{{ item }}"
dest="{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.{{ insights_service_name }}"
state=link
file:
src: "{{ insights_venv_dir }}/bin/{{ item }}"
dest: "{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.{{ insights_service_name }}"
state: link
with_items:
- python
- pip
......@@ -164,20 +165,20 @@
- install:base
- name: create manage.py symlink
file: >
src="{{ insights_manage }}"
dest="{{ COMMON_BIN_DIR }}/manage.{{ insights_service_name }}"
state=link
file:
src: "{{ insights_manage }}"
dest: "{{ COMMON_BIN_DIR }}/manage.{{ insights_service_name }}"
state: link
tags:
- install
- install:base
- name: restart insights
supervisorctl: >
state=restarted
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
name={{ insights_service_name }}
supervisorctl:
state: restarted
supervisorctl_path: "{{ supervisor_ctl }}"
config: "{{ supervisor_cfg }}"
name: "{{ insights_service_name }}"
when: not disable_edx_services
become_user: "{{ supervisor_service_user }}"
tags:
......
......@@ -42,20 +42,20 @@
owner={{ jenkins_user }} group={{ jenkins_group }} mode=755
# Render the S3 publisher plugin config into the jenkins home.
# `mode=0644` had been left in the old inline format inside the dict form.
- name: configure s3 plugin
  template:
    src: "./{{ jenkins_home }}/hudson.plugins.s3.S3BucketPublisher.xml.j2"
    dest: "{{ jenkins_home }}/hudson.plugins.s3.S3BucketPublisher.xml"
    owner: "{{ jenkins_user }}"
    group: "{{ jenkins_group }}"
    mode: 0644
- name: configure the boto profiles for jenkins
template: >
src="./{{ jenkins_home }}/boto.j2"
dest="{{ jenkins_home }}/.boto"
owner="{{ jenkins_user }}"
group="{{ jenkins_group }}"
mode="0600"
template:
src: "./{{ jenkins_home }}/boto.j2"
dest: "{{ jenkins_home }}/.boto"
owner: "{{ jenkins_user }}"
group: "{{ jenkins_group }}"
mode: "0600"
tags:
- aws-config
......@@ -66,53 +66,52 @@
- aws-config
- name: configure the awscli profiles for jenkins
template: >
src="./{{ jenkins_home }}/aws_config.j2"
dest="{{ jenkins_home }}/.aws/config"
owner="{{ jenkins_user }}"
group="{{ jenkins_group }}"
mode="0600"
template:
src: "./{{ jenkins_home }}/aws_config.j2"
dest: "{{ jenkins_home }}/.aws/config"
owner: "{{ jenkins_user }}"
group: "{{ jenkins_group }}"
mode: "0600"
tags:
- aws-config
- name: create the ssh directory
file: >
path={{ jenkins_home }}/.ssh
owner={{ jenkins_user }}
group={{ jenkins_group }}
mode=0700
state=directory
file:
path: "{{ jenkins_home }}/.ssh"
owner: "{{ jenkins_user }}"
group: "{{ jenkins_group }}"
mode: 0700
state: directory
# Need to add Github to known_hosts to avoid
# being prompted when using git through ssh
- name: Add github.com to known_hosts if it does not exist
shell: >
ssh-keygen -f {{ jenkins_home }}/.ssh/known_hosts -H -F github.com | grep -q found || ssh-keyscan -H github.com > {{ jenkins_home }}/.ssh/known_hosts
shell: "ssh-keygen -f {{ jenkins_home }}/.ssh/known_hosts -H -F github.com | grep -q found || ssh-keyscan -H github.com > {{ jenkins_home }}/.ssh/known_hosts"
- name: create job directory
file: >
path="{{ jenkins_home }}/jobs"
owner="{{ jenkins_user }}"
group="{{ jenkins_group }}"
mode=0755
state=directory
file:
path: "{{ jenkins_home }}/jobs"
owner: "{{ jenkins_user }}"
group: "{{ jenkins_group }}"
mode: 0755
state: directory
- name: create admin job directories
file: >
path="{{ jenkins_home }}/jobs/{{ item }}"
owner={{ jenkins_user }}
group={{ jenkins_group }}
mode=0755
state=directory
file:
path: "{{ jenkins_home }}/jobs/{{ item }}"
owner: "{{ jenkins_user }}"
group: "{{ jenkins_group }}"
mode: 0755
state: directory
with_items: jenkins_admin_jobs
- name: create admin job config files
template: >
src="./{{ jenkins_home }}/jobs/{{ item }}/config.xml.j2"
dest="{{ jenkins_home }}/jobs/{{ item }}/config.xml"
owner={{ jenkins_user }}
group={{ jenkins_group }}
mode=0644
template:
src: "./{{ jenkins_home }}/jobs/{{ item }}/config.xml.j2"
dest: "{{ jenkins_home }}/jobs/{{ item }}/config.xml"
owner: "{{ jenkins_user }}"
group: "{{ jenkins_group }}"
mode: 0644
with_items: jenkins_admin_jobs
# adding chris-lea nodejs repo
......@@ -125,17 +124,18 @@
# This is necessary so that ansible can run with
# sudo set to True (as the jenkins user) on jenkins
- name: grant sudo access to the jenkins user
copy: >
content="{{ jenkins_user }} ALL=({{ jenkins_user }}) NOPASSWD:ALL"
dest=/etc/sudoers.d/99-jenkins owner=root group=root
mode=0440 validate='visudo -cf %s'
# Grant the jenkins user passwordless sudo (as itself) via a sudoers drop-in.
# NOTE: the conversion from inline syntax had fused "owner=root group=root"
# into the dest path; split them back into proper keys.
copy:
  content: "{{ jenkins_user }} ALL=({{ jenkins_user }}) NOPASSWD:ALL"
  dest: /etc/sudoers.d/99-jenkins
  owner: root
  group: root
  # quoted so YAML does not read the octal literal as the integer 288
  mode: "0440"
  # refuse to install a syntactically invalid sudoers file
  validate: "visudo -cf %s"
- name: install global gem dependencies
gem: >
name={{ item.name }}
state=present
version={{ item.version }}
user_install=no
gem:
name: "{{ item.name }}"
state: present
version: "{{ item.version }}"
user_install: no
with_items: jenkins_admin_gem_pkgs
- name: get s3 one time url
......
......@@ -170,9 +170,9 @@
- jenkins-seed-job
- name: generate seed job xml
shell: >
cd {{ jenkins_seed_job_root }} &&
GRADLE_OPTS="-Dorg.gradle.daemon=true" ./gradlew run -Pargs={{ jenkins_seed_job_script }}
# Run the gradle seed-job generator from the seed job root.
# The command string belongs on `shell:` (free-form), with chdir under `args:`;
# the previous conversion had put the command on `args:` and left `shell:` empty.
shell: 'GRADLE_OPTS="-Dorg.gradle.daemon=true" ./gradlew run -Pargs={{ jenkins_seed_job_script }}'
args:
  chdir: "{{ jenkins_seed_job_root }}"
become: yes
become_user: "{{ jenkins_user }}"
tags:
......
......@@ -6,22 +6,21 @@
# refers to the --depth-setting of git clone. A value of 1
# will truncate all history prior to the last revision.
- name: Create shallow clone of edx-platform
git_2_0_1: >
repo=https://github.com/edx/edx-platform.git
dest={{ jenkins_home }}/shallow-clone
version={{ jenkins_edx_platform_version }}
depth=1
git_2_0_1:
repo: https://github.com/edx/edx-platform.git
dest: "{{ jenkins_home }}/shallow-clone"
version: "{{ jenkins_edx_platform_version }}"
depth: 1
become_user: "{{ jenkins_user }}"
# Install the platform requirements using pip.
- name: Install edx-platform requirements using pip
pip: >
requirements={{ jenkins_home }}/shallow-clone/requirements/edx/{{ item }}
extra_args="--exists-action=w"
virtualenv={{ jenkins_home }}/edx-venv
virtualenv_command=virtualenv
executable=pip
pip:
requirements: "{{ jenkins_home }}/shallow-clone/requirements/edx/{{ item }}"
extra_args: "--exists-action=w"
virtualenv: "{{ jenkins_home }}/edx-venv"
virtualenv_command: virtualenv
executable: pip
with_items:
- pre.txt
- github.txt
......@@ -39,12 +38,12 @@
become_user: "{{ jenkins_user }}"
- name: Install edx-platform post requirements using pip
pip: >
requirements={{ jenkins_home }}/shallow-clone/requirements/edx/{{ item }}
extra_args="--exists-action=w"
virtualenv={{ jenkins_home }}/edx-venv
virtualenv_command=virtualenv
executable=pip
pip:
requirements: "{{ jenkins_home }}/shallow-clone/requirements/edx/{{ item }}"
extra_args: "--exists-action=w"
virtualenv: "{{ jenkins_home }}/edx-venv"
virtualenv_command: virtualenv
executable: pip
with_items:
- post.txt
become_user: "{{ jenkins_user }}"
......@@ -55,9 +54,9 @@
# The edx-venv directory is deleted and then recreated
# cleanly from the archive by the jenkins build scripts.
- name: Create a clean virtualenv archive
command: >
tar -cpzf edx-venv_clean.tar.gz edx-venv
chdir={{ jenkins_home }}
command: "tar -cpzf edx-venv_clean.tar.gz edx-venv"
args:
chdir: "{{ jenkins_home }}"
become_user: "{{ jenkins_user }}"
# Remove the shallow-clone directory now that we are
......
......@@ -39,8 +39,7 @@
# Need to add Github to known_hosts to avoid
# being prompted when using git through ssh
- name: Add github.com to known_hosts if it does not exist
shell: >
ssh-keygen -f {{ jenkins_home }}/.ssh/known_hosts -H -F github.com | grep -q found || ssh-keyscan -H github.com > {{ jenkins_home }}/.ssh/known_hosts
shell: "ssh-keygen -f {{ jenkins_home }}/.ssh/known_hosts -H -F github.com | grep -q found || ssh-keyscan -H github.com > {{ jenkins_home }}/.ssh/known_hosts"
# Edit the /etc/hosts file so that the Preview button will work in Studio
- name: add preview.localhost to /etc/hosts
......
......@@ -12,28 +12,42 @@
- nginx
- name: Ensure {{ kibana_app_dir }} exists
file: path={{ kibana_app_dir }} state=directory owner=root group=root mode=0755
file:
path: "{{ kibana_app_dir }}"
state: directory
owner: root
group: root
mode: 0755
- name: Ensure subdirectories exist
file: path={{ kibana_app_dir }}/{{ item }} owner=root group=root mode=0755 state=directory
file:
path: "{{ kibana_app_dir }}/{{ item }}"
owner: root
group: root
mode: 0755
state: directory
with_items:
- htdocs
- share
- name: ensure we have the specified kibana release
get_url: url={{ kibana_url }} dest={{ kibana_app_dir }}/share/{{ kibana_file }}
get_url:
url: "{{ kibana_url }}"
dest: "{{ kibana_app_dir }}/share/{{ kibana_file }}"
- name: extract
shell: >
chdir={{ kibana_app_dir }}/share
tar -xzvf {{ kibana_app_dir }}/share/{{ kibana_file }}
creates={{ kibana_app_dir }}/share/{{ kibana_file|replace('.tar.gz','') }}
shell: "tar -xzvf {{ kibana_app_dir }}/share/{{ kibana_file }}"
args:
chdir: "{{ kibana_app_dir }}/share"
creates: "{{ kibana_app_dir }}/share/{{ kibana_file|replace('.tar.gz','') }}"
- name: install
shell: >
chdir={{ kibana_app_dir }}/share/{{ kibana_file|replace('.tar.gz','') }}
cp -R * {{ kibana_app_dir }}/htdocs/
shell: "cp -R * {{ kibana_app_dir }}/htdocs/"
args:
chdir: "{{ kibana_app_dir }}/share/{{ kibana_file|replace('.tar.gz','') }}"
- name: copy config
template: src=config.js.j2 dest={{ kibana_app_dir }}/htdocs/config.js
template:
src: config.js.j2
dest: "{{ kibana_app_dir }}/htdocs/config.js"
......@@ -56,19 +56,19 @@
when: LOGSTASH_ROTATE|bool
- name: Setup cron to run rotation
cron: >
user=root
name="Elasticsearch logstash index rotation"
hour={{ logstash_rotate_cron.hour }}
minute={{ logstash_rotate_cron.minute }}
job="/usr/bin/python {{ logstash_app_dir }}/share/logstash-elasticsearch-scripts/logstash_index_cleaner.py -d {{ LOGSTASH_DAYS_TO_KEEP }} > {{ logstash_log_dir }}/rotation_cron"
cron:
user: root
name: "Elasticsearch logstash index rotation"
hour: "{{ logstash_rotate_cron.hour }}"
minute: "{{ logstash_rotate_cron.minute }}"
job: "/usr/bin/python {{ logstash_app_dir }}/share/logstash-elasticsearch-scripts/logstash_index_cleaner.py -d {{ LOGSTASH_DAYS_TO_KEEP }} > {{ logstash_log_dir }}/rotation_cron"
when: LOGSTASH_ROTATE|bool
- name: Setup cron to run rotation
cron: >
user=root
name="Elasticsearch logstash index optimization"
hour={{ logstash_optimize_cron.hour }}
minute={{ logstash_optimize_cron.minute }}
job="/usr/bin/python {{ logstash_app_dir }}/share/logstash-elasticsearch-scripts/logstash_index_optimize.py -d {{ LOGSTASH_DAYS_TO_KEEP }} > {{ logstash_log_dir }}/optimize_cron"
cron:
user: root
name: "Elasticsearch logstash index optimization"
hour: "{{ logstash_optimize_cron.hour }}"
minute: "{{ logstash_optimize_cron.minute }}"
job: "/usr/bin/python {{ logstash_app_dir }}/share/logstash-elasticsearch-scripts/logstash_index_optimize.py -d {{ LOGSTASH_DAYS_TO_KEEP }} > {{ logstash_log_dir }}/optimize_cron"
when: LOGSTASH_ROTATE|bool
- name: copy galera cluster config
template: >
src="etc/mysql/conf.d/galera.cnf.j2"
dest="/etc/mysql/conf.d/galera.cnf"
owner="root"
group="root"
mode=0600
template:
src: "etc/mysql/conf.d/galera.cnf.j2"
dest: "/etc/mysql/conf.d/galera.cnf"
owner: "root"
group: "root"
mode: 0600
- name: check if we have already bootstrapped the cluster
stat: path=/etc/mysql/ansible_cluster_started
......@@ -15,18 +15,18 @@
when: not mariadb_bootstrap.stat.exists
- name: setup bootstrap on primary
lineinfile: >
dest="/etc/mysql/conf.d/galera.cnf"
regexp="^wsrep_cluster_address=gcomm://{{ hostvars.keys()|sort|join(',') }}$"
line="wsrep_cluster_address=gcomm://"
lineinfile:
dest: "/etc/mysql/conf.d/galera.cnf"
regexp: "^wsrep_cluster_address=gcomm://{{ hostvars.keys()|sort|join(',') }}$"
line: "wsrep_cluster_address=gcomm://"
when: ansible_hostname == hostvars[hostvars.keys()[0]].ansible_hostname and not mariadb_bootstrap.stat.exists
- name: fetch debian.cnf file so start-stop will work properly
fetch: >
src=/etc/mysql/debian.cnf
dest=/tmp/debian.cnf
fail_on_missing=yes
flat=yes
fetch:
src: /etc/mysql/debian.cnf
dest: /tmp/debian.cnf
fail_on_missing: yes
flat: yes
when: ansible_hostname == hostvars[hostvars.keys()[0]].ansible_hostname and not mariadb_bootstrap.stat.exists
register: mariadb_new_debian_cnf
......@@ -39,12 +39,12 @@
when: not mariadb_bootstrap.stat.exists
- name: reset galera cluster config since we are bootstrapped
template: >
src="etc/mysql/conf.d/galera.cnf.j2"
dest="/etc/mysql/conf.d/galera.cnf"
owner="root"
group="root"
mode=0600
template:
src: "etc/mysql/conf.d/galera.cnf.j2"
dest: "/etc/mysql/conf.d/galera.cnf"
owner: "root"
group: "root"
mode: 0600
when: not mariadb_bootstrap.stat.exists
- name: touch bootstrap file to confirm we are fully up
......@@ -53,6 +53,5 @@
# This is needed for mysql-check in haproxy or other mysql monitor
# scripts to prevent haproxy checks exceeding `max_connect_errors`.
- name: create haproxy monitor user
command: >
mysql -e "INSERT INTO mysql.user (Host,User) values ('{{ item }}','{{ MARIADB_HAPROXY_USER }}'); FLUSH PRIVILEGES;"
command: "mysql -e \"INSERT INTO mysql.user (Host,User) values ('{{ item }}','{{ MARIADB_HAPROXY_USER }}'); FLUSH PRIVILEGES;\""
with_items: MARIADB_HAPROXY_HOSTS
......@@ -29,9 +29,9 @@
apt_key: url="{{ COMMON_UBUNTU_APT_KEYSERVER }}{{ MARIADB_APT_KEY_ID }}"
- name: add the mariadb repo to the sources list
apt_repository: >
repo='{{ MARIADB_REPO }}'
state=present
apt_repository:
repo: "{{ MARIADB_REPO }}"
state: present
- name: install mariadb solo packages
apt: name={{ item }} update_cache=yes
......@@ -44,10 +44,10 @@
when: MARIADB_CLUSTERED|bool
- name: remove bind-address
lineinfile: >
dest=/etc/mysql/my.cnf
regexp="^bind-address\s+=\s+127\.0\.0\.1$"
state=absent
# Drop the loopback-only bind-address line so MySQL listens on all interfaces.
# The regexp must be single-quoted: \s and \. are not valid escape sequences
# inside a YAML double-quoted scalar and cause a parse error.
lineinfile:
  dest: /etc/mysql/my.cnf
  regexp: '^bind-address\s+=\s+127\.0\.0\.1$'
  state: absent
when: MARIADB_LISTEN_ALL|bool or MARIADB_CLUSTERED|bool
- include: cluster.yml
......@@ -57,37 +57,37 @@
service: name=mysql state=started
- name: create all databases
mysql_db: >
db={{ item }}
state=present
encoding=utf8
mysql_db:
db: "{{ item }}"
state: present
encoding: utf8
with_items: MARIADB_DATABASES
when: MARIADB_CREATE_DBS|bool
- name: create all analytics dbs
mysql_db: >
db={{ item }}
state=present
encoding=utf8
# Create each analytics database. The Jinja expression must be quoted:
# an unquoted value starting with {{ is parsed by YAML as a flow mapping
# and fails to load before Ansible ever sees it.
mysql_db:
  db: "{{ item }}"
  state: present
  encoding: utf8
with_items: MARIADB_ANALYTICS_DATABASES
when: MARIADB_CREATE_DBS|bool and ANALYTICS_API_CONFIG is defined
- name: create all users/privs
mysql_user: >
name="{{ item.name }}"
password="{{ item.pass }}"
priv="{{ item.priv }}"
host="{{ item.host }}"
append_privs=yes
mysql_user:
name: "{{ item.name }}"
password: "{{ item.pass }}"
priv: "{{ item.priv }}"
host: "{{ item.host }}"
append_privs: yes
with_items: MARIADB_USERS
when: MARIADB_CREATE_DBS|bool
- name: create all analytics users/privs
mysql_user: >
name="{{ item.name }}"
password="{{ item.pass }}"
priv="{{ item.priv }}"
host="{{ item.host }}"
append_privs=yes
mysql_user:
name: "{{ item.name }}"
password: "{{ item.pass }}"
priv: "{{ item.priv }}"
host: "{{ item.host }}"
append_privs: yes
with_items: MARIADB_ANALYTICS_USERS
when: MARIADB_CREATE_DBS|bool and ANALYTICS_API_CONFIG is defined
......@@ -11,31 +11,33 @@
when: MMSAPIKEY is not defined
- name: download mongo mms agent
get_url: >
url="{{ base_url }}/{{ item.dir }}/{{ item.agent }}_{{ item.version }}_{{ pkg_arch }}.{{ pkg_format }}"
dest="/tmp/{{ item.agent }}-{{ item.version }}.{{ pkg_format }}"
get_url:
url: "{{ base_url }}/{{ item.dir }}/{{ item.agent }}_{{ item.version }}_{{ pkg_arch }}.{{ pkg_format }}"
dest: "/tmp/{{ item.agent }}-{{ item.version }}.{{ pkg_format }}"
register: download_mms_deb
with_items:
agents
- name: install mongo mms agent
apt: >
deb="/tmp/{{ item.agent }}-{{ item.version }}.deb"
apt:
deb: "/tmp/{{ item.agent }}-{{ item.version }}.deb"
when: download_mms_deb.changed
notify: restart mms
with_items:
agents
- name: add key to monitoring-agent.config
lineinfile: >
dest="{{ item.config }}"
regexp="^mmsApiKey="
line="mmsApiKey={{ MMSAPIKEY }}"
lineinfile:
dest: "{{ item.config }}"
regexp: "^mmsApiKey="
line: "mmsApiKey={{ MMSAPIKEY }}"
notify: restart mms
with_items:
agents
- name: start mms service
service: name="{{ item.agent }}" state=started
service:
name: "{{ item.agent }}"
state: started
with_items:
agents
......@@ -49,7 +49,7 @@
src: "{{ item.device }}"
fstype: "{{ item.fstype }}"
state: unmounted
when: >
when:
UNMOUNT_DISKS and
volumes | selectattr('device', 'equalto', item.device) | list | length != 0 and
(volumes | selectattr('device', 'equalto', item.device) | first).mount != item.mount
......@@ -59,7 +59,7 @@
# that is an errorable condition, since it can easily allow us to double mount a disk.
- name: Check that we don't want to unmount disks when UNMOUNT_DISKS is false
fail: msg="Found disks mounted in the wrong place, but can't unmount them. This role will need to be re-run with -e 'UNMOUNT_DISKS=True' if you believe that is safe."
when: >
when:
not UNMOUNT_DISKS and
volumes | selectattr('device', 'equalto', item.device) | list | length != 0 and
(volumes | selectattr('device', 'equalto', item.device) | first).mount != item.mount
......
......@@ -22,41 +22,37 @@
#
- name: Download newrelic NPI
get_url: >
dest="/tmp/{{ newrelic_npi_installer }}"
url="{{ NEWRELIC_NPI_URL }}"
get_url:
dest: "/tmp/{{ newrelic_npi_installer }}"
url: "{{ NEWRELIC_NPI_URL }}"
register: download_npi_installer
- name: create npi install directory {{ NEWRELIC_NPI_PREFIX }}
file: >
path="{{ NEWRELIC_NPI_PREFIX }}"
state=directory
mode=0755
owner="{{ NEWRELIC_USER }}"
file:
path: "{{ NEWRELIC_NPI_PREFIX }}"
state: directory
mode: 0755
owner: "{{ NEWRELIC_USER }}"
- name: install newrelic npi
shell: >
tar -xzf /tmp/{{ newrelic_npi_installer }} --strip-components=1 -C "{{NEWRELIC_NPI_PREFIX}}"
shell: "tar -xzf /tmp/{{ newrelic_npi_installer }} --strip-components=1 -C \"{{NEWRELIC_NPI_PREFIX}}\""
when: download_npi_installer.changed
become_user: "{{ NEWRELIC_USER }}"
- name: configure npi with the default user
shell: >
{{ NEWRELIC_NPI_PREFIX }}/bin/node {{ NEWRELIC_NPI_PREFIX }}/npi.js "set user {{ NEWRELIC_USER }}"
shell: "{{ NEWRELIC_NPI_PREFIX }}/bin/node {{ NEWRELIC_NPI_PREFIX }}/npi.js \"set user {{ NEWRELIC_USER }}\""
args:
chdir: "{{ NEWRELIC_NPI_PREFIX }}"
become_user: "{{ NEWRELIC_USER }}"
- name: configure npi with the license key
shell: >
./npi set license_key {{ NEWRELIC_LICENSE_KEY }}
shell: "./npi set license_key {{ NEWRELIC_LICENSE_KEY }}"
args:
chdir: "{{ NEWRELIC_NPI_PREFIX }}"
become_user: "{{ NEWRELIC_USER }}"
- name: configure npi with the distro
shell: >
./npi set distro {{ NEWRELIC_NPI_DISTRO }}
shell: "./npi set distro {{ NEWRELIC_NPI_DISTRO }}"
args:
chdir: "{{ NEWRELIC_NPI_PREFIX }}"
become_user: "{{ NEWRELIC_USER }}"
......
......@@ -10,17 +10,18 @@
- deploy
- name: download nltk data
get_url: >
dest={{ NLTK_DATA_DIR }}/{{ item.url|basename }}
url={{ item.url }}
get_url:
dest: "{{ NLTK_DATA_DIR }}/{{ item.url|basename }}"
url: "{{ item.url }}"
with_items: NLTK_DATA
register: nltk_download
tags:
- deploy
- name: unarchive nltk data
shell: >
unzip {{ NLTK_DATA_DIR }}/{{ item.url|basename }} chdir="{{ NLTK_DATA_DIR }}/{{ item.path|dirname }}"
shell: "unzip {{ NLTK_DATA_DIR }}/{{ item.url|basename }}"
args:
chdir: "{{ NLTK_DATA_DIR }}/{{ item.path|dirname }}"
with_items: NLTK_DATA
when: nltk_download|changed
tags:
......
......@@ -55,8 +55,8 @@
# Need to use command rather than pip so that we can maintain the context of our current working directory;
# some requirements are pathed relative to the edx-platform repo.
# Using the pip from inside the virtual environment implicitly installs everything into that virtual environment.
command: >
{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ openstack_requirements_file }}
command: "{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ openstack_requirements_file }}"
# chdir must be a YAML key under args; the leftover "chdir={{ ... }}"
# key=value form is invalid inside a mapping.
args:
  chdir: "{{ edxapp_code_dir }}"
sudo_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}"
......
......@@ -56,9 +56,9 @@
- migrate:db
- name: run collectstatic
shell: >
chdir={{ programs_code_dir }}
{{ programs_venv_dir }}/bin/python manage.py collectstatic --noinput
shell: "{{ programs_venv_dir }}/bin/python manage.py collectstatic --noinput"
args:
chdir: "{{ programs_code_dir }}"
become_user: "{{ programs_user }}"
environment: "{{ programs_environment }}"
when: not devstack
......@@ -68,9 +68,12 @@
# NOTE this isn't used or needed when s3 is used for PROGRAMS_MEDIA_STORAGE_BACKEND
- name: create programs media dir
file: >
path="{{ item }}" state=directory mode=0775
owner="{{ programs_user }}" group="{{ common_web_group }}"
file:
path: "{{ item }}"
state: directory
mode: 0775
owner: "{{ programs_user }}"
group: "{{ common_web_group }}"
with_items:
- "{{ PROGRAMS_MEDIA_ROOT }}"
tags:
......
......@@ -171,8 +171,7 @@
- maintenance
- name: Make queues mirrored
shell: >
/usr/sbin/rabbitmqctl -p {{ item }} set_policy HA "" '{"ha-mode":"all","ha-sync-mode":"automatic"}'
shell: "/usr/sbin/rabbitmqctl -p {{ item }} set_policy HA \"\" '{\"ha-mode\":\"all\",\"ha-sync-mode\":\"automatic\"}'"
when: RABBITMQ_CLUSTERED_HOSTS|length > 1
with_items: "{{ RABBITMQ_VHOSTS }}"
tags:
......
......@@ -38,18 +38,21 @@
when: rbenv_ruby_version is not defined
- name: create rbenv user {{ rbenv_user }}
user: >
name={{ rbenv_user }} home={{ rbenv_dir }}
shell=/bin/false createhome=no
user:
name: "{{ rbenv_user }}"
home: "{{ rbenv_dir }}"
shell: /bin/false
createhome: no
when: rbenv_user != common_web_user
tags:
- install
- install:base
- name: create rbenv dir if it does not exist
file: >
path="{{ rbenv_dir }}" owner="{{ rbenv_user }}"
state=directory
file:
path: "{{ rbenv_dir }}"
owner: "{{ rbenv_user }}"
state: directory
tags:
- install
- install:base
......@@ -62,18 +65,20 @@
- install:base
- name: update rbenv repo
git_2_0_1: >
repo=https://github.com/sstephenson/rbenv.git
dest={{ rbenv_dir }}/.rbenv version={{ rbenv_version }}
accept_hostkey=yes
git_2_0_1:
repo: https://github.com/sstephenson/rbenv.git
dest: "{{ rbenv_dir }}/.rbenv"
version: "{{ rbenv_version }}"
accept_hostkey: yes
become_user: "{{ rbenv_user }}"
tags:
- install
- install:base
- name: ensure ruby_env exists
template: >
src=ruby_env.j2 dest={{ rbenv_dir }}/ruby_env
template:
src: ruby_env.j2
dest: "{{ rbenv_dir }}/ruby_env"
become_user: "{{ rbenv_user }}"
tags:
- install
......@@ -107,9 +112,10 @@
- install:base
- name: clone ruby-build repo
git: >
repo=https://github.com/sstephenson/ruby-build.git dest={{ tempdir.stdout }}/ruby-build
accept_hostkey=yes
git:
repo: https://github.com/sstephenson/ruby-build.git
dest: "{{ tempdir.stdout }}/ruby-build"
accept_hostkey: yes
when: tempdir.stdout is defined and (rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers))
become_user: "{{ rbenv_user }}"
tags:
......
......@@ -15,12 +15,12 @@
file: path=/etc/shibboleth/metadata state=directory mode=2774 group=_shibd owner=_shibd
- name: Downloads metadata into metadata directory as backup
get_url: >
url={{ shib_metadata_backup_url }}
dest=/etc/shibboleth/metadata/idp-metadata.xml
mode=0640
group=_shibd
owner=_shibd
get_url:
url: "{{ shib_metadata_backup_url }}"
dest: "/etc/shibboleth/metadata/idp-metadata.xml"
mode: 0640
group: _shibd
owner: _shibd
when: shib_download_metadata
- name: writes out key and pem file
......
......@@ -9,39 +9,51 @@
- oinkmaster
- name: configure snort
template: >
src=etc/snort/snort.conf.j2 dest=/etc/snort/snort.conf
owner=root group=root mode=0644
template:
src: etc/snort/snort.conf.j2
dest: /etc/snort/snort.conf
owner: root
group: root
mode: 0644
- name: configure snort (debian)
template: >
src=etc/snort/snort.debian.conf.j2 dest=/etc/snort/snort.debian.conf
owner=root group=root mode=0644
template:
src: etc/snort/snort.debian.conf.j2
dest: /etc/snort/snort.debian.conf
owner: root
group: root
mode: 0644
- name: configure oinkmaster
template: >
src=etc/oinkmaster.conf.j2 dest=/etc/oinkmaster.conf
owner=root group=root mode=0644
template:
src: etc/oinkmaster.conf.j2
dest: /etc/oinkmaster.conf
owner: root
group: root
mode: 0644
- name: update snort
shell: oinkmaster -C /etc/oinkmaster.conf -o /etc/snort/rules/
become: yes
- name: snort service
service: >
name="snort"
state="started"
service:
name: "snort"
state: "started"
- name: open read permissions on snort logs
file: >
name="/var/log/snort"
state="directory"
mode="755"
file:
name: "/var/log/snort"
state: "directory"
mode: "755"
- name: install oinkmaster cronjob
template: >
src=etc/cron.daily/oinkmaster.j2 dest=/etc/cron.daily/oinkmaster
owner=root group=root mode=0755
template:
src: etc/cron.daily/oinkmaster.j2
dest: /etc/cron.daily/oinkmaster
owner: root
group: root
mode: 0755
---
tanguru_debian_pkgs:
- openjdk-7-jre
- unzip
- libmysql-java
- python-mysqldb
- tomcat7
- libspring-instrument-java
- xvfb
- mailutils
- postfix
tanaguru_download_link: "http://download.tanaguru.org/Tanaguru/tanaguru-3.1.0.i386.tar.gz"
# Go this link to find your desired ESR Firefox
# http://download-origin.cdn.mozilla.net/pub/firefox/releases/24.0esr/linux-x86_64/
# Default is en-US in our example
fixfox_esr_link: "http://download-origin.cdn.mozilla.net/pub/firefox/releases/24.0esr/linux-x86_64/en-US/firefox-24.0esr.tar.bz2"
TANAGURU_DATABASE_NAME: 'tgdatabase'
TANAGURU_DATABASE_USER: 'tguser'
TANAGURU_DATABASE_PASSWORD: 'tgPassword'
TANAGURU_URL: 'http://localhost:8080/tanaguru/'
TANAGURU_ADMIN_EMAIL: 'admin@example.com'
TANAGURU_ADMIN_PASSWORD: 'tanaguru15'
tanaguru_parameters:
db_name: "{{ TANAGURU_DATABASE_NAME }}"
db_user: "{{ TANAGURU_DATABASE_USER }}"
db_password: "{{ TANAGURU_DATABASE_PASSWORD }}"
url: "{{ TANAGURU_URL }}"
admin_email: "{{ TANAGURU_ADMIN_EMAIL }}"
admin_passwd: "{{ TANAGURU_ADMIN_PASSWORD }}"
\ No newline at end of file
---
- name: Add the Partner repository
apt_repository:
repo: "{{ item }}"
state: present
with_items:
- "deb http://archive.canonical.com/ubuntu {{ ansible_distribution_release }} partner"
- "deb-src http://archive.canonical.com/ubuntu {{ ansible_distribution_release }} partner"
tags:
- install
- install:base
- name: Set Postfix options
# Preseed postfix debconf answers (mailname, mailer type) before install.
# Fixes the package-name typo "postifx" and drops the stray trailing space
# that was appended inside the quoted value.
debconf:
  name: postfix
  question: "{{ item.question }}"
  value: "{{ item.value }}"
  vtype: "string"
with_items:
- { question: "postfix/mailname", value: " " }
- { question: "postfix/main_mailer_type", value: "Satellite system" }
tags:
- install
- install:configuration
- name: Install the TanaGuru Prerequisites
apt:
name: "{{ item }}"
update_cache: yes
state: installed
with_items: tanguru_debian_pkgs
tags:
- install
- install:base
- name: Modify the my.cnf file for max_allowed_packet option
lineinfile:
dest: /etc/mysql/my.cnf
regexp: '^max_allowed_packet'
line: 'max_allowed_packet = 64M'
state: present
register: my_cnf
tags:
- install
- install:configuration
- name: Restart MySQL
service:
name: mysql
state: restarted
when: my_cnf.changed
- name: Create a soft link for tomcat jar and mysql connector
file:
dest: "{{ item.dest }}"
src: "{{ item.src }}"
state: link
with_items:
- { src: '/usr/share/java/spring3-instrument-tomcat.jar', dest: '/usr/share/tomcat7/lib/spring3-instrument-tomcat.jar' }
- { src: '/usr/share/java/mysql-connector-java.jar', dest: '/usr/share/tomcat7/lib/mysql-connector-java.jar'}
tags:
- install
- install:configuration
- name: Copy the xvfb template to /etc/init.d
template:
dest: /etc/init.d/xvfb
src: xvfb.j2
owner: root
group: root
mode: 0755
register: xvfb
tags:
- install
- install:configuration
- name: Restart xvfb
service:
name: xvfb
pattern: /etc/init.d/xvfb
state: restarted
when: xvfb.changed
- name: Configure xvfb to run at startup
command: update-rc.d xvfb defaults
ignore_errors: yes
when: xvfb.changed
- name: Download the latest ESR Firfox
get_url:
url: "{{ fixfox_esr_link }}"
dest: "/tmp/{{ fixfox_esr_link | basename }}"
tags:
- install
- install:base
- name: Unzip the downloaded Firfox zipped file
unarchive:
src: "/tmp/{{ fixfox_esr_link | basename }}"
dest: /opt
copy: no
tags:
- install
- install:base
- name: Download the latest TanaGuru tarball
get_url:
url: "{{ tanaguru_download_link }}"
dest: "/tmp/{{ tanaguru_download_link | basename }}"
tags:
- install
- install:base
- name: Unzip the downloaded TanaGuru tarball
unarchive:
src: "/tmp/{{ tanaguru_download_link | basename }}"
dest: "/tmp/"
copy: no
tags:
- install
- install:base
- name: Create MySQL database for TanaGuru
mysql_db:
name: "{{ tanaguru_parameters.db_name }}"
state: present
encoding: utf8
collation: utf8_general_ci
tags:
- install
- install:base
- name: Create MySQL user for TanaGuru
mysql_user:
name: "{{ tanaguru_parameters.db_user }}"
password: "{{ tanaguru_parameters.db_password }}"
host: localhost
priv: "{{ tanaguru_parameters.db_name }}.*:ALL"
state: present
tags:
- install
- install:base
- name: Check that tanaguru app is running
shell: >
/bin/ps aux | grep -i tanaguru
register: tanaguru_app
changed_when: no
tags:
- install
- name: Install the TanaGuru
shell: >
/bin/echo "yes" | ./install.sh --mysql-tg-user "{{ tanaguru_parameters.db_user }}" \
--mysql-tg-passwd "{{ tanaguru_parameters.db_password }}" \
--mysql-tg-db "{{ tanaguru_parameters.db_name }}" \
--tanaguru-url "{{ tanaguru_parameters.url }}" \
--tomcat-webapps /var/lib/tomcat7/webapps \
--tomcat-user tomcat7 \
--tg-admin-email "{{ tanaguru_parameters.admin_email }}" \
--tg-admin-passwd "{{ tanaguru_parameters.admin_passwd }}" \
--firefox-esr-path /opt/firefox/firefox \
--display-port ":99.1"
args:
chdir: "/tmp/{{ tanaguru_download_link | basename | regex_replace('.tar.gz$', '') }}"
when: "tanaguru_app.stdout.find('/etc/tanaguru/') == -1"
register: tanaguru_install
tags:
- install
- install:base
- name: Restart tomcat7
service:
name: tomcat7
state: restarted
when: tanaguru_install.changed
\ No newline at end of file
#!/bin/sh
set -e
RUN_AS_USER=tomcat7
OPTS=":99 -screen 1 1024x768x24 -nolisten tcp"
XVFB_DIR=/usr/bin
PIDFILE=/var/run/xvfb
case $1 in
start)
start-stop-daemon --chuid $RUN_AS_USER -b --start --exec $XVFB_DIR/Xvfb --make-pidfile --pidfile $PIDFILE -- $OPTS &
;;
stop)
start-stop-daemon --stop --user $RUN_AS_USER --pidfile $PIDFILE
rm -f $PIDFILE
;;
restart)
if start-stop-daemon --test --stop --user $RUN_AS_USER --pidfile $PIDFILE >/dev/null; then
$0 stop
fi;
$0 start
;;
*)
echo "Usage: $0 (start|restart|stop)"
exit 1
;;
esac
exit 0
\ No newline at end of file
......@@ -21,20 +21,20 @@
#
- name: Create clone of edx-platform
git_2_0_1: >
repo=https://github.com/edx/edx-platform.git
dest={{ test_build_server_repo_path }}/edx-platform-clone
version={{ test_edx_platform_version }}
git_2_0_1:
repo: "https://github.com/edx/edx-platform.git"
dest: "{{ test_build_server_repo_path }}/edx-platform-clone"
version: "{{ test_edx_platform_version }}"
become_user: "{{ test_build_server_user }}"
- name: get xargs limit
shell: "xargs --show-limits"
- name: Copy test-development-environment.sh to somewhere the jenkins user can access it
copy: >
src=test-development-environment.sh
dest="{{ test_build_server_repo_path }}"
mode=0755
copy:
src: test-development-environment.sh
dest: "{{ test_build_server_repo_path }}"
mode: 0755
- name: Validate build environment
shell: "bash test-development-environment.sh {{ item }}"
......
---
- name: import the test courses from github
shell: >
{{ demo_edxapp_venv_bin }}/python /edx/bin/manage.edxapp lms git_add_course --settings=aws "{{ item.github_url }}"
shell: "{{ demo_edxapp_venv_bin }}/python /edx/bin/manage.edxapp lms git_add_course --settings=aws \"{{ item.github_url }}\""
become_user: "{{ common_web_user }}"
when: item.install == True
with_items: TESTCOURSES_EXPORTS
- name: enroll test users in the testcourses
shell: >
{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms enroll_user_in_course -e {{ item[0].email }} -c {{ item[1].course_id }}
chdir={{ demo_edxapp_code_dir }}
shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms enroll_user_in_course -e {{ item[0].email }} -c {{ item[1].course_id }}"
args:
chdir: "{{ demo_edxapp_code_dir }}"
become_user: "{{ common_web_user }}"
when: item[1].install == True
with_nested:
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment