Commit 9e11e5b8 by Feanil Patel Committed by GitHub

Merge pull request #3108 from edx/feanil/ansible_2_x

Update to ansible 2.x
parents b338caa4 74bb95fa
# Travis CI configuration file for running tests # Travis CI configuration file for running tests
language: python language: python
branches:
only:
- master
python: python:
- "2.7" - "2.7"
......
...@@ -26,7 +26,9 @@ test: docker.test ...@@ -26,7 +26,9 @@ test: docker.test
pkg: docker.pkg pkg: docker.pkg
clean: clean: docker.clean
docker.clean:
rm -rf .build rm -rf .build
docker.test.shard: $(foreach image,$(shell echo $(images) | python util/balancecontainers.py $(SHARDS) | awk 'NR%$(SHARDS)==$(SHARD)'),$(docker_test)$(image)) docker.test.shard: $(foreach image,$(shell echo $(images) | python util/balancecontainers.py $(SHARDS) | awk 'NR%$(SHARDS)==$(SHARD)'),$(docker_test)$(image))
......
...@@ -42,8 +42,8 @@ RUN apt-get update && apt-get install -y \ ...@@ -42,8 +42,8 @@ RUN apt-get update && apt-get install -y \
php5-common \ php5-common \
php5-cli php5-cli
# Install libffi-dev - a dependency for Ansible 2.x # Install dependencies needed for Ansible 2.x
RUN apt-get update && apt-get install -y libffi-dev RUN apt-get update && apt-get install -y libffi-dev libssl-dev
# Install drush (drupal shell) for access to Drupal commands/Acquia # Install drush (drupal shell) for access to Drupal commands/Acquia
RUN php -r "readfile('http://files.drush.org/drush.phar');" > drush && \ RUN php -r "readfile('http://files.drush.org/drush.phar');" > drush && \
...@@ -59,7 +59,19 @@ RUN /bin/bash /tmp/docker/docker_install.sh ...@@ -59,7 +59,19 @@ RUN /bin/bash /tmp/docker/docker_install.sh
RUN usermod -aG docker go RUN usermod -aG docker go
# Assign the go user root privlidges # Assign the go user root privlidges
RUN printf "\ngo ALL=(ALL:ALL) NOPASSWD: /usr/bin/pip\n" >> /etc/sudoers RUN printf "\ngo ALL=(ALL:ALL) NOPASSWD: /usr/bin/pip, /usr/local/bin/pip\n" >> /etc/sudoers
# Upgrade pip and setup tools. Needed for Ansible 2.x
# Must upgrade to latest before pinning to work around bug
# https://github.com/pypa/pip/issues/3862
RUN \
pip install --upgrade pip && \
#pip may have moved from /usr/bin/ to /usr/local/bin/. This clears bash's path cache.
hash -r && \
pip install --upgrade pip==8.1.2 && \
# upgrade setuptools early to avoid no distribution errors
pip install --upgrade setuptools==24.0.3
# Install AWS command-line interface - for AWS operations in a go-agent task. # Install AWS command-line interface - for AWS operations in a go-agent task.
RUN pip install awscli RUN pip install awscli
......
...@@ -9,9 +9,10 @@ try: ...@@ -9,9 +9,10 @@ try:
import hipchat import hipchat
except ImportError: except ImportError:
hipchat = None hipchat = None
from ansible.plugins.callback import CallbackBase
class CallbackModule(object): class CallbackModule(CallbackBase):
"""Send status updates to a HipChat channel during playbook execution. """Send status updates to a HipChat channel during playbook execution.
This plugin makes use of the following environment variables: This plugin makes use of the following environment variables:
......
...@@ -28,9 +28,10 @@ except ImportError: ...@@ -28,9 +28,10 @@ except ImportError:
else: else:
import boto.sqs import boto.sqs
from boto.exception import NoAuthHandlerFound from boto.exception import NoAuthHandlerFound
from ansible.plugins.callback import CallbackBase
class CallbackModule(object): class CallbackModule(CallbackBase):
""" """
This Ansible callback plugin sends task events This Ansible callback plugin sends task events
to SQS. to SQS.
......
...@@ -238,7 +238,7 @@ class CallbackModule(CallbackBase): ...@@ -238,7 +238,7 @@ class CallbackModule(CallbackBase):
Record the start of a play. Record the start of a play.
""" """
self.playbook_name, _ = splitext( self.playbook_name, _ = splitext(
basename(self.play.playbook.filename) basename(self.play.get_name())
) )
self.playbook_timestamp = Timestamp() self.playbook_timestamp = Timestamp()
......
...@@ -61,7 +61,7 @@ ...@@ -61,7 +61,7 @@
shell: '{{ COMMAND_PREFIX }} {{ SUB_APPLICATION_NAME }} show_unapplied_migrations --database "{{ item }}" --output_file "{{ temp_output_dir.stdout }}/{{ item }}_{{ migration_plan }}" --settings "{{ EDX_PLATFORM_SETTINGS }}"' shell: '{{ COMMAND_PREFIX }} {{ SUB_APPLICATION_NAME }} show_unapplied_migrations --database "{{ item }}" --output_file "{{ temp_output_dir.stdout }}/{{ item }}_{{ migration_plan }}" --settings "{{ EDX_PLATFORM_SETTINGS }}"'
become_user: "{{ APPLICATION_USER }}" become_user: "{{ APPLICATION_USER }}"
when: APPLICATION_NAME == "edxapp" and item != "read_replica" when: APPLICATION_NAME == "edxapp" and item != "read_replica"
with_items: edxapp_databases.keys() with_items: "{{ edxapp_databases.keys() }}"
- name: migrate to apply any unapplied migrations - name: migrate to apply any unapplied migrations
shell: '{{ COMMAND_PREFIX }} run_migrations --output_file "{{ temp_output_dir.stdout }}/{{ migration_result }}"' shell: '{{ COMMAND_PREFIX }} run_migrations --output_file "{{ temp_output_dir.stdout }}/{{ migration_result }}"'
...@@ -72,7 +72,7 @@ ...@@ -72,7 +72,7 @@
shell: '{{ COMMAND_PREFIX }} {{ SUB_APPLICATION_NAME }} run_migrations --database "{{ item }}" --settings "{{ EDX_PLATFORM_SETTINGS }}" --output_file "{{ temp_output_dir.stdout }}/{{ migration_result }}"' shell: '{{ COMMAND_PREFIX }} {{ SUB_APPLICATION_NAME }} run_migrations --database "{{ item }}" --settings "{{ EDX_PLATFORM_SETTINGS }}" --output_file "{{ temp_output_dir.stdout }}/{{ migration_result }}"'
become_user: "{{ APPLICATION_USER }}" become_user: "{{ APPLICATION_USER }}"
when: APPLICATION_NAME == "edxapp" and item != "read_replica" when: APPLICATION_NAME == "edxapp" and item != "read_replica"
with_items: edxapp_databases.keys() with_items: "{{ edxapp_databases.keys() }}"
- name: List all migration files - name: List all migration files
action: "command ls -1 {{ temp_output_dir.stdout }}" action: "command ls -1 {{ temp_output_dir.stdout }}"
......
...@@ -13,25 +13,27 @@ ...@@ -13,25 +13,27 @@
keyfile: "/home/{{ owner }}/.ssh/authorized_keys" keyfile: "/home/{{ owner }}/.ssh/authorized_keys"
serial: "{{ serial_count }}" serial: "{{ serial_count }}"
tasks: tasks:
- fail: msg="You must pass in a public_key" - fail:
msg: "You must pass in a public_key"
when: public_key is not defined when: public_key is not defined
- fail: msg="public does not exist in secrets" - fail:
msg: "public does not exist in secrets"
when: ubuntu_public_keys[public_key] is not defined when: ubuntu_public_keys[public_key] is not defined
- command: mktemp - command: mktemp
register: mktemp register: mktemp
- name: Validate the public key before we add it to authorized_keys - name: Validate the public key before we add it to authorized_keys
copy: > copy:
content="{{ ubuntu_public_keys[public_key] }}" content: "{{ ubuntu_public_keys[public_key] }}"
dest={{ mktemp.stdout }} dest: "{{ mktemp.stdout }}"
# This tests the public key and will not continue if it does not look valid # This tests the public key and will not continue if it does not look valid
- command: ssh-keygen -l -f {{ mktemp.stdout }} - command: ssh-keygen -l -f {{ mktemp.stdout }}
- file: > - file:
path={{ mktemp.stdout }} path: "{{ mktemp.stdout }}"
state=absent state: absent
- lineinfile: > - lineinfile:
dest={{ keyfile }} dest: "{{ keyfile }}"
line="{{ ubuntu_public_keys[public_key] }}" line: "{{ ubuntu_public_keys[public_key] }}"
- file: > - file:
path={{ keyfile }} path: "{{ keyfile }}"
owner={{ owner }} owner: "{{ owner }}"
mode=0600 mode: 0600
...@@ -14,7 +14,8 @@ ...@@ -14,7 +14,8 @@
serial: "{{ serial_count }}" serial: "{{ serial_count }}"
pre_tasks: pre_tasks:
- action: ec2_facts - action: ec2_facts
- debug: var="{{ ansible_ec2_instance_id }}" - debug:
var: "{{ ansible_ec2_instance_id }}"
when: elb_pre_post when: elb_pre_post
- name: Instance De-register - name: Instance De-register
local_action: ec2_elb local_action: ec2_elb
...@@ -26,8 +27,9 @@ ...@@ -26,8 +27,9 @@
become: False become: False
when: elb_pre_post when: elb_pre_post
tasks: tasks:
- debug: msg="{{ ansible_ec2_local_ipv4 }}" - debug:
with_items: list.results var: "{{ ansible_ec2_local_ipv4 }}"
with_items: "{{ list.results }}"
- command: rabbitmqctl stop_app - command: rabbitmqctl stop_app
- command: rabbitmqctl join_cluster rabbit@ip-{{ hostvars.keys()[0]|replace('.', '-') }} - command: rabbitmqctl join_cluster rabbit@ip-{{ hostvars.keys()[0]|replace('.', '-') }}
when: hostvars.keys()[0] != ansible_ec2_local_ipv4 when: hostvars.keys()[0] != ansible_ec2_local_ipv4
...@@ -39,10 +41,9 @@ ...@@ -39,10 +41,9 @@
local_action: ec2_elb local_action: ec2_elb
args: args:
instance_id: "{{ ansible_ec2_instance_id }}" instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}" ec2_elbs: "{{ ec2_elbs }}"
region: us-east-1 region: us-east-1
state: present state: present
wait_timeout: 60 wait_timeout: 60
with_items: ec2_elbs
become: False become: False
when: elb_pre_post when: elb_pre_post
...@@ -47,11 +47,10 @@ ...@@ -47,11 +47,10 @@
local_action: ec2_elb local_action: ec2_elb
args: args:
instance_id: "{{ ansible_ec2_instance_id }}" instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}" ec2_elbs: "{{ ec2_elbs }}"
region: us-east-1 region: us-east-1
state: present state: present
wait_timeout: 60 wait_timeout: 60
with_items: ec2_elbs
become: False become: False
when: elb_pre_post when: elb_pre_post
# #
......
...@@ -13,9 +13,9 @@ ...@@ -13,9 +13,9 @@
# is called it will use the new MYSQL connection # is called it will use the new MYSQL connection
# info. # info.
- name: Update RDS to point to the sandbox clone - name: Update RDS to point to the sandbox clone
lineinfile: > lineinfile:
dest=/edx/app/edx_ansible/server-vars.yml dest: /edx/app/edx_ansible/server-vars.yml
line="{{ item }}" line: "{{ item }}"
with_items: with_items:
- "EDXAPP_MYSQL_HOST: {{ EDXAPP_MYSQL_HOST }}" - "EDXAPP_MYSQL_HOST: {{ EDXAPP_MYSQL_HOST }}"
- "EDXAPP_MYSQL_DB_NAME: {{ EDXAPP_MYSQL_DB_NAME }}" - "EDXAPP_MYSQL_DB_NAME: {{ EDXAPP_MYSQL_DB_NAME }}"
...@@ -24,9 +24,9 @@ ...@@ -24,9 +24,9 @@
tags: update_edxapp_mysql_host tags: update_edxapp_mysql_host
- name: Update mongo to point to the sandbox mongo clone - name: Update mongo to point to the sandbox mongo clone
lineinfile: > lineinfile:
dest=/edx/app/edx_ansible/server-vars.yml dest: /edx/app/edx_ansible/server-vars.yml
line="{{ item }}" line: "{{ item }}"
with_items: with_items:
- "EDXAPP_MONGO_HOSTS: {{ EDXAPP_MONGO_HOSTS }}" - "EDXAPP_MONGO_HOSTS: {{ EDXAPP_MONGO_HOSTS }}"
- "EDXAPP_MONGO_DB_NAME: {{ EDXAPP_MONGO_DB_NAME }}" - "EDXAPP_MONGO_DB_NAME: {{ EDXAPP_MONGO_DB_NAME }}"
...@@ -35,6 +35,5 @@ ...@@ -35,6 +35,5 @@
tags: update_edxapp_mysql_host tags: update_edxapp_mysql_host
- name: call update on edx-platform - name: call update on edx-platform
shell: > shell: "/edx/bin/update edx-platform {{ edxapp_version }}"
/edx/bin/update edx-platform {{ edxapp_version }}
tags: update_edxapp_mysql_host tags: update_edxapp_mysql_host
...@@ -53,27 +53,27 @@ ...@@ -53,27 +53,27 @@
- MySQL-python - MySQL-python
- name: create mysql databases - name: create mysql databases
mysql_db: > mysql_db:
db={{ item.name}} db: "{{ item.name}}"
state={{ item.state }} state: "{{ item.state }}"
encoding={{ item.encoding }} encoding: "{{ item.encoding }}"
login_host={{ item.login_host }} login_host: "{{ item.login_host }}"
login_user={{ item.login_user }} login_user: "{{ item.login_user }}"
login_password={{ item.login_password }} login_password: "{{ item.login_password }}"
with_items: databases with_items: "{{ databases }}"
tags: tags:
- dbs - dbs
- name: create mysql users and assign privileges - name: create mysql users and assign privileges
mysql_user: > mysql_user:
name="{{ item.name }}" name: "{{ item.name }}"
priv="{{ '/'.join(item.privileges) }}" priv: "{{ '/'.join(item.privileges) }}"
password="{{ item.password }}" password: "{{ item.password }}"
host={{ item.host }} host: "{{ item.host }}"
login_host={{ item.login_host }} login_host: "{{ item.login_host }}"
login_user={{ item.login_user }} login_user: "{{ item.login_user }}"
login_password={{ item.login_password }} login_password: "{{ item.login_password }}"
append_privs=yes append_privs: yes
with_items: database_users with_items: "{{ database_users }}"
tags: tags:
- users - users
...@@ -41,4 +41,4 @@ ...@@ -41,4 +41,4 @@
roles: "{{ item.roles }}" roles: "{{ item.roles }}"
state: present state: present
replica_set: "{{ repl_set }}" replica_set: "{{ repl_set }}"
with_items: MONGO_USERS with_items: "{{ MONGO_USERS }}"
...@@ -21,7 +21,7 @@ ...@@ -21,7 +21,7 @@
dns_zone: sandbox.edx.org dns_zone: sandbox.edx.org
name_tag: sandbox-temp name_tag: sandbox-temp
elb: false elb: false
vpc_subnet_id: subnet-cd867aba ec2_vpc_subnet_id: subnet-cd867aba
roles: roles:
- role: launch_ec2 - role: launch_ec2
keypair: "{{ keypair }}" keypair: "{{ keypair }}"
...@@ -34,7 +34,7 @@ ...@@ -34,7 +34,7 @@
dns_name: "{{ dns_name }}" dns_name: "{{ dns_name }}"
dns_zone: "{{ dns_zone }}" dns_zone: "{{ dns_zone }}"
zone: "{{ zone }}" zone: "{{ zone }}"
vpc_subnet_id: "{{ vpc_subnet_id }}" vpc_subnet_id: "{{ ec2_vpc_subnet_id }}"
assign_public_ip: yes assign_public_ip: yes
terminate_instance: true terminate_instance: true
instance_profile_name: sandbox instance_profile_name: sandbox
...@@ -47,10 +47,10 @@ ...@@ -47,10 +47,10 @@
elb: false elb: false
pre_tasks: pre_tasks:
- name: Wait for cloud-init to finish - name: Wait for cloud-init to finish
wait_for: > wait_for:
path=/var/log/cloud-init.log path: /var/log/cloud-init.log
timeout=15 timeout: 15
search_regex="final-message" search_regex: "final-message"
vars_files: vars_files:
- roles/edxapp/defaults/main.yml - roles/edxapp/defaults/main.yml
- roles/xqueue/defaults/main.yml - roles/xqueue/defaults/main.yml
......
...@@ -8,9 +8,9 @@ ...@@ -8,9 +8,9 @@
- edxapp - edxapp
tasks: tasks:
- name: migrate lms - name: migrate lms
shell: > shell: "python manage.py lms migrate --database {{ item }} --noinput {{ db_dry_run }} --settings=aws"
chdir={{ edxapp_code_dir }} args:
python manage.py lms migrate --database {{ item }} --noinput {{ db_dry_run }} --settings=aws chdir: "{{ edxapp_code_dir }}"
environment: environment:
DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}" DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}"
DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}" DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
...@@ -21,9 +21,9 @@ ...@@ -21,9 +21,9 @@
tags: tags:
- always - always
- name: migrate cms - name: migrate cms
shell: > shell: "python manage.py cms migrate --database {{ item }} --noinput {{ db_dry_run }} --settings=aws"
chdir={{ edxapp_code_dir }} args:
python manage.py cms migrate --database {{ item }} --noinput {{ db_dry_run }} --settings=aws chdir: "{{ edxapp_code_dir }}"
environment: environment:
DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}" DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}"
DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}" DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
......
...@@ -12,7 +12,8 @@ ...@@ -12,7 +12,8 @@
pre_tasks: pre_tasks:
- action: ec2_facts - action: ec2_facts
when: elb_pre_post when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}" - debug:
var: ansible_ec2_instance_id
when: elb_pre_post when: elb_pre_post
- name: Instance De-register - name: Instance De-register
local_action: ec2_elb local_action: ec2_elb
...@@ -29,16 +30,16 @@ ...@@ -29,16 +30,16 @@
- oraclejdk - oraclejdk
- elasticsearch - elasticsearch
post_tasks: post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}" - debug:
var: ansible_ec2_instance_id
when: elb_pre_post when: elb_pre_post
- name: Register instance in the elb - name: Register instance in the elb
local_action: ec2_elb local_action: ec2_elb
args: args:
instance_id: "{{ ansible_ec2_instance_id }}" instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}" ec2_elbs: "{{ ec2_elbs }}"
region: us-east-1 region: us-east-1
state: present state: present
wait_timeout: 60 wait_timeout: 60
with_items: ec2_elbs
become: False become: False
when: elb_pre_post when: elb_pre_post
...@@ -14,11 +14,11 @@ ...@@ -14,11 +14,11 @@
- name: stop certs service - name: stop certs service
service: name="certificates" state="stopped" service: name="certificates" state="stopped"
- name: checkout code - name: checkout code
git_2_0_1: > git_2_0_1:
repo="{{ repo_url }}" repo: "{{ repo_url }}"
dest="{{ repo_path }}" dest: "{{ repo_path }}"
version="{{ certificates_version }}" version: "{{ certificates_version }}"
accept_hostkey=yes accept_hostkey: yes
environment: environment:
GIT_SSH: "{{ git_ssh_script }}" GIT_SSH: "{{ git_ssh_script }}"
- name: install requirements - name: install requirements
...@@ -29,11 +29,11 @@ ...@@ -29,11 +29,11 @@
# Need to do this because the www-data user is not properly setup # Need to do this because the www-data user is not properly setup
# and can't run ssh. # and can't run ssh.
- name: change owner to www-data - name: change owner to www-data
file: > file:
path="{{ repo_path }}" path: "{{ repo_path }}"
owner="www-data" owner: "www-data"
group="www-data" group: "www-data"
recurse=yes recurse: yes
state="directory" state: "directory"
- name: start certs service - name: start certs service
service: name="certificates" state="started" service: name="certificates" state="started"
...@@ -86,7 +86,7 @@ ...@@ -86,7 +86,7 @@
manage_group {{ item.name | quote }} manage_group {{ item.name | quote }}
{% if item.get('permissions', []) | length %}--permissions {{ item.permissions | default([]) | map('quote') | join(' ') }}{% endif %} {% if item.get('permissions', []) | length %}--permissions {{ item.permissions | default([]) | map('quote') | join(' ') }}{% endif %}
{% if item.get('remove') %}--remove{% endif %} {% if item.get('remove') %}--remove{% endif %}
with_items: django_groups with_items: "{{ django_groups }}"
- name: Manage users - name: Manage users
shell: > shell: >
...@@ -98,6 +98,6 @@ ...@@ -98,6 +98,6 @@
{% if item.get('staff') %}--staff{% endif %} {% if item.get('staff') %}--staff{% endif %}
{% if item.get('unusable_password') %}--unusable-password{% endif %} {% if item.get('unusable_password') %}--unusable-password{% endif %}
{% if item.get('initial_password_hash') %}--initial-password-hash {{ item.initial_password_hash | quote }}{% endif %} {% if item.get('initial_password_hash') %}--initial-password-hash {{ item.initial_password_hash | quote }}{% endif %}
with_items: django_users with_items: "{{ django_users }}"
register: manage_users_result register: manage_users_result
failed_when: (manage_users_result | failed) and not (ignore_user_creation_errors | bool) failed_when: (manage_users_result | failed) and not (ignore_user_creation_errors | bool)
...@@ -72,7 +72,7 @@ ...@@ -72,7 +72,7 @@
install_recommends: yes install_recommends: yes
force: yes force: yes
update_cache: yes update_cache: yes
with_items: mongodb_debian_pkgs with_items: "{{ mongodb_debian_pkgs }}"
- name: wait for mongo server to start - name: wait for mongo server to start
wait_for: wait_for:
port: 27017 port: 27017
......
...@@ -48,7 +48,7 @@ ...@@ -48,7 +48,7 @@
install_recommends: yes install_recommends: yes
force: yes force: yes
update_cache: yes update_cache: yes
with_items: mongodb_debian_pkgs with_items: "{{ mongodb_debian_pkgs }}"
- name: wait for mongo server to start - name: wait for mongo server to start
wait_for: wait_for:
port: 27017 port: 27017
......
...@@ -9,5 +9,6 @@ ...@@ -9,5 +9,6 @@
- "roles/ecommerce/defaults/main.yml" - "roles/ecommerce/defaults/main.yml"
- "roles/programs/defaults/main.yml" - "roles/programs/defaults/main.yml"
- "roles/credentials/defaults/main.yml" - "roles/credentials/defaults/main.yml"
- "roles/discovery/defaults/main.yml"
roles: roles:
- oauth_client_setup - oauth_client_setup
...@@ -46,9 +46,7 @@ ...@@ -46,9 +46,7 @@
dest: "{{ xblock_config_temp_directory.stdout }}/{{ file | basename }}" dest: "{{ xblock_config_temp_directory.stdout }}/{{ file | basename }}"
register: xblock_config_file register: xblock_config_file
- name: Manage xblock configurations - name: Manage xblock configurations
shell: > shell: "{{ python_path }} {{ manage_path }} lms --settings=aws populate_model -f {{ xblock_config_file.dest | quote }} -u {{ user }}"
{{ python_path }} {{ manage_path }} lms --settings=aws
populate_model -f {{ xblock_config_file.dest | quote }} -u {{ user }}
register: command_result register: command_result
changed_when: "'Import complete, 0 new entries created' not in command_result.stdout" changed_when: "'Import complete, 0 new entries created' not in command_result.stdout"
- debug: msg="{{ command_result.stdout }}" - debug: msg="{{ command_result.stdout }}"
......
...@@ -17,7 +17,8 @@ ...@@ -17,7 +17,8 @@
pre_tasks: pre_tasks:
- action: ec2_facts - action: ec2_facts
when: elb_pre_post when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}" - debug:
var: ansible_ec2_instance_id
when: elb_pre_post when: elb_pre_post
- name: Instance De-register - name: Instance De-register
local_action: ec2_elb local_action: ec2_elb
...@@ -32,16 +33,16 @@ ...@@ -32,16 +33,16 @@
- aws - aws
- rabbitmq - rabbitmq
post_tasks: post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}" - debug:
var: ansible_ec2_instance_id
when: elb_pre_post when: elb_pre_post
- name: Register instance in the elb - name: Register instance in the elb
local_action: ec2_elb local_action: ec2_elb
args: args:
instance_id: "{{ ansible_ec2_instance_id }}" instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}" ec2_elbs: "{{ ec2_elbs }}"
region: us-east-1 region: us-east-1
state: present state: present
wait_timeout: 60 wait_timeout: 60
with_items: ec2_elbs
become: False become: False
when: elb_pre_post when: elb_pre_post
...@@ -17,22 +17,21 @@ ...@@ -17,22 +17,21 @@
register: mktemp register: mktemp
# This command will fail if this returns zero lines which will prevent # This command will fail if this returns zero lines which will prevent
# the last key from being removed # the last key from being removed
- shell: > - shell: "grep -Fv '{{ ubuntu_public_keys[public_key] }}' {{ keyfile }} > {{ mktemp.stdout }}"
grep -Fv '{{ ubuntu_public_keys[public_key] }}' {{ keyfile }} > {{ mktemp.stdout }} - shell: "while read line; do ssh-keygen -lf /dev/stdin <<<$line; done <{{ mktemp.stdout }}"
- shell: > args:
while read line; do ssh-keygen -lf /dev/stdin <<<$line; done <{{ mktemp.stdout }} executable: /bin/bash
executable=/bin/bash
register: keycheck register: keycheck
- fail: msg="public key check failed!" - fail: msg="public key check failed!"
when: keycheck.stderr != "" when: keycheck.stderr != ""
- command: cp {{ mktemp.stdout }} {{ keyfile }} - command: cp {{ mktemp.stdout }} {{ keyfile }}
- file: > - file:
path={{ keyfile }} path: "{{ keyfile }}"
owner={{ owner }} owner: "{{ owner }}"
mode=0600 mode: 0600
- file: > - file:
path={{ mktemp.stdout }} path: "{{ mktemp.stdout }}"
state=absent state: absent
- shell: wc -l < {{ keyfile }} - shell: wc -l < {{ keyfile }}
register: line_count register: line_count
- fail: msg="There should only be one line in ubuntu's authorized_keys" - fail: msg="There should only be one line in ubuntu's authorized_keys"
......
...@@ -7,6 +7,6 @@ ...@@ -7,6 +7,6 @@
- roles/supervisor/defaults/main.yml - roles/supervisor/defaults/main.yml
tasks: tasks:
- name: supervisor | restart supervisor - name: supervisor | restart supervisor
service: > service:
name={{ supervisor_service }} name: "{{ supervisor_service }}"
state=restarted state: restarted
...@@ -12,8 +12,8 @@ ...@@ -12,8 +12,8 @@
- name: Set hostname - name: Set hostname
hostname: name={{ hostname_fqdn.split('.')[0] }} hostname: name={{ hostname_fqdn.split('.')[0] }}
- name: Update /etc/hosts - name: Update /etc/hosts
lineinfile: > lineinfile:
dest=/etc/hosts dest: /etc/hosts
regexp="^127\.0\.1\.1" regexp: "^127\\.0\\.1\\.1"
line="127.0.1.1{{'\t'}}{{ hostname_fqdn.split('.')[0] }}{{'\t'}}{{ hostname_fqdn }}{{'\t'}}localhost" line: "127.0.1.1{{ '\t' }}{{ hostname_fqdn.split('.')[0] }}{{ '\t' }}{{ hostname_fqdn }}{{ '\t' }}localhost"
state=present state: present
...@@ -11,7 +11,8 @@ ...@@ -11,7 +11,8 @@
pre_tasks: pre_tasks:
- action: ec2_facts - action: ec2_facts
when: elb_pre_post when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}" - debug:
var: "{{ ansible_ec2_instance_id }}"
when: elb_pre_post when: elb_pre_post
- name: Instance De-register - name: Instance De-register
local_action: ec2_elb local_action: ec2_elb
...@@ -25,16 +26,16 @@ ...@@ -25,16 +26,16 @@
tasks: tasks:
- shell: echo "test" - shell: echo "test"
post_tasks: post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}" - debug:
var: "{{ ansible_ec2_instance_id }}"
when: elb_pre_post when: elb_pre_post
- name: Register instance in the elb - name: Register instance in the elb
local_action: ec2_elb local_action: ec2_elb
args: args:
instance_id: "{{ ansible_ec2_instance_id }}" instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}" ec2_elbs: "{{ ec2_elbs }}"
region: us-east-1 region: us-east-1
state: present state: present
wait_timeout: 60 wait_timeout: 60
with_items: ec2_elbs
become: False become: False
when: elb_pre_post when: elb_pre_post
...@@ -14,7 +14,8 @@ ...@@ -14,7 +14,8 @@
pre_tasks: pre_tasks:
- action: ec2_facts - action: ec2_facts
when: elb_pre_post when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}" - debug:
var: "{{ ansible_ec2_instance_id }}"
when: elb_pre_post when: elb_pre_post
- name: Instance De-register - name: Instance De-register
local_action: ec2_elb local_action: ec2_elb
...@@ -38,16 +39,16 @@ ...@@ -38,16 +39,16 @@
- role: newrelic - role: newrelic
when: COMMON_ENABLE_NEWRELIC when: COMMON_ENABLE_NEWRELIC
post_tasks: post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}" - debug:
var: "{{ ansible_ec2_instance_id }}"
when: elb_pre_post when: elb_pre_post
- name: Register instance in the elb - name: Register instance in the elb
local_action: ec2_elb local_action: ec2_elb
args: args:
instance_id: "{{ ansible_ec2_instance_id }}" instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}" ec2_elbs: "{{ ec2_elbs }}"
region: us-east-1 region: us-east-1
state: present state: present
wait_timeout: 60 wait_timeout: 60
with_items: ec2_elbs
become: False become: False
when: elb_pre_post when: elb_pre_post
...@@ -96,22 +96,10 @@ def main(): ...@@ -96,22 +96,10 @@ def main():
aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'],
no_log=True), no_log=True),
aws_access_key=dict(aliases=['ec2_access_key', 'access_key']), aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
tags=dict(default=None), tags=dict(default=None, type='dict'),
) )
) )
tags_param = module.params.get('tags')
tags = {}
if isinstance(tags_param, list):
for item in module.params.get('tags'):
for k,v in item.iteritems():
tags[k] = v
elif isinstance(tags_param, dict):
tags = tags_param
else:
module.fail_json(msg="Invalid format for tags")
aws_secret_key = module.params.get('aws_secret_key') aws_secret_key = module.params.get('aws_secret_key')
aws_access_key = module.params.get('aws_access_key') aws_access_key = module.params.get('aws_access_key')
region = module.params.get('region') region = module.params.get('region')
...@@ -137,7 +125,7 @@ def main(): ...@@ -137,7 +125,7 @@ def main():
instances = [] instances = []
instance_ids = [] instance_ids = []
for res in ec2.get_all_instances(filters={'tag:' + tag: value for res in ec2.get_all_instances(filters={'tag:' + tag: value
for tag, value in tags.iteritems()}): for tag, value in module.params.get('tags').iteritems()}):
for inst in res.instances: for inst in res.instances:
if inst.state == "running": if inst.state == "running":
instances.append({k: v for k, v in inst.__dict__.iteritems() instances.append({k: v for k, v in inst.__dict__.iteritems()
......
...@@ -66,7 +66,7 @@ tasks: ...@@ -66,7 +66,7 @@ tasks:
- name: tag my launched instances - name: tag my launched instances
local_action: ec2_tag resource={{ item.id }} region=eu-west-1 state=present local_action: ec2_tag resource={{ item.id }} region=eu-west-1 state=present
with_items: ec2.instances with_items: "{{ ec2.instances }}"
args: args:
tags: tags:
Name: webserver Name: webserver
...@@ -76,7 +76,7 @@ tasks: ...@@ -76,7 +76,7 @@ tasks:
tasks: tasks:
- name: tag my instance - name: tag my instance
local_action: ec2_ntag resource={{ item.id }} region=us-east-1 state=present local_action: ec2_ntag resource={{ item.id }} region=us-east-1 state=present
with_items: ec2.instances with_items: "{{ ec2.instances }}"
args: args:
tags: tags:
- Name: "{{ some_variable }}" - Name: "{{ some_variable }}"
...@@ -101,7 +101,7 @@ def main(): ...@@ -101,7 +101,7 @@ def main():
argument_spec = ec2_argument_spec() argument_spec = ec2_argument_spec()
argument_spec.update(dict( argument_spec.update(dict(
resource = dict(required=True), resource = dict(required=True),
tags = dict(), tags = dict(required=False, type='list'),
state = dict(default='present', choices=['present', 'absent', 'list']), state = dict(default='present', choices=['present', 'absent', 'list']),
) )
) )
......
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
apt: apt:
name: "{{ item }}" name: "{{ item }}"
state: present state: present
with_items: ad_hoc_reporting_debian_pkgs with_items: "{{ ad_hoc_reporting_debian_pkgs }}"
tags: tags:
- install:system-requirements - install:system-requirements
...@@ -58,7 +58,7 @@ ...@@ -58,7 +58,7 @@
name: "{{ item }}" name: "{{ item }}"
state: present state: present
extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}" extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}"
with_items: ad_hoc_reporting_pip_pkgs with_items: "{{ ad_hoc_reporting_pip_pkgs }}"
tags: tags:
- install:app-requirements - install:app-requirements
...@@ -92,7 +92,7 @@ ...@@ -92,7 +92,7 @@
- scripts - scripts
- scripts:mysql - scripts:mysql
- install:code - install:code
with_items: AD_HOC_REPORTING_REPLICA_DB_HOSTS with_items: "{{ AD_HOC_REPORTING_REPLICA_DB_HOSTS }}"
# These templates rely on there being a global # These templates rely on there being a global
# read_only mongo user, you must override the default # read_only mongo user, you must override the default
......
...@@ -27,3 +27,6 @@ ...@@ -27,3 +27,6 @@
## ##
# Defaults for role add_user # Defaults for role add_user
# #
#
#
dirs: []
...@@ -65,8 +65,7 @@ ...@@ -65,8 +65,7 @@
owner: "{{ item.owner }}" owner: "{{ item.owner }}"
group: "{{ item.group }}" group: "{{ item.group }}"
mode: "{{ item.mode | default('0755') }}" mode: "{{ item.mode | default('0755') }}"
with_items: dirs with_items: "{{ dirs }}"
when: dirs is defined
tags: tags:
- install - install
- install:base - install:base
...@@ -33,42 +33,40 @@ ...@@ -33,42 +33,40 @@
# #
- name: setup the analytics_api env file - name: setup the analytics_api env file
template: > template:
src="edx/app/analytics_api/analytics_api_env.j2" src: "edx/app/analytics_api/analytics_api_env.j2"
dest="{{ analytics_api_home }}/analytics_api_env" dest: "{{ analytics_api_home }}/analytics_api_env"
owner={{ analytics_api_user }} owner: "{{ analytics_api_user }}"
group={{ analytics_api_user }} group: "{{ analytics_api_user }}"
mode=0644 mode: 0644
tags: tags:
- install - install
- install:configuration - install:configuration
- name: "add gunicorn configuration file" - name: "add gunicorn configuration file"
template: > template:
src=edx/app/analytics_api/analytics_api_gunicorn.py.j2 src: edx/app/analytics_api/analytics_api_gunicorn.py.j2
dest={{ analytics_api_home }}/analytics_api_gunicorn.py dest: "{{ analytics_api_home }}/analytics_api_gunicorn.py"
become_user: "{{ analytics_api_user }}" become_user: "{{ analytics_api_user }}"
tags: tags:
- install - install
- install:configuration - install:configuration
- name: install application requirements - name: install application requirements
pip: > pip:
requirements="{{ analytics_api_requirements_base }}/{{ item }}" requirements: "{{ analytics_api_requirements_base }}/{{ item }}"
virtualenv="{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}" virtualenv: "{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}"
state=present state: present
become_user: "{{ analytics_api_user }}" become_user: "{{ analytics_api_user }}"
with_items: analytics_api_requirements with_items: "{{ analytics_api_requirements }}"
tags: tags:
- install - install
- install:app-requirements - install:app-requirements
- name: migrate - name: migrate
shell: > shell: "DB_MIGRATION_USER='{{ COMMON_MYSQL_MIGRATE_USER }}' DB_MIGRATION_PASS='{{ COMMON_MYSQL_MIGRATE_PASS }}' {{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/python ./manage.py migrate --noinput"
chdir={{ analytics_api_code_dir }} args:
DB_MIGRATION_USER='{{ COMMON_MYSQL_MIGRATE_USER }}' chdir: "{{ analytics_api_code_dir }}"
DB_MIGRATION_PASS='{{ COMMON_MYSQL_MIGRATE_PASS }}'
{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/python ./manage.py migrate --noinput
become_user: "{{ analytics_api_user }}" become_user: "{{ analytics_api_user }}"
environment: "{{ analytics_api_environment }}" environment: "{{ analytics_api_environment }}"
when: migrate_db is defined and migrate_db|lower == "yes" when: migrate_db is defined and migrate_db|lower == "yes"
...@@ -77,9 +75,9 @@ ...@@ -77,9 +75,9 @@
- migrate:db - migrate:db
- name: run collectstatic - name: run collectstatic
shell: > shell: "{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/python manage.py collectstatic --noinput"
chdir={{ analytics_api_code_dir }} args:
{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/python manage.py collectstatic --noinput chdir: "{{ analytics_api_code_dir }}"
become_user: "{{ analytics_api_user }}" become_user: "{{ analytics_api_user }}"
environment: "{{ analytics_api_environment }}" environment: "{{ analytics_api_environment }}"
tags: tags:
...@@ -87,40 +85,44 @@ ...@@ -87,40 +85,44 @@
- assets:gather - assets:gather
- name: create api users - name: create api users
shell: > shell: "{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/python manage.py set_api_key {{ item.key }} {{ item.value }}"
chdir={{ analytics_api_code_dir }} args:
{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/python manage.py set_api_key {{ item.key }} {{ item.value }} chdir: "{{ analytics_api_code_dir }}"
become_user: "{{ analytics_api_user }}" become_user: "{{ analytics_api_user }}"
environment: "{{ analytics_api_environment }}" environment: "{{ analytics_api_environment }}"
with_dict: ANALYTICS_API_USERS with_dict: "{{ ANALYTICS_API_USERS }}"
tags: tags:
- manage - manage
- manage:app-users - manage:app-users
- name: write out the supervisor wrapper - name: write out the supervisor wrapper
template: > template:
src=edx/app/analytics_api/analytics_api.sh.j2 src: edx/app/analytics_api/analytics_api.sh.j2
dest={{ analytics_api_home }}/{{ analytics_api_service_name }}.sh dest: "{{ analytics_api_home }}/{{ analytics_api_service_name }}.sh"
mode=0650 owner={{ supervisor_user }} group={{ common_web_user }} mode: 0650
owner: "{{ supervisor_user }}"
group: "{{ common_web_user }}"
tags: tags:
- install - install
- install:configuration - install:configuration
- name: write supervisord config - name: write supervisord config
template: > template:
src=edx/app/supervisor/conf.d.available/analytics_api.conf.j2 src: edx/app/supervisor/conf.d.available/analytics_api.conf.j2
dest="{{ supervisor_available_dir }}/{{ analytics_api_service_name }}.conf" dest: "{{ supervisor_available_dir }}/{{ analytics_api_service_name }}.conf"
owner={{ supervisor_user }} group={{ common_web_user }} mode=0644 owner: "{{ supervisor_user }}"
group: "{{ common_web_user }}"
mode: 0644
tags: tags:
- install - install
- install:configuration - install:configuration
- name: enable supervisor script - name: enable supervisor script
file: > file:
src={{ supervisor_available_dir }}/{{ analytics_api_service_name }}.conf src: "{{ supervisor_available_dir }}/{{ analytics_api_service_name }}.conf"
dest={{ supervisor_cfg_dir }}/{{ analytics_api_service_name }}.conf dest: "{{ supervisor_cfg_dir }}/{{ analytics_api_service_name }}.conf"
state=link state: link
force=yes force: yes
when: not disable_edx_services when: not disable_edx_services
tags: tags:
- install - install
...@@ -134,10 +136,10 @@ ...@@ -134,10 +136,10 @@
- manage:start - manage:start
- name: create symlinks from the venv bin dir - name: create symlinks from the venv bin dir
file: > file:
src="{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/{{ item }}" src: "{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/{{ item }}"
dest="{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.analytics_api" dest: "{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.analytics_api"
state=link state: link
with_items: with_items:
- python - python
- pip - pip
...@@ -147,10 +149,10 @@ ...@@ -147,10 +149,10 @@
- install:base - install:base
- name: create symlinks from the repo dir - name: create symlinks from the repo dir
file: > file:
src="{{ analytics_api_code_dir }}/{{ item }}" src: "{{ analytics_api_code_dir }}/{{ item }}"
dest="{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.analytics_api" dest: "{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.analytics_api"
state=link state: link
with_items: with_items:
- manage.py - manage.py
tags: tags:
...@@ -158,11 +160,11 @@ ...@@ -158,11 +160,11 @@
- install:base - install:base
- name: restart analytics_api - name: restart analytics_api
supervisorctl: > supervisorctl:
state=restarted state: restarted
supervisorctl_path={{ supervisor_ctl }} supervisorctl_path: "{{ supervisor_ctl }}"
config={{ supervisor_cfg }} config: "{{ supervisor_cfg }}"
name={{ analytics_api_service_name }} name: "{{ analytics_api_service_name }}"
when: not disable_edx_services when: not disable_edx_services
become_user: "{{ supervisor_service_user }}" become_user: "{{ supervisor_service_user }}"
tags: tags:
......
...@@ -173,20 +173,24 @@ ...@@ -173,20 +173,24 @@
- manage:start - manage:start
- name: Copying nginx configs for {{ role_name }} - name: Copying nginx configs for {{ role_name }}
template: > template:
src=edx/app/nginx/sites-available/{{ role_name }}.j2 src: "edx/app/nginx/sites-available/{{ role_name }}.j2"
dest={{ '{{' }} nginx_sites_available_dir }}/{{ role_name }} dest: "{{ '{{' }} nginx_sites_available_dir }}/{{ role_name }}"
owner=root group={{ '{{' }} common_web_user }} mode=0640 owner: root
group: "{{ '{{' }} common_web_user }}"
mode: 0640
notify: reload nginx notify: reload nginx
tags: tags:
- install - install
- install:vhosts - install:vhosts
- name: Creating nginx config links for {{ role_name }} - name: Creating nginx config links for {{ role_name }}
file: > file:
src={{ '{{' }} nginx_sites_available_dir }}/{{ role_name }} src: "{{ '{{' }} nginx_sites_available_dir }}/{{ role_name }}"
dest={{ '{{' }} nginx_sites_enabled_dir }}/{{ role_name }} dest: "{{ '{{' }} nginx_sites_enabled_dir }}/{{ role_name }}"
state=link owner=root group=root state: link
owner: root
group: root
notify: reload nginx notify: reload nginx
tags: tags:
- install - install
......
...@@ -23,41 +23,41 @@ ...@@ -23,41 +23,41 @@
- name: install antivirus system packages - name: install antivirus system packages
apt: pkg={{ item }} install_recommends=yes state=present apt: pkg={{ item }} install_recommends=yes state=present
with_items: antivirus_debian_pkgs with_items: "{{ antivirus_debian_pkgs }}"
- name: create antivirus scanner user - name: create antivirus scanner user
user: > user:
name="{{ antivirus_user }}" name: "{{ antivirus_user }}"
home="{{ antivirus_app_dir }}" home: "{{ antivirus_app_dir }}"
createhome=no createhome: no
shell=/bin/false shell: /bin/false
- name: create antivirus app and data dirs - name: create antivirus app and data dirs
file: > file:
path="{{ item }}" path: "{{ item }}"
state=directory state: directory
owner="{{ antivirus_user }}" owner: "{{ antivirus_user }}"
group="{{ antivirus_user }}" group: "{{ antivirus_user }}"
with_items: with_items:
- "{{ antivirus_app_dir }}" - "{{ antivirus_app_dir }}"
- "{{ antivirus_app_dir }}/data" - "{{ antivirus_app_dir }}/data"
- name: install antivirus s3 scanner script - name: install antivirus s3 scanner script
template: > template:
src=s3_bucket_virus_scan.sh.j2 src: s3_bucket_virus_scan.sh.j2
dest={{ antivirus_app_dir }}/s3_bucket_virus_scan.sh dest: "{{ antivirus_app_dir }}/s3_bucket_virus_scan.sh"
mode=0555 mode: "0555"
owner={{ antivirus_user }} owner: "{{ antivirus_user }}"
group={{ antivirus_user }} group: "{{ antivirus_user }}"
- name: install antivirus s3 scanner cronjob - name: install antivirus s3 scanner cronjob
cron: > cron:
name="antivirus-{{ item }}" name: "antivirus-{{ item }}"
job="{{ antivirus_app_dir }}/s3_bucket_virus_scan.sh -b '{{ item }}' -m '{{ ANTIVIRUS_MAILTO }}' -f '{{ ANTIVIRUS_MAILFROM }}'" job: "{{ antivirus_app_dir }}/s3_bucket_virus_scan.sh -b '{{ item }}' -m '{{ ANTIVIRUS_MAILTO }}' -f '{{ ANTIVIRUS_MAILFROM }}'"
backup=yes backup: yes
cron_file=antivirus-{{ item }} cron_file: "antivirus-{{ item }}"
user={{ antivirus_user }} user: "{{ antivirus_user }}"
hour="*" hour: "*"
minute="0" minute: "0"
day="*" day: "*"
with_items: ANTIVIRUS_BUCKETS with_items: "{{ ANTIVIRUS_BUCKETS }}"
...@@ -102,8 +102,8 @@ ...@@ -102,8 +102,8 @@
file: file:
path: "{{ item.item }}" path: "{{ item.item }}"
mode: "0644" mode: "0644"
when: > when:
vagrant_home_dir.stat.exists == false and vagrant_home_dir.stat.exists == false and
ansible_distribution in common_debian_variants and ansible_distribution in common_debian_variants and
item.stat.exists item.stat.exists
with_items: motd_files_exist.results with_items: "{{ motd_files_exist.results }}"
# Install browsermob-proxy, which is used for page performance testing with bok-choy # Install browsermob-proxy, which is used for page performance testing with bok-choy
--- ---
- name: get zip file - name: get zip file
get_url: > get_url:
url={{ browsermob_proxy_url }} url: "{{ browsermob_proxy_url }}"
dest=/var/tmp/browsermob-proxy-{{ browsermob_proxy_version }}.zip dest: "/var/tmp/browsermob-proxy-{{ browsermob_proxy_version }}.zip"
register: download_browsermob_proxy register: download_browsermob_proxy
- name: unzip into /var/tmp/ - name: unzip into /var/tmp/
shell: > shell: "unzip /var/tmp/browsermob-proxy-{{ browsermob_proxy_version }}.zip"
unzip /var/tmp/browsermob-proxy-{{ browsermob_proxy_version }}.zip args:
chdir=/var/tmp chdir: "/var/tmp"
when: download_browsermob_proxy.changed when: download_browsermob_proxy.changed
- name: move to /etc/browsermob-proxy/ - name: move to /etc/browsermob-proxy/
shell: > shell: "mv /var/tmp/browsermob-proxy-{{ browsermob_proxy_version }} /etc/browsermob-proxy"
mv /var/tmp/browsermob-proxy-{{ browsermob_proxy_version }} /etc/browsermob-proxy
when: download_browsermob_proxy.changed when: download_browsermob_proxy.changed
- name: change permissions of main script - name: change permissions of main script
file: > file:
path=/etc/browsermob-proxy/bin/browsermob-proxy path: "/etc/browsermob-proxy/bin/browsermob-proxy"
mode=0755 mode: 0755
when: download_browsermob_proxy.changed when: download_browsermob_proxy.changed
- name: add wrapper script /usr/local/bin/browsermob-proxy - name: add wrapper script /usr/local/bin/browsermob-proxy
copy: > copy:
src=browsermob-proxy src: browsermob-proxy
dest=/usr/local/bin/browsermob-proxy dest: /usr/local/bin/browsermob-proxy
when: download_browsermob_proxy.changed when: download_browsermob_proxy.changed
- name: change permissions of wrapper script - name: change permissions of wrapper script
file: > file:
path=/usr/local/bin/browsermob-proxy path: /usr/local/bin/browsermob-proxy
mode=0755 mode: 0755
when: download_browsermob_proxy.changed when: download_browsermob_proxy.changed
...@@ -8,12 +8,12 @@ ...@@ -8,12 +8,12 @@
- name: download browser debian packages from S3 - name: download browser debian packages from S3
get_url: dest="/tmp/{{ item.name }}" url="{{ item.url }}" get_url: dest="/tmp/{{ item.name }}" url="{{ item.url }}"
register: download_deb register: download_deb
with_items: browser_s3_deb_pkgs with_items: "{{ browser_s3_deb_pkgs }}"
- name: install browser debian packages - name: install browser debian packages
shell: gdebi -nq /tmp/{{ item.name }} shell: gdebi -nq /tmp/{{ item.name }}
when: download_deb.changed when: download_deb.changed
with_items: browser_s3_deb_pkgs with_items: "{{ browser_s3_deb_pkgs }}"
# Because the source location has been deprecated, we need to # Because the source location has been deprecated, we need to
# ensure it does not interfere with subsequent apt commands # ensure it does not interfere with subsequent apt commands
...@@ -50,15 +50,15 @@ ...@@ -50,15 +50,15 @@
- "chromedriver.stat.mode == '0755'" - "chromedriver.stat.mode == '0755'"
- name: download PhantomJS - name: download PhantomJS
get_url: > get_url:
url={{ phantomjs_url }} url: "{{ phantomjs_url }}"
dest=/var/tmp/{{ phantomjs_tarfile }} dest: "/var/tmp/{{ phantomjs_tarfile }}"
register: download_phantom_js register: download_phantom_js
- name: unpack the PhantomJS tarfile - name: unpack the PhantomJS tarfile
shell: > shell: "tar -xjf /var/tmp/{{ phantomjs_tarfile }}"
tar -xjf /var/tmp/{{ phantomjs_tarfile }} args:
chdir=/var/tmp chdir: "/var/tmp"
when: download_phantom_js.changed when: download_phantom_js.changed
- name: move PhantomJS binary to /usr/local - name: move PhantomJS binary to /usr/local
......
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
file: file:
path: "{{ cassandra_data_dir_prefix }}/{{ item }}" path: "{{ cassandra_data_dir_prefix }}/{{ item }}"
state: directory state: directory
with_items: cassandra_data_dirs with_items: "{{ cassandra_data_dirs }}"
- name: Mount ephemeral disks - name: Mount ephemeral disks
mount: mount:
...@@ -49,7 +49,7 @@ ...@@ -49,7 +49,7 @@
path: "{{ cassandra_data_dir_prefix }}/{{ item }}" path: "{{ cassandra_data_dir_prefix }}/{{ item }}"
owner: "{{ cassandra_user }}" owner: "{{ cassandra_user }}"
group: "{{ cassandra_group }}" group: "{{ cassandra_group }}"
with_items: cassandra_data_dirs with_items: "{{ cassandra_data_dirs }}"
- name: Add the datastax repository apt-key - name: Add the datastax repository apt-key
apt_key: apt_key:
......
...@@ -4,3 +4,4 @@ ...@@ -4,3 +4,4 @@
# role depends. This is to allow sharing vars without creating # role depends. This is to allow sharing vars without creating
# side-effects. Any vars requred by this role should be added to # side-effects. Any vars requred by this role should be added to
# common_vars/defaults/main.yml # common_vars/defaults/main.yml
#
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
fail: fail:
msg: "Configuration Sources Checking (COMMON_EXTRA_CONFIGURATION_SOURCES_CHECKING) is enabled, you must define {{ item }}" msg: "Configuration Sources Checking (COMMON_EXTRA_CONFIGURATION_SOURCES_CHECKING) is enabled, you must define {{ item }}"
when: COMMON_EXTRA_CONFIGURATION_SOURCES_CHECKING and ({{ item }} is not defined or {{ item }} != True) when: COMMON_EXTRA_CONFIGURATION_SOURCES_CHECKING and ({{ item }} is not defined or {{ item }} != True)
with_items: COMMON_EXTRA_CONFIGURATION_SOURCES with_items: "{{ COMMON_EXTRA_CONFIGURATION_SOURCES }}"
tags: tags:
- "install" - "install"
- "install:configuration" - "install:configuration"
......
...@@ -43,9 +43,9 @@ ...@@ -43,9 +43,9 @@
- install:app-requirements - install:app-requirements
- name: create nodeenv - name: create nodeenv
shell: > shell: "{{ credentials_venv_dir }}/bin/nodeenv {{ credentials_nodeenv_dir }} --prebuilt"
creates={{ credentials_nodeenv_dir }} args:
{{ credentials_venv_dir }}/bin/nodeenv {{ credentials_nodeenv_dir }} --prebuilt creates: "{{ credentials_nodeenv_dir }}"
become_user: "{{ credentials_user }}" become_user: "{{ credentials_user }}"
tags: tags:
- install - install
...@@ -74,9 +74,12 @@ ...@@ -74,9 +74,12 @@
# var should have more permissive permissions than the rest # var should have more permissive permissions than the rest
- name: create credentials var dirs - name: create credentials var dirs
file: > file:
path="{{ item }}" state=directory mode=0775 path: "{{ item }}"
owner="{{ credentials_user }}" group="{{ common_web_group }}" state: directory
mode: 0775
owner: "{{ credentials_user }}"
group: "{{ common_web_group }}"
with_items: with_items:
- "{{ CREDENTIALS_MEDIA_ROOT }}" - "{{ CREDENTIALS_MEDIA_ROOT }}"
tags: tags:
...@@ -192,20 +195,24 @@ ...@@ -192,20 +195,24 @@
- manage:start - manage:start
- name: Copying nginx configs for credentials - name: Copying nginx configs for credentials
template: > template:
src=edx/app/nginx/sites-available/credentials.j2 src: edx/app/nginx/sites-available/credentials.j2
dest={{ nginx_sites_available_dir }}/credentials dest: "{{ nginx_sites_available_dir }}/credentials"
owner=root group={{ common_web_user }} mode=0640 owner: root
group: "{{ common_web_user }}"
mode: 0640
notify: reload nginx notify: reload nginx
tags: tags:
- install - install
- install:vhosts - install:vhosts
- name: Creating nginx config links for credentials - name: Creating nginx config links for credentials
file: > file:
src={{ nginx_sites_available_dir }}/credentials src: "{{ nginx_sites_available_dir }}/credentials"
dest={{ nginx_sites_enabled_dir }}/credentials dest: "{{ nginx_sites_enabled_dir }}/credentials"
state=link owner=root group=root state: link
owner: root
group: root
notify: reload nginx notify: reload nginx
tags: tags:
- install - install
......
--- ---
DATADOG_API_KEY: "SPECIFY_KEY_HERE" DATADOG_API_KEY: "SPECIFY_KEY_HERE"
datadog_agent_version: '1:5.1.1-546' datadog_agent_version: '1:5.10.1-1'
datadog_apt_key: "0x226AE980C7A7DA52" datadog_apt_key: "0x226AE980C7A7DA52"
datadog_debian_pkgs: datadog_debian_pkgs:
......
--- ---
- name: check out the demo course - name: check out the demo course
git_2_0_1: > git_2_0_1:
dest={{ demo_code_dir }} repo={{ demo_repo }} version={{ demo_version }} dest: "{{ demo_code_dir }}"
accept_hostkey=yes repo: "{{ demo_repo }}"
version: "{{ demo_version }}"
accept_hostkey: yes
become_user: "{{ demo_edxapp_user }}" become_user: "{{ demo_edxapp_user }}"
register: demo_checkout register: demo_checkout
- name: import demo course - name: import demo course
shell: > shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py cms --settings=aws import {{ demo_edxapp_course_data_dir }} {{ demo_code_dir }}"
{{ demo_edxapp_venv_bin }}/python ./manage.py cms --settings=aws import {{ demo_edxapp_course_data_dir }} {{ demo_code_dir }} args:
chdir={{ demo_edxapp_code_dir }} chdir: "{{ demo_edxapp_code_dir }}"
become_user: "{{ common_web_user }}" become_user: "{{ common_web_user }}"
when: demo_checkout.changed when: demo_checkout.changed
- name: create some test users - name: create some test users
shell: > shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms manage_user {{ item.username}} {{ item.email }} --initial-password-hash {{ item.hashed_password | quote }}"
{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms manage_user {{ item.username}} {{ item.email }} --initial-password-hash {{ item.hashed_password | quote }} args:
chdir={{ demo_edxapp_code_dir }} chdir: "{{ demo_edxapp_code_dir }}"
become_user: "{{ common_web_user }}" become_user: "{{ common_web_user }}"
with_items: demo_test_users with_items: "{{ demo_test_users }}"
when: demo_checkout.changed when: demo_checkout.changed
- name: create staff user - name: create staff user
shell: > shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms manage_user staff staff@example.com --initial-password-hash {{ demo_hashed_password | quote }} --staff"
{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms manage_user staff staff@example.com --initial-password-hash {{ demo_hashed_password | quote }} --staff args:
chdir={{ demo_edxapp_code_dir }} chdir: "{{ demo_edxapp_code_dir }}"
become_user: "{{ common_web_user }}" become_user: "{{ common_web_user }}"
when: when:
- demo_checkout.changed - demo_checkout.changed
- DEMO_CREATE_STAFF_USER - DEMO_CREATE_STAFF_USER
- name: enroll test users in the demo course - name: enroll test users in the demo course
shell: > shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms enroll_user_in_course -e {{ item.email }} -c {{ demo_course_id }}"
{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms enroll_user_in_course -e {{ item.email }} -c {{ demo_course_id }} args:
chdir={{ demo_edxapp_code_dir }} chdir: "{{ demo_edxapp_code_dir }}"
become_user: "{{ common_web_user }}" become_user: "{{ common_web_user }}"
with_items: with_items:
- "{{ demo_test_users }}" - "{{ demo_test_users }}"
...@@ -43,15 +45,15 @@ ...@@ -43,15 +45,15 @@
- name: add test users to the certificate whitelist - name: add test users to the certificate whitelist
shell: > shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms cert_whitelist -a {{ item.email }} -c {{ demo_course_id }}"
{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms cert_whitelist -a {{ item.email }} -c {{ demo_course_id }} args:
chdir={{ demo_edxapp_code_dir }} chdir: "{{ demo_edxapp_code_dir }}"
with_items: demo_test_users with_items: "{{ demo_test_users }}"
when: demo_checkout.changed when: demo_checkout.changed
- name: seed the forums for the demo course - name: seed the forums for the demo course
shell: > shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws seed_permissions_roles {{ demo_course_id }}"
{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws seed_permissions_roles {{ demo_course_id }} args:
chdir={{ demo_edxapp_code_dir }} chdir: "{{ demo_edxapp_code_dir }}"
with_items: demo_test_users with_items: "{{ demo_test_users }}"
when: demo_checkout.changed when: demo_checkout.changed
...@@ -31,8 +31,10 @@ ...@@ -31,8 +31,10 @@
# - demo # - demo
- name: create demo app and data dirs - name: create demo app and data dirs
file: > file:
path="{{ demo_app_dir }}" state=directory path: "{{ demo_app_dir }}"
owner="{{ demo_edxapp_user }}" group="{{ common_web_group }}" state: directory
owner: "{{ demo_edxapp_user }}"
group: "{{ common_web_group }}"
- include: deploy.yml tags=deploy - include: deploy.yml tags=deploy
...@@ -77,9 +77,9 @@ ...@@ -77,9 +77,9 @@
- devstack:install - devstack:install
- name: create nodeenv - name: create nodeenv
shell: > shell: "{{ discovery_venv_dir }}/bin/nodeenv {{ discovery_nodeenv_dir }} --node={{ discovery_node_version }} --prebuilt"
creates={{ discovery_nodeenv_dir }} args:
{{ discovery_venv_dir }}/bin/nodeenv {{ discovery_nodeenv_dir }} --node={{ discovery_node_version }} --prebuilt creates: "{{ discovery_nodeenv_dir }}"
become_user: "{{ discovery_user }}" become_user: "{{ discovery_user }}"
tags: tags:
- install - install
...@@ -94,9 +94,9 @@ ...@@ -94,9 +94,9 @@
- install:app-requirements - install:app-requirements
- name: install bower dependencies - name: install bower dependencies
shell: > shell: ". {{ discovery_nodeenv_bin }}/activate && {{ discovery_node_bin }}/bower install --production --config.interactive=false"
chdir={{ discovery_code_dir }} args:
. {{ discovery_nodeenv_bin }}/activate && {{ discovery_node_bin }}/bower install --production --config.interactive=false chdir: "{{ discovery_code_dir }}"
become_user: "{{ discovery_user }}" become_user: "{{ discovery_user }}"
tags: tags:
- install - install
......
...@@ -21,16 +21,20 @@ ECOMMERCE_NGINX_PORT: "18130" ...@@ -21,16 +21,20 @@ ECOMMERCE_NGINX_PORT: "18130"
ECOMMERCE_SSL_NGINX_PORT: 48130 ECOMMERCE_SSL_NGINX_PORT: 48130
ECOMMERCE_DEFAULT_DB_NAME: 'ecommerce' ECOMMERCE_DEFAULT_DB_NAME: 'ecommerce'
ECOMMERCE_DATABASE_USER: "ecomm001"
ECOMMERCE_DATABASE_PASSWORD: "password"
ECOMMERCE_DATABASE_HOST: "localhost"
ECOMMERCE_DATABASE_PORT: 3306
ECOMMERCE_DATABASES: ECOMMERCE_DATABASES:
# rw user # rw user
default: default:
ENGINE: 'django.db.backends.mysql' ENGINE: 'django.db.backends.mysql'
NAME: '{{ ECOMMERCE_DEFAULT_DB_NAME }}' NAME: '{{ ECOMMERCE_DEFAULT_DB_NAME }}'
USER: 'ecomm001' USER: '{{ ECOMMERCE_DATABASE_USER }}'
PASSWORD: 'password' PASSWORD: '{{ ECOMMERCE_DATABASE_PASSWORD }}'
HOST: 'localhost' HOST: '{{ ECOMMERCE_DATABASE_HOST }}'
PORT: '3306' PORT: '{{ ECOMMERCE_DATABASE_PORT }}'
ATOMIC_REQUESTS: true ATOMIC_REQUESTS: true
CONN_MAX_AGE: 60 CONN_MAX_AGE: 60
......
...@@ -84,11 +84,9 @@ ...@@ -84,11 +84,9 @@
- migrate:db - migrate:db
- name: Populate countries - name: Populate countries
shell: > shell: "DB_MIGRATION_USER={{ COMMON_MYSQL_MIGRATE_USER }} DB_MIGRATION_PASS={{ COMMON_MYSQL_MIGRATE_PASS }} {{ ecommerce_venv_dir }}/bin/python ./manage.py oscar_populate_countries"
chdir={{ ecommerce_code_dir }} args:
DB_MIGRATION_USER={{ COMMON_MYSQL_MIGRATE_USER }} chdir: "{{ ecommerce_code_dir }}"
DB_MIGRATION_PASS={{ COMMON_MYSQL_MIGRATE_PASS }}
{{ ecommerce_venv_dir }}/bin/python ./manage.py oscar_populate_countries
become_user: "{{ ecommerce_user }}" become_user: "{{ ecommerce_user }}"
environment: "{{ ecommerce_environment }}" environment: "{{ ecommerce_environment }}"
when: migrate_db is defined and migrate_db|lower == "yes" when: migrate_db is defined and migrate_db|lower == "yes"
......
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
virtualenv: '{{ ecommerce_worker_home }}/venvs/{{ ecommerce_worker_service_name }}' virtualenv: '{{ ecommerce_worker_home }}/venvs/{{ ecommerce_worker_service_name }}'
state: present state: present
become_user: '{{ ecommerce_worker_user }}' become_user: '{{ ecommerce_worker_user }}'
with_items: ecommerce_worker_requirements with_items: "{{ ecommerce_worker_requirements }}"
- name: write out the supervisor wrapper - name: write out the supervisor wrapper
template: template:
......
...@@ -51,7 +51,7 @@ ...@@ -51,7 +51,7 @@
state: present state: present
extra_args: "--exists-action w" extra_args: "--exists-action w"
become_user: "{{ edx_notes_api_user }}" become_user: "{{ edx_notes_api_user }}"
with_items: edx_notes_api_requirements with_items: "{{ edx_notes_api_requirements }}"
- name: Migrate - name: Migrate
shell: > shell: >
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
# #
edx_service_name: edx_service edx_service_name: edx_service
edx_service_repos: []
# #
# OS packages # OS packages
# #
......
...@@ -127,18 +127,19 @@ ...@@ -127,18 +127,19 @@
action: ec2_facts action: ec2_facts
tags: tags:
- to-remove - to-remove
#old syntax - should be fixed
- name: Tag instance - name: Tag instance
ec2_tag_local: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }} ec2_tag_local:
args: args:
resource: "{{ ansible_ec2_instance_id }}"
region: "{{ ansible_ec2_placement_region }}"
tags: tags:
- Name: version:{{ edx_service_name }} - Name: "version:{{ edx_service_name }}"
Value: "{{ item.0.DOMAIN }}/{{ item.0.PATH }}/{{ item.0.REPO }} {{ item.1.after |truncate(7,True,'') }}" Value: "{{ item.0.DOMAIN }}/{{ item.0.PATH }}/{{ item.0.REPO }} {{ item.1.after |truncate(7,True,'') }}"
when: item.1.after is defined and COMMON_TAG_EC2_INSTANCE and edx_service_repos is defined when: item.1.after is defined and COMMON_TAG_EC2_INSTANCE and edx_service_repos is defined
with_together: with_together:
- edx_service_repos - "{{ edx_service_repos }}"
- code_checkout.results - "{{ code_checkout.results }}"
tags: tags:
- to-remove - to-remove
......
...@@ -50,7 +50,7 @@ ...@@ -50,7 +50,7 @@
shell: /bin/bash shell: /bin/bash
groups: "{{ themes_group }}" groups: "{{ themes_group }}"
append: yes append: yes
with_items: theme_users with_items: "{{ theme_users }}"
when: theme_users is defined when: theme_users is defined
- name: update .bashrc to set umask value - name: update .bashrc to set umask value
......
...@@ -499,8 +499,8 @@ EDXAPP_CELERY_WORKERS: ...@@ -499,8 +499,8 @@ EDXAPP_CELERY_WORKERS:
monitor: False monitor: False
max_tasks_per_child: 1 max_tasks_per_child: 1
EDXAPP_RECALCULATE_GRADES_ROUTING_KEY: 'edx.lms.core.default' EDXAPP_RECALCULATE_GRADES_ROUTING_KEY: 'edx.lms.core.default'
EDXAPP_LMS_CELERY_QUEUES: "{{ edxapp_workers|selectattr('service_variant', 'equalto', 'lms')|map(attribute='queue')|map('regex_replace', '(.*)', 'edx.lms.core.\\\\1')|list }}" EDXAPP_LMS_CELERY_QUEUES: "{{ edxapp_workers|selectattr('service_variant', 'equalto', 'lms')|map(attribute='queue')|map('regex_replace', '(.*)', 'edx.lms.core.\\1')|list }}"
EDXAPP_CMS_CELERY_QUEUES: "{{ edxapp_workers|selectattr('service_variant', 'equalto', 'cms')|map(attribute='queue')|map('regex_replace', '(.*)', 'edx.cms.core.\\\\1')|list }}" EDXAPP_CMS_CELERY_QUEUES: "{{ edxapp_workers|selectattr('service_variant', 'equalto', 'cms')|map(attribute='queue')|map('regex_replace', '(.*)', 'edx.cms.core.\\1')|list }}"
EDXAPP_DEFAULT_CACHE_VERSION: "1" EDXAPP_DEFAULT_CACHE_VERSION: "1"
EDXAPP_OAUTH_ENFORCE_SECURE: True EDXAPP_OAUTH_ENFORCE_SECURE: True
...@@ -802,8 +802,6 @@ edxapp_generic_auth_config: &edxapp_generic_auth ...@@ -802,8 +802,6 @@ edxapp_generic_auth_config: &edxapp_generic_auth
generic_cache_config: &default_generic_cache generic_cache_config: &default_generic_cache
BACKEND: 'django.core.cache.backends.memcached.MemcachedCache' BACKEND: 'django.core.cache.backends.memcached.MemcachedCache'
KEY_FUNCTION: 'util.memcache.safe_key' KEY_FUNCTION: 'util.memcache.safe_key'
KEY_PREFIX: 'default'
LOCATION: "{{ EDXAPP_MEMCACHE }}"
generic_env_config: &edxapp_generic_env generic_env_config: &edxapp_generic_env
ECOMMERCE_PUBLIC_URL_ROOT: "{{ EDXAPP_ECOMMERCE_PUBLIC_URL_ROOT }}" ECOMMERCE_PUBLIC_URL_ROOT: "{{ EDXAPP_ECOMMERCE_PUBLIC_URL_ROOT }}"
...@@ -888,23 +886,29 @@ generic_env_config: &edxapp_generic_env ...@@ -888,23 +886,29 @@ generic_env_config: &edxapp_generic_env
default: default:
<<: *default_generic_cache <<: *default_generic_cache
KEY_PREFIX: 'default' KEY_PREFIX: 'default'
LOCATION: "{{ EDXAPP_MEMCACHE }}"
VERSION: "{{ EDXAPP_DEFAULT_CACHE_VERSION }}" VERSION: "{{ EDXAPP_DEFAULT_CACHE_VERSION }}"
general: general:
<<: *default_generic_cache <<: *default_generic_cache
KEY_PREFIX: 'general' KEY_PREFIX: 'general'
LOCATION: "{{ EDXAPP_MEMCACHE }}"
mongo_metadata_inheritance: mongo_metadata_inheritance:
<<: *default_generic_cache <<: *default_generic_cache
KEY_PREFIX: 'mongo_metadata_inheritance' KEY_PREFIX: 'mongo_metadata_inheritance'
TIMEOUT: 300 TIMEOUT: 300
LOCATION: "{{ EDXAPP_MEMCACHE }}"
staticfiles: staticfiles:
<<: *default_generic_cache <<: *default_generic_cache
KEY_PREFIX: "{{ ansible_hostname|default('staticfiles') }}_general" KEY_PREFIX: "{{ ansible_hostname|default('staticfiles') }}_general"
LOCATION: "{{ EDXAPP_MEMCACHE }}"
configuration: configuration:
<<: *default_generic_cache <<: *default_generic_cache
KEY_PREFIX: "{{ ansible_hostname|default('configuration') }}" KEY_PREFIX: "{{ ansible_hostname|default('configuration') }}"
LOCATION: "{{ EDXAPP_MEMCACHE }}"
celery: celery:
<<: *default_generic_cache <<: *default_generic_cache
KEY_PREFIX: 'celery' KEY_PREFIX: 'celery'
LOCATION: "{{ EDXAPP_MEMCACHE }}"
TIMEOUT: "7200" TIMEOUT: "7200"
course_structure_cache: course_structure_cache:
<<: *default_generic_cache <<: *default_generic_cache
......
...@@ -110,10 +110,10 @@ ...@@ -110,10 +110,10 @@
- install:app-requirements - install:app-requirements
- name: Create the virtualenv to install the Python requirements - name: Create the virtualenv to install the Python requirements
command: > command: "virtualenv {{ edxapp_venv_dir }}"
virtualenv {{ edxapp_venv_dir }} args:
chdir={{ edxapp_code_dir }} chdir: "{{ edxapp_code_dir }}"
creates={{ edxapp_venv_dir }}/bin/pip creates: "{{ edxapp_venv_dir }}/bin/pip"
become_user: "{{ edxapp_user }}" become_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}" environment: "{{ edxapp_environment }}"
tags: tags:
...@@ -134,9 +134,9 @@ ...@@ -134,9 +134,9 @@
# Need to use command rather than pip so that we can maintain the context of our current working directory; some # Need to use command rather than pip so that we can maintain the context of our current working directory; some
# requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly # requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
# installs everything into that virtual environment. # installs everything into that virtual environment.
command: > command: "{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item.item }}"
{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item.item }} args:
chdir={{ edxapp_code_dir }} chdir: "{{ edxapp_code_dir }}"
become_user: "{{ edxapp_user }}" become_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}" environment: "{{ edxapp_environment }}"
when: item.stat.exists when: item.stat.exists
...@@ -151,9 +151,9 @@ ...@@ -151,9 +151,9 @@
# Need to use shell rather than pip so that we can maintain the context of our current working directory; some # Need to use shell rather than pip so that we can maintain the context of our current working directory; some
# requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly # requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
# installs everything into that virtual environment. # installs everything into that virtual environment.
shell: > shell: "{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item }}"
{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item }} args:
chdir={{ edxapp_code_dir }} chdir: "{{ edxapp_code_dir }}"
with_items: with_items:
- "{{ private_requirements_file }}" - "{{ private_requirements_file }}"
become_user: "{{ edxapp_user }}" become_user: "{{ edxapp_user }}"
...@@ -172,7 +172,7 @@ ...@@ -172,7 +172,7 @@
extra_args: "--exists-action w {{ item.extra_args|default('') }}" extra_args: "--exists-action w {{ item.extra_args|default('') }}"
virtualenv: "{{ edxapp_venv_dir }}" virtualenv: "{{ edxapp_venv_dir }}"
state: present state: present
with_items: EDXAPP_EXTRA_REQUIREMENTS with_items: "{{ EDXAPP_EXTRA_REQUIREMENTS }}"
become_user: "{{ edxapp_user }}" become_user: "{{ edxapp_user }}"
tags: tags:
- install - install
...@@ -197,9 +197,9 @@ ...@@ -197,9 +197,9 @@
# Need to use shell rather than pip so that we can maintain the context of our current working directory; some # Need to use shell rather than pip so that we can maintain the context of our current working directory; some
# requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly # requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
# installs everything into that virtual environment. # installs everything into that virtual environment.
shell: > shell: "{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item }}"
{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item }} args:
chdir={{ edxapp_code_dir }} chdir: "{{ edxapp_code_dir }}"
with_items: with_items:
- "{{ sandbox_base_requirements }}" - "{{ sandbox_base_requirements }}"
- "{{ sandbox_local_requirements }}" - "{{ sandbox_local_requirements }}"
...@@ -211,8 +211,7 @@ ...@@ -211,8 +211,7 @@
- install:app-requirements - install:app-requirements
- name: create nodeenv - name: create nodeenv
shell: > shell: "{{ edxapp_venv_dir }}/bin/nodeenv {{ edxapp_nodeenv_dir }} --node={{ edxapp_node_version }} --prebuilt"
{{ edxapp_venv_dir }}/bin/nodeenv {{ edxapp_nodeenv_dir }} --node={{ edxapp_node_version }} --prebuilt
args: args:
creates: "{{ edxapp_nodeenv_dir }}" creates: "{{ edxapp_nodeenv_dir }}"
tags: tags:
...@@ -223,8 +222,7 @@ ...@@ -223,8 +222,7 @@
# This needs to be done as root since npm is weird about # This needs to be done as root since npm is weird about
# chown - https://github.com/npm/npm/issues/3565 # chown - https://github.com/npm/npm/issues/3565
- name: Set the npm registry - name: Set the npm registry
shell: > shell: "npm config set registry '{{ COMMON_NPM_MIRROR_URL }}'"
npm config set registry '{{ COMMON_NPM_MIRROR_URL }}'
args: args:
creates: "{{ edxapp_app_dir }}/.npmrc" creates: "{{ edxapp_app_dir }}/.npmrc"
environment: "{{ edxapp_environment }}" environment: "{{ edxapp_environment }}"
...@@ -279,9 +277,9 @@ ...@@ -279,9 +277,9 @@
- install:app-requirements - install:app-requirements
- name: code sandbox | Install sandbox requirements into sandbox venv - name: code sandbox | Install sandbox requirements into sandbox venv
shell: > shell: "{{ edxapp_sandbox_venv_dir }}/bin/pip install -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item }}"
{{ edxapp_sandbox_venv_dir }}/bin/pip install -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item }} args:
chdir={{ edxapp_code_dir }} chdir: "{{ edxapp_code_dir }}"
with_items: with_items:
- "{{ sandbox_local_requirements }}" - "{{ sandbox_local_requirements }}"
- "{{ sandbox_post_requirements }}" - "{{ sandbox_post_requirements }}"
......
...@@ -3,7 +3,10 @@ ...@@ -3,7 +3,10 @@
template: template:
src: "{{ item[0] }}.{{ item[1] }}.json.j2" src: "{{ item[0] }}.{{ item[1] }}.json.j2"
dest: "{{ edxapp_app_dir }}/{{ item[0] }}.{{ item[1] }}.json" dest: "{{ edxapp_app_dir }}/{{ item[0] }}.{{ item[1] }}.json"
become_user: "{{ edxapp_user }}" owner: "{{ edxapp_user }}"
group: "{{ common_web_group }}"
mode: 0640
become: true
with_nested: with_nested:
- "{{ service_variants_enabled }}" - "{{ service_variants_enabled }}"
- [ 'env', 'auth' ] - [ 'env', 'auth' ]
...@@ -17,7 +20,10 @@ ...@@ -17,7 +20,10 @@
template: template:
src: "{{ item[0] }}.{{ item[1] }}.yaml.j2" src: "{{ item[0] }}.{{ item[1] }}.yaml.j2"
dest: "{{ EDXAPP_CFG_DIR }}/{{ item[0] }}.{{ item[1] }}.yaml" dest: "{{ EDXAPP_CFG_DIR }}/{{ item[0] }}.{{ item[1] }}.yaml"
become_user: "{{ edxapp_user }}" owner: "{{ edxapp_user }}"
group: "{{ common_web_group }}"
mode: 0640
become: true
with_nested: with_nested:
- "{{ service_variants_enabled }}" - "{{ service_variants_enabled }}"
- [ 'env', 'auth' ] - [ 'env', 'auth' ]
...@@ -34,6 +40,7 @@ ...@@ -34,6 +40,7 @@
dest: "{{ supervisor_available_dir }}/{{ item }}.conf" dest: "{{ supervisor_available_dir }}/{{ item }}.conf"
owner: "{{ supervisor_user }}" owner: "{{ supervisor_user }}"
group: "{{ supervisor_user }}" group: "{{ supervisor_user }}"
mode: 0644
become_user: "{{ supervisor_user }}" become_user: "{{ supervisor_user }}"
with_items: "{{ service_variants_enabled }}" with_items: "{{ service_variants_enabled }}"
tags: tags:
...@@ -47,6 +54,7 @@ ...@@ -47,6 +54,7 @@
dest: "{{ supervisor_available_dir }}/{{ item }}" dest: "{{ supervisor_available_dir }}/{{ item }}"
owner: "{{ supervisor_user }}" owner: "{{ supervisor_user }}"
group: "{{ supervisor_user }}" group: "{{ supervisor_user }}"
mode: 0644
become_user: "{{ supervisor_user }}" become_user: "{{ supervisor_user }}"
with_items: with_items:
- edxapp.conf - edxapp.conf
...@@ -59,6 +67,7 @@ ...@@ -59,6 +67,7 @@
template: template:
src: "{{ item }}_gunicorn.py.j2" src: "{{ item }}_gunicorn.py.j2"
dest: "{{ edxapp_app_dir }}/{{ item }}_gunicorn.py" dest: "{{ edxapp_app_dir }}/{{ item }}_gunicorn.py"
mode: 0644
become_user: "{{ edxapp_user }}" become_user: "{{ edxapp_user }}"
with_items: "{{ service_variants_enabled }}" with_items: "{{ service_variants_enabled }}"
tags: tags:
......
...@@ -19,8 +19,8 @@ edxlocal_databases: ...@@ -19,8 +19,8 @@ edxlocal_databases:
edxlocal_database_users: edxlocal_database_users:
- { - {
db: "{{ ECOMMERCE_DEFAULT_DB_NAME | default(None) }}", db: "{{ ECOMMERCE_DEFAULT_DB_NAME | default(None) }}",
user: "{{ ECOMMERCE_DATABASES.default.USER | default(None) }}", user: "{{ ECOMMERCE_DATABASE_USER | default(None) }}",
pass: "{{ ECOMMERCE_DATABASES.default.PASSWORD | default(None) }}" pass: "{{ ECOMMERCE_DATABASE_PASSWORD | default(None) }}"
} }
- { - {
db: "{{ INSIGHTS_DATABASE_NAME | default(None) }}", db: "{{ INSIGHTS_DATABASE_NAME | default(None) }}",
...@@ -44,8 +44,8 @@ edxlocal_database_users: ...@@ -44,8 +44,8 @@ edxlocal_database_users:
} }
- { - {
db: "{{ PROGRAMS_DEFAULT_DB_NAME | default(None) }}", db: "{{ PROGRAMS_DEFAULT_DB_NAME | default(None) }}",
user: "{{ PROGRAMS_DATABASES.default.USER | default(None) }}", user: "{{ PROGRAMS_DATABASE_USER | default(None) }}",
pass: "{{ PROGRAMS_DATABASES.default.PASSWORD | default(None) }}" pass: "{{ PROGRAMS_DATABASE_PASSWORD | default(None) }}"
} }
- { - {
db: "{{ ANALYTICS_PIPELINE_OUTPUT_DATABASE_NAME | default(None) }}", db: "{{ ANALYTICS_PIPELINE_OUTPUT_DATABASE_NAME | default(None) }}",
......
...@@ -21,30 +21,27 @@ ...@@ -21,30 +21,27 @@
# #
# #
- name: download elasticsearch plugin - name: download elasticsearch plugin
shell: > shell: "./npi fetch {{ ELASTICSEARCH_MONITOR_PLUGIN }} -y"
./npi fetch {{ ELASTICSEARCH_MONITOR_PLUGIN }} -y
args: args:
chdir: "{{ NEWRELIC_NPI_PREFIX }}" chdir: "{{ NEWRELIC_NPI_PREFIX }}"
creates: "{{ NEWRELIC_NPI_PREFIX }}/plugins/{{ ELASTICSEARCH_MONITOR_PLUGIN }}.compressed" creates: "{{ NEWRELIC_NPI_PREFIX }}/plugins/{{ ELASTICSEARCH_MONITOR_PLUGIN }}.compressed"
become_user: "{{ NEWRELIC_USER }}" become_user: "{{ NEWRELIC_USER }}"
- name: prepare elasticsearch plugin - name: prepare elasticsearch plugin
shell: > shell: "./npi prepare {{ ELASTICSEARCH_MONITOR_PLUGIN }} -n"
./npi prepare {{ ELASTICSEARCH_MONITOR_PLUGIN }} -n
args: args:
chdir: "{{ NEWRELIC_NPI_PREFIX }}" chdir: "{{ NEWRELIC_NPI_PREFIX }}"
become_user: "{{ NEWRELIC_USER }}" become_user: "{{ NEWRELIC_USER }}"
- name: configure elasticsearch plugin - name: configure elasticsearch plugin
template: > template:
src=plugins/me.snov.newrelic-elasticsearch/newrelic-elasticsearch-plugin-1.4.1/config/plugin.json.j2 src: "plugins/me.snov.newrelic-elasticsearch/newrelic-elasticsearch-plugin-1.4.1/config/plugin.json.j2"
dest={{ NEWRELIC_NPI_PREFIX }}/plugins/{{ ELASTICSEARCH_MONITOR_PLUGIN }}/newrelic-elasticsearch-plugin-1.4.1/config/plugin.json dest: "{{ NEWRELIC_NPI_PREFIX }}/plugins/{{ ELASTICSEARCH_MONITOR_PLUGIN }}/newrelic-elasticsearch-plugin-1.4.1/config/plugin.json"
owner={{ NEWRELIC_USER }} owner: "{{ NEWRELIC_USER }}"
mode=0644 mode: 0644
- name: register/start elasticsearch plugin - name: register/start elasticsearch plugin
shell: > shell: "./npi add-service {{ ELASTICSEARCH_MONITOR_PLUGIN }} --start --user={{ NEWRELIC_USER }}"
./npi add-service {{ ELASTICSEARCH_MONITOR_PLUGIN }} --start --user={{ NEWRELIC_USER }}
args: args:
chdir: "{{ NEWRELIC_NPI_PREFIX }}" chdir: "{{ NEWRELIC_NPI_PREFIX }}"
become_user: "root" become_user: "root"
......
...@@ -30,37 +30,39 @@ ...@@ -30,37 +30,39 @@
--- ---
- name: install pip packages - name: install pip packages
pip: name={{ item }} state=present pip: name={{ item }} state=present
with_items: gh_mirror_pip_pkgs with_items: "{{ gh_mirror_pip_pkgs }}"
- name: install debian packages - name: install debian packages
apt: > apt:
pkg={{ ",".join(gh_mirror_debian_pkgs) }} pkg: '{{ ",".join(gh_mirror_debian_pkgs) }}'
state=present state: present
update_cache=yes update_cache: yes
- name: create gh_mirror user - name: create gh_mirror user
user: > user:
name={{ gh_mirror_user }} name: "{{ gh_mirror_user }}"
state=present state: present
- name: create the gh_mirror data directory - name: create the gh_mirror data directory
file: > file:
path={{ gh_mirror_data_dir }} path: "{{ gh_mirror_data_dir }}"
state=directory state: directory
owner={{ gh_mirror_user }} owner: "{{ gh_mirror_user }}"
group={{ gh_mirror_group }} group: "{{ gh_mirror_group }}"
- name: create the gh_mirror app directory - name: create the gh_mirror app directory
file: > file:
path={{ gh_mirror_app_dir }} path: "{{ gh_mirror_app_dir }}"
state=directory state: directory
- name: create org config - name: create org config
template: src=orgs.yml.j2 dest={{ gh_mirror_app_dir }}/orgs.yml template: src=orgs.yml.j2 dest={{ gh_mirror_app_dir }}/orgs.yml
- name: copying sync scripts - name: copying sync scripts
copy: src={{ item }} dest={{ gh_mirror_app_dir }}/{{ item }} copy:
with_items: gh_mirror_app_files src: "{{ item }}"
dest: "{{ gh_mirror_app_dir }}/{{ item }}"
with_items: "{{ gh_mirror_app_files }}"
- name: creating cron job to update repos - name: creating cron job to update repos
cron: cron:
......
...@@ -23,8 +23,8 @@ ...@@ -23,8 +23,8 @@
- name: Set git fetch.prune to ignore deleted remote refs - name: Set git fetch.prune to ignore deleted remote refs
shell: git config --global fetch.prune true shell: git config --global fetch.prune true
become_user: "{{ repo_owner }}" become_user: "{{ repo_owner }}"
when: GIT_REPOS is defined
no_log: true no_log: true
when: repo_owner is defined and GIT_REPOS|length > 0
tags: tags:
- install - install
- install:code - install:code
...@@ -33,7 +33,7 @@ ...@@ -33,7 +33,7 @@
fail: fail:
msg: '{{ GIT_REPOS.PROTOCOL }} must be "https" or "ssh"' msg: '{{ GIT_REPOS.PROTOCOL }} must be "https" or "ssh"'
when: (item.PROTOCOL != "https") and (item.PROTOCOL != "ssh") and GIT_REPOS is defined when: (item.PROTOCOL != "https") and (item.PROTOCOL != "ssh") and GIT_REPOS is defined
with_items: GIT_REPOS with_items: "{{ GIT_REPOS }}"
no_log: true no_log: true
tags: tags:
- install - install
...@@ -48,7 +48,7 @@ ...@@ -48,7 +48,7 @@
group: "{{ repo_group }}" group: "{{ repo_group }}"
mode: "0600" mode: "0600"
when: item.PROTOCOL == "ssh" and GIT_REPOS is defined when: item.PROTOCOL == "ssh" and GIT_REPOS is defined
with_items: GIT_REPOS with_items: "{{ GIT_REPOS }}"
no_log: true no_log: true
tags: tags:
- install - install
...@@ -64,7 +64,7 @@ ...@@ -64,7 +64,7 @@
become_user: "{{ repo_owner }}" become_user: "{{ repo_owner }}"
register: code_checkout register: code_checkout
when: item.PROTOCOL == "ssh" and GIT_REPOS is defined when: item.PROTOCOL == "ssh" and GIT_REPOS is defined
with_items: GIT_REPOS with_items: "{{ GIT_REPOS }}"
no_log: true no_log: true
tags: tags:
- install - install
...@@ -78,7 +78,7 @@ ...@@ -78,7 +78,7 @@
become_user: "{{ repo_owner }}" become_user: "{{ repo_owner }}"
register: code_checkout register: code_checkout
when: item.PROTOCOL == "https" and GIT_REPOS is defined when: item.PROTOCOL == "https" and GIT_REPOS is defined
with_items: GIT_REPOS with_items: "{{ GIT_REPOS }}"
no_log: true no_log: true
tags: tags:
- install - install
...@@ -89,7 +89,7 @@ ...@@ -89,7 +89,7 @@
dest: "{{ git_home }}/.ssh/{{ item.REPO }}" dest: "{{ git_home }}/.ssh/{{ item.REPO }}"
state: absent state: absent
when: item.PROTOCOL == "ssh" and GIT_REPOS is defined when: item.PROTOCOL == "ssh" and GIT_REPOS is defined
with_items: GIT_REPOS with_items: "{{ GIT_REPOS }}"
no_log: true no_log: true
tags: tags:
- install - install
......
...@@ -15,9 +15,9 @@ ...@@ -15,9 +15,9 @@
# #
# #
- name: restart gitreload - name: restart gitreload
supervisorctl: > supervisorctl:
name=gitreload name: gitreload
supervisorctl_path={{ supervisor_ctl }} supervisorctl_path: "{{ supervisor_ctl }}"
config={{ supervisor_cfg }} config: "{{ supervisor_cfg }}"
state=restarted state: restarted
when: not disable_edx_services when: not disable_edx_services
...@@ -3,32 +3,32 @@ ...@@ -3,32 +3,32 @@
- name: clone all course repos - name: clone all course repos
git_2_0_1: dest={{ GITRELOAD_REPODIR }}/{{ item.name }} repo={{ item.url }} version={{ item.commit }} git_2_0_1: dest={{ GITRELOAD_REPODIR }}/{{ item.name }} repo={{ item.url }} version={{ item.commit }}
become_user: "{{ common_web_user }}" become_user: "{{ common_web_user }}"
with_items: GITRELOAD_REPOS with_items: "{{ GITRELOAD_REPOS }}"
- name: do import of courses - name: do import of courses
shell: > shell: "SERVICE_VARIANT=lms {{ edxapp_venv_bin }}/python manage.py lms --settings=aws git_add_course {{ item.url }} {{ GITRELOAD_REPODIR }}/{{ item.name }}"
executable=/bin/bash args:
chdir="{{ edxapp_code_dir }}" executable: "/bin/bash"
SERVICE_VARIANT=lms {{ edxapp_venv_bin }}/python manage.py lms --settings=aws git_add_course {{ item.url }} {{ GITRELOAD_REPODIR }}/{{ item.name }} chdir: "{{ edxapp_code_dir }}"
become_user: "{{ common_web_user }}" become_user: "{{ common_web_user }}"
with_items: GITRELOAD_REPOS with_items: "{{ GITRELOAD_REPOS }}"
- name: change ownership on repos for access by edxapp and www-data - name: change ownership on repos for access by edxapp and www-data
file: > file:
path={{ GITRELOAD_REPODIR }} path: "{{ GITRELOAD_REPODIR }}"
state=directory state: directory
owner={{ common_web_user }} owner: "{{ common_web_user }}"
owner={{ common_web_group }} owner: "{{ common_web_group }}"
recurse=yes recurse: yes
- name: change group on repos if using devstack - name: change group on repos if using devstack
file: > file:
path={{ GITRELOAD_REPODIR }} path: "{{ GITRELOAD_REPODIR }}"
state=directory state: directory
group={{ edxapp_user }} group: "{{ edxapp_user }}"
recurse=yes recurse: yes
when: devstack when: devstack
- name: change mode on repos with using devstack - name: change mode on repos with using devstack
command: chmod -R o=rwX,g=srwX,o=rX {{ GITRELOAD_REPODIR }} command: "chmod -R o=rwX,g=srwX,o=rX {{ GITRELOAD_REPODIR }}"
when: devstack when: devstack
- name: create ssh dir for the content repos key - name: create ssh dir for the content repos key
file: path=~/.ssh state=directory mode=0700 file:
path: "~/.ssh"
state: "directory"
mode: "0700"
become_user: "{{ common_web_user }}" become_user: "{{ common_web_user }}"
- name: install ssh key for the content repos - name: install ssh key for the content repos
copy: content="{{ GITRELOAD_GIT_IDENTITY }}" dest=~/.ssh/id_rsa mode=0600 copy:
content: "{{ GITRELOAD_GIT_IDENTITY }}"
dest: "~/.ssh/id_rsa"
mode: "0600"
become_user: "{{ common_web_user }}" become_user: "{{ common_web_user }}"
- include: course_pull.yml - include: course_pull.yml
...@@ -11,35 +17,44 @@ ...@@ -11,35 +17,44 @@
tags: course_pull tags: course_pull
- name: install gitreload - name: install gitreload
pip: > pip:
name=git+{{ gitreload_repo }}@{{ gitreload_version }}#egg=gitreload name: "git+{{ gitreload_repo }}@{{ gitreload_version }}#egg=gitreload"
virtualenv={{ gitreload_venv }} virtualenv: "{{ gitreload_venv }}"
extra_args="--exists-action w" extra_args: "--exists-action w"
become_user: "{{ gitreload_user }}" become_user: "{{ gitreload_user }}"
notify: restart gitreload notify: restart gitreload
- name: copy configuration - name: copy configuration
template: src=edx/app/gitreload/gr.env.json.j2 dest={{ gitreload_dir }}/gr.env.json template:
src: "edx/app/gitreload/gr.env.json.j2"
dest: "{{ gitreload_dir }}/gr.env.json"
become_user: "{{ gitreload_user }}" become_user: "{{ gitreload_user }}"
notify: restart gitreload notify: restart gitreload
- name: "add gunicorn configuration file" - name: "add gunicorn configuration file"
template: > template:
src=edx/app/gitreload/gitreload_gunicorn.py.j2 dest={{ gitreload_dir }}/gitreload_gunicorn.py src: "edx/app/gitreload/gitreload_gunicorn.py.j2"
dest: "{{ gitreload_dir }}/gitreload_gunicorn.py"
become_user: "{{ gitreload_user }}" become_user: "{{ gitreload_user }}"
notify: restart gitreload notify: restart gitreload
- name: "writing supervisor script" - name: "writing supervisor script"
template: > template:
src=edx/app/supervisor/conf.available.d/gitreload.conf.j2 dest={{ supervisor_available_dir }}/gitreload.conf src: "edx/app/supervisor/conf.available.d/gitreload.conf.j2"
owner={{ supervisor_user }} group={{ common_web_user }} mode=0644 dest: "{{ supervisor_available_dir }}/gitreload.conf"
owner: "{{ supervisor_user }}"
group: "{{ common_web_user }}"
mode: "0644"
- name: "enable supervisor script" - name: "enable supervisor script"
file: > file:
src={{ supervisor_available_dir }}/gitreload.conf src: "{{ supervisor_available_dir }}/gitreload.conf"
dest={{ supervisor_cfg_dir }}/gitreload.conf dest: "{{ supervisor_cfg_dir }}/gitreload.conf"
owner={{ supervisor_user }} group={{ common_web_user }} mode=0644 owner: "{{ supervisor_user }}"
state=link force=yes group: "{{ common_web_user }}"
mode: "0644"
state: link
force: "yes"
when: not disable_edx_services when: not disable_edx_services
# call supervisorctl update. this reloads # call supervisorctl update. this reloads
...@@ -54,9 +69,9 @@ ...@@ -54,9 +69,9 @@
when: not disable_edx_services when: not disable_edx_services
- name: ensure gitreload is started - name: ensure gitreload is started
supervisorctl: > supervisorctl:
name=gitreload name: gitreload
supervisorctl_path={{ supervisor_ctl }} supervisorctl_path: "{{ supervisor_ctl }}"
config={{ supervisor_cfg }} config: "{{ supervisor_cfg }}"
state=started state: started
when: not disable_edx_services when: not disable_edx_services
...@@ -38,45 +38,45 @@ ...@@ -38,45 +38,45 @@
- deploy - deploy
- name: create gitreload user - name: create gitreload user
user: > user:
name="{{ gitreload_user }}" name: "{{ gitreload_user }}"
home="{{ gitreload_dir }}" home: "{{ gitreload_dir }}"
createhome=no createhome: no
shell=/bin/false shell: /bin/false
- name: ensure home folder exists - name: ensure home folder exists
file: > file:
path={{ gitreload_dir }} path: "{{ gitreload_dir }}"
state=directory state: directory
owner={{ gitreload_user }} owner: "{{ gitreload_user }}"
group={{ gitreload_user }} group: "{{ gitreload_user }}"
- name: ensure repo dir exists - name: ensure repo dir exists
file: > file:
path={{ GITRELOAD_REPODIR }} path: "{{ GITRELOAD_REPODIR }}"
state=directory state: directory
owner={{ common_web_user }} owner: "{{ common_web_user }}"
group={{ common_web_group }} group: "{{ common_web_group }}"
- name: grab ssh host keys - name: grab ssh host keys
shell: ssh-keyscan {{ item }} shell: ssh-keyscan {{ item }}
become_user: "{{ common_web_user }}" become_user: "{{ common_web_user }}"
with_items: GITRELOAD_HOSTS with_items: "{{ GITRELOAD_HOSTS }}"
register: gitreload_repo_host_keys register: gitreload_repo_host_keys
- name: add host keys if needed to known_hosts - name: add host keys if needed to known_hosts
lineinfile: > lineinfile:
create=yes create: yes
dest=~/.ssh/known_hosts dest: ~/.ssh/known_hosts
line="{{ item.stdout }}" line: "{{ item.stdout }}"
become_user: "{{ common_web_user }}" become_user: "{{ common_web_user }}"
with_items: gitreload_repo_host_keys.results with_items: "{{ gitreload_repo_host_keys.results }}"
- name: create a symlink for venv python - name: create a symlink for venv python
file: > file:
src="{{ gitreload_venv_bin }}/{{ item }}" src: "{{ gitreload_venv_bin }}/{{ item }}"
dest={{ COMMON_BIN_DIR }}/{{ item }}.gitreload dest: "{{ COMMON_BIN_DIR }}/{{ item }}.gitreload"
state=link state: "link"
with_items: with_items:
- python - python
- pip - pip
......
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
# Ignoring error below so that we can move the data folder and have it be a link # Ignoring error below so that we can move the data folder and have it be a link
- name: all | create folders - name: all | create folders
file: path={{ item.path }} state=directory file: path={{ item.path }} state=directory
with_items: gluster_volumes with_items: "{{ gluster_volumes }}"
when: > when: >
"{{ ansible_default_ipv4.address }}" in "{{ gluster_peers|join(' ') }}" "{{ ansible_default_ipv4.address }}" in "{{ gluster_peers|join(' ') }}"
ignore_errors: yes ignore_errors: yes
...@@ -32,52 +32,52 @@ ...@@ -32,52 +32,52 @@
- name: primary | create peers - name: primary | create peers
command: gluster peer probe {{ item }} command: gluster peer probe {{ item }}
with_items: gluster_peers with_items: "{{ gluster_peers }}"
when: ansible_default_ipv4.address == gluster_primary_ip when: ansible_default_ipv4.address == gluster_primary_ip
tags: gluster tags: gluster
- name: primary | create volumes - name: primary | create volumes
command: gluster volume create {{ item.name }} replica {{ item.replicas }} transport tcp {% for server in gluster_peers %}{{ server }}:{{ item.path }} {% endfor %} command: gluster volume create {{ item.name }} replica {{ item.replicas }} transport tcp {% for server in gluster_peers %}{{ server }}:{{ item.path }} {% endfor %}
with_items: gluster_volumes with_items: "{{ gluster_volumes }}"
when: ansible_default_ipv4.address == gluster_primary_ip when: ansible_default_ipv4.address == gluster_primary_ip
ignore_errors: yes # There should be better error checking here ignore_errors: yes # There should be better error checking here
tags: gluster tags: gluster
- name: primary | start volumes - name: primary | start volumes
command: gluster volume start {{ item.name }} command: gluster volume start {{ item.name }}
with_items: gluster_volumes with_items: "{{ gluster_volumes }}"
when: ansible_default_ipv4.address == gluster_primary_ip when: ansible_default_ipv4.address == gluster_primary_ip
ignore_errors: yes # There should be better error checking here ignore_errors: yes # There should be better error checking here
tags: gluster tags: gluster
- name: primary | set security - name: primary | set security
command: gluster volume set {{ item.name }} auth.allow {{ item.security }} command: gluster volume set {{ item.name }} auth.allow {{ item.security }}
with_items: gluster_volumes with_items: "{{ gluster_volumes }}"
when: ansible_default_ipv4.address == gluster_primary_ip when: ansible_default_ipv4.address == gluster_primary_ip
tags: gluster tags: gluster
- name: primary | set performance cache - name: primary | set performance cache
command: gluster volume set {{ item.name }} performance.cache-size {{ item.cache_size }} command: gluster volume set {{ item.name }} performance.cache-size {{ item.cache_size }}
with_items: gluster_volumes with_items: "{{ gluster_volumes }}"
when: ansible_default_ipv4.address == gluster_primary_ip when: ansible_default_ipv4.address == gluster_primary_ip
tags: gluster tags: gluster
- name: all | mount volume - name: all | mount volume
mount: > mount:
name={{ item.mount_location }} name: "{{ item.mount_location }}"
src={{ gluster_primary_ip }}:{{ item.name }} src: "{{ gluster_primary_ip }}:{{ item.name }}"
fstype=glusterfs fstype: glusterfs
state=mounted state: mounted
opts=defaults,_netdev opts: defaults,_netdev
with_items: gluster_volumes with_items: "{{ gluster_volumes }}"
tags: gluster tags: gluster
# This required due to an annoying bug in Ubuntu and gluster where it tries to mount the system # This required due to an annoying bug in Ubuntu and gluster where it tries to mount the system
# before the network stack is up and can't lookup 127.0.0.1 # before the network stack is up and can't lookup 127.0.0.1
- name: all | sleep mount - name: all | sleep mount
lineinfile: > lineinfile:
dest=/etc/rc.local dest: /etc/rc.local
line='sleep 5; /bin/mount -a' line: 'sleep 5; /bin/mount -a'
regexp='sleep 5; /bin/mount -a' regexp: 'sleep 5; /bin/mount -a'
insertbefore='exit 0' insertbefore: 'exit 0'
tags: gluster tags: gluster
...@@ -37,13 +37,13 @@ ...@@ -37,13 +37,13 @@
state: present state: present
update_cache: true update_cache: true
cache_valid_time: 3600 cache_valid_time: 3600
with_items: GO_SERVER_BACKUP_APT_PKGS with_items: "{{ GO_SERVER_BACKUP_APT_PKGS }}"
- name: install required python packages - name: install required python packages
pip: pip:
name: "{{ item }}" name: "{{ item }}"
state: present state: present
with_items: GO_SERVER_BACKUP_PIP_PKGS with_items: "{{ GO_SERVER_BACKUP_PIP_PKGS }}"
- name: create the temp directory - name: create the temp directory
file: file:
......
...@@ -52,7 +52,7 @@ ...@@ -52,7 +52,7 @@
state: present state: present
update_cache: true update_cache: true
cache_valid_time: 3600 cache_valid_time: 3600
with_items: GO_SERVER_APT_PKGS with_items: "{{ GO_SERVER_APT_PKGS }}"
- name: create go-server plugin directory - name: create go-server plugin directory
file: file:
...@@ -76,20 +76,17 @@ ...@@ -76,20 +76,17 @@
- { url: "{{ GO_SERVER_GITHUB_PR_PLUGIN_JAR_URL }}", md5: "{{ GO_SERVER_GITHUB_PR_PLUGIN_MD5 }}" } - { url: "{{ GO_SERVER_GITHUB_PR_PLUGIN_JAR_URL }}", md5: "{{ GO_SERVER_GITHUB_PR_PLUGIN_MD5 }}" }
- name: generate line for go-server password file for admin user - name: generate line for go-server password file for admin user
command: > command: "/usr/bin/htpasswd -nbs \"{{ GO_SERVER_ADMIN_USERNAME }}\" \"{{ GO_SERVER_ADMIN_PASSWORD }}\""
/usr/bin/htpasswd -nbs "{{ GO_SERVER_ADMIN_USERNAME }}" "{{ GO_SERVER_ADMIN_PASSWORD }}"
register: admin_user_password_line register: admin_user_password_line
when: GO_SERVER_ADMIN_USERNAME and GO_SERVER_ADMIN_PASSWORD when: GO_SERVER_ADMIN_USERNAME and GO_SERVER_ADMIN_PASSWORD
- name: generate line for go-server password file for backup user - name: generate line for go-server password file for backup user
command: > command: "/usr/bin/htpasswd -nbs \"{{ GO_SERVER_BACKUP_USERNAME }}\" \"{{ GO_SERVER_BACKUP_PASSWORD }}\""
/usr/bin/htpasswd -nbs "{{ GO_SERVER_BACKUP_USERNAME }}" "{{ GO_SERVER_BACKUP_PASSWORD }}"
register: backup_user_password_line register: backup_user_password_line
when: GO_SERVER_BACKUP_USERNAME and GO_SERVER_BACKUP_PASSWORD when: GO_SERVER_BACKUP_USERNAME and GO_SERVER_BACKUP_PASSWORD
- name: generate line for go-server password file for gomatic user - name: generate line for go-server password file for gomatic user
command: > command: "/usr/bin/htpasswd -nbs \"{{ GO_SERVER_GOMATIC_USERNAME }}\" \"{{ GO_SERVER_GOMATIC_PASSWORD }}\""
/usr/bin/htpasswd -nbs "{{ GO_SERVER_GOMATIC_USERNAME }}" "{{ GO_SERVER_GOMATIC_PASSWORD }}"
register: gomatic_user_password_line register: gomatic_user_password_line
when: GO_SERVER_GOMATIC_USERNAME and GO_SERVER_GOMATIC_PASSWORD when: GO_SERVER_GOMATIC_USERNAME and GO_SERVER_GOMATIC_PASSWORD
......
...@@ -23,68 +23,84 @@ ...@@ -23,68 +23,84 @@
# #
- name: install system packages - name: install system packages
apt: > apt:
pkg={{ item }} pkg: "{{ item }}"
state=present state: present
with_items: hadoop_common_debian_pkgs with_items: "{{ hadoop_common_debian_pkgs }}"
- name: ensure group exists - name: ensure group exists
group: name={{ hadoop_common_group }} system=yes state=present group:
name: "{{ hadoop_common_group }}"
system: yes
state: present
- name: ensure user exists - name: ensure user exists
user: > user:
name={{ hadoop_common_user }} name: "{{ hadoop_common_user }}"
group={{ hadoop_common_group }} group: "{{ hadoop_common_group }}"
home={{ HADOOP_COMMON_USER_HOME }} createhome=yes home: "{{ HADOOP_COMMON_USER_HOME }}"
shell=/bin/bash system=yes generate_ssh_key=yes createhome: yes
state=present shell: /bin/bash
system: yes
generate_ssh_key: yes
state: present
- name: own key authorized - name: own key authorized
file: > file:
src={{ HADOOP_COMMON_USER_HOME }}/.ssh/id_rsa.pub src: "{{ HADOOP_COMMON_USER_HOME }}/.ssh/id_rsa.pub"
dest={{ HADOOP_COMMON_USER_HOME }}/.ssh/authorized_keys dest: "{{ HADOOP_COMMON_USER_HOME }}/.ssh/authorized_keys"
owner={{ hadoop_common_user }} group={{ hadoop_common_group }} state=link owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
state: link
- name: ssh configured - name: ssh configured
template: > template:
src=hadoop_user_ssh_config.j2 src: hadoop_user_ssh_config.j2
dest={{ HADOOP_COMMON_USER_HOME }}/.ssh/config dest: "{{ HADOOP_COMMON_USER_HOME }}/.ssh/config"
mode=0600 owner={{ hadoop_common_user }} group={{ hadoop_common_group }} mode: 0600
owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
- name: ensure user is in sudoers - name: ensure user is in sudoers
lineinfile: > lineinfile:
dest=/etc/sudoers state=present dest: /etc/sudoers
regexp='^%hadoop ALL\=' line='%hadoop ALL=(ALL) NOPASSWD:ALL' state: present
validate='visudo -cf %s' regexp: '^%hadoop ALL\='
line: '%hadoop ALL=(ALL) NOPASSWD:ALL'
validate: 'visudo -cf %s'
- name: check if downloaded and extracted - name: check if downloaded and extracted
stat: path={{ HADOOP_COMMON_HOME }} stat: path={{ HADOOP_COMMON_HOME }}
register: extracted_hadoop_dir register: extracted_hadoop_dir
- name: distribution downloaded - name: distribution downloaded
get_url: > get_url:
url={{ hadoop_common_dist.url }} url: "{{ hadoop_common_dist.url }}"
sha256sum={{ hadoop_common_dist.sha256sum }} sha256sum: "{{ hadoop_common_dist.sha256sum }}"
dest={{ hadoop_common_temporary_dir }} dest: "{{ hadoop_common_temporary_dir }}"
when: not extracted_hadoop_dir.stat.exists when: not extracted_hadoop_dir.stat.exists
- name: distribution extracted - name: distribution extracted
shell: > shell: "tar -xzf {{ hadoop_common_temporary_dir }}/{{ hadoop_common_dist.filename }} && chown -R {{ hadoop_common_user }}:{{ hadoop_common_group }} hadoop-{{ HADOOP_COMMON_VERSION }}"
chdir={{ HADOOP_COMMON_USER_HOME }} args:
tar -xzf {{ hadoop_common_temporary_dir }}/{{ hadoop_common_dist.filename }} && chown -R {{ hadoop_common_user }}:{{ hadoop_common_group }} hadoop-{{ HADOOP_COMMON_VERSION }} chdir: "{{ HADOOP_COMMON_USER_HOME }}"
when: not extracted_hadoop_dir.stat.exists when: not extracted_hadoop_dir.stat.exists
- name: versioned directory symlink created - name: versioned directory symlink created
file: > file:
src={{ HADOOP_COMMON_USER_HOME }}/hadoop-{{ HADOOP_COMMON_VERSION }} src: "{{ HADOOP_COMMON_USER_HOME }}/hadoop-{{ HADOOP_COMMON_VERSION }}"
dest={{ HADOOP_COMMON_HOME }} dest: "{{ HADOOP_COMMON_HOME }}"
owner={{ hadoop_common_user }} group={{ hadoop_common_group }} state=link owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
state: link
- name: configuration installed - name: configuration installed
template: > template:
src={{ item }}.j2 src: "{{ item }}.j2"
dest={{ HADOOP_COMMON_CONF_DIR }}/{{ item }} dest: "{{ HADOOP_COMMON_CONF_DIR }}/{{ item }}"
mode=0640 owner={{ hadoop_common_user }} group={{ hadoop_common_group }} mode: 0640
owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
with_items: with_items:
- hadoop-env.sh - hadoop-env.sh
- mapred-site.xml - mapred-site.xml
...@@ -93,79 +109,84 @@ ...@@ -93,79 +109,84 @@
- yarn-site.xml - yarn-site.xml
- name: upstart scripts installed - name: upstart scripts installed
template: > template:
src={{ item }}.j2 src: "{{ item }}.j2"
dest=/etc/init/{{ item }} dest: "/etc/init/{{ item }}"
mode=0640 owner=root group=root mode: 0640
owner: root
group: root
with_items: with_items:
- hdfs.conf - hdfs.conf
- yarn.conf - yarn.conf
- name: hadoop env file exists - name: hadoop env file exists
file: > file:
path={{ hadoop_common_env }} state=touch path: "{{ hadoop_common_env }}"
owner={{ hadoop_common_user }} group={{ hadoop_common_group }} state: touch
owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
- name: env vars sourced in bashrc - name: env vars sourced in bashrc
lineinfile: > lineinfile:
dest={{ HADOOP_COMMON_USER_HOME }}/.bashrc dest: "{{ HADOOP_COMMON_USER_HOME }}/.bashrc"
state=present state: present
regexp="^. {{ hadoop_common_env }}" regexp: "^. {{ hadoop_common_env }}"
line=". {{ hadoop_common_env }}" line: ". {{ hadoop_common_env }}"
insertbefore=BOF insertbefore: BOF
- name: env vars sourced in hadoop env - name: env vars sourced in hadoop env
lineinfile: > lineinfile:
dest={{ hadoop_common_env }} state=present dest: "{{ hadoop_common_env }}"
regexp="^. {{ HADOOP_COMMON_CONF_DIR }}/hadoop-env.sh" line=". {{ HADOOP_COMMON_CONF_DIR }}/hadoop-env.sh" state: present
regexp: "^. {{ HADOOP_COMMON_CONF_DIR }}/hadoop-env.sh"
line: ". {{ HADOOP_COMMON_CONF_DIR }}/hadoop-env.sh"
- name: check if native libraries need to be built - name: check if native libraries need to be built
stat: path={{ HADOOP_COMMON_USER_HOME }}/.native_libs_built stat: path={{ HADOOP_COMMON_USER_HOME }}/.native_libs_built
register: native_libs_built register: native_libs_built
- name: protobuf downloaded - name: protobuf downloaded
get_url: > get_url:
url={{ hadoop_common_protobuf_dist.url }} url: "{{ hadoop_common_protobuf_dist.url }}"
sha256sum={{ hadoop_common_protobuf_dist.sha256sum }} sha256sum: "{{ hadoop_common_protobuf_dist.sha256sum }}"
dest={{ hadoop_common_temporary_dir }} dest: "{{ hadoop_common_temporary_dir }}"
when: not native_libs_built.stat.exists when: not native_libs_built.stat.exists
- name: protobuf extracted - name: protobuf extracted
shell: > shell: "tar -xzf {{ hadoop_common_protobuf_dist.filename }}"
chdir={{ hadoop_common_temporary_dir }} args:
tar -xzf {{ hadoop_common_protobuf_dist.filename }} chdir: "{{ hadoop_common_temporary_dir }}"
when: not native_libs_built.stat.exists when: not native_libs_built.stat.exists
- name: protobuf installed - name: protobuf installed
shell: > shell: "./configure --prefix=/usr/local && make && make install"
chdir={{ hadoop_common_temporary_dir }}/protobuf-{{ HADOOP_COMMON_PROTOBUF_VERSION }} args:
./configure --prefix=/usr/local && make && make install chdir: "{{ hadoop_common_temporary_dir }}/protobuf-{{ HADOOP_COMMON_PROTOBUF_VERSION }}"
when: not native_libs_built.stat.exists when: not native_libs_built.stat.exists
- name: native lib source downloaded - name: native lib source downloaded
get_url: > get_url:
url={{ hadoop_common_native_dist.url }} url: "{{ hadoop_common_native_dist.url }}"
sha256sum={{ hadoop_common_native_dist.sha256sum }} sha256sum: "{{ hadoop_common_native_dist.sha256sum }}"
dest={{ hadoop_common_temporary_dir }}/{{ hadoop_common_native_dist.filename }} dest: "{{ hadoop_common_temporary_dir }}/{{ hadoop_common_native_dist.filename }}"
when: not native_libs_built.stat.exists when: not native_libs_built.stat.exists
- name: native lib source extracted - name: native lib source extracted
shell: > shell: "tar -xzf {{ hadoop_common_native_dist.filename }}"
chdir={{ hadoop_common_temporary_dir }} args:
tar -xzf {{ hadoop_common_native_dist.filename }} chdir: "{{ hadoop_common_temporary_dir }}"
when: not native_libs_built.stat.exists when: not native_libs_built.stat.exists
- name: native lib built - name: native lib built
shell: > shell: "mvn package -X -Pnative -DskipTests"
chdir={{ hadoop_common_temporary_dir }}/hadoop-common-release-{{ HADOOP_COMMON_VERSION }}/hadoop-common-project args:
mvn package -X -Pnative -DskipTests chdir: "{{ hadoop_common_temporary_dir }}/hadoop-common-release-{{ HADOOP_COMMON_VERSION }}/hadoop-common-project"
environment: environment:
LD_LIBRARY_PATH: /usr/local/lib LD_LIBRARY_PATH: /usr/local/lib
when: not native_libs_built.stat.exists when: not native_libs_built.stat.exists
- name: old native libs renamed - name: old native libs renamed
shell: > shell: "mv {{ HADOOP_COMMON_HOME }}/lib/native/{{ item.name }} {{ HADOOP_COMMON_HOME }}/lib/native/{{ item.new_name }}"
mv {{ HADOOP_COMMON_HOME }}/lib/native/{{ item.name }} {{ HADOOP_COMMON_HOME }}/lib/native/{{ item.new_name }}
with_items: with_items:
- { name: libhadoop.a, new_name: libhadoop32.a } - { name: libhadoop.a, new_name: libhadoop32.a }
- { name: libhadoop.so, new_name: libhadoop32.so } - { name: libhadoop.so, new_name: libhadoop32.so }
...@@ -173,9 +194,9 @@ ...@@ -173,9 +194,9 @@
when: not native_libs_built.stat.exists when: not native_libs_built.stat.exists
- name: new native libs installed - name: new native libs installed
shell: > shell: "chown {{ hadoop_common_user }}:{{ hadoop_common_group }} {{ item }} && cp {{ item }} {{ HADOOP_COMMON_HOME }}/lib/native/{{ item }}"
args:
chdir={{ hadoop_common_temporary_dir }}/hadoop-common-release-{{ HADOOP_COMMON_VERSION }}/hadoop-common-project/hadoop-common/target/native/target/usr/local/lib chdir={{ hadoop_common_temporary_dir }}/hadoop-common-release-{{ HADOOP_COMMON_VERSION }}/hadoop-common-project/hadoop-common/target/native/target/usr/local/lib
chown {{ hadoop_common_user }}:{{ hadoop_common_group }} {{ item }} && cp {{ item }} {{ HADOOP_COMMON_HOME }}/lib/native/{{ item }}
with_items: with_items:
- libhadoop.a - libhadoop.a
- libhadoop.so - libhadoop.so
...@@ -183,13 +204,17 @@ ...@@ -183,13 +204,17 @@
when: not native_libs_built.stat.exists when: not native_libs_built.stat.exists
- name: native lib marker touched - name: native lib marker touched
file: > file:
path={{ HADOOP_COMMON_USER_HOME }}/.native_libs_built path: "{{ HADOOP_COMMON_USER_HOME }}/.native_libs_built"
owner={{ hadoop_common_user }} group={{ hadoop_common_group }} state=touch owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
state: touch
when: not native_libs_built.stat.exists when: not native_libs_built.stat.exists
- name: service directory exists - name: service directory exists
file: > file:
path={{ HADOOP_COMMON_SERVICES_DIR }} path: "{{ HADOOP_COMMON_SERVICES_DIR }}"
mode=0750 owner={{ hadoop_common_user }} group={{ hadoop_common_group }} mode: "0750"
state=directory owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
state: directory
...@@ -22,9 +22,11 @@ ...@@ -22,9 +22,11 @@
notify: restart haproxy notify: restart haproxy
- name: Server configuration file - name: Server configuration file
template: > template:
src={{ haproxy_template_dir }}/haproxy.cfg.j2 dest=/etc/haproxy/haproxy.cfg src: "{{ haproxy_template_dir }}/haproxy.cfg.j2 dest=/etc/haproxy/haproxy.cfg"
owner=root group=root mode=0644 owner: root
group: root
mode: 0644
notify: reload haproxy notify: reload haproxy
- name: Enabled in default - name: Enabled in default
......
--- ---
# Installs the harprofiler # Installs the harprofiler
- name: create harprofiler user - name: create harprofiler user
user: > user:
name="{{ harprofiler_user }}" name: "{{ harprofiler_user }}"
createhome=no createhome: no
home={{ harprofiler_dir }} home: "{{ harprofiler_dir }}"
shell=/bin/bash shell: /bin/bash
- name: create harprofiler repo - name: create harprofiler repo
file: > file:
path={{ harprofiler_dir }} state=directory path: "{{ harprofiler_dir }}"
owner="{{ harprofiler_user }}" group="{{ common_web_group }}" state: directory
mode=0755 owner: "{{ harprofiler_user }}"
group: "{{ common_web_group }}"
mode: 0755
- name: check out the harprofiler - name: check out the harprofiler
git_2_0_1: > git_2_0_1:
dest={{ harprofiler_dir }} dest: "{{ harprofiler_dir }}"
repo={{ harprofiler_github_url }} version={{ harprofiler_version }} repo: "{{ harprofiler_github_url }}"
accept_hostkey=yes version: "{{ harprofiler_version }}"
accept_hostkey: yes
become_user: "{{ harprofiler_user }}" become_user: "{{ harprofiler_user }}"
- name: set bashrc for harprofiler user - name: set bashrc for harprofiler user
template: > template:
src=bashrc.j2 dest="{{ harprofiler_dir }}/.bashrc" owner="{{ harprofiler_user }}" src: bashrc.j2
mode=0755 dest: "{{ harprofiler_dir }}/.bashrc"
owner: "{{ harprofiler_user }}"
mode: 0755
- name: install requirements - name: install requirements
pip: > pip:
requirements="{{ harprofiler_dir }}/requirements.txt" virtualenv="{{ harprofiler_venv_dir }}" requirements: "{{ harprofiler_dir }}/requirements.txt"
virtualenv: "{{ harprofiler_venv_dir }}"
become_user: "{{ harprofiler_user }}" become_user: "{{ harprofiler_user }}"
- name: update config file - name: update config file
# harprofiler ships with a default config file. Doing a line-replace for the default # harprofiler ships with a default config file. Doing a line-replace for the default
# configuration that does not match what this machine will have # configuration that does not match what this machine will have
lineinfile: > lineinfile:
dest={{ harprofiler_dir }}/config.yaml dest: "{{ harprofiler_dir }}/config.yaml"
regexp="browsermob_dir" regexp: "browsermob_dir"
line="browsermob_dir: /usr/local" line: "browsermob_dir: /usr/local"
state=present state: present
- name: create validation shell script - name: create validation shell script
template: template:
...@@ -47,8 +53,8 @@ ...@@ -47,8 +53,8 @@
mode: 0755 mode: 0755
become_user: "{{ harprofiler_user }}" become_user: "{{ harprofiler_user }}"
- name: test install - name: test install
shell: > shell: "./{{ harprofiler_validation_script }}"
./{{ harprofiler_validation_script }} chdir={{ harprofiler_dir }} args:
chdir: "{{ harprofiler_dir }}"
become_user: "{{ harprofiler_user }}" become_user: "{{ harprofiler_user }}"
...@@ -31,7 +31,7 @@ ...@@ -31,7 +31,7 @@
- install - install
- install:app-requirements - install:app-requirements
become_user: "{{ harstorage_user }}" become_user: "{{ harstorage_user }}"
with_items: harstorage_python_pkgs with_items: "{{ harstorage_python_pkgs }}"
- name: create directories - name: create directories
file: file:
......
...@@ -21,63 +21,71 @@ ...@@ -21,63 +21,71 @@
- name: check if downloaded and extracted - name: check if downloaded and extracted
stat: path={{ HIVE_HOME }} stat:
path: "{{ HIVE_HOME }}"
register: extracted_dir register: extracted_dir
- name: distribution downloaded - name: distribution downloaded
get_url: > get_url:
url={{ hive_dist.url }} url: "{{ hive_dist.url }}"
sha256sum={{ hive_dist.sha256sum }} sha256sum: "{{ hive_dist.sha256sum }}"
dest={{ hive_temporary_dir }} dest: "{{ hive_temporary_dir }}"
when: not extracted_dir.stat.exists when: not extracted_dir.stat.exists
- name: distribution extracted - name: distribution extracted
shell: > shell: "tar -xzf {{ hive_temporary_dir }}/{{ hive_dist.filename }} && chown -R {{ hadoop_common_user }}:{{ hadoop_common_group }} hive-{{ HIVE_VERSION }}-bin"
chdir={{ HADOOP_COMMON_USER_HOME }} args:
tar -xzf {{ hive_temporary_dir }}/{{ hive_dist.filename }} && chown -R {{ hadoop_common_user }}:{{ hadoop_common_group }} hive-{{ HIVE_VERSION }}-bin chdir: "{{ HADOOP_COMMON_USER_HOME }}"
when: not extracted_dir.stat.exists when: not extracted_dir.stat.exists
- name: versioned directory symlink created - name: versioned directory symlink created
file: > file:
src={{ HADOOP_COMMON_USER_HOME }}/hive-{{ HIVE_VERSION }}-bin src: "{{ HADOOP_COMMON_USER_HOME }}/hive-{{ HIVE_VERSION }}-bin"
dest={{ HIVE_HOME }} dest: "{{ HIVE_HOME }}"
owner={{ hadoop_common_user }} group={{ hadoop_common_group }} state=link owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
state: link
- name: hive mysql connector distribution downloaded - name: hive mysql connector distribution downloaded
get_url: > get_url:
url={{ hive_mysql_connector_dist.url }} url: "{{ hive_mysql_connector_dist.url }}"
sha256sum={{ hive_mysql_connector_dist.sha256sum }} sha256sum: "{{ hive_mysql_connector_dist.sha256sum }}"
dest={{ hive_temporary_dir }} dest: "{{ hive_temporary_dir }}"
when: not extracted_dir.stat.exists when: not extracted_dir.stat.exists
- name: hive mysql connector distribution extracted - name: hive mysql connector distribution extracted
shell: > shell: "tar -xzf {{ hive_temporary_dir }}/{{ hive_mysql_connector_dist.filename }}"
chdir={{ hive_temporary_dir }} args:
tar -xzf {{ hive_temporary_dir }}/{{ hive_mysql_connector_dist.filename }} chdir: "{{ hive_temporary_dir }}"
when: not extracted_dir.stat.exists when: not extracted_dir.stat.exists
- name: hive lib exists - name: hive lib exists
file: > file:
path={{ HIVE_LIB }} path: "{{ HIVE_LIB }}"
owner={{ hadoop_common_user }} group={{ hadoop_common_group }} state=directory owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
state: directory
- name: hive mysql connector installed - name: hive mysql connector installed
shell: > shell: "cp mysql-connector-java-{{ HIVE_MYSQL_CONNECTOR_VERSION }}-bin.jar {{ HIVE_LIB }} && chown {{ hadoop_common_user }}:{{ hadoop_common_group }} {{ HIVE_LIB }}/mysql-connector-java-{{ HIVE_MYSQL_CONNECTOR_VERSION }}-bin.jar"
chdir=/{{ hive_temporary_dir }}/mysql-connector-java-{{ HIVE_MYSQL_CONNECTOR_VERSION }} args:
cp mysql-connector-java-{{ HIVE_MYSQL_CONNECTOR_VERSION }}-bin.jar {{ HIVE_LIB }} && chdir: "/{{ hive_temporary_dir }}/mysql-connector-java-{{ HIVE_MYSQL_CONNECTOR_VERSION }}"
chown {{ hadoop_common_user }}:{{ hadoop_common_group }} {{ HIVE_LIB }}/mysql-connector-java-{{ HIVE_MYSQL_CONNECTOR_VERSION }}-bin.jar
when: not extracted_dir.stat.exists when: not extracted_dir.stat.exists
- name: configuration installed - name: configuration installed
template: > template:
src={{ item }}.j2 src: "{{ item }}.j2"
dest={{ HIVE_CONF }}/{{ item }} dest: "{{ HIVE_CONF }}/{{ item }}"
mode=0640 owner={{ hadoop_common_user }} group={{ hadoop_common_group }} mode: 0640
owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
with_items: with_items:
- hive-env.sh - hive-env.sh
- hive-site.xml - hive-site.xml
- name: env vars sourced in hadoop env - name: env vars sourced in hadoop env
lineinfile: > lineinfile:
dest={{ hadoop_common_env }} state=present dest: "{{ hadoop_common_env }}"
regexp="^. {{ HIVE_CONF }}/hive-env.sh" line=". {{ HIVE_CONF }}/hive-env.sh" state: present
regexp: "^. {{ HIVE_CONF }}/hive-env.sh"
line: ". {{ HIVE_CONF }}/hive-env.sh"
...@@ -22,31 +22,32 @@ ...@@ -22,31 +22,32 @@
# #
- name: setup the insights env file - name: setup the insights env file
template: > template:
src="edx/app/insights/insights_env.j2" src: "edx/app/insights/insights_env.j2"
dest="{{ insights_app_dir }}/insights_env" dest: "{{ insights_app_dir }}/insights_env"
owner={{ insights_user }} owner: "{{ insights_user }}"
group={{ insights_user }} group: "{{ insights_user }}"
mode=0644 mode: 0644
tags: tags:
- install - install
- install:configuration - install:configuration
- name: install application requirements - name: install application requirements
pip: > pip:
requirements="{{ insights_requirements_base }}/{{ item }}" requirements: "{{ insights_requirements_base }}/{{ item }}"
virtualenv="{{ insights_venv_dir }}" virtualenv: "{{ insights_venv_dir }}"
state=present extra_args="--exists-action w" state: present
extra_args: "--exists-action w"
become_user: "{{ insights_user }}" become_user: "{{ insights_user }}"
with_items: insights_requirements with_items: "{{ insights_requirements }}"
tags: tags:
- install - install
- install:app-requirements - install:app-requirements
- name: create nodeenv - name: create nodeenv
shell: > shell: "{{ insights_venv_dir }}/bin/nodeenv {{ insights_nodeenv_dir }} --prebuilt"
creates={{ insights_nodeenv_dir }} args:
{{ insights_venv_dir }}/bin/nodeenv {{ insights_nodeenv_dir }} --prebuilt creates: "{{ insights_nodeenv_dir }}"
become_user: "{{ insights_user }}" become_user: "{{ insights_user }}"
tags: tags:
- install - install
...@@ -61,21 +62,19 @@ ...@@ -61,21 +62,19 @@
environment: "{{ insights_environment }}" environment: "{{ insights_environment }}"
- name: install bower dependencies - name: install bower dependencies
shell: > shell: ". {{ insights_venv_dir }}/bin/activate && . {{ insights_nodeenv_bin }}/activate && {{ insights_node_bin }}/bower install --production --config.interactive=false"
chdir={{ insights_code_dir }} args:
. {{ insights_venv_dir }}/bin/activate && chdir: "{{ insights_code_dir }}"
. {{ insights_nodeenv_bin }}/activate && {{ insights_node_bin }}/bower install --production --config.interactive=false
become_user: "{{ insights_user }}" become_user: "{{ insights_user }}"
tags: tags:
- install - install
- install:app-requirements - install:app-requirements
- name: migrate - name: migrate
shell: > shell: "DB_MIGRATION_USER='{{ COMMON_MYSQL_MIGRATE_USER }}' DB_MIGRATION_PASS='{{ COMMON_MYSQL_MIGRATE_PASS }}' {{ insights_venv_dir }}/bin/python {{ insights_manage }} migrate --noinput"
chdir={{ insights_code_dir }} args:
DB_MIGRATION_USER='{{ COMMON_MYSQL_MIGRATE_USER }}' chdir: "{{ insights_code_dir }}"
DB_MIGRATION_PASS='{{ COMMON_MYSQL_MIGRATE_PASS }}'
{{ insights_venv_dir }}/bin/python {{ insights_manage }} migrate --noinput
become_user: "{{ insights_user }}" become_user: "{{ insights_user }}"
environment: "{{ insights_environment }}" environment: "{{ insights_environment }}"
when: migrate_db is defined and migrate_db|lower == "yes" when: migrate_db is defined and migrate_db|lower == "yes"
...@@ -84,18 +83,18 @@ ...@@ -84,18 +83,18 @@
- migrate:db - migrate:db
- name: run r.js optimizer - name: run r.js optimizer
shell: > shell: ". {{ insights_nodeenv_bin }}/activate && {{ insights_node_bin }}/r.js -o build.js"
chdir={{ insights_code_dir }} args:
. {{ insights_nodeenv_bin }}/activate && {{ insights_node_bin }}/r.js -o build.js chdir: "{{ insights_code_dir }}"
become_user: "{{ insights_user }}" become_user: "{{ insights_user }}"
tags: tags:
- assets - assets
- assets:gather - assets:gather
- name: run collectstatic - name: run collectstatic
shell: > shell: "{{ insights_venv_dir }}/bin/python {{ insights_manage }} {{ item }}"
chdir={{ insights_code_dir }} args:
{{ insights_venv_dir }}/bin/python {{ insights_manage }} {{ item }} chdir: "{{ insights_code_dir }}"
become_user: "{{ insights_user }}" become_user: "{{ insights_user }}"
environment: "{{ insights_environment }}" environment: "{{ insights_environment }}"
with_items: with_items:
...@@ -106,38 +105,42 @@ ...@@ -106,38 +105,42 @@
- assets:gather - assets:gather
- name: compile translations - name: compile translations
shell: > shell: ". {{ insights_venv_dir }}/bin/activate && i18n_tool generate -v"
chdir={{ insights_code_dir }}/analytics_dashboard args:
. {{ insights_venv_dir }}/bin/activate && i18n_tool generate -v chdir: "{{ insights_code_dir }}/analytics_dashboard"
become_user: "{{ insights_user }}" become_user: "{{ insights_user }}"
tags: tags:
- assets - assets
- assets:gather - assets:gather
- name: write out the supervisior wrapper - name: write out the supervisior wrapper
template: > template:
src=edx/app/insights/insights.sh.j2 src: "edx/app/insights/insights.sh.j2"
dest={{ insights_app_dir }}/{{ insights_service_name }}.sh dest: "{{ insights_app_dir }}/{{ insights_service_name }}.sh"
mode=0650 owner={{ supervisor_user }} group={{ common_web_user }} mode: 0650
owner: "{{ supervisor_user }}"
group: "{{ common_web_user }}"
tags: tags:
- install - install
- install:configuration - install:configuration
- name: write supervisord config - name: write supervisord config
template: > template:
src=edx/app/supervisor/conf.d.available/insights.conf.j2 src: edx/app/supervisor/conf.d.available/insights.conf.j2
dest="{{ supervisor_available_dir }}/{{ insights_service_name }}.conf" dest: "{{ supervisor_available_dir }}/{{ insights_service_name }}.conf"
owner={{ supervisor_user }} group={{ common_web_user }} mode=0644 owner: "{{ supervisor_user }}"
group: "{{ common_web_user }}"
mode: 0644
tags: tags:
- install - install
- install:configuration - install:configuration
- name: enable supervisor script - name: enable supervisor script
file: > file:
src={{ supervisor_available_dir }}/{{ insights_service_name }}.conf src: "{{ supervisor_available_dir }}/{{ insights_service_name }}.conf"
dest={{ supervisor_cfg_dir }}/{{ insights_service_name }}.conf dest: "{{ supervisor_cfg_dir }}/{{ insights_service_name }}.conf"
state=link state: link
force=yes force: yes
when: not disable_edx_services when: not disable_edx_services
tags: tags:
- install - install
...@@ -151,10 +154,10 @@ ...@@ -151,10 +154,10 @@
- manage:start - manage:start
- name: create symlinks from the venv bin dir - name: create symlinks from the venv bin dir
file: > file:
src="{{ insights_venv_dir }}/bin/{{ item }}" src: "{{ insights_venv_dir }}/bin/{{ item }}"
dest="{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.{{ insights_service_name }}" dest: "{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.{{ insights_service_name }}"
state=link state: link
with_items: with_items:
- python - python
- pip - pip
...@@ -164,20 +167,20 @@ ...@@ -164,20 +167,20 @@
- install:base - install:base
- name: create manage.py symlink - name: create manage.py symlink
file: > file:
src="{{ insights_manage }}" src: "{{ insights_manage }}"
dest="{{ COMMON_BIN_DIR }}/manage.{{ insights_service_name }}" dest: "{{ COMMON_BIN_DIR }}/manage.{{ insights_service_name }}"
state=link state: link
tags: tags:
- install - install
- install:base - install:base
- name: restart insights - name: restart insights
supervisorctl: > supervisorctl:
state=restarted state: restarted
supervisorctl_path={{ supervisor_ctl }} supervisorctl_path: "{{ supervisor_ctl }}"
config={{ supervisor_cfg }} config: "{{ supervisor_cfg }}"
name={{ insights_service_name }} name: "{{ insights_service_name }}"
when: not disable_edx_services when: not disable_edx_services
become_user: "{{ supervisor_service_user }}" become_user: "{{ supervisor_service_user }}"
tags: tags:
......
...@@ -34,108 +34,125 @@ ...@@ -34,108 +34,125 @@
when: JENKINS_ADMIN_S3_PROFILE.secret_key is not defined when: JENKINS_ADMIN_S3_PROFILE.secret_key is not defined
- name: add admin specific apt repositories - name: add admin specific apt repositories
apt_repository: repo="{{ item }}" state=present update_cache=yes apt_repository:
with_items: jenkins_admin_debian_repos repo: "{{ item }}"
state: "present"
update_cache: "yes"
with_items: "{{ jenkins_admin_debian_repos }}"
- name: create the scripts directory - name: create the scripts directory
file: path={{ jenkins_admin_scripts_dir }} state=directory file:
owner={{ jenkins_user }} group={{ jenkins_group }} mode=755 path: "{{ jenkins_admin_scripts_dir }}"
state: "directory"
owner: "{{ jenkins_user }}"
group: "{{ jenkins_group }}"
mode: 0755
- name: configure s3 plugin - name: configure s3 plugin
template: > template:
src="./{{ jenkins_home }}/hudson.plugins.s3.S3BucketPublisher.xml.j2" src: "./{{ jenkins_home }}/hudson.plugins.s3.S3BucketPublisher.xml.j2"
dest="{{ jenkins_home }}/hudson.plugins.s3.S3BucketPublisher.xml" dest: "{{ jenkins_home }}/hudson.plugins.s3.S3BucketPublisher.xml"
owner={{ jenkins_user }} owner: "{{ jenkins_user }}"
group={{ jenkins_group }} group: "{{ jenkins_group }}"
mode=0644 mode: 0644
- name: configure the boto profiles for jenkins - name: configure the boto profiles for jenkins
template: > template:
src="./{{ jenkins_home }}/boto.j2" src: "./{{ jenkins_home }}/boto.j2"
dest="{{ jenkins_home }}/.boto" dest: "{{ jenkins_home }}/.boto"
owner="{{ jenkins_user }}" owner: "{{ jenkins_user }}"
group="{{ jenkins_group }}" group: "{{ jenkins_group }}"
mode="0600" mode: 0600
tags: tags:
- aws-config - aws-config
- name: create the .aws directory - name: create the .aws directory
file: path={{ jenkins_home }}/.aws state=directory file:
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700 path: "{{ jenkins_home }}/.aws"
state: "directory"
owner: "{{ jenkins_user }}"
group: "{{ jenkins_group }}"
mode: 0700
tags: tags:
- aws-config - aws-config
- name: configure the awscli profiles for jenkins - name: configure the awscli profiles for jenkins
template: > template:
src="./{{ jenkins_home }}/aws_config.j2" src: "./{{ jenkins_home }}/aws_config.j2"
dest="{{ jenkins_home }}/.aws/config" dest: "{{ jenkins_home }}/.aws/config"
owner="{{ jenkins_user }}" owner: "{{ jenkins_user }}"
group="{{ jenkins_group }}" group: "{{ jenkins_group }}"
mode="0600" mode: 0600
tags: tags:
- aws-config - aws-config
- name: create the ssh directory - name: create the ssh directory
file: > file:
path={{ jenkins_home }}/.ssh path: "{{ jenkins_home }}/.ssh"
owner={{ jenkins_user }} owner: "{{ jenkins_user }}"
group={{ jenkins_group }} group: "{{ jenkins_group }}"
mode=0700 mode: 0700
state=directory state: directory
# Need to add Github to known_hosts to avoid # Need to add Github to known_hosts to avoid
# being prompted when using git through ssh # being prompted when using git through ssh
- name: Add github.com to known_hosts if it does not exist - name: Add github.com to known_hosts if it does not exist
shell: > shell: "ssh-keygen -f {{ jenkins_home }}/.ssh/known_hosts -H -F github.com | grep -q found || ssh-keyscan -H github.com > {{ jenkins_home }}/.ssh/known_hosts"
ssh-keygen -f {{ jenkins_home }}/.ssh/known_hosts -H -F github.com | grep -q found || ssh-keyscan -H github.com > {{ jenkins_home }}/.ssh/known_hosts
- name: create job directory - name: create job directory
file: > file:
path="{{ jenkins_home }}/jobs" path: "{{ jenkins_home }}/jobs"
owner="{{ jenkins_user }}" owner: "{{ jenkins_user }}"
group="{{ jenkins_group }}" group: "{{ jenkins_group }}"
mode=0755 mode: 0755
state=directory state: directory
- name: create admin job directories - name: create admin job directories
file: > file:
path="{{ jenkins_home }}/jobs/{{ item }}" path: "{{ jenkins_home }}/jobs/{{ item }}"
owner={{ jenkins_user }} owner: "{{ jenkins_user }}"
group={{ jenkins_group }} group: "{{ jenkins_group }}"
mode=0755 mode: 0755
state=directory state: directory
with_items: jenkins_admin_jobs with_items: jenkins_admin_jobs
- name: create admin job config files - name: create admin job config files
template: > template:
src="./{{ jenkins_home }}/jobs/{{ item }}/config.xml.j2" src: "./{{ jenkins_home }}/jobs/{{ item }}/config.xml.j2"
dest="{{ jenkins_home }}/jobs/{{ item }}/config.xml" dest: "{{ jenkins_home }}/jobs/{{ item }}/config.xml"
owner={{ jenkins_user }} owner: "{{ jenkins_user }}"
group={{ jenkins_group }} group: "{{ jenkins_group }}"
mode=0644 mode: 0644
with_items: jenkins_admin_jobs with_items: jenkins_admin_jobs
# adding chris-lea nodejs repo # adding chris-lea nodejs repo
- name: add ppas for current versions of nodejs - name: add ppas for current versions of nodejs
apt_repository: repo="{{ jenkins_chrislea_ppa }}" apt_repository:
repo: "{{ jenkins_chrislea_ppa }}"
- name: install system packages for edxapp virtualenvs - name: install system packages for edxapp virtualenvs
apt: pkg={{','.join(jenkins_admin_debian_pkgs)}} state=present update_cache=yes apt:
pkg: "{{ ','.join(jenkins_admin_debian_pkgs) }}"
state: "present"
update_cache: yes
# This is necessary so that ansible can run with # This is necessary so that ansible can run with
# sudo set to True (as the jenkins user) on jenkins # sudo set to True (as the jenkins user) on jenkins
- name: grant sudo access to the jenkins user - name: grant sudo access to the jenkins user
copy: > copy:
content="{{ jenkins_user }} ALL=({{ jenkins_user }}) NOPASSWD:ALL" content: "{{ jenkins_user }} ALL=({{ jenkins_user }}) NOPASSWD:ALL"
dest=/etc/sudoers.d/99-jenkins owner=root group=root dest: "/etc/sudoers.d/99-jenkins"
mode=0440 validate='visudo -cf %s' owner: "root"
group: "root"
mode: 0440
validate: "visudo -cf %s"
- name: install global gem dependencies - name: install global gem dependencies
gem: > gem:
name={{ item.name }} name: "{{ item.name }}"
state=present state: present
version={{ item.version }} version: "{{ item.version }}"
user_install=no user_install: no
with_items: jenkins_admin_gem_pkgs with_items: jenkins_admin_gem_pkgs
- name: get s3 one time url - name: get s3 one time url
...@@ -152,7 +169,7 @@ ...@@ -152,7 +169,7 @@
get_url: get_url:
url: "{{ s3_one_time_url.url }}" url: "{{ s3_one_time_url.url }}"
dest: "/tmp/{{ JENKINS_ADMIN_BACKUP_S3_KEY | basename }}" dest: "/tmp/{{ JENKINS_ADMIN_BACKUP_S3_KEY | basename }}"
mode: "0644" mode: 0644
owner: "{{ jenkins_user }}" owner: "{{ jenkins_user }}"
when: JENKINS_ADMIN_BACKUP_BUCKET is defined and JENKINS_ADMIN_BACKUP_S3_KEY is defined when: JENKINS_ADMIN_BACKUP_BUCKET is defined and JENKINS_ADMIN_BACKUP_S3_KEY is defined
......
...@@ -26,15 +26,14 @@ ...@@ -26,15 +26,14 @@
dest: "{{ jenkins_cli_jar }}" dest: "{{ jenkins_cli_jar }}"
- name: execute command - name: execute command
shell: > shell: "{{ jenkins_command_prefix|default('') }} java -jar {{ jenkins_cli_jar }} -s http://localhost:{{ jenkins_port }} {{ jenkins_auth_realm.cli_auth }} {{ jenkins_command_string }}"
{{ jenkins_command_prefix|default('') }} java -jar {{ jenkins_cli_jar }} -s http://localhost:{{ jenkins_port }}
{{ jenkins_auth_realm.cli_auth }}
{{ jenkins_command_string }}
register: jenkins_command_output register: jenkins_command_output
ignore_errors: "{{ jenkins_ignore_cli_errors|default (False) }}" ignore_errors: "{{ jenkins_ignore_cli_errors|default (False) }}"
- name: "clean up --- remove the credentials dir" - name: "clean up --- remove the credentials dir"
file: name=jenkins_cli_root state=absent file:
name: jenkins_cli_root
state: absent
- name: "clean up --- remove cached Jenkins credentials" - name: "clean up --- remove cached Jenkins credentials"
command: rm -rf $HOME/.jenkins command: rm -rf $HOME/.jenkins
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
- name: install jenkins analytics extra system packages - name: install jenkins analytics extra system packages
apt: apt:
pkg={{ item }} state=present update_cache=yes pkg={{ item }} state=present update_cache=yes
with_items: JENKINS_ANALYTICS_EXTRA_PKGS with_items: "{{ JENKINS_ANALYTICS_EXTRA_PKGS }}"
tags: tags:
- jenkins - jenkins
...@@ -170,9 +170,9 @@ ...@@ -170,9 +170,9 @@
- jenkins-seed-job - jenkins-seed-job
- name: generate seed job xml - name: generate seed job xml
shell: > shell: "GRADLE_OPTS=\"-Dorg.gradle.daemon=true\" ./gradlew run -Pargs={{ jenkins_seed_job_script }}"
cd {{ jenkins_seed_job_root }} && args:
GRADLE_OPTS="-Dorg.gradle.daemon=true" ./gradlew run -Pargs={{ jenkins_seed_job_script }} chdir: "{{ jenkins_seed_job_root }}"
become: yes become: yes
become_user: "{{ jenkins_user }}" become_user: "{{ jenkins_user }}"
tags: tags:
......
...@@ -6,22 +6,20 @@ ...@@ -6,22 +6,20 @@
# refers to the --depth-setting of git clone. A value of 1 # refers to the --depth-setting of git clone. A value of 1
# will truncate all history prior to the last revision. # will truncate all history prior to the last revision.
- name: Create shallow clone of edx-platform - name: Create shallow clone of edx-platform
git_2_0_1: > git_2_0_1:
repo=https://github.com/edx/edx-platform.git repo: https://github.com/edx/edx-platform.git
dest={{ jenkins_home }}/shallow-clone dest: "{{ jenkins_home }}/shallow-clone"
version={{ jenkins_edx_platform_version }} version: "{{ jenkins_edx_platform_version }}"
depth=1 depth: 1
become_user: "{{ jenkins_user }}" become_user: "{{ jenkins_user }}"
# Install the platform requirements using pip. # Install the platform requirements using pip.
- name: Install edx-platform requirements using pip - name: Install edx-platform requirements using pip
pip: > pip:
requirements={{ jenkins_home }}/shallow-clone/requirements/edx/{{ item }} requirements: "{{ jenkins_home }}/shallow-clone/requirements/edx/{{ item }}"
extra_args="--exists-action=w" extra_args: "--exists-action=w"
virtualenv={{ jenkins_home }}/edx-venv virtualenv: "{{ jenkins_home }}/edx-venv"
virtualenv_command=virtualenv virtualenv_command: virtualenv
executable=pip
with_items: with_items:
- pre.txt - pre.txt
- github.txt - github.txt
...@@ -39,12 +37,11 @@ ...@@ -39,12 +37,11 @@
become_user: "{{ jenkins_user }}" become_user: "{{ jenkins_user }}"
- name: Install edx-platform post requirements using pip - name: Install edx-platform post requirements using pip
pip: > pip:
requirements={{ jenkins_home }}/shallow-clone/requirements/edx/{{ item }} requirements: "{{ jenkins_home }}/shallow-clone/requirements/edx/{{ item }}"
extra_args="--exists-action=w" extra_args: "--exists-action=w"
virtualenv={{ jenkins_home }}/edx-venv virtualenv: "{{ jenkins_home }}/edx-venv"
virtualenv_command=virtualenv virtualenv_command: virtualenv
executable=pip
with_items: with_items:
- post.txt - post.txt
become_user: "{{ jenkins_user }}" become_user: "{{ jenkins_user }}"
...@@ -55,9 +52,9 @@ ...@@ -55,9 +52,9 @@
# The edx-venv directory is deleted and then recreated # The edx-venv directory is deleted and then recreated
# cleanly from the archive by the jenkins build scripts. # cleanly from the archive by the jenkins build scripts.
- name: Create a clean virtualenv archive - name: Create a clean virtualenv archive
command: > command: "tar -cpzf edx-venv_clean.tar.gz edx-venv"
tar -cpzf edx-venv_clean.tar.gz edx-venv args:
chdir={{ jenkins_home }} chdir: "{{ jenkins_home }}"
become_user: "{{ jenkins_user }}" become_user: "{{ jenkins_user }}"
# Remove the shallow-clone directory now that we are # Remove the shallow-clone directory now that we are
......
...@@ -39,8 +39,7 @@ ...@@ -39,8 +39,7 @@
# Need to add Github to known_hosts to avoid # Need to add Github to known_hosts to avoid
# being prompted when using git through ssh # being prompted when using git through ssh
- name: Add github.com to known_hosts if it does not exist - name: Add github.com to known_hosts if it does not exist
shell: > shell: "ssh-keygen -f {{ jenkins_home }}/.ssh/known_hosts -H -F github.com | grep -q found || ssh-keyscan -H github.com > {{ jenkins_home }}/.ssh/known_hosts"
ssh-keygen -f {{ jenkins_home }}/.ssh/known_hosts -H -F github.com | grep -q found || ssh-keyscan -H github.com > {{ jenkins_home }}/.ssh/known_hosts
# Edit the /etc/hosts file so that the Preview button will work in Studio # Edit the /etc/hosts file so that the Preview button will work in Studio
- name: add preview.localhost to /etc/hosts - name: add preview.localhost to /etc/hosts
......
...@@ -12,28 +12,42 @@ ...@@ -12,28 +12,42 @@
- nginx - nginx
- name: Ensure {{ kibana_app_dir }} exists - name: Ensure {{ kibana_app_dir }} exists
file: path={{ kibana_app_dir }} state=directory owner=root group=root mode=0755 file:
path: "{{ kibana_app_dir }}"
state: directory
owner: root
group: root
mode: 0755
- name: Ensure subdirectories exist - name: Ensure subdirectories exist
file: path={{ kibana_app_dir }}/{{ item }} owner=root group=root mode=0755 state=directory file:
path: "{{ kibana_app_dir }}/{{ item }}"
owner: root
group: root
mode: 0755
state: directory
with_items: with_items:
- htdocs - htdocs
- share - share
- name: ensure we have the specified kibana release - name: ensure we have the specified kibana release
get_url: url={{ kibana_url }} dest={{ kibana_app_dir }}/share/{{ kibana_file }} get_url:
url: "{{ kibana_url }}"
dest: "{{ kibana_app_dir }}/share/{{ kibana_file }}"
- name: extract - name: extract
shell: > shell: "tar -xzvf {{ kibana_app_dir }}/share/{{ kibana_file }}"
chdir={{ kibana_app_dir }}/share args:
tar -xzvf {{ kibana_app_dir }}/share/{{ kibana_file }} chdir: "{{ kibana_app_dir }}/share"
creates={{ kibana_app_dir }}/share/{{ kibana_file|replace('.tar.gz','') }} creates: "{{ kibana_app_dir }}/share/{{ kibana_file|replace('.tar.gz','') }}"
- name: install - name: install
shell: > shell: "cp -R * {{ kibana_app_dir }}/htdocs/"
chdir={{ kibana_app_dir }}/share/{{ kibana_file|replace('.tar.gz','') }} args:
cp -R * {{ kibana_app_dir }}/htdocs/ chdir: "{{ kibana_app_dir }}/share/{{ kibana_file|replace('.tar.gz','') }}"
- name: copy config - name: copy config
template: src=config.js.j2 dest={{ kibana_app_dir }}/htdocs/config.js template:
src: config.js.j2
dest: "{{ kibana_app_dir }}/htdocs/config.js"
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
module: ec2_lookup module: ec2_lookup
region: "{{ region }}" region: "{{ region }}"
tags: tags:
- Name: "{{ name_tag }}" Name: "{{ name_tag }}"
register: tag_lookup register: tag_lookup
when: terminate_instance == true when: terminate_instance == true
...@@ -64,7 +64,7 @@ ...@@ -64,7 +64,7 @@
ttl: 300 ttl: 300
record: "{{ dns_name }}.{{ dns_zone }}" record: "{{ dns_name }}.{{ dns_zone }}"
value: "{{ item.public_dns_name }}" value: "{{ item.public_dns_name }}"
with_items: ec2.instances with_items: "{{ ec2.instances }}"
- name: Add DNS names for services - name: Add DNS names for services
local_action: local_action:
...@@ -77,7 +77,7 @@ ...@@ -77,7 +77,7 @@
record: "{{ item[1] }}-{{ dns_name }}.{{ dns_zone }}" record: "{{ item[1] }}-{{ dns_name }}.{{ dns_zone }}"
value: "{{ item[0].public_dns_name }}" value: "{{ item[0].public_dns_name }}"
with_nested: with_nested:
- ec2.instances - "{{ ec2.instances }}"
- ['studio', 'ecommerce', 'preview', 'programs', 'discovery', 'credentials'] - ['studio', 'ecommerce', 'preview', 'programs', 'discovery', 'credentials']
- name: Add new instance to host group - name: Add new instance to host group
...@@ -85,7 +85,7 @@ ...@@ -85,7 +85,7 @@
module: add_host module: add_host
hostname: "{{ item.public_ip }}" hostname: "{{ item.public_ip }}"
groups: launched groups: launched
with_items: ec2.instances with_items: "{{ ec2.instances }}"
- name: Wait for SSH to come up - name: Wait for SSH to come up
local_action: local_action:
...@@ -94,4 +94,4 @@ ...@@ -94,4 +94,4 @@
search_regex: OpenSSH search_regex: OpenSSH
port: 22 port: 22
delay: 10 delay: 10
with_items: ec2.instances with_items: "{{ ec2.instances }}"
...@@ -8,63 +8,63 @@ localdev_xvfb_display: ":1" ...@@ -8,63 +8,63 @@ localdev_xvfb_display: ":1"
localdev_accounts: localdev_accounts:
- { - {
user: "{{ edxapp_user|default('None') }}", user: "{{ edxapp_user|default('None') }}",
home: "{{ edxapp_app_dir }}", home: "{{ edxapp_app_dir|default('None') }}",
env: "edxapp_env", env: "edxapp_env",
repo: "edx-platform" repo: "edx-platform"
} }
- { - {
user: "{{ forum_user|default('None') }}", user: "{{ forum_user|default('None') }}",
home: "{{ forum_app_dir }}", home: "{{ forum_app_dir|default('None') }}",
env: "forum_env", env: "forum_env",
repo: "cs_comments_service" repo: "cs_comments_service"
} }
- { - {
user: "{{ notifier_user|default('None') }}", user: "{{ notifier_user|default('None') }}",
home: "{{ notifier_app_dir }}", home: "{{ notifier_app_dir|default('None') }}",
env: "notifier_env", env: "notifier_env",
repo: "" repo: ""
} }
- { - {
user: "{{ ecommerce_user|default('None') }}", user: "{{ ecommerce_user|default('None') }}",
home: "{{ ecommerce_home }}", home: "{{ ecommerce_home|default('None') }}",
env: "ecommerce_env", env: "ecommerce_env",
repo: "ecommerce" repo: "ecommerce"
} }
- { - {
user: "{{ ecommerce_worker_user|default('None') }}", user: "{{ ecommerce_worker_user|default('None') }}",
home: "{{ ecommerce_worker_home }}", home: "{{ ecommerce_worker_home|default('None') }}",
env: "ecommerce_worker_env", env: "ecommerce_worker_env",
repo: "ecommerce_worker" repo: "ecommerce_worker"
} }
- { - {
user: "{{ analytics_api_user|default('None') }}", user: "{{ analytics_api_user|default('None') }}",
home: "{{ analytics_api_home }}", home: "{{ analytics_api_home|default('None') }}",
env: "analytics_api_env", env: "analytics_api_env",
repo: "analytics_api" repo: "analytics_api"
} }
- { - {
user: "{{ insights_user|default('None') }}", user: "{{ insights_user|default('None') }}",
home: "{{ insights_home }}", home: "{{ insights_home|default('None') }}",
env: "insights_env", env: "insights_env",
repo: "edx_analytics_dashboard" repo: "edx_analytics_dashboard"
} }
- { - {
user: "{{ programs_user|default('None') }}", user: "{{ programs_user|default('None') }}",
home: "{{ programs_home }}", home: "{{ programs_home|default('None') }}",
env: "programs_env", env: "programs_env",
repo: "programs" repo: "programs"
} }
- { - {
user: "{{ credentials_user|default('None') }}", user: "{{ credentials_user|default('None') }}",
home: "{{ credentials_home }}", home: "{{ credentials_home|default('None') }}",
env: "credentials_env", env: "credentials_env",
repo: "credentials" repo: "credentials"
} }
......
...@@ -36,7 +36,7 @@ ...@@ -36,7 +36,7 @@
state: "present" state: "present"
update_cache: true update_cache: true
cache_valid_time: 3600 cache_valid_time: 3600
with_items: locust_debian_pkgs with_items: "{{ locust_debian_pkgs }}"
- name: Install application requirements - name: Install application requirements
pip: pip:
......
...@@ -49,26 +49,26 @@ ...@@ -49,26 +49,26 @@
- name: Install python requirements - name: Install python requirements
pip: name={{ item }} state=present pip: name={{ item }} state=present
with_items: logstash_python_requirements with_items: "{{ logstash_python_requirements }}"
- name: Checkout logstash rotation scripts - name: Checkout logstash rotation scripts
git: repo={{ logstash_scripts_repo }} dest={{ logstash_app_dir }}/share/logstash-elasticsearch-scripts git: repo={{ logstash_scripts_repo }} dest={{ logstash_app_dir }}/share/logstash-elasticsearch-scripts
when: LOGSTASH_ROTATE|bool when: LOGSTASH_ROTATE|bool
- name: Setup cron to run rotation - name: Setup cron to run rotation
cron: > cron:
user=root user: root
name="Elasticsearch logstash index rotation" name: "Elasticsearch logstash index rotation"
hour={{ logstash_rotate_cron.hour }} hour: "{{ logstash_rotate_cron.hour }}"
minute={{ logstash_rotate_cron.minute }} minute: "{{ logstash_rotate_cron.minute }}"
job="/usr/bin/python {{ logstash_app_dir }}/share/logstash-elasticsearch-scripts/logstash_index_cleaner.py -d {{ LOGSTASH_DAYS_TO_KEEP }} > {{ logstash_log_dir }}/rotation_cron" job: "/usr/bin/python {{ logstash_app_dir }}/share/logstash-elasticsearch-scripts/logstash_index_cleaner.py -d {{ LOGSTASH_DAYS_TO_KEEP }} > {{ logstash_log_dir }}/rotation_cron"
when: LOGSTASH_ROTATE|bool when: LOGSTASH_ROTATE|bool
- name: Setup cron to run rotation - name: Setup cron to run rotation
cron: > cron:
user=root user: root
name="Elasticsearch logstash index optimization" name: "Elasticsearch logstash index optimization"
hour={{ logstash_optimize_cron.hour }} hour: "{{ logstash_optimize_cron.hour }}"
minute={{ logstash_optimize_cron.minute }} minute: "{{ logstash_optimize_cron.minute }}"
job="/usr/bin/python {{ logstash_app_dir }}/share/logstash-elasticsearch-scripts/logstash_index_optimize.py -d {{ LOGSTASH_DAYS_TO_KEEP }} > {{ logstash_log_dir }}/optimize_cron" job: "/usr/bin/python {{ logstash_app_dir }}/share/logstash-elasticsearch-scripts/logstash_index_optimize.py -d {{ LOGSTASH_DAYS_TO_KEEP }} > {{ logstash_log_dir }}/optimize_cron"
when: LOGSTASH_ROTATE|bool when: LOGSTASH_ROTATE|bool
- name: copy galera cluster config - name: copy galera cluster config
template: > template:
src="etc/mysql/conf.d/galera.cnf.j2" src: "etc/mysql/conf.d/galera.cnf.j2"
dest="/etc/mysql/conf.d/galera.cnf" dest: "/etc/mysql/conf.d/galera.cnf"
owner="root" owner: "root"
group="root" group: "root"
mode=0600 mode: 0600
- name: check if we have already bootstrapped the cluster - name: check if we have already bootstrapped the cluster
stat: path=/etc/mysql/ansible_cluster_started stat: path=/etc/mysql/ansible_cluster_started
...@@ -15,18 +15,18 @@ ...@@ -15,18 +15,18 @@
when: not mariadb_bootstrap.stat.exists when: not mariadb_bootstrap.stat.exists
- name: setup bootstrap on primary - name: setup bootstrap on primary
lineinfile: > lineinfile:
dest="/etc/mysql/conf.d/galera.cnf" dest: "/etc/mysql/conf.d/galera.cnf"
regexp="^wsrep_cluster_address=gcomm://{{ hostvars.keys()|sort|join(',') }}$" regexp: "^wsrep_cluster_address=gcomm://{{ hostvars.keys()|sort|join(',') }}$"
line="wsrep_cluster_address=gcomm://" line: "wsrep_cluster_address=gcomm://"
when: ansible_hostname == hostvars[hostvars.keys()[0]].ansible_hostname and not mariadb_bootstrap.stat.exists when: ansible_hostname == hostvars[hostvars.keys()[0]].ansible_hostname and not mariadb_bootstrap.stat.exists
- name: fetch debian.cnf file so start-stop will work properly - name: fetch debian.cnf file so start-stop will work properly
fetch: > fetch:
src=/etc/mysql/debian.cnf src: /etc/mysql/debian.cnf
dest=/tmp/debian.cnf dest: /tmp/debian.cnf
fail_on_missing=yes fail_on_missing: yes
flat=yes flat: yes
when: ansible_hostname == hostvars[hostvars.keys()[0]].ansible_hostname and not mariadb_bootstrap.stat.exists when: ansible_hostname == hostvars[hostvars.keys()[0]].ansible_hostname and not mariadb_bootstrap.stat.exists
register: mariadb_new_debian_cnf register: mariadb_new_debian_cnf
...@@ -39,12 +39,12 @@ ...@@ -39,12 +39,12 @@
when: not mariadb_bootstrap.stat.exists when: not mariadb_bootstrap.stat.exists
- name: reset galera cluster config since we are bootstrapped - name: reset galera cluster config since we are bootstrapped
template: > template:
src="etc/mysql/conf.d/galera.cnf.j2" src: "etc/mysql/conf.d/galera.cnf.j2"
dest="/etc/mysql/conf.d/galera.cnf" dest: "/etc/mysql/conf.d/galera.cnf"
owner="root" owner: "root"
group="root" group: "root"
mode=0600 mode: 0600
when: not mariadb_bootstrap.stat.exists when: not mariadb_bootstrap.stat.exists
- name: touch bootstrap file to confirm we are fully up - name: touch bootstrap file to confirm we are fully up
...@@ -53,6 +53,5 @@ ...@@ -53,6 +53,5 @@
# This is needed for mysql-check in haproxy or other mysql monitor # This is needed for mysql-check in haproxy or other mysql monitor
# scripts to prevent haproxy checks exceeding `max_connect_errors`. # scripts to prevent haproxy checks exceeding `max_connect_errors`.
- name: create haproxy monitor user - name: create haproxy monitor user
command: > command: "mysql -e \"INSERT INTO mysql.user (Host,User) values ('{{ item }}','{{ MARIADB_HAPROXY_USER }}'); FLUSH PRIVILEGES;\""
mysql -e "INSERT INTO mysql.user (Host,User) values ('{{ item }}','{{ MARIADB_HAPROXY_USER }}'); FLUSH PRIVILEGES;" with_items: "{{ MARIADB_HAPROXY_HOSTS }}"
with_items: MARIADB_HAPROXY_HOSTS
...@@ -23,31 +23,32 @@ ...@@ -23,31 +23,32 @@
- name: Install pre-req debian packages - name: Install pre-req debian packages
apt: name={{ item }} state=present apt: name={{ item }} state=present
with_items: mariadb_debian_pkgs with_items: "{{ mariadb_debian_pkgs }}"
- name: Add mariadb apt key - name: Add mariadb apt key
apt_key: url="{{ COMMON_UBUNTU_APT_KEYSERVER }}{{ MARIADB_APT_KEY_ID }}" apt_key:
url: "{{ COMMON_UBUNTU_APT_KEYSERVER }}{{ MARIADB_APT_KEY_ID }}"
- name: add the mariadb repo to the sources list - name: add the mariadb repo to the sources list
apt_repository: > apt_repository:
repo='{{ MARIADB_REPO }}' repo: "{{ MARIADB_REPO }}"
state=present state: present
- name: install mariadb solo packages - name: install mariadb solo packages
apt: name={{ item }} update_cache=yes apt: name={{ item }} update_cache=yes
with_items: mariadb_solo_packages with_items: "{{ mariadb_solo_packages }}"
when: not MARIADB_CLUSTERED|bool when: not MARIADB_CLUSTERED|bool
- name: install mariadb cluster packages - name: install mariadb cluster packages
apt: name={{ item }} update_cache=yes apt: name={{ item }} update_cache=yes
with_items: mariadb_cluster_packages with_items: "{{ mariadb_cluster_packages }}"
when: MARIADB_CLUSTERED|bool when: MARIADB_CLUSTERED|bool
- name: remove bind-address - name: remove bind-address
lineinfile: > lineinfile:
dest=/etc/mysql/my.cnf dest: /etc/mysql/my.cnf
regexp="^bind-address\s+=\s+127\.0\.0\.1$" regexp: '^bind-address\s+=\s+127\.0\.0\.1$'
state=absent state: absent
when: MARIADB_LISTEN_ALL|bool or MARIADB_CLUSTERED|bool when: MARIADB_LISTEN_ALL|bool or MARIADB_CLUSTERED|bool
- include: cluster.yml - include: cluster.yml
...@@ -57,37 +58,37 @@ ...@@ -57,37 +58,37 @@
service: name=mysql state=started service: name=mysql state=started
- name: create all databases - name: create all databases
mysql_db: > mysql_db:
db={{ item }} db: "{{ item }}"
state=present state: present
encoding=utf8 encoding: utf8
with_items: MARIADB_DATABASES with_items: "{{ MARIADB_DATABASES }}"
when: MARIADB_CREATE_DBS|bool when: MARIADB_CREATE_DBS|bool
- name: create all analytics dbs - name: create all analytics dbs
mysql_db: > mysql_db:
db={{ item }} db: "{{ item }}"
state=present state: present
encoding=utf8 encoding: utf8
with_items: MARIADB_ANALYTICS_DATABASES with_items: "{{ MARIADB_ANALYTICS_DATABASES }}"
when: MARIADB_CREATE_DBS|bool and ANALYTICS_API_CONFIG is defined when: MARIADB_CREATE_DBS|bool and ANALYTICS_API_CONFIG is defined
- name: create all users/privs - name: create all users/privs
mysql_user: > mysql_user:
name="{{ item.name }}" name: "{{ item.name }}"
password="{{ item.pass }}" password: "{{ item.pass }}"
priv="{{ item.priv }}" priv: "{{ item.priv }}"
host="{{ item.host }}" host: "{{ item.host }}"
append_privs=yes append_privs: yes
with_items: MARIADB_USERS with_items: "{{ MARIADB_USERS }}"
when: MARIADB_CREATE_DBS|bool when: MARIADB_CREATE_DBS|bool
- name: create all analytics users/privs - name: create all analytics users/privs
mysql_user: > mysql_user:
name="{{ item.name }}" name: "{{ item.name }}"
password="{{ item.pass }}" password: "{{ item.pass }}"
priv="{{ item.priv }}" priv: "{{ item.priv }}"
host="{{ item.host }}" host: "{{ item.host }}"
append_privs=yes append_privs: yes
with_items: MARIADB_ANALYTICS_USERS with_items: "{{ MARIADB_ANALYTICS_USERS }}"
when: MARIADB_CREATE_DBS|bool and ANALYTICS_API_CONFIG is defined when: MARIADB_CREATE_DBS|bool and ANALYTICS_API_CONFIG is defined
...@@ -55,7 +55,7 @@ ...@@ -55,7 +55,7 @@
install_recommends: yes install_recommends: yes
force: yes force: yes
update_cache: yes update_cache: yes
with_items: mongodb_debian_pkgs with_items: "{{ mongodb_debian_pkgs }}"
tags: tags:
- install - install
- install:app-requirements - install:app-requirements
...@@ -292,8 +292,6 @@ ...@@ -292,8 +292,6 @@
register: replset_status register: replset_status
when: MONGO_CLUSTERED when: MONGO_CLUSTERED
tags: tags:
- configure_replica_set
tags:
- "manage" - "manage"
- "manage:db" - "manage:db"
- "configure_replica_set" - "configure_replica_set"
...@@ -314,8 +312,6 @@ ...@@ -314,8 +312,6 @@
run_once: true run_once: true
when: MONGO_CLUSTERED when: MONGO_CLUSTERED
tags: tags:
- configure_replica_set
tags:
- "manage" - "manage"
- "manage:db" - "manage:db"
...@@ -330,7 +326,7 @@ ...@@ -330,7 +326,7 @@
roles: "{{ item.roles }}" roles: "{{ item.roles }}"
state: present state: present
replica_set: "{{ MONGO_REPL_SET }}" replica_set: "{{ MONGO_REPL_SET }}"
with_items: MONGO_USERS with_items: "{{ MONGO_USERS }}"
run_once: true run_once: true
when: MONGO_CLUSTERED when: MONGO_CLUSTERED
tags: tags:
...@@ -346,7 +342,7 @@ ...@@ -346,7 +342,7 @@
password: "{{ item.password }}" password: "{{ item.password }}"
roles: "{{ item.roles }}" roles: "{{ item.roles }}"
state: present state: present
with_items: MONGO_USERS with_items: "{{ MONGO_USERS }}"
when: not MONGO_CLUSTERED when: not MONGO_CLUSTERED
tags: tags:
- "manage" - "manage"
......
...@@ -11,31 +11,30 @@ ...@@ -11,31 +11,30 @@
when: MMSAPIKEY is not defined when: MMSAPIKEY is not defined
- name: download mongo mms agent - name: download mongo mms agent
get_url: > get_url:
url="{{ base_url }}/{{ item.dir }}/{{ item.agent }}_{{ item.version }}_{{ pkg_arch }}.{{ pkg_format }}" url: "{{ base_url }}/{{ item.dir }}/{{ item.agent }}_{{ item.version }}_{{ pkg_arch }}.{{ pkg_format }}"
dest="/tmp/{{ item.agent }}-{{ item.version }}.{{ pkg_format }}" dest: "/tmp/{{ item.agent }}-{{ item.version }}.{{ pkg_format }}"
register: download_mms_deb register: download_mms_deb
with_items: with_items: "{{ agents }}"
agents
- name: install mongo mms agent - name: install mongo mms agent
apt: > apt:
deb="/tmp/{{ item.agent }}-{{ item.version }}.deb" deb: "/tmp/{{ item.agent }}-{{ item.version }}.deb"
when: download_mms_deb.changed when: download_mms_deb.changed
notify: restart mms notify: restart mms
with_items: with_items:
agents agents
- name: add key to monitoring-agent.config - name: add key to monitoring-agent.config
lineinfile: > lineinfile:
dest="{{ item.config }}" dest: "{{ item.config }}"
regexp="^mmsApiKey=" regexp: "^mmsApiKey="
line="mmsApiKey={{ MMSAPIKEY }}" line: "mmsApiKey={{ MMSAPIKEY }}"
notify: restart mms notify: restart mms
with_items: with_items: "{{ agents }}"
agents
- name: start mms service - name: start mms service
service: name="{{ item.agent }}" state=started service:
with_items: name: "{{ item.agent }}"
agents state: started
with_items: "{{ agents }}"
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
fstype: "{{ (ansible_mounts | selectattr('device', 'equalto', item.device) | first | default({'fstype': None})).fstype }}" fstype: "{{ (ansible_mounts | selectattr('device', 'equalto', item.device) | first | default({'fstype': None})).fstype }}"
state: unmounted state: unmounted
when: "{{ UNMOUNT_DISKS and (ansible_mounts | selectattr('device', 'equalto', item.device) | first | default({'fstype': None})).fstype != item.fstype }}" when: "{{ UNMOUNT_DISKS and (ansible_mounts | selectattr('device', 'equalto', item.device) | first | default({'fstype': None})).fstype != item.fstype }}"
with_items: volumes with_items: "{{ volumes }}"
# Noop & reports "ok" if fstype is correct # Noop & reports "ok" if fstype is correct
# Errors if fstype is wrong and disk is mounted (hence above task) # Errors if fstype is wrong and disk is mounted (hence above task)
...@@ -34,7 +34,7 @@ ...@@ -34,7 +34,7 @@
fstype: "{{ item.fstype }}" fstype: "{{ item.fstype }}"
# Necessary because AWS gives some ephemeral disks the wrong fstype by default # Necessary because AWS gives some ephemeral disks the wrong fstype by default
force: true force: true
with_items: volumes with_items: "{{ volumes }}"
# This can fail if one volume is mounted on a child directory as another volume # This can fail if one volume is mounted on a child directory as another volume
# and it attempts to unmount the parent first. This is generally fixable by rerunning. # and it attempts to unmount the parent first. This is generally fixable by rerunning.
...@@ -49,21 +49,21 @@ ...@@ -49,21 +49,21 @@
src: "{{ item.device }}" src: "{{ item.device }}"
fstype: "{{ item.fstype }}" fstype: "{{ item.fstype }}"
state: unmounted state: unmounted
when: > when:
UNMOUNT_DISKS and UNMOUNT_DISKS and
volumes | selectattr('device', 'equalto', item.device) | list | length != 0 and volumes | selectattr('device', 'equalto', item.device) | list | length != 0 and
(volumes | selectattr('device', 'equalto', item.device) | first).mount != item.mount (volumes | selectattr('device', 'equalto', item.device) | first).mount != item.mount
with_items: ansible_mounts with_items: "{{ ansible_mounts }}"
# If there are disks we want to be unmounting, but we can't because UNMOUNT_DISKS is false # If there are disks we want to be unmounting, but we can't because UNMOUNT_DISKS is false
# that is an errorable condition, since it can easily allow us to double mount a disk. # that is an errorable condition, since it can easily allow us to double mount a disk.
- name: Check that we don't want to unmount disks when UNMOUNT_DISKS is false - name: Check that we don't want to unmount disks when UNMOUNT_DISKS is false
fail: msg="Found disks mounted in the wrong place, but can't unmount them. This role will need to be re-run with -e 'UNMOUNT_DISKS=True' if you believe that is safe." fail: msg="Found disks mounted in the wrong place, but can't unmount them. This role will need to be re-run with -e 'UNMOUNT_DISKS=True' if you believe that is safe."
when: > when:
not UNMOUNT_DISKS and not UNMOUNT_DISKS and
volumes | selectattr('device', 'equalto', item.device) | list | length != 0 and volumes | selectattr('device', 'equalto', item.device) | list | length != 0 and
(volumes | selectattr('device', 'equalto', item.device) | first).mount != item.mount (volumes | selectattr('device', 'equalto', item.device) | first).mount != item.mount
with_items: ansible_mounts with_items: "{{ ansible_mounts }}"
- name: Mount disks - name: Mount disks
mount: mount:
...@@ -72,4 +72,4 @@ ...@@ -72,4 +72,4 @@
state: mounted state: mounted
fstype: "{{ item.fstype }}" fstype: "{{ item.fstype }}"
opts: "{{ item.options }}" opts: "{{ item.options }}"
with_items: volumes with_items: "{{ volumes }}"
...@@ -61,7 +61,7 @@ ...@@ -61,7 +61,7 @@
name: "{{ item }}" name: "{{ item }}"
install_recommends: yes install_recommends: yes
state: present state: present
with_items: mysql_debian_pkgs with_items: "{{ mysql_debian_pkgs }}"
- name: Start mysql - name: Start mysql
service: service:
......
...@@ -22,41 +22,37 @@ ...@@ -22,41 +22,37 @@
# #
- name: Download newrelic NPI - name: Download newrelic NPI
get_url: > get_url:
dest="/tmp/{{ newrelic_npi_installer }}" dest: "/tmp/{{ newrelic_npi_installer }}"
url="{{ NEWRELIC_NPI_URL }}" url: "{{ NEWRELIC_NPI_URL }}"
register: download_npi_installer register: download_npi_installer
- name: create npi install directory {{ NEWRELIC_NPI_PREFIX }} - name: create npi install directory {{ NEWRELIC_NPI_PREFIX }}
file: > file:
path="{{ NEWRELIC_NPI_PREFIX }}" path: "{{ NEWRELIC_NPI_PREFIX }}"
state=directory state: directory
mode=0755 mode: 0755
owner="{{ NEWRELIC_USER }}" owner: "{{ NEWRELIC_USER }}"
- name: install newrelic npi - name: install newrelic npi
shell: > shell: "tar -xzf /tmp/{{ newrelic_npi_installer }} --strip-components=1 -C \"{{NEWRELIC_NPI_PREFIX}}\""
tar -xzf /tmp/{{ newrelic_npi_installer }} --strip-components=1 -C "{{NEWRELIC_NPI_PREFIX}}"
when: download_npi_installer.changed when: download_npi_installer.changed
become_user: "{{ NEWRELIC_USER }}" become_user: "{{ NEWRELIC_USER }}"
- name: configure npi with the default user - name: configure npi with the default user
shell: > shell: "{{ NEWRELIC_NPI_PREFIX }}/bin/node {{ NEWRELIC_NPI_PREFIX }}/npi.js \"set user {{ NEWRELIC_USER }}\""
{{ NEWRELIC_NPI_PREFIX }}/bin/node {{ NEWRELIC_NPI_PREFIX }}/npi.js "set user {{ NEWRELIC_USER }}"
args: args:
chdir: "{{ NEWRELIC_NPI_PREFIX }}" chdir: "{{ NEWRELIC_NPI_PREFIX }}"
become_user: "{{ NEWRELIC_USER }}" become_user: "{{ NEWRELIC_USER }}"
- name: configure npi with the license key - name: configure npi with the license key
shell: > shell: "./npi set license_key {{ NEWRELIC_LICENSE_KEY }}"
./npi set license_key {{ NEWRELIC_LICENSE_KEY }}
args: args:
chdir: "{{ NEWRELIC_NPI_PREFIX }}" chdir: "{{ NEWRELIC_NPI_PREFIX }}"
become_user: "{{ NEWRELIC_USER }}" become_user: "{{ NEWRELIC_USER }}"
- name: configure npi with the distro - name: configure npi with the distro
shell: > shell: "./npi set distro {{ NEWRELIC_NPI_DISTRO }}"
./npi set distro {{ NEWRELIC_NPI_DISTRO }}
args: args:
chdir: "{{ NEWRELIC_NPI_PREFIX }}" chdir: "{{ NEWRELIC_NPI_PREFIX }}"
become_user: "{{ NEWRELIC_USER }}" become_user: "{{ NEWRELIC_USER }}"
......
...@@ -5,23 +5,24 @@ ...@@ -5,23 +5,24 @@
- name: create the nltk data directory and subdirectories - name: create the nltk data directory and subdirectories
file: path={{ NLTK_DATA_DIR }}/{{ item.path|dirname }} state=directory file: path={{ NLTK_DATA_DIR }}/{{ item.path|dirname }} state=directory
with_items: NLTK_DATA with_items: "{{ NLTK_DATA }}"
tags: tags:
- deploy - deploy
- name: download nltk data - name: download nltk data
get_url: > get_url:
dest={{ NLTK_DATA_DIR }}/{{ item.url|basename }} dest: "{{ NLTK_DATA_DIR }}/{{ item.url|basename }}"
url={{ item.url }} url: "{{ item.url }}"
with_items: NLTK_DATA with_items: "{{ NLTK_DATA }}"
register: nltk_download register: nltk_download
tags: tags:
- deploy - deploy
- name: unarchive nltk data - name: unarchive nltk data
shell: > shell: "unzip {{ NLTK_DATA_DIR }}/{{ item.url|basename }}"
unzip {{ NLTK_DATA_DIR }}/{{ item.url|basename }} chdir="{{ NLTK_DATA_DIR }}/{{ item.path|dirname }}" args:
with_items: NLTK_DATA chdir: "{{ NLTK_DATA_DIR }}/{{ item.path|dirname }}"
with_items: "{{ NLTK_DATA }}"
when: nltk_download|changed when: nltk_download|changed
tags: tags:
- deploy - deploy
...@@ -19,38 +19,38 @@ oauth_client_setup_role_name: oauth_client_setup ...@@ -19,38 +19,38 @@ oauth_client_setup_role_name: oauth_client_setup
oauth_client_setup_oauth2_clients: oauth_client_setup_oauth2_clients:
- { - {
name: "{{ ecommerce_service_name | default('None') }}", name: "{{ ecommerce_service_name | default('None') }}",
url_root: "{{ ECOMMERCE_ECOMMERCE_URL_ROOT }}", url_root: "{{ ECOMMERCE_ECOMMERCE_URL_ROOT | default('None') }}",
id: "{{ ECOMMERCE_SOCIAL_AUTH_EDX_OIDC_KEY }}", id: "{{ ECOMMERCE_SOCIAL_AUTH_EDX_OIDC_KEY | default('None') }}",
secret: "{{ ECOMMERCE_SOCIAL_AUTH_EDX_OIDC_SECRET }}", secret: "{{ ECOMMERCE_SOCIAL_AUTH_EDX_OIDC_SECRET | default('None') }}",
logout_uri: "{{ ECOMMERCE_LOGOUT_URL }}" logout_uri: "{{ ECOMMERCE_LOGOUT_URL | default('None') }}"
} }
- { - {
name: "{{ INSIGHTS_OAUTH2_APP_CLIENT_NAME | default('None') }}", name: "{{ INSIGHTS_OAUTH2_APP_CLIENT_NAME | default('None') }}",
url_root: "{{ INSIGHTS_BASE_URL }}", url_root: "{{ INSIGHTS_BASE_URL | default('None') }}",
id: "{{ INSIGHTS_OAUTH2_KEY }}", id: "{{ INSIGHTS_OAUTH2_KEY | default('None') }}",
secret: "{{ INSIGHTS_OAUTH2_SECRET }}", secret: "{{ INSIGHTS_OAUTH2_SECRET | default('None') }}",
logout_uri: "{{ INSIGHTS_LOGOUT_URL }}" logout_uri: "{{ INSIGHTS_LOGOUT_URL | default('None') }}"
} }
- { - {
name: "{{ programs_service_name | default('None') }}", name: "{{ programs_service_name | default('None') }}",
url_root: "{{ PROGRAMS_URL_ROOT }}", url_root: "{{ PROGRAMS_URL_ROOT | default('None') }}",
id: "{{ PROGRAMS_SOCIAL_AUTH_EDX_OIDC_KEY }}", id: "{{ PROGRAMS_SOCIAL_AUTH_EDX_OIDC_KEY | default('None') }}",
secret: "{{ PROGRAMS_SOCIAL_AUTH_EDX_OIDC_SECRET }}", secret: "{{ PROGRAMS_SOCIAL_AUTH_EDX_OIDC_SECRET | default('None') }}",
logout_uri: "{{ PROGRAMS_LOGOUT_URL }}" logout_uri: "{{ PROGRAMS_LOGOUT_URL | default('None') }}"
} }
- { - {
name: "{{ credentials_service_name | default('None') }}", name: "{{ credentials_service_name | default('None') }}",
url_root: "{{ CREDENTIALS_URL_ROOT }}", url_root: "{{ CREDENTIALS_URL_ROOT | default('None') }}",
id: "{{ CREDENTIALS_SOCIAL_AUTH_EDX_OIDC_KEY }}", id: "{{ CREDENTIALS_SOCIAL_AUTH_EDX_OIDC_KEY | default('None') }}",
secret: "{{ CREDENTIALS_SOCIAL_AUTH_EDX_OIDC_SECRET }}", secret: "{{ CREDENTIALS_SOCIAL_AUTH_EDX_OIDC_SECRET | default('None') }}",
logout_uri: "{{ CREDENTIALS_LOGOUT_URL }}" logout_uri: "{{ CREDENTIALS_LOGOUT_URL | default('None') }}"
} }
- { - {
name: "{{ discovery_service_name | default('None') }}", name: "{{ discovery_service_name | default('None') }}",
url_root: "{{ DISCOVERY_URL_ROOT }}", url_root: "{{ DISCOVERY_URL_ROOT | default('None') }}",
id: "{{ DISCOVERY_SOCIAL_AUTH_EDX_OIDC_KEY }}", id: "{{ DISCOVERY_SOCIAL_AUTH_EDX_OIDC_KEY | default('None') }}",
secret: "{{ DISCOVERY_SOCIAL_AUTH_EDX_OIDC_SECRET }}", secret: "{{ DISCOVERY_SOCIAL_AUTH_EDX_OIDC_SECRET | default('None') }}",
logout_uri: "{{ DISCOVERY_LOGOUT_URL }}" logout_uri: "{{ DISCOVERY_LOGOUT_URL | default('None') }}"
} }
# #
......
...@@ -35,5 +35,5 @@ ...@@ -35,5 +35,5 @@
--logout_uri {{ item.logout_uri | default("") }} --logout_uri {{ item.logout_uri | default("") }}
become_user: "{{ edxapp_user }}" become_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}" environment: "{{ edxapp_environment }}"
with_items: oauth_client_setup_oauth2_clients with_items: "{{ oauth_client_setup_oauth2_clients }}"
when: item.name != 'None' when: item.name != 'None'
...@@ -55,9 +55,9 @@ ...@@ -55,9 +55,9 @@
# Need to use command rather than pip so that we can maintain the context of our current working directory; # Need to use command rather than pip so that we can maintain the context of our current working directory;
# some requirements are pathed relative to the edx-platform repo. # some requirements are pathed relative to the edx-platform repo.
# Using the pip from inside the virtual environment implicitly installs everything into that virtual environment. # Using the pip from inside the virtual environment implicitly installs everything into that virtual environment.
command: > command: "{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ openstack_requirements_file }}"
{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ openstack_requirements_file }} args:
chdir={{ edxapp_code_dir }} chdir: "{{ edxapp_code_dir }}"
sudo_user: "{{ edxapp_user }}" sudo_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}" environment: "{{ edxapp_environment }}"
when: edxapp_code_dir is defined when: edxapp_code_dir is defined
......
...@@ -21,15 +21,20 @@ PROGRAMS_SSL_NGINX_PORT: 48140 ...@@ -21,15 +21,20 @@ PROGRAMS_SSL_NGINX_PORT: 48140
PROGRAMS_DEFAULT_DB_NAME: 'programs' PROGRAMS_DEFAULT_DB_NAME: 'programs'
PROGRAMS_DATABASE_USER: 'programs001'
PROGRAMS_DATABASE_PASSWORD: 'password'
PROGRAMS_DATABASE_HOST: 'localhost'
PROGRAMS_DATABASE_PORT: 3306
PROGRAMS_DATABASES: PROGRAMS_DATABASES:
# rw user # rw user
default: default:
ENGINE: 'django.db.backends.mysql' ENGINE: 'django.db.backends.mysql'
NAME: '{{ PROGRAMS_DEFAULT_DB_NAME }}' NAME: '{{ PROGRAMS_DEFAULT_DB_NAME }}'
USER: 'programs001' USER: '{{ PROGRAMS_DATABASE_USER }}'
PASSWORD: 'password' PASSWORD: '{{ PROGRAMS_DATABASE_PASSWORD }}'
HOST: 'localhost' HOST: '{{ PROGRAMS_DATABASE_HOST }}'
PORT: '3306' PORT: '{{ PROGRAMS_DATABASE_PORT }}'
ATOMIC_REQUESTS: true ATOMIC_REQUESTS: true
CONN_MAX_AGE: 60 CONN_MAX_AGE: 60
......
...@@ -56,9 +56,9 @@ ...@@ -56,9 +56,9 @@
- migrate:db - migrate:db
- name: run collectstatic - name: run collectstatic
shell: > shell: "{{ programs_venv_dir }}/bin/python manage.py collectstatic --noinput"
chdir={{ programs_code_dir }} args:
{{ programs_venv_dir }}/bin/python manage.py collectstatic --noinput chdir: "{{ programs_code_dir }}"
become_user: "{{ programs_user }}" become_user: "{{ programs_user }}"
environment: "{{ programs_environment }}" environment: "{{ programs_environment }}"
when: not devstack when: not devstack
...@@ -68,9 +68,12 @@ ...@@ -68,9 +68,12 @@
# NOTE this isn't used or needed when s3 is used for PROGRAMS_MEDIA_STORAGE_BACKEND # NOTE this isn't used or needed when s3 is used for PROGRAMS_MEDIA_STORAGE_BACKEND
- name: create programs media dir - name: create programs media dir
file: > file:
path="{{ item }}" state=directory mode=0775 path: "{{ item }}"
owner="{{ programs_user }}" group="{{ common_web_group }}" state: directory
mode: 0775
owner: "{{ programs_user }}"
group: "{{ common_web_group }}"
with_items: with_items:
- "{{ PROGRAMS_MEDIA_ROOT }}" - "{{ PROGRAMS_MEDIA_ROOT }}"
tags: tags:
......
...@@ -171,8 +171,7 @@ ...@@ -171,8 +171,7 @@
- maintenance - maintenance
- name: Make queues mirrored - name: Make queues mirrored
shell: > shell: "/usr/sbin/rabbitmqctl -p {{ item }} set_policy HA \"\" '{\"ha-mode\":\"all\",\"ha-sync-mode\":\"automatic\"}'"
/usr/sbin/rabbitmqctl -p {{ item }} set_policy HA "" '{"ha-mode":"all","ha-sync-mode":"automatic"}'
when: RABBITMQ_CLUSTERED_HOSTS|length > 1 when: RABBITMQ_CLUSTERED_HOSTS|length > 1
with_items: "{{ RABBITMQ_VHOSTS }}" with_items: "{{ RABBITMQ_VHOSTS }}"
tags: tags:
......
...@@ -38,42 +38,47 @@ ...@@ -38,42 +38,47 @@
when: rbenv_ruby_version is not defined when: rbenv_ruby_version is not defined
- name: create rbenv user {{ rbenv_user }} - name: create rbenv user {{ rbenv_user }}
user: > user:
name={{ rbenv_user }} home={{ rbenv_dir }} name: "{{ rbenv_user }}"
shell=/bin/false createhome=no home: "{{ rbenv_dir }}"
shell: /bin/false
createhome: no
when: rbenv_user != common_web_user when: rbenv_user != common_web_user
tags: tags:
- install - install
- install:base - install:base
- name: create rbenv dir if it does not exist - name: create rbenv dir if it does not exist
file: > file:
path="{{ rbenv_dir }}" owner="{{ rbenv_user }}" path: "{{ rbenv_dir }}"
state=directory owner: "{{ rbenv_user }}"
state: directory
tags: tags:
- install - install
- install:base - install:base
- name: install build depends - name: install build depends
apt: pkg={{ ",".join(rbenv_debian_pkgs) }} update_cache=yes state=present install_recommends=no apt: pkg={{ ",".join(rbenv_debian_pkgs) }} update_cache=yes state=present install_recommends=no
with_items: rbenv_debian_pkgs with_items: "{{ rbenv_debian_pkgs }}"
tags: tags:
- install - install
- install:base - install:base
- name: update rbenv repo - name: update rbenv repo
git_2_0_1: > git_2_0_1:
repo=https://github.com/sstephenson/rbenv.git repo: https://github.com/sstephenson/rbenv.git
dest={{ rbenv_dir }}/.rbenv version={{ rbenv_version }} dest: "{{ rbenv_dir }}/.rbenv"
accept_hostkey=yes version: "{{ rbenv_version }}"
accept_hostkey: yes
become_user: "{{ rbenv_user }}" become_user: "{{ rbenv_user }}"
tags: tags:
- install - install
- install:base - install:base
- name: ensure ruby_env exists - name: ensure ruby_env exists
template: > template:
src=ruby_env.j2 dest={{ rbenv_dir }}/ruby_env src: ruby_env.j2
dest: "{{ rbenv_dir }}/ruby_env"
become_user: "{{ rbenv_user }}" become_user: "{{ rbenv_user }}"
tags: tags:
- install - install
...@@ -107,9 +112,10 @@ ...@@ -107,9 +112,10 @@
- install:base - install:base
- name: clone ruby-build repo - name: clone ruby-build repo
git: > git:
repo=https://github.com/sstephenson/ruby-build.git dest={{ tempdir.stdout }}/ruby-build repo: https://github.com/sstephenson/ruby-build.git
accept_hostkey=yes dest: "{{ tempdir.stdout }}/ruby-build"
accept_hostkey: yes
when: tempdir.stdout is defined and (rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)) when: tempdir.stdout is defined and (rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers))
become_user: "{{ rbenv_user }}" become_user: "{{ rbenv_user }}"
tags: tags:
......
...@@ -29,13 +29,13 @@ ...@@ -29,13 +29,13 @@
# file: # file:
# path={{ item.mount_point }} owner={{ item.owner }} # path={{ item.mount_point }} owner={{ item.owner }}
# group={{ item.group }} mode={{ item.mode }} state="directory" # group={{ item.group }} mode={{ item.mode }} state="directory"
# with_items: my_role_s3fs_mounts # with_items: "{{ my_role_s3fs_mounts }}"
# #
# - name: mount s3 buckets # - name: mount s3 buckets
# mount: # mount:
# name={{ item.mount_point }} src={{ item.bucket }} fstype=fuse.s3fs # name={{ item.mount_point }} src={{ item.bucket }} fstype=fuse.s3fs
# opts=use_cache=/tmp,iam_role={{ task_iam_role }},allow_other state=mounted # opts=use_cache=/tmp,iam_role={{ task_iam_role }},allow_other state=mounted
# with_items: myrole_s3fs_mounts # with_items: "{{ myrole_s3fs_mounts }}"
# #
# Example play: # Example play:
# #
......
...@@ -15,12 +15,12 @@ ...@@ -15,12 +15,12 @@
file: path=/etc/shibboleth/metadata state=directory mode=2774 group=_shibd owner=_shibd file: path=/etc/shibboleth/metadata state=directory mode=2774 group=_shibd owner=_shibd
- name: Downloads metadata into metadata directory as backup - name: Downloads metadata into metadata directory as backup
get_url: > get_url:
url={{ shib_metadata_backup_url }} url: "{{ shib_metadata_backup_url }}"
dest=/etc/shibboleth/metadata/idp-metadata.xml dest: "/etc/shibboleth/metadata/idp-metadata.xml"
mode=0640 mode: 0640
group=_shibd group: _shibd
owner=_shibd owner: _shibd
when: shib_download_metadata when: shib_download_metadata
- name: writes out key and pem file - name: writes out key and pem file
......
...@@ -9,39 +9,51 @@ ...@@ -9,39 +9,51 @@
- oinkmaster - oinkmaster
- name: configure snort - name: configure snort
template: > template:
src=etc/snort/snort.conf.j2 dest=/etc/snort/snort.conf src: etc/snort/snort.conf.j2
owner=root group=root mode=0644 dest: /etc/snort/snort.conf
owner: root
group: root
mode: 0644
- name: configure snort (debian) - name: configure snort (debian)
template: > template:
src=etc/snort/snort.debian.conf.j2 dest=/etc/snort/snort.debian.conf src: etc/snort/snort.debian.conf.j2
owner=root group=root mode=0644 dest: /etc/snort/snort.debian.conf
owner: root
group: root
mode: 0644
- name: configure oinkmaster - name: configure oinkmaster
template: > template:
src=etc/oinkmaster.conf.j2 dest=/etc/oinkmaster.conf src: etc/oinkmaster.conf.j2
owner=root group=root mode=0644 dest: /etc/oinkmaster.conf
owner: root
group: root
mode: 0644
- name: update snort - name: update snort
shell: oinkmaster -C /etc/oinkmaster.conf -o /etc/snort/rules/ shell: oinkmaster -C /etc/oinkmaster.conf -o /etc/snort/rules/
become: yes become: yes
- name: snort service - name: snort service
service: > service:
name="snort" name: "snort"
state="started" state: "started"
- name: open read permissions on snort logs - name: open read permissions on snort logs
file: > file:
name="/var/log/snort" name: "/var/log/snort"
state="directory" state: "directory"
mode="755" mode: "755"
- name: install oinkmaster cronjob - name: install oinkmaster cronjob
template: > template:
src=etc/cron.daily/oinkmaster.j2 dest=/etc/cron.daily/oinkmaster src: etc/cron.daily/oinkmaster.j2
owner=root group=root mode=0755 dest: /etc/cron.daily/oinkmaster
owner: root
group: root
mode: 0755
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
fail: fail:
msg: Please define either "source" or "sourcetype", not both or neither msg: Please define either "source" or "sourcetype", not both or neither
when: ('source' in item and 'sourcetype' in item) or ('source' not in item and 'sourcetype' not in item) when: ('source' in item and 'sourcetype' in item) or ('source' not in item and 'sourcetype' not in item)
with_items: SPLUNK_FIELD_EXTRACTIONS with_items: "{{ SPLUNK_FIELD_EXTRACTIONS }}"
- name: Make sure necessary dirs exist - name: Make sure necessary dirs exist
file: file:
...@@ -144,7 +144,7 @@ ...@@ -144,7 +144,7 @@
owner: "{{ splunk_user }}" owner: "{{ splunk_user }}"
group: "{{ splunk_user }}" group: "{{ splunk_user }}"
mode: 0700 mode: 0700
with_items: SPLUNK_DASHBOARDS with_items: "{{ SPLUNK_DASHBOARDS }}"
tags: tags:
- install - install
- install:configuration - install:configuration
......
...@@ -116,7 +116,7 @@ ...@@ -116,7 +116,7 @@
group: splunk group: splunk
mode: "0400" mode: "0400"
when: "{{ item.ssl_cert is defined }}" when: "{{ item.ssl_cert is defined }}"
with_items: SPLUNKFORWARDER_SERVERS with_items: "{{ SPLUNKFORWARDER_SERVERS }}"
- name: Write root CA to disk - name: Write root CA to disk
copy: copy:
...@@ -126,7 +126,7 @@ ...@@ -126,7 +126,7 @@
group: splunk group: splunk
mode: "0400" mode: "0400"
when: "{{ item.ssl_cert is defined }}" when: "{{ item.ssl_cert is defined }}"
with_items: SPLUNKFORWARDER_SERVERS with_items: "{{ SPLUNKFORWARDER_SERVERS }}"
- name: Create inputs and outputs configuration - name: Create inputs and outputs configuration
template: template:
......
...@@ -3,7 +3,7 @@ description "Tasks before supervisord" ...@@ -3,7 +3,7 @@ description "Tasks before supervisord"
start on runlevel [2345] start on runlevel [2345]
task task
setuid {{ supervisor_user }} setuid {{ common_web_user }}
{% if programs_code_dir is defined %} {% if programs_code_dir is defined %}
{% set programs_command = "--programs-env " + programs_home + "/programs_env --programs-code-dir " + programs_code_dir + " --programs-python " + COMMON_BIN_DIR + "/python.programs" %} {% set programs_command = "--programs-env " + programs_home + "/programs_env --programs-code-dir " + programs_code_dir + " --programs-python " + COMMON_BIN_DIR + "/python.programs" %}
......
---
# Default variables for the tanaguru role (open-source web accessibility
# assessment tool).
# NOTE(review): "tanguru_debian_pkgs" and "fixfox_esr_link" carry
# historical typos, but the role's tasks reference them under these exact
# names -- do not rename in isolation.

# OS packages: Java 7 runtime, Tomcat 7, MySQL client bits, Xvfb for the
# headless browser, and mail tooling.
tanguru_debian_pkgs:
  - openjdk-7-jre
  - unzip
  - libmysql-java
  - python-mysqldb
  - tomcat7
  - libspring-instrument-java
  - xvfb
  - mailutils
  - postfix

# Pinned Tanaguru release tarball.
tanaguru_download_link: "http://download.tanaguru.org/Tanaguru/tanaguru-3.1.0.i386.tar.gz"

# Go this link to find your desired ESR Firefox
# http://download-origin.cdn.mozilla.net/pub/firefox/releases/24.0esr/linux-x86_64/
# Default is en-US in our example
fixfox_esr_link: "http://download-origin.cdn.mozilla.net/pub/firefox/releases/24.0esr/linux-x86_64/en-US/firefox-24.0esr.tar.bz2"

# Deployment-tunable settings; override these defaults in private config.
TANAGURU_DATABASE_NAME: 'tgdatabase'
TANAGURU_DATABASE_USER: 'tguser'
TANAGURU_DATABASE_PASSWORD: 'tgPassword'
TANAGURU_URL: 'http://localhost:8080/tanaguru/'
TANAGURU_ADMIN_EMAIL: 'admin@example.com'
TANAGURU_ADMIN_PASSWORD: 'tanaguru15'

# Internal mapping consumed by the install tasks.
tanaguru_parameters:
  db_name: "{{ TANAGURU_DATABASE_NAME }}"
  db_user: "{{ TANAGURU_DATABASE_USER }}"
  db_password: "{{ TANAGURU_DATABASE_PASSWORD }}"
  url: "{{ TANAGURU_URL }}"
  admin_email: "{{ TANAGURU_ADMIN_EMAIL }}"
  admin_passwd: "{{ TANAGURU_ADMIN_PASSWORD }}"
---
# Tasks for installing the Tanaguru accessibility auditing tool.

# The Canonical "partner" repository provides packages that are not in
# the main Ubuntu archive; both binary and source entries are added.
- name: Add the Partner repository
  apt_repository:
    repo: "{{ item }}"
    state: present
  with_items:
    - "deb http://archive.canonical.com/ubuntu {{ ansible_distribution_release }} partner"
    - "deb-src http://archive.canonical.com/ubuntu {{ ansible_distribution_release }} partner"
  tags:
    - install
    - install:base
# Pre-seed debconf answers so the postfix install is non-interactive
# ("Satellite system" relays mail; mailname is intentionally blank-ish).
- name: Set Postfix options
  debconf:
    # Fixed: was "postifx" -- the typo'd package name meant these debconf
    # answers were stored for a non-existent package and never applied.
    name: postfix
    question: "{{ item.question }}"
    # NOTE(review): trailing space kept from the original template --
    # presumably works around empty-value handling; confirm before removing.
    value: "{{ item.value }} "
    vtype: "string"
  with_items:
    - { question: "postfix/mailname", value: " " }
    - { question: "postfix/main_mailer_type", value: "Satellite system" }
  tags:
    - install
    - install:configuration
# Install Java, Tomcat, Xvfb, MySQL client libs, and mail tooling that
# Tanaguru needs at runtime (list defined in role defaults).
- name: Install the TanaGuru Prerequisites
  apt:
    name: "{{ item }}"
    update_cache: yes
    # "installed" is a deprecated alias; "present" is the canonical state.
    state: present
  # Bare variable names in with_items are deprecated in Ansible 2.x; use
  # full Jinja syntax, consistent with the rest of this change set.
  with_items: "{{ tanguru_debian_pkgs }}"
  tags:
    - install
    - install:base
# Tanaguru imports large rule sets over the MySQL wire protocol, which
# needs a larger-than-default packet size.
- name: Modify the my.cnf file for max_allowed_packet option
  lineinfile:
    dest: /etc/mysql/my.cnf
    regexp: '^max_allowed_packet'
    line: 'max_allowed_packet = 64M'
    state: present
  register: my_cnf
  tags:
    - install
    - install:configuration

# Restart only when my.cnf actually changed, so repeat runs stay quiet.
- name: Restart MySQL
  service:
    name: mysql
    state: restarted
  when: my_cnf.changed
# Expose the Spring instrumentation jar and the MySQL JDBC connector to
# Tomcat by symlinking them into Tomcat 7's lib directory.
- name: Create a soft link for tomcat jar and mysql connector
  file:
    dest: "{{ item.dest }}"
    src: "{{ item.src }}"
    state: link
  with_items:
    - { src: '/usr/share/java/spring3-instrument-tomcat.jar', dest: '/usr/share/tomcat7/lib/spring3-instrument-tomcat.jar' }
    - { src: '/usr/share/java/mysql-connector-java.jar', dest: '/usr/share/tomcat7/lib/mysql-connector-java.jar'}
  tags:
    - install
    - install:configuration

# Xvfb provides the headless X display Tanaguru drives Firefox against.
- name: Copy the xvfb template to /etc/init.d
  template:
    dest: /etc/init.d/xvfb
    src: xvfb.j2
    owner: root
    group: root
    mode: 0755
  register: xvfb
  tags:
    - install
    - install:configuration

# (Re)start the display only when the init script changed.
- name: Restart xvfb
  service:
    name: xvfb
    pattern: /etc/init.d/xvfb
    state: restarted
  when: xvfb.changed

# update-rc.d may warn if rc links already exist; that is harmless here.
- name: Configure xvfb to run at startup
  command: update-rc.d xvfb defaults
  ignore_errors: yes
  when: xvfb.changed
# Tanaguru pins a specific Firefox ESR build; the URL (and its typo'd
# variable name, kept for compatibility) live in the role defaults.
- name: Download the latest ESR Firfox
  get_url:
    url: "{{ fixfox_esr_link }}"
    dest: "/tmp/{{ fixfox_esr_link | basename }}"
  tags:
    - install
    - install:base

# Extracts to /opt/firefox; copy: no means the archive is already on the
# remote host rather than being pushed from the control machine.
- name: Unzip the downloaded Firfox zipped file
  unarchive:
    src: "/tmp/{{ fixfox_esr_link | basename }}"
    dest: /opt
    copy: no
  tags:
    - install
    - install:base

- name: Download the latest TanaGuru tarball
  get_url:
    url: "{{ tanaguru_download_link }}"
    dest: "/tmp/{{ tanaguru_download_link | basename }}"
  tags:
    - install
    - install:base

# The unpacked directory under /tmp is where install.sh is run from later.
- name: Unzip the downloaded TanaGuru tarball
  unarchive:
    src: "/tmp/{{ tanaguru_download_link | basename }}"
    dest: "/tmp/"
    copy: no
  tags:
    - install
    - install:base
# Database and credentials for Tanaguru; names/passwords come from the
# tanaguru_parameters mapping in the role defaults.
- name: Create MySQL database for TanaGuru
  mysql_db:
    name: "{{ tanaguru_parameters.db_name }}"
    state: present
    encoding: utf8
    collation: utf8_general_ci
  tags:
    - install
    - install:base

# Grants the app user full rights on its own database only.
- name: Create MySQL user for TanaGuru
  mysql_user:
    name: "{{ tanaguru_parameters.db_user }}"
    password: "{{ tanaguru_parameters.db_password }}"
    host: localhost
    priv: "{{ tanaguru_parameters.db_name }}.*:ALL"
    state: present
  tags:
    - install
    - install:base
# Poor-man's idempotence check: an installed Tanaguru shows up in the
# process list with its /etc/tanaguru/ config path on the command line.
# changed_when: no keeps this probe from reporting a change every run.
- name: Check that tanaguru app is running
  shell: >
    /bin/ps aux | grep -i tanaguru
  register: tanaguru_app
  changed_when: no
  tags:
    - install

# Runs the vendor install.sh non-interactively (echo "yes" answers its
# confirmation prompt) from the unpacked tarball directory.  Skipped when
# the process probe above indicates Tanaguru is already installed.
# ":99.1" matches the Xvfb display configured by this role's init script.
- name: Install the TanaGuru
  shell: >
    /bin/echo "yes" | ./install.sh --mysql-tg-user "{{ tanaguru_parameters.db_user }}" \
    --mysql-tg-passwd "{{ tanaguru_parameters.db_password }}" \
    --mysql-tg-db "{{ tanaguru_parameters.db_name }}" \
    --tanaguru-url "{{ tanaguru_parameters.url }}" \
    --tomcat-webapps /var/lib/tomcat7/webapps \
    --tomcat-user tomcat7 \
    --tg-admin-email "{{ tanaguru_parameters.admin_email }}" \
    --tg-admin-passwd "{{ tanaguru_parameters.admin_passwd }}" \
    --firefox-esr-path /opt/firefox/firefox \
    --display-port ":99.1"
  args:
    chdir: "/tmp/{{ tanaguru_download_link | basename | regex_replace('.tar.gz$', '') }}"
  when: "tanaguru_app.stdout.find('/etc/tanaguru/') == -1"
  register: tanaguru_install
  tags:
    - install
    - install:base

# Pick up the freshly deployed webapp.
- name: Restart tomcat7
  service:
    name: tomcat7
    state: restarted
  when: tanaguru_install.changed
#!/bin/sh
set -e
RUN_AS_USER=tomcat7
OPTS=":99 -screen 1 1024x768x24 -nolisten tcp"
XVFB_DIR=/usr/bin
PIDFILE=/var/run/xvfb
case $1 in
start)
start-stop-daemon --chuid $RUN_AS_USER -b --start --exec $XVFB_DIR/Xvfb --make-pidfile --pidfile $PIDFILE -- $OPTS &
;;
stop)
start-stop-daemon --stop --user $RUN_AS_USER --pidfile $PIDFILE
rm -f $PIDFILE
;;
restart)
if start-stop-daemon --test --stop --user $RUN_AS_USER --pidfile $PIDFILE >/dev/null; then
$0 stop
fi;
$0 start
;;
*)
echo "Usage: $0 (start|restart|stop)"
exit 1
;;
esac
exit 0
\ No newline at end of file
...@@ -21,20 +21,20 @@ ...@@ -21,20 +21,20 @@
# #
- name: Create clone of edx-platform - name: Create clone of edx-platform
git_2_0_1: > git_2_0_1:
repo=https://github.com/edx/edx-platform.git repo: "https://github.com/edx/edx-platform.git"
dest={{ test_build_server_repo_path }}/edx-platform-clone dest: "{{ test_build_server_repo_path }}/edx-platform-clone"
version={{ test_edx_platform_version }} version: "{{ test_edx_platform_version }}"
become_user: "{{ test_build_server_user }}" become_user: "{{ test_build_server_user }}"
- name: get xargs limit - name: get xargs limit
shell: "xargs --show-limits" shell: "xargs --show-limits"
- name: Copy test-development-environment.sh to somewhere the jenkins user can access it - name: Copy test-development-environment.sh to somewhere the jenkins user can access it
copy: > copy:
src=test-development-environment.sh src: test-development-environment.sh
dest="{{ test_build_server_repo_path }}" dest: "{{ test_build_server_repo_path }}"
mode=0755 mode: 0755
- name: Validate build environment - name: Validate build environment
shell: "bash test-development-environment.sh {{ item }}" shell: "bash test-development-environment.sh {{ item }}"
......
--- ---
- name: import the test courses from github - name: import the test courses from github
shell: > shell: "{{ demo_edxapp_venv_bin }}/python /edx/bin/manage.edxapp lms git_add_course --settings=aws \"{{ item.github_url }}\""
{{ demo_edxapp_venv_bin }}/python /edx/bin/manage.edxapp lms git_add_course --settings=aws "{{ item.github_url }}"
become_user: "{{ common_web_user }}" become_user: "{{ common_web_user }}"
when: item.install == True when: item.install == True
with_items: TESTCOURSES_EXPORTS with_items: "{{ TESTCOURSES_EXPORTS }}"
- name: enroll test users in the testcourses - name: enroll test users in the testcourses
shell: > shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms enroll_user_in_course -e {{ item[0].email }} -c {{ item[1].course_id }}"
{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms enroll_user_in_course -e {{ item[0].email }} -c {{ item[1].course_id }} args:
chdir={{ demo_edxapp_code_dir }} chdir: "{{ demo_edxapp_code_dir }}"
become_user: "{{ common_web_user }}" become_user: "{{ common_web_user }}"
when: item[1].install == True when: item[1].install == True
with_nested: with_nested:
- demo_test_users - "{{ demo_test_users }}"
- TESTCOURSES_EXPORTS - "{{ TESTCOURSES_EXPORTS }}"
...@@ -10,7 +10,7 @@ ...@@ -10,7 +10,7 @@
owner: "{{ jenkins_user }}" owner: "{{ jenkins_user }}"
group: "{{ jenkins_group }}" group: "{{ jenkins_group }}"
mode: 0644 mode: 0644
with_items: hpi_files.stdout_lines with_items: "{{ hpi_files.stdout_lines }}"
when: hpi_files when: hpi_files
notify: notify:
- restart Jenkins - restart Jenkins
...@@ -26,7 +26,7 @@ ...@@ -26,7 +26,7 @@
owner: "{{ jenkins_user }}" owner: "{{ jenkins_user }}"
group: "{{ jenkins_group }}" group: "{{ jenkins_group }}"
mode: 0644 mode: 0644
with_items: jpi_files.stdout_lines with_items: "{{ jpi_files.stdout_lines }}"
when: jpi_files when: jpi_files
notify: notify:
- restart Jenkins - restart Jenkins
......
...@@ -46,11 +46,8 @@ ...@@ -46,11 +46,8 @@
- name: Set sandbox limits - name: Set sandbox limits
template: template:
src: "{{ item }}" src: "sandbox.conf.j2"
dest: "/etc/security/limits.d/sandbox.conf" dest: "/etc/security/limits.d/sandbox.conf"
first_available_file:
- "{{ secure_dir }}/sandbox.conf.j2"
- "sandbox.conf.j2"
- name: Install system dependencies of xserver - name: Install system dependencies of xserver
apt: apt:
...@@ -60,11 +57,8 @@ ...@@ -60,11 +57,8 @@
- name: Load python-sandbox apparmor profile - name: Load python-sandbox apparmor profile
template: template:
src: "{{ item }}" src: "usr.bin.python-sandbox.j2"
dest: "/etc/apparmor.d/edx_apparmor_sandbox" dest: "/etc/apparmor.d/edx_apparmor_sandbox"
first_available_file:
- "{{ secure_dir }}/files/edx_apparmor_sandbox.j2"
- "usr.bin.python-sandbox.j2"
- include: deploy.yml - include: deploy.yml
tags: tags:
......
-r github.txt ansible==2.2.0.0
PyYAML==3.11 PyYAML==3.12
Jinja2==2.8 Jinja2==2.8
MarkupSafe==0.23 MarkupSafe==0.23
boto==2.33.0 boto==2.33.0
ecdsa==0.11 ecdsa==0.11
paramiko==1.15.1 paramiko==2.0.2
pycrypto==2.6.1 pycrypto==2.6.1
wsgiref==0.1.2 wsgiref==0.1.2
docopt==0.6.1 docopt==0.6.1
......
yml_files:=$(shell find . -name "*.yml") yml_files:=$(shell find . -name "*.yml")
json_files:=$(shell find . -name "*.json") json_files:=$(shell find . -name "*.json")
jinja_files:=$(shell find . -name "*.j2")
# $(images) is calculated in the docker.mk file # $(images) is calculated in the docker.mk file
test: test.syntax test.edx_east_roles test: test.syntax test.edx_east_roles
test.syntax: test.syntax.yml test.syntax.json test.syntax.jinja test.syntax.dockerfiles test.syntax: test.syntax.yml test.syntax.json test.syntax.dockerfiles
test.syntax.yml: $(patsubst %,test.syntax.yml/%,$(yml_files)) test.syntax.yml: $(patsubst %,test.syntax.yml/%,$(yml_files))
...@@ -18,13 +17,13 @@ test.syntax.json: $(patsubst %,test.syntax.json/%,$(json_files)) ...@@ -18,13 +17,13 @@ test.syntax.json: $(patsubst %,test.syntax.json/%,$(json_files))
test.syntax.json/%: test.syntax.json/%:
jsonlint -v $* jsonlint -v $*
test.syntax.jinja: $(patsubst %,test.syntax.jinja/%,$(jinja_files))
test.syntax.jinja/%:
cd playbooks && python ../tests/jinja_check.py ../$*
test.syntax.dockerfiles: test.syntax.dockerfiles:
python util/check_dockerfile_coverage.py "$(images)" python util/check_dockerfile_coverage.py "$(images)"
test.edx_east_roles: test.edx_east_roles:
tests/test_edx_east_roles.sh tests/test_edx_east_roles.sh
# Housekeeping: remove artifacts produced by the edx-east test runs.
# Declared phony so a stray file named "clean"/"test.clean" cannot make
# these targets appear up to date.
.PHONY: clean test.clean

clean: test.clean

test.clean:
	rm -rf playbooks/edx-east/test_output
#!/usr/bin/env python
"""Syntax-check a single Jinja2 template using Ansible's filters/extensions.

Usage: jinja_check.py <template-path>

Renders the template with no variables defined.  Undefined-variable and
YAML-representation errors are expected (only parseability matters here),
so they are swallowed; any other failure propagates and fails the check.
"""
import os
import sys

from jinja2 import FileSystemLoader
from jinja2 import Environment as j
from jinja2.exceptions import UndefinedError
from ansible.utils.template import _get_filters, _get_extensions
from yaml.representer import RepresenterError

input_file = sys.argv[1]

# Files removed by the commit under test still appear in the diff list;
# treat them as trivially OK.
if not os.path.exists(input_file):
    print('{0}: deleted in diff'.format(input_file))
    sys.exit(0)

# Set up a Jinja environment that mirrors Ansible's: same extensions and
# filters, plus a loader that can resolve includes relative to the file.
j_e = j(trim_blocks=True, extensions=_get_extensions())
j_e.loader = FileSystemLoader(['.', os.path.dirname(input_file)])
j_e.filters.update(_get_filters())

# Render with no variables bound: undefined variables and bad YAML from
# the `to_nice_yaml` ansible filter are expected and ignored.
try:
    # open() instead of the removed `file()` builtin, and a context
    # manager so the handle is closed instead of leaked.
    with open(input_file) as handle:
        j_e.from_string(handle.read()).render(func=lambda: None)
except (UndefinedError, RepresenterError):
    pass
except TypeError as ex:
    # Jinja raises this TypeError when an Undefined value reaches JSON
    # serialization; anything else is a real template error, so re-raise
    # the original exception (preserving type and traceback) rather than
    # wrapping it in a bare Exception.
    if str(ex) != 'Undefined is not JSON serializable':
        raise
print('{}: ok'.format(input_file))
...@@ -7,7 +7,7 @@ import logging ...@@ -7,7 +7,7 @@ import logging
import sys import sys
import docker_images import docker_images
TRAVIS_BUILD_DIR = os.environ.get("TRAVIS_BUILD_DIR") TRAVIS_BUILD_DIR = os.environ.get("TRAVIS_BUILD_DIR", ".")
CONFIG_FILE_PATH = pathlib2.Path(TRAVIS_BUILD_DIR, "util", "parsefiles_config.yml") CONFIG_FILE_PATH = pathlib2.Path(TRAVIS_BUILD_DIR, "util", "parsefiles_config.yml")
LOGGER = logging.getLogger(__name__) LOGGER = logging.getLogger(__name__)
......
...@@ -45,7 +45,7 @@ ANSIBLE_DIR="/tmp/ansible" ...@@ -45,7 +45,7 @@ ANSIBLE_DIR="/tmp/ansible"
CONFIGURATION_DIR="/tmp/configuration" CONFIGURATION_DIR="/tmp/configuration"
EDX_PPA="deb http://ppa.edx.org precise main" EDX_PPA="deb http://ppa.edx.org precise main"
EDX_PPA_KEY_SERVER="hkp://pgp.mit.edu:80" EDX_PPA_KEY_SERVER="hkp://pgp.mit.edu:80"
EDX_PPA_KEY_ID="69464050" EDX_PPA_KEY_ID="B41E5E3969464050"
cat << EOF cat << EOF
****************************************************************************** ******************************************************************************
...@@ -116,7 +116,8 @@ fi ...@@ -116,7 +116,8 @@ fi
# which may differ from what is pinned in virtualenvironments # which may differ from what is pinned in virtualenvironments
apt-get update -y apt-get update -y
apt-get install -y python2.7 python2.7-dev python-pip python-apt python-yaml python-jinja2 build-essential sudo git-core libmysqlclient-dev apt-get install -y python2.7 python2.7-dev python-pip python-apt python-yaml python-jinja2 build-essential sudo git-core libmysqlclient-dev libffi-dev openssl-dev
# Workaround for a 16.04 bug, need to upgrade to latest and then # Workaround for a 16.04 bug, need to upgrade to latest and then
# potentially downgrade to the preferred version. # potentially downgrade to the preferred version.
......
...@@ -302,14 +302,129 @@ export ANSIBLE_ENABLE_SQS SQS_NAME SQS_REGION SQS_MSG_PREFIX PYTHONUNBUFFERED ...@@ -302,14 +302,129 @@ export ANSIBLE_ENABLE_SQS SQS_NAME SQS_REGION SQS_MSG_PREFIX PYTHONUNBUFFERED
export HIPCHAT_TOKEN HIPCHAT_ROOM HIPCHAT_MSG_PREFIX HIPCHAT_FROM export HIPCHAT_TOKEN HIPCHAT_ROOM HIPCHAT_MSG_PREFIX HIPCHAT_FROM
export HIPCHAT_MSG_COLOR DATADOG_API_KEY export HIPCHAT_MSG_COLOR DATADOG_API_KEY
if [[ ! -x /usr/bin/git || ! -x /usr/bin/pip ]]; then
echo "Installing pkg dependencies" #################################### Lifted from ansible-bootstrap.sh
/usr/bin/apt-get update if [[ -z "$ANSIBLE_REPO" ]]; then
/usr/bin/apt-get install -y git python-pip python-apt \\ ANSIBLE_REPO="https://github.com/edx/ansible.git"
git-core build-essential python-dev libxml2-dev \\ fi
libxslt-dev curl libmysqlclient-dev --force-yes
# Fill in defaults for any bootstrap knobs the caller did not provide.
# ${VAR:-default} treats both "unset" and "empty string" as missing, which
# matches the original [[ -z "$VAR" ]] guards exactly.

# Ref of edx/ansible to install when none was requested.
ANSIBLE_VERSION="${ANSIBLE_VERSION:-master}"

# Where to fetch the configuration repo from, and which ref to check out.
CONFIGURATION_REPO="${CONFIGURATION_REPO:-https://github.com/edx/configuration.git}"
CONFIGURATION_VERSION="${CONFIGURATION_VERSION:-master}"

# Leave the base OS packages alone unless explicitly asked to upgrade
# (consumed by the "$UPGRADE_OS" = true check further down).
UPGRADE_OS="${UPGRADE_OS:-false}"
#
# Bootstrapping constants
#
# Pinned versions of the python packaging toolchain; these are the versions
# installed by the pip/setuptools/virtualenv steps further down, needed for
# Ansible 2.x.
VIRTUAL_ENV_VERSION="15.0.2"
PIP_VERSION="8.1.2"
SETUPTOOLS_VERSION="24.0.3"
# edX PPA providing a current python 2.7 on older Ubuntu releases.
# NOTE(review): the "precise" here is a placeholder — EDX_PPA is re-assigned
# with the detected $SHORT_DIST right after the distribution check below, so
# this initial value is never used directly.
EDX_PPA="deb http://ppa.edx.org precise main"
EDX_PPA_KEY_SERVER="hkp://pgp.mit.edu:80"
# Full 64-bit key id (short 8-hex ids are ambiguous on public keyservers).
EDX_PPA_KEY_ID="B41E5E3969464050"
# Echo the effective settings into the build log so every run records exactly
# which ansible/configuration refs were used.
cat << EOF
******************************************************************************
Running the abbey with the following arguments:
ANSIBLE_REPO="$ANSIBLE_REPO"
ANSIBLE_VERSION="$ANSIBLE_VERSION"
CONFIGURATION_REPO="$CONFIGURATION_REPO"
CONFIGURATION_VERSION="$CONFIGURATION_VERSION"
******************************************************************************
EOF
# The apt-get / apt-key / system pip installs below all require root.
if [[ $(id -u) -ne 0 ]] ;then
echo "Please run as root";
exit 1;
fi
# Detect which Ubuntu release we are bootstrapping so the right PPAs and
# workarounds can be applied below.  Anything other than Precise (12.04),
# Trusty (14.04) or Xenial (16.04) is unsupported and aborts the build.
#
# Fix: the original text ended with a duplicated "fi fi" — the second "fi"
# was a leftover closer from a removed block and is an unmatched token
# (syntax error).  A single "fi" closes this if/elif/else chain.
if grep -q 'Precise Pangolin' /etc/os-release
then
    SHORT_DIST="precise"
elif grep -q 'Trusty Tahr' /etc/os-release
then
    SHORT_DIST="trusty"
elif grep -q 'Xenial Xerus' /etc/os-release
then
    SHORT_DIST="xenial"
else
    cat << EOF
This script is only known to work on Ubuntu Precise, Trusty and Xenial,
exiting. If you are interested in helping make installation possible
on other platforms, let us know.
EOF
    exit 1;
fi
# Point the edX PPA line at the release we actually detected above.
EDX_PPA="deb http://ppa.edx.org $SHORT_DIST main"
# Upgrade the OS
apt-get update -y
# NOTE(review): apt-key update takes no -y flag; the extra argument appears
# harmless here — confirm against the target apt version.
apt-key update -y
if [ "$UPGRADE_OS" = true ]; then
    echo "Upgrading the OS..."
    apt-get upgrade -y
fi
# Required for add-apt-repository
apt-get install -y software-properties-common python-software-properties
# Add git PPA
add-apt-repository -y ppa:git-core/ppa
# For older distributions we need to install a PPA for Python 2.7.10
if [[ "precise" = "$SHORT_DIST" || "trusty" = "$SHORT_DIST" ]]; then
    # Add python PPA
    apt-key adv --keyserver "$EDX_PPA_KEY_SERVER" --recv-keys "$EDX_PPA_KEY_ID"
    add-apt-repository -y "$EDX_PPA"
fi
# Install python 2.7 latest, git and other common requirements
# NOTE: This will install the latest version of python 2.7 and
# which may differ from what is pinned in virtualenvironments
# libffi-dev and libssl-dev are the extra build dependencies needed for
# Ansible 2.x (see the matching Dockerfile change in this commit).
apt-get update -y
apt-get install -y python2.7 python2.7-dev python-pip python-apt python-yaml python-jinja2 build-essential sudo git-core libmysqlclient-dev libffi-dev libssl-dev
# Workaround for a 16.04 bug, need to upgrade to latest and then
# potentially downgrade to the preferred version.
# https://github.com/pypa/pip/issues/3862
if [[ "xenial" = "$SHORT_DIST" ]]; then
    pip install --upgrade pip
    pip install --upgrade pip=="$PIP_VERSION"
else
    pip install --upgrade pip=="$PIP_VERSION"
fi
# pip moves to /usr/local/bin when upgraded
hash -r #pip may have moved from /usr/bin/ to /usr/local/bin/. This clears bash's path cache.
# Make sure the upgraded pip (and the tools it installs) win over /usr/bin.
PATH=/usr/local/bin:$PATH
# Pin setuptools/virtualenv to the versions declared in the constants block.
pip install setuptools=="$SETUPTOOLS_VERSION"
pip install virtualenv=="$VIRTUAL_ENV_VERSION"
# python3 is required for certain other things # python3 is required for certain other things
# (currently xqwatcher so it can run python2 and 3 grader code, # (currently xqwatcher so it can run python2 and 3 grader code,
# but potentially more in the future). It's not available on Ubuntu 12.04, # but potentially more in the future). It's not available on Ubuntu 12.04,
...@@ -324,15 +439,6 @@ fi ...@@ -324,15 +439,6 @@ fi
# only runs on a build from scratch # only runs on a build from scratch
/usr/bin/apt-get install -y python-httplib2 --force-yes /usr/bin/apt-get install -y python-httplib2 --force-yes
# Must upgrade to latest before pinning to work around bug
# https://github.com/pypa/pip/issues/3862
pip install --upgrade pip
hash -r #pip may have moved from /usr/bin/ to /usr/local/bin/. This clears bash's path cache.
pip install --upgrade pip==8.1.2
# upgrade setuptools early to avoid no distribution errors
pip install --upgrade setuptools==24.0.3
rm -rf $base_dir rm -rf $base_dir
mkdir -p $base_dir mkdir -p $base_dir
cd $base_dir cd $base_dir
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment