Commit 848fd291 by Filippo Panessa

Merge conflict

parents 11ad0661 911ce6a3
......@@ -4,6 +4,6 @@ Configuration Pull Request
Make sure that the following steps are done before merging
- [ ] @devops team member has commented with :+1:
- [ ] are you adding any new default values that need to be overriden when this goes live?
- [ ] are you adding any new default values that need to be overridden when this goes live?
- [ ] Open a ticket (DEVOPS) to make sure that they have been added to secure vars.
- [ ] Add an entry to the CHANGELOG.
# Travis CI configuration file for running tests
language: python
python:
- "2.7"
branches:
only:
- master
python:
- "2.7"
- master
services:
- docker
......
......@@ -201,3 +201,10 @@
- Changed MONGO_STORAGE_ENGINE to default to wiredTiger which is the default in 3.2 and 3.4 and what edX suggests be used even on 3.0.
If you have a mmapv1 3.0 install, override MONGO_STORAGE_ENGINE to be mmapv1 which was the old default.
- Ready for deploying Mongo 3.2
- Role: xqueue
- Added `EDXAPP_CELERY_BROKER_USE_SSL` to allow configuring celery to use TLS.
- Role: edxapp
- Added `XQUEUE_RABBITMQ_VHOST` to allow configuring the xqueue RabbitMQ host.
- Added `XQUEUE_RABBITMQ_PORT` and `XQUEUE_RABBITMQ_TLS` to allow configuring the RabbitMQ port, and enabling TLS respectively.
......@@ -26,7 +26,9 @@ test: docker.test
pkg: docker.pkg
clean:
clean: docker.clean
docker.clean:
rm -rf .build
docker.test.shard: $(foreach image,$(shell echo $(images) | python util/balancecontainers.py $(SHARDS) | awk 'NR%$(SHARDS)==$(SHARD)'),$(docker_test)$(image))
......
FROM edxops/precise-common:latest
FROM edxops/xenial-common:latest
MAINTAINER edxops
RUN apt-get update
......
......@@ -19,3 +19,6 @@ ANALYTICS_API_DATABASES:
PASSWORD: 'password'
HOST: "db.{{ DOCKER_TLD }}"
PORT: '3306'
# Change this if you want to build a specific version of the ANALYTICS_API
ANALYTICS_API_VERSION: 'master'
......@@ -7,7 +7,7 @@
# This allows the dockerfile to update /edx/app/edx_ansible/edx_ansible
# with the currently checked-out configuration repo.
FROM edxops/trusty-common:latest
FROM edxops/xenial-common:latest
MAINTAINER edxops
ENV DISCOVERY_VERSION=master
......
# To build this Dockerfile:
#
# From the root of configuration:
#
# docker build -f docker/build/docker-tools/Dockerfile .
#
# This allows the dockerfile to update /edx/app/edx_ansible/edx_ansible
# with the currently checked-out configuration repo.
FROM edxops/xenial-common:latest
MAINTAINER edxops
ENV PROGRAMS_VERSION=master
ENV REPO_OWNER=edx
ADD . /edx/app/edx_ansible/edx_ansible
WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays
COPY docker/build/docker-tools/ansible_overrides.yml /
RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook docker-tools.yml \
-c local -i '127.0.0.1,' \
-t 'install'
RUN which docker
RUN which docker-compose
......@@ -42,6 +42,9 @@ RUN apt-get update && apt-get install -y \
php5-common \
php5-cli
# Install dependencies needed for Ansible 2.x
RUN apt-get update && apt-get install -y libffi-dev libssl-dev
# Install drush (drupal shell) for access to Drupal commands/Acquia
RUN php -r "readfile('http://files.drush.org/drush.phar');" > drush && \
chmod +x drush && \
......@@ -56,7 +59,19 @@ RUN /bin/bash /tmp/docker/docker_install.sh
RUN usermod -aG docker go
# Assign the go user root privileges
RUN printf "\ngo ALL=(ALL:ALL) NOPASSWD: /usr/bin/pip\n" >> /etc/sudoers
RUN printf "\ngo ALL=(ALL:ALL) NOPASSWD: /usr/bin/pip, /usr/local/bin/pip\n" >> /etc/sudoers
# Upgrade pip and setup tools. Needed for Ansible 2.x
# Must upgrade to latest before pinning to work around bug
# https://github.com/pypa/pip/issues/3862
RUN \
pip install --upgrade pip && \
# pip may have moved from /usr/bin/ to /usr/local/bin/. This clears bash's path cache.
hash -r && \
pip install --upgrade pip==8.1.2 && \
# upgrade setuptools early to avoid no distribution errors
pip install --upgrade setuptools==24.0.3
# Install AWS command-line interface - for AWS operations in a go-agent task.
RUN pip install awscli
......
......@@ -29,6 +29,11 @@ necessary.
## Building and Uploading the container to ECS
* Copy the go-agent GitHub private key to this path:
- ```docker/build/go-agent/files/go_github_key.pem```
- A dummy key is in the repo file.
- The actual private key is kept in LastPass - see DevOps for access.
- WARNING: Do *NOT* commit/push the real private key to the public configuration repo!
* Create image
- This must be run from the root of the configuration repository
- ```docker build -f docker/build/go-agent/Dockerfile .```
......@@ -36,9 +41,10 @@ necessary.
- ```make docker.test.go-agent```
* Log docker in to AWS
- ```sh -c `aws ecr get-login --region us-east-1` ```
- You might need to remove the `-e` option returned by that command in order to log in successfully.
* Tag image
- ```docker tag -f <image_id> ############.dkr.ecr.us-east-1.amazonaws.com/release-pipeline:latest```
- ```docker tag -f <image_id> ############.dkr.ecr.us-east-1.amazonaws.com/release-pipeline:<version_number>```
- ```docker tag <image_id> ############.dkr.ecr.us-east-1.amazonaws.com/prod-tools-goagent:latest```
- ```docker tag <image_id> ############.dkr.ecr.us-east-1.amazonaws.com/prod-tools-goagent:<version_number>```
* upload:
- ```docker push ############.dkr.ecr.us-east-1.amazonaws.com/edx/release-pipeline/go-agent/python:latest```
- ```docker push ############.dkr.ecr.us-east-1.amazonaws.com/edx/release-pipeline/go-agent/python:<version_number>```
\ No newline at end of file
- ```docker push ############.dkr.ecr.us-east-1.amazonaws.com/edx/release-pipeline/prod-tools-goagent:latest```
- ```docker push ############.dkr.ecr.us-east-1.amazonaws.com/edx/release-pipeline/prod-tools-goagent:<version_number>```
\ No newline at end of file
FROM edxops/precise-common:latest
FROM edxops/xenial-common:latest
MAINTAINER edxops
ADD . /edx/app/edx_ansible/edx_ansible
......
......@@ -4,7 +4,7 @@ DOCKER_TLD: "edx"
# In addition, on systemd systems, and newer rsyslogd
# there may be issues with /dev/log existing
# http://www.projectatomic.io/blog/2014/09/running-syslog-within-a-docker-container/
PROGRAMS_DJANGO_SETTINGS_MODULE: programs.settings.local
PROGRAMS_DJANGO_SETTINGS_MODULE: programs.settings.devstack
PROGRAMS_DATABASES:
# rw user
default:
......
......@@ -2,8 +2,6 @@
DOCKER_TLD: "xqueue"
CONFIGURATION_REPO: "https://github.com/edx/configuration.git"
CONFIGURATION_VERSION: "hack2015/docker"
XQUEUE_SYSLOG_SERVER: "localhost"
XQUEUE_RABBITMQ_HOSTNAME: "rabbit.{{ DOCKER_TLD }}"
XQUEUE_MYSQL_HOST: "db.{{ DOCKER_TLD }}"
- name: build a VM with docker-tools
hosts: all
sudo: True
gather_facts: True
roles:
- docker
- docker-tools
......@@ -9,9 +9,10 @@ try:
import hipchat
except ImportError:
hipchat = None
from ansible.plugins.callback import CallbackBase
class CallbackModule(object):
class CallbackModule(CallbackBase):
"""Send status updates to a HipChat channel during playbook execution.
This plugin makes use of the following environment variables:
......
......@@ -28,9 +28,10 @@ except ImportError:
else:
import boto.sqs
from boto.exception import NoAuthHandlerFound
from ansible.plugins.callback import CallbackBase
class CallbackModule(object):
class CallbackModule(CallbackBase):
"""
This Ansible callback plugin sends task events
to SQS.
......
......@@ -238,7 +238,7 @@ class CallbackModule(CallbackBase):
Record the start of a play.
"""
self.playbook_name, _ = splitext(
basename(self.play.playbook.filename)
basename(self.play.get_name())
)
self.playbook_timestamp = Timestamp()
......
......@@ -12,3 +12,4 @@ ansible_managed=This file is created and updated by ansible, edit at your peril
[ssh_connection]
ssh_args=-o ControlMaster=auto -o ControlPersist=60s -o ControlPath="~/.ansible/tmp/ansible-ssh-%h-%p-%r" -o ServerAliveInterval=30
retries=5
\ No newline at end of file
......@@ -13,11 +13,13 @@
# - APPLICATION_NAME - The name of the application that we are migrating.
# - APPLICATION_USER - user which is meant to run the application
# - ARTIFACT_PATH - the path where the migration artifacts should be copied after completion
# - DB_MIGRATION_USER - the database username
# - DB_MIGRATION_PASS - the database password
#
# Other variables:
# - HIPCHAT_TOKEN - API token to send messages to hipchat
# - HIPCHAT_ROOM - ID or name of the room to send the notification
# - HIPCHAT_URL - URL of the hipchat API (defaults to v1 of the api)
#
# Other variables:
# - migration_plan - the filename where the unapplied migration YAML output is stored
# - migration_result - the filename where the migration output is saved
# - SUB_APPLICATION_NAME - used for migrations in edxapp {lms|cms}, must be specified
......@@ -59,7 +61,7 @@
shell: '{{ COMMAND_PREFIX }} {{ SUB_APPLICATION_NAME }} show_unapplied_migrations --database "{{ item }}" --output_file "{{ temp_output_dir.stdout }}/{{ item }}_{{ migration_plan }}" --settings "{{ EDX_PLATFORM_SETTINGS }}"'
become_user: "{{ APPLICATION_USER }}"
when: APPLICATION_NAME == "edxapp" and item != "read_replica"
with_items: edxapp_databases.keys()
with_items: "{{ edxapp_databases.keys() }}"
- name: migrate to apply any unapplied migrations
shell: '{{ COMMAND_PREFIX }} run_migrations --output_file "{{ temp_output_dir.stdout }}/{{ migration_result }}"'
......@@ -70,7 +72,7 @@
shell: '{{ COMMAND_PREFIX }} {{ SUB_APPLICATION_NAME }} run_migrations --database "{{ item }}" --settings "{{ EDX_PLATFORM_SETTINGS }}" --output_file "{{ temp_output_dir.stdout }}/{{ migration_result }}"'
become_user: "{{ APPLICATION_USER }}"
when: APPLICATION_NAME == "edxapp" and item != "read_replica"
with_items: edxapp_databases.keys()
with_items: "{{ edxapp_databases.keys() }}"
- name: List all migration files
action: "command ls -1 {{ temp_output_dir.stdout }}"
......
......@@ -13,25 +13,27 @@
keyfile: "/home/{{ owner }}/.ssh/authorized_keys"
serial: "{{ serial_count }}"
tasks:
- fail: msg="You must pass in a public_key"
- fail:
msg: "You must pass in a public_key"
when: public_key is not defined
- fail: msg="public does not exist in secrets"
- fail:
msg: "public does not exist in secrets"
when: ubuntu_public_keys[public_key] is not defined
- command: mktemp
register: mktemp
- name: Validate the public key before we add it to authorized_keys
copy: >
content="{{ ubuntu_public_keys[public_key] }}"
dest={{ mktemp.stdout }}
copy:
content: "{{ ubuntu_public_keys[public_key] }}"
dest: "{{ mktemp.stdout }}"
# This tests the public key and will not continue if it does not look valid
- command: ssh-keygen -l -f {{ mktemp.stdout }}
- file: >
path={{ mktemp.stdout }}
state=absent
- lineinfile: >
dest={{ keyfile }}
line="{{ ubuntu_public_keys[public_key] }}"
- file: >
path={{ keyfile }}
owner={{ owner }}
mode=0600
- file:
path: "{{ mktemp.stdout }}"
state: absent
- lineinfile:
dest: "{{ keyfile }}"
line: "{{ ubuntu_public_keys[public_key] }}"
- file:
path: "{{ keyfile }}"
owner: "{{ owner }}"
mode: 0600
......@@ -14,7 +14,8 @@
serial: "{{ serial_count }}"
pre_tasks:
- action: ec2_facts
- debug: var="{{ ansible_ec2_instance_id }}"
- debug:
var: "{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb
......@@ -26,8 +27,9 @@
become: False
when: elb_pre_post
tasks:
- debug: msg="{{ ansible_ec2_local_ipv4 }}"
with_items: list.results
- debug:
var: "{{ ansible_ec2_local_ipv4 }}"
with_items: "{{ list.results }}"
- command: rabbitmqctl stop_app
- command: rabbitmqctl join_cluster rabbit@ip-{{ hostvars.keys()[0]|replace('.', '-') }}
when: hostvars.keys()[0] != ansible_ec2_local_ipv4
......@@ -39,10 +41,9 @@
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
ec2_elbs: "{{ ec2_elbs }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
become: False
when: elb_pre_post
......@@ -47,11 +47,10 @@
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
ec2_elbs: "{{ ec2_elbs }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
become: False
when: elb_pre_post
#
......
......@@ -13,9 +13,9 @@
# is called it will use the new MYSQL connection
# info.
- name: Update RDS to point to the sandbox clone
lineinfile: >
dest=/edx/app/edx_ansible/server-vars.yml
line="{{ item }}"
lineinfile:
dest: /edx/app/edx_ansible/server-vars.yml
line: "{{ item }}"
with_items:
- "EDXAPP_MYSQL_HOST: {{ EDXAPP_MYSQL_HOST }}"
- "EDXAPP_MYSQL_DB_NAME: {{ EDXAPP_MYSQL_DB_NAME }}"
......@@ -24,9 +24,9 @@
tags: update_edxapp_mysql_host
- name: Update mongo to point to the sandbox mongo clone
lineinfile: >
dest=/edx/app/edx_ansible/server-vars.yml
line="{{ item }}"
lineinfile:
dest: /edx/app/edx_ansible/server-vars.yml
line: "{{ item }}"
with_items:
- "EDXAPP_MONGO_HOSTS: {{ EDXAPP_MONGO_HOSTS }}"
- "EDXAPP_MONGO_DB_NAME: {{ EDXAPP_MONGO_DB_NAME }}"
......@@ -35,6 +35,5 @@
tags: update_edxapp_mysql_host
- name: call update on edx-platform
shell: >
/edx/bin/update edx-platform {{ edxapp_version }}
shell: "/edx/bin/update edx-platform {{ edxapp_version }}"
tags: update_edxapp_mysql_host
......@@ -53,27 +53,27 @@
- MySQL-python
- name: create mysql databases
mysql_db: >
db={{ item.name}}
state={{ item.state }}
encoding={{ item.encoding }}
login_host={{ item.login_host }}
login_user={{ item.login_user }}
login_password={{ item.login_password }}
with_items: databases
mysql_db:
db: "{{ item.name}}"
state: "{{ item.state }}"
encoding: "{{ item.encoding }}"
login_host: "{{ item.login_host }}"
login_user: "{{ item.login_user }}"
login_password: "{{ item.login_password }}"
with_items: "{{ databases }}"
tags:
- dbs
- name: create mysql users and assign privileges
mysql_user: >
name="{{ item.name }}"
priv="{{ '/'.join(item.privileges) }}"
password="{{ item.password }}"
host={{ item.host }}
login_host={{ item.login_host }}
login_user={{ item.login_user }}
login_password={{ item.login_password }}
append_privs=yes
with_items: database_users
mysql_user:
name: "{{ item.name }}"
priv: "{{ '/'.join(item.privileges) }}"
password: "{{ item.password }}"
host: "{{ item.host }}"
login_host: "{{ item.login_host }}"
login_user: "{{ item.login_user }}"
login_password: "{{ item.login_password }}"
append_privs: yes
with_items: "{{ database_users }}"
tags:
- users
......@@ -41,4 +41,4 @@
roles: "{{ item.roles }}"
state: present
replica_set: "{{ repl_set }}"
with_items: MONGO_USERS
with_items: "{{ MONGO_USERS }}"
......@@ -21,7 +21,14 @@
dns_zone: sandbox.edx.org
name_tag: sandbox-temp
elb: false
vpc_subnet_id: subnet-cd867aba
ec2_vpc_subnet_id: subnet-cd867aba
instance_userdata: |
#!/bin/bash
set -x
set -e
export RUN_ANSIBLE=false;
wget https://raw.githubusercontent.com/edx/configuration/{{ configuration_version }}/util/install/ansible-bootstrap.sh -O - | bash;
launch_wait_time: 5
roles:
- role: launch_ec2
keypair: "{{ keypair }}"
......@@ -34,23 +41,27 @@
dns_name: "{{ dns_name }}"
dns_zone: "{{ dns_zone }}"
zone: "{{ zone }}"
vpc_subnet_id: "{{ vpc_subnet_id }}"
vpc_subnet_id: "{{ ec2_vpc_subnet_id }}"
assign_public_ip: yes
terminate_instance: true
instance_profile_name: sandbox
user_data: "{{ instance_userdata }}"
launch_ec2_wait_time: "{{ launch_wait_time }}"
- name: Configure instance(s)
hosts: launched
become: True
gather_facts: True
gather_facts: False
vars:
elb: false
elb: False
pre_tasks:
- name: Wait for cloud-init to finish
wait_for: >
path=/var/log/cloud-init.log
timeout=15
search_regex="final-message"
wait_for:
path: /var/log/cloud-init.log
timeout: 15
search_regex: "final-message"
- name: gather_facts
setup: ""
vars_files:
- roles/edxapp/defaults/main.yml
- roles/xqueue/defaults/main.yml
......
---
- name: Build service RDS instances
hosts: all
connection: local
# Needed for timestamps
gather_facts: True
roles:
- edx_service_rds
---
# Sample command: ansible-playbook -c local -i localhost, edx_vpc.yml -e@/Users/feanil/src/edx-secure/cloud_migrations/vpcs/test.yml -vvv
- name: Create a simple empty vpc
hosts: all
connection: local
gather_facts: False
vars:
vpc_state: present
roles:
- edx_vpc
......@@ -8,9 +8,9 @@
- edxapp
tasks:
- name: migrate lms
shell: >
chdir={{ edxapp_code_dir }}
python manage.py lms migrate --database {{ item }} --noinput {{ db_dry_run }} --settings=aws
shell: "python manage.py lms migrate --database {{ item }} --noinput {{ db_dry_run }} --settings=aws"
args:
chdir: "{{ edxapp_code_dir }}"
environment:
DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}"
DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
......@@ -21,9 +21,9 @@
tags:
- always
- name: migrate cms
shell: >
chdir={{ edxapp_code_dir }}
python manage.py cms migrate --database {{ item }} --noinput {{ db_dry_run }} --settings=aws
shell: "python manage.py cms migrate --database {{ item }} --noinput {{ db_dry_run }} --settings=aws"
args:
chdir: "{{ edxapp_code_dir }}"
environment:
DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}"
DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
......
......@@ -12,7 +12,8 @@
pre_tasks:
- action: ec2_facts
when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}"
- debug:
var: ansible_ec2_instance_id
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb
......@@ -29,16 +30,16 @@
- oraclejdk
- elasticsearch
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
- debug:
var: ansible_ec2_instance_id
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
ec2_elbs: "{{ ec2_elbs }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
become: False
when: elb_pre_post
......@@ -14,11 +14,11 @@
- name: stop certs service
service: name="certificates" state="stopped"
- name: checkout code
git_2_0_1: >
repo="{{ repo_url }}"
dest="{{ repo_path }}"
version="{{ certificates_version }}"
accept_hostkey=yes
git:
repo: "{{ repo_url }}"
dest: "{{ repo_path }}"
version: "{{ certificates_version }}"
accept_hostkey: yes
environment:
GIT_SSH: "{{ git_ssh_script }}"
- name: install requirements
......@@ -29,11 +29,11 @@
# Need to do this because the www-data user is not properly setup
# and can't run ssh.
- name: change owner to www-data
file: >
path="{{ repo_path }}"
owner="www-data"
group="www-data"
recurse=yes
state="directory"
file:
path: "{{ repo_path }}"
owner: "www-data"
group: "www-data"
recurse: yes
state: "directory"
- name: start certs service
service: name="certificates" state="started"
......@@ -79,6 +79,8 @@
manage_path: /edx/bin/manage.edxapp
ignore_user_creation_errors: no
deployment_settings: "{{ EDXAPP_SETTINGS | default('aws') }}"
vars_files:
- roles/common_vars/defaults/main.yml
tasks:
- name: Manage groups
shell: >
......@@ -86,7 +88,9 @@
manage_group {{ item.name | quote }}
{% if item.get('permissions', []) | length %}--permissions {{ item.permissions | default([]) | map('quote') | join(' ') }}{% endif %}
{% if item.get('remove') %}--remove{% endif %}
with_items: django_groups
with_items: "{{ django_groups }}"
become: true
become_user: "{{ common_web_user }}"
- name: Manage users
shell: >
......@@ -98,6 +102,8 @@
{% if item.get('staff') %}--staff{% endif %}
{% if item.get('unusable_password') %}--unusable-password{% endif %}
{% if item.get('initial_password_hash') %}--initial-password-hash {{ item.initial_password_hash | quote }}{% endif %}
with_items: django_users
with_items: "{{ django_users }}"
register: manage_users_result
failed_when: (manage_users_result | failed) and not (ignore_user_creation_errors | bool)
become: true
become_user: "{{ common_web_user }}"
......@@ -72,7 +72,7 @@
install_recommends: yes
force: yes
update_cache: yes
with_items: mongodb_debian_pkgs
with_items: "{{ mongodb_debian_pkgs }}"
- name: wait for mongo server to start
wait_for:
port: 27017
......
......@@ -48,7 +48,7 @@
install_recommends: yes
force: yes
update_cache: yes
with_items: mongodb_debian_pkgs
with_items: "{{ mongodb_debian_pkgs }}"
- name: wait for mongo server to start
wait_for:
port: 27017
......
......@@ -9,5 +9,6 @@
- "roles/ecommerce/defaults/main.yml"
- "roles/programs/defaults/main.yml"
- "roles/credentials/defaults/main.yml"
- "roles/discovery/defaults/main.yml"
roles:
- oauth_client_setup
......@@ -46,9 +46,7 @@
dest: "{{ xblock_config_temp_directory.stdout }}/{{ file | basename }}"
register: xblock_config_file
- name: Manage xblock configurations
shell: >
{{ python_path }} {{ manage_path }} lms --settings=aws
populate_model -f {{ xblock_config_file.dest | quote }} -u {{ user }}
shell: "{{ python_path }} {{ manage_path }} lms --settings=aws populate_model -f {{ xblock_config_file.dest | quote }} -u {{ user }}"
register: command_result
changed_when: "'Import complete, 0 new entries created' not in command_result.stdout"
- debug: msg="{{ command_result.stdout }}"
......
......@@ -17,7 +17,8 @@
pre_tasks:
- action: ec2_facts
when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}"
- debug:
var: ansible_ec2_instance_id
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb
......@@ -32,16 +33,16 @@
- aws
- rabbitmq
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
- debug:
var: ansible_ec2_instance_id
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
ec2_elbs: "{{ ec2_elbs }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
become: False
when: elb_pre_post
......@@ -17,22 +17,21 @@
register: mktemp
# This command will fail if this returns zero lines which will prevent
# the last key from being removed
- shell: >
grep -Fv '{{ ubuntu_public_keys[public_key] }}' {{ keyfile }} > {{ mktemp.stdout }}
- shell: >
while read line; do ssh-keygen -lf /dev/stdin <<<$line; done <{{ mktemp.stdout }}
executable=/bin/bash
- shell: "grep -Fv '{{ ubuntu_public_keys[public_key] }}' {{ keyfile }} > {{ mktemp.stdout }}"
- shell: "while read line; do ssh-keygen -lf /dev/stdin <<<$line; done <{{ mktemp.stdout }}"
args:
executable: /bin/bash
register: keycheck
- fail: msg="public key check failed!"
when: keycheck.stderr != ""
- command: cp {{ mktemp.stdout }} {{ keyfile }}
- file: >
path={{ keyfile }}
owner={{ owner }}
mode=0600
- file: >
path={{ mktemp.stdout }}
state=absent
- file:
path: "{{ keyfile }}"
owner: "{{ owner }}"
mode: 0600
- file:
path: "{{ mktemp.stdout }}"
state: absent
- shell: wc -l < {{ keyfile }}
register: line_count
- fail: msg="There should only be one line in ubuntu's authorized_keys"
......
......@@ -7,6 +7,6 @@
- roles/supervisor/defaults/main.yml
tasks:
- name: supervisor | restart supervisor
service: >
name={{ supervisor_service }}
state=restarted
service:
name: "{{ supervisor_service }}"
state: restarted
......@@ -12,8 +12,8 @@
- name: Set hostname
hostname: name={{ hostname_fqdn.split('.')[0] }}
- name: Update /etc/hosts
lineinfile: >
dest=/etc/hosts
regexp="^127\.0\.1\.1"
line="127.0.1.1{{'\t'}}{{ hostname_fqdn.split('.')[0] }}{{'\t'}}{{ hostname_fqdn }}{{'\t'}}localhost"
state=present
lineinfile:
dest: /etc/hosts
regexp: "^127\\.0\\.1\\.1"
line: "127.0.1.1{{ '\t' }}{{ hostname_fqdn.split('.')[0] }}{{ '\t' }}{{ hostname_fqdn }}{{ '\t' }}localhost"
state: present
......@@ -11,7 +11,8 @@
pre_tasks:
- action: ec2_facts
when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}"
- debug:
var: "{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb
......@@ -25,16 +26,16 @@
tasks:
- shell: echo "test"
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
- debug:
var: "{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
ec2_elbs: "{{ ec2_elbs }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
become: False
when: elb_pre_post
......@@ -14,7 +14,8 @@
pre_tasks:
- action: ec2_facts
when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}"
- debug:
var: "{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb
......@@ -38,16 +39,16 @@
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
- debug:
var: "{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
ec2_elbs: "{{ ec2_elbs }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
become: False
when: elb_pre_post
......@@ -40,8 +40,6 @@
- role: mongo
when: "'localhost' in EDXAPP_MONGO_HOSTS"
- { role: 'rabbitmq', rabbitmq_ip: '127.0.0.1' }
- role: aws
when: EDXAPP_SETTINGS == 'aws'
- { role: 'edxapp', celery_worker: True }
- edxapp
- role: ecommerce
......
......@@ -96,22 +96,10 @@ def main():
aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'],
no_log=True),
aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
tags=dict(default=None),
tags=dict(default=None, type='dict'),
)
)
tags_param = module.params.get('tags')
tags = {}
if isinstance(tags_param, list):
for item in module.params.get('tags'):
for k,v in item.iteritems():
tags[k] = v
elif isinstance(tags_param, dict):
tags = tags_param
else:
module.fail_json(msg="Invalid format for tags")
aws_secret_key = module.params.get('aws_secret_key')
aws_access_key = module.params.get('aws_access_key')
region = module.params.get('region')
......@@ -137,7 +125,7 @@ def main():
instances = []
instance_ids = []
for res in ec2.get_all_instances(filters={'tag:' + tag: value
for tag, value in tags.iteritems()}):
for tag, value in module.params.get('tags').iteritems()}):
for inst in res.instances:
if inst.state == "running":
instances.append({k: v for k, v in inst.__dict__.iteritems()
......
......@@ -66,7 +66,7 @@ tasks:
- name: tag my launched instances
local_action: ec2_tag resource={{ item.id }} region=eu-west-1 state=present
with_items: ec2.instances
with_items: "{{ ec2.instances }}"
args:
tags:
Name: webserver
......@@ -76,7 +76,7 @@ tasks:
tasks:
- name: tag my instance
local_action: ec2_ntag resource={{ item.id }} region=us-east-1 state=present
with_items: ec2.instances
with_items: "{{ ec2.instances }}"
args:
tags:
- Name: "{{ some_variable }}"
......@@ -101,7 +101,7 @@ def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
resource = dict(required=True),
tags = dict(),
tags = dict(required=False, type='list'),
state = dict(default='present', choices=['present', 'absent', 'list']),
)
)
......
......@@ -24,7 +24,7 @@
apt:
name: "{{ item }}"
state: present
with_items: ad_hoc_reporting_debian_pkgs
with_items: "{{ ad_hoc_reporting_debian_pkgs }}"
tags:
- install:system-requirements
......@@ -58,7 +58,7 @@
name: "{{ item }}"
state: present
extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}"
with_items: ad_hoc_reporting_pip_pkgs
with_items: "{{ ad_hoc_reporting_pip_pkgs }}"
tags:
- install:app-requirements
......@@ -92,7 +92,7 @@
- scripts
- scripts:mysql
- install:code
with_items: AD_HOC_REPORTING_REPLICA_DB_HOSTS
with_items: "{{ AD_HOC_REPORTING_REPLICA_DB_HOSTS }}"
# These templates rely on there being a global
# read_only mongo user, you must override the default
......
......@@ -27,3 +27,6 @@
##
# Defaults for role add_user
#
#
#
dirs: []
......@@ -65,8 +65,7 @@
owner: "{{ item.owner }}"
group: "{{ item.group }}"
mode: "{{ item.mode | default('0755') }}"
with_items: dirs
when: dirs is defined
with_items: "{{ dirs }}"
tags:
- install
- install:base
......@@ -12,7 +12,7 @@
notify: restart alton
- name: Checkout the code
git_2_0_1:
git:
dest: "{{ alton_code_dir }}"
repo: "{{ alton_source_repo }}"
version: "{{ alton_version }}"
......
......@@ -33,42 +33,40 @@
#
- name: setup the analytics_api env file
template: >
src="edx/app/analytics_api/analytics_api_env.j2"
dest="{{ analytics_api_home }}/analytics_api_env"
owner={{ analytics_api_user }}
group={{ analytics_api_user }}
mode=0644
template:
src: "edx/app/analytics_api/analytics_api_env.j2"
dest: "{{ analytics_api_home }}/analytics_api_env"
owner: "{{ analytics_api_user }}"
group: "{{ analytics_api_user }}"
mode: 0644
tags:
- install
- install:configuration
- name: "add gunicorn configuration file"
template: >
src=edx/app/analytics_api/analytics_api_gunicorn.py.j2
dest={{ analytics_api_home }}/analytics_api_gunicorn.py
template:
src: edx/app/analytics_api/analytics_api_gunicorn.py.j2
dest: "{{ analytics_api_home }}/analytics_api_gunicorn.py"
become_user: "{{ analytics_api_user }}"
tags:
- install
- install:configuration
- name: install application requirements
pip: >
requirements="{{ analytics_api_requirements_base }}/{{ item }}"
virtualenv="{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}"
state=present
pip:
requirements: "{{ analytics_api_requirements_base }}/{{ item }}"
virtualenv: "{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}"
state: present
become_user: "{{ analytics_api_user }}"
with_items: analytics_api_requirements
with_items: "{{ analytics_api_requirements }}"
tags:
- install
- install:app-requirements
- name: migrate
shell: >
chdir={{ analytics_api_code_dir }}
DB_MIGRATION_USER='{{ COMMON_MYSQL_MIGRATE_USER }}'
DB_MIGRATION_PASS='{{ COMMON_MYSQL_MIGRATE_PASS }}'
{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/python ./manage.py migrate --noinput
shell: "DB_MIGRATION_USER='{{ COMMON_MYSQL_MIGRATE_USER }}' DB_MIGRATION_PASS='{{ COMMON_MYSQL_MIGRATE_PASS }}' {{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/python ./manage.py migrate --noinput"
args:
chdir: "{{ analytics_api_code_dir }}"
become_user: "{{ analytics_api_user }}"
environment: "{{ analytics_api_environment }}"
when: migrate_db is defined and migrate_db|lower == "yes"
......@@ -77,9 +75,9 @@
- migrate:db
- name: run collectstatic
shell: >
chdir={{ analytics_api_code_dir }}
{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/python manage.py collectstatic --noinput
shell: "{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/python manage.py collectstatic --noinput"
args:
chdir: "{{ analytics_api_code_dir }}"
become_user: "{{ analytics_api_user }}"
environment: "{{ analytics_api_environment }}"
tags:
......@@ -87,40 +85,44 @@
- assets:gather
- name: create api users
shell: >
chdir={{ analytics_api_code_dir }}
{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/python manage.py set_api_key {{ item.key }} {{ item.value }}
shell: "{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/python manage.py set_api_key {{ item.key }} {{ item.value }}"
args:
chdir: "{{ analytics_api_code_dir }}"
become_user: "{{ analytics_api_user }}"
environment: "{{ analytics_api_environment }}"
with_dict: ANALYTICS_API_USERS
with_dict: "{{ ANALYTICS_API_USERS }}"
tags:
- manage
- manage:app-users
- name: write out the supervisor wrapper
template: >
src=edx/app/analytics_api/analytics_api.sh.j2
dest={{ analytics_api_home }}/{{ analytics_api_service_name }}.sh
mode=0650 owner={{ supervisor_user }} group={{ common_web_user }}
template:
src: edx/app/analytics_api/analytics_api.sh.j2
dest: "{{ analytics_api_home }}/{{ analytics_api_service_name }}.sh"
mode: 0650
owner: "{{ supervisor_user }}"
group: "{{ common_web_user }}"
tags:
- install
- install:configuration
- name: write supervisord config
template: >
src=edx/app/supervisor/conf.d.available/analytics_api.conf.j2
dest="{{ supervisor_available_dir }}/{{ analytics_api_service_name }}.conf"
owner={{ supervisor_user }} group={{ common_web_user }} mode=0644
template:
src: edx/app/supervisor/conf.d.available/analytics_api.conf.j2
dest: "{{ supervisor_available_dir }}/{{ analytics_api_service_name }}.conf"
owner: "{{ supervisor_user }}"
group: "{{ common_web_user }}"
mode: 0644
tags:
- install
- install:configuration
- name: enable supervisor script
file: >
src={{ supervisor_available_dir }}/{{ analytics_api_service_name }}.conf
dest={{ supervisor_cfg_dir }}/{{ analytics_api_service_name }}.conf
state=link
force=yes
file:
src: "{{ supervisor_available_dir }}/{{ analytics_api_service_name }}.conf"
dest: "{{ supervisor_cfg_dir }}/{{ analytics_api_service_name }}.conf"
state: link
force: yes
when: not disable_edx_services
tags:
- install
......@@ -134,10 +136,10 @@
- manage:start
- name: create symlinks from the venv bin dir
file: >
src="{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/{{ item }}"
dest="{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.analytics_api"
state=link
file:
src: "{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/{{ item }}"
dest: "{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.analytics_api"
state: link
with_items:
- python
- pip
......@@ -147,10 +149,10 @@
- install:base
- name: create symlinks from the repo dir
file: >
src="{{ analytics_api_code_dir }}/{{ item }}"
dest="{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.analytics_api"
state=link
file:
src: "{{ analytics_api_code_dir }}/{{ item }}"
dest: "{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.analytics_api"
state: link
with_items:
- manage.py
tags:
......@@ -158,11 +160,11 @@
- install:base
- name: restart analytics_api
supervisorctl: >
state=restarted
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
name={{ analytics_api_service_name }}
supervisorctl:
state: restarted
supervisorctl_path: "{{ supervisor_ctl }}"
config: "{{ supervisor_cfg }}"
name: "{{ analytics_api_service_name }}"
when: not disable_edx_services
become_user: "{{ supervisor_service_user }}"
tags:
......
......@@ -11,12 +11,17 @@
# Defaults for role analytics_pipeline
#
ANALYTICS_PIPELINE_OUTPUT_DATABASE_USER: pipeline001
ANALYTICS_PIPELINE_OUTPUT_DATABASE_PASSWORD: password
ANALYTICS_PIPELINE_OUTPUT_DATABASE_HOST: localhost
ANALYTICS_PIPELINE_OUTPUT_DATABASE_PORT: 3306
ANALYTICS_PIPELINE_OUTPUT_DATABASE_NAME: "{{ ANALYTICS_API_REPORTS_DB_NAME }}"
ANALYTICS_PIPELINE_OUTPUT_DATABASE:
username: pipeline001
password: password
host: localhost
port: 3306
username: "{{ ANALYTICS_PIPELINE_OUTPUT_DATABASE_USER }}"
password: "{{ ANALYTICS_PIPELINE_OUTPUT_DATABASE_PASSWORD }}"
host: "{{ ANALYTICS_PIPELINE_OUTPUT_DATABASE_HOST }}"
port: "{{ ANALYTICS_PIPELINE_OUTPUT_DATABASE_PORT }}"
ANALYTICS_PIPELINE_INPUT_DATABASE:
username: "{{ COMMON_MYSQL_READ_ONLY_USER }}"
......
......@@ -89,7 +89,7 @@
- install:configuration
- name: Util library source checked out
git_2_0_1:
git:
repo: "{{ analytics_pipeline_util_library.repo }}"
dest: "{{ analytics_pipeline_util_library.path }}"
version: "{{ analytics_pipeline_util_library.version }}"
......
......@@ -3,13 +3,13 @@
#
# Tasks for role {{ role_name }}
#
#
# Overview:
#
#
#
# Dependencies:
#
#
#
# Example play:
#
#
......@@ -149,7 +149,7 @@
tags:
- install
- install:app-requirements
- name: run collectstatic
command: make static
args:
......@@ -161,7 +161,7 @@
- assets:gather
- name: restart the application
supervisorctl:
supervisorctl:
state: restarted
supervisorctl_path: "{{ '{{' }} supervisor_ctl }}"
config: "{{ '{{' }} supervisor_cfg }}"
......@@ -173,20 +173,24 @@
- manage:start
- name: Copying nginx configs for {{ role_name }}
template: >
src=edx/app/nginx/sites-available/{{ role_name }}.j2
dest={{ '{{' }} nginx_sites_available_dir }}/{{ role_name }}
owner=root group={{ '{{' }} common_web_user }} mode=0640
template:
src: "edx/app/nginx/sites-available/{{ role_name }}.j2"
dest: "{{ '{{' }} nginx_sites_available_dir }}/{{ role_name }}"
owner: root
group: "{{ '{{' }} common_web_user }}"
mode: 0640
notify: reload nginx
tags:
- install
- install:vhosts
- name: Creating nginx config links for {{ role_name }}
file: >
src={{ '{{' }} nginx_sites_available_dir }}/{{ role_name }}
dest={{ '{{' }} nginx_sites_enabled_dir }}/{{ role_name }}
state=link owner=root group=root
file:
src: "{{ '{{' }} nginx_sites_available_dir }}/{{ role_name }}"
dest: "{{ '{{' }} nginx_sites_enabled_dir }}/{{ role_name }}"
state: link
owner: root
group: root
notify: reload nginx
tags:
- install
......
......@@ -23,41 +23,41 @@
- name: install antivirus system packages
apt: pkg={{ item }} install_recommends=yes state=present
with_items: antivirus_debian_pkgs
with_items: "{{ antivirus_debian_pkgs }}"
- name: create antivirus scanner user
user: >
name="{{ antivirus_user }}"
home="{{ antivirus_app_dir }}"
createhome=no
shell=/bin/false
user:
name: "{{ antivirus_user }}"
home: "{{ antivirus_app_dir }}"
createhome: no
shell: /bin/false
- name: create antivirus app and data dirs
file: >
path="{{ item }}"
state=directory
owner="{{ antivirus_user }}"
group="{{ antivirus_user }}"
file:
path: "{{ item }}"
state: directory
owner: "{{ antivirus_user }}"
group: "{{ antivirus_user }}"
with_items:
- "{{ antivirus_app_dir }}"
- "{{ antivirus_app_dir }}/data"
- name: install antivirus s3 scanner script
template: >
src=s3_bucket_virus_scan.sh.j2
dest={{ antivirus_app_dir }}/s3_bucket_virus_scan.sh
mode=0555
owner={{ antivirus_user }}
group={{ antivirus_user }}
template:
src: s3_bucket_virus_scan.sh.j2
dest: "{{ antivirus_app_dir }}/s3_bucket_virus_scan.sh"
mode: "0555"
owner: "{{ antivirus_user }}"
group: "{{ antivirus_user }}"
- name: install antivirus s3 scanner cronjob
cron: >
name="antivirus-{{ item }}"
job="{{ antivirus_app_dir }}/s3_bucket_virus_scan.sh -b '{{ item }}' -m '{{ ANTIVIRUS_MAILTO }}' -f '{{ ANTIVIRUS_MAILFROM }}'"
backup=yes
cron_file=antivirus-{{ item }}
user={{ antivirus_user }}
hour="*"
minute="0"
day="*"
with_items: ANTIVIRUS_BUCKETS
cron:
name: "antivirus-{{ item }}"
job: "{{ antivirus_app_dir }}/s3_bucket_virus_scan.sh -b '{{ item }}' -m '{{ ANTIVIRUS_MAILTO }}' -f '{{ ANTIVIRUS_MAILFROM }}'"
backup: yes
cron_file: "antivirus-{{ item }}"
user: "{{ antivirus_user }}"
hour: "*"
minute: "0"
day: "*"
with_items: "{{ ANTIVIRUS_BUCKETS }}"
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role asqatasun
#
# Locale used by the Asqatasun service and by the MySQL settings below.
ASQATASUN_LOCALE: 'en_US.UTF-8'
# MySQL connection settings for the Asqatasun schema.
ASQATASUN_DATABASE_NAME: 'asqatasun'
ASQATASUN_DATABASE_USER: 'asqatasun'
ASQATASUN_DATABASE_PASSWORD: 'changeme'
ASQATASUN_DATABASE_HOST: 'localhost'
ASQATASUN_DATABASE_ENCODING: 'utf8'
ASQATASUN_DATABASE_COLLATION: 'utf8_general_ci'
# Public URL the installed webapp will be reachable at.
ASQATASUN_URL: 'http://localhost:8080/asqatasun/'
# Initial admin account created by the installer.
ASQATASUN_ADMIN_EMAIL: 'admin@example.com'
ASQATASUN_ADMIN_PASSWORD: 'changeme'
# System packages required by Asqatasun (Java/Tomcat runtime, MySQL,
# headless X server, and mail delivery for reports).
asqatasun_debian_pkgs:
  - wget
  - bzip2
  - openjdk-7-jre
  - unzip
  - mysql-server
  - libmysql-java
  - python-mysqldb
  - libtomcat7-java
  - tomcat7
  - libspring-instrument-java
  - xvfb
  - libdbus-glib-1-2
  - mailutils
  - postfix
locale: "{{ ASQATASUN_LOCALE }}"
asqatasun_download_link: "http://download.asqatasun.org/asqatasun-latest.tar.gz"
# Asqatasun version that you want to install; see the full list of releases
# under the "Releases" tab of the GitHub project page.
asqatasun_version: "asqatasun-4.0.0-rc.1"
# Go to this link to find your desired ESR Firefox
# For 32-bit architecture
# http://download-origin.cdn.mozilla.net/pub/firefox/releases/31.4.0esr/linux-i686/
# For 64-bit architecture
# http://download-origin.cdn.mozilla.net/pub/firefox/releases/31.4.0esr/linux-x86_64/
# Default is en-US in our example
# NOTE(review): the variable name contains a typo ("fixfox"); kept as-is for
# compatibility with the tasks that reference it.
fixfox_esr_link: "http://download-origin.cdn.mozilla.net/pub/firefox/releases/31.4.0esr/linux-x86_64/en-US/firefox-31.4.0esr.tar.bz2"
# MySQL variables for Asqatasun (rendered into etc/mysql/conf.d/asqatasun.cnf.j2)
default_character_set: "utf8"
collation_server: "utf8_general_ci"
init_connect: "SET NAMES utf8"
character_set_server: "utf8"
mysql_max_allowed_packet: "64M"
# Convenience mapping consumed by the role's tasks.
asqatasun_parameters:
  db_name: "{{ ASQATASUN_DATABASE_NAME }}"
  db_user: "{{ ASQATASUN_DATABASE_USER }}"
  db_password: "{{ ASQATASUN_DATABASE_PASSWORD }}"
  db_host: "{{ ASQATASUN_DATABASE_HOST }}"
  db_encoding: "{{ ASQATASUN_DATABASE_ENCODING }}"
  db_collation: "{{ ASQATASUN_DATABASE_COLLATION }}"
  url: "{{ ASQATASUN_URL }}"
  admin_email: "{{ ASQATASUN_ADMIN_EMAIL }}"
  admin_passwd: "{{ ASQATASUN_ADMIN_PASSWORD }}"
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
# Tasks for role asqatasun
#
# Overview:
#
# Install the Asqatasun, an opensource web site analyzer,
# used for web accessibility (a11y) and Search Engine Optimization (SEO)
#
# ansible-playbook -i 'asqatasun.example.com,' ./asqatasun.yml -e@/ansible/vars/deployment.yml -e@/ansible/vars/env-deployment.yml
#
# Preseed Postfix debconf answers so the later package install (from
# asqatasun_debian_pkgs) runs non-interactively.
- name: Set Postfix options
  debconf:
    # Fixed typo: was "postifx", which silently preseeded a nonexistent package.
    name: postfix
    question: "{{ item.question }}"
    # Fixed stray trailing space that was appended to every preseeded value.
    value: "{{ item.value }}"
    vtype: "string"
  with_items:
    - { question: "postfix/mailname", value: " " }
    - { question: "postfix/main_mailer_type", value: "Satellite system" }
  tags:
    - install
    - install:configuration
# Generate the requested UTF-8 locale; re-run dpkg-reconfigure only when
# locale_gen reports a change.
- name: Update locale Setting
  locale_gen:
    name: "{{ locale }}"
    state: present
  register: set_locale
  tags:
    - install
    - install:base
- name: Reconfigure locale
  command: dpkg-reconfigure locales
  when: set_locale.changed
# Install the system packages Asqatasun needs (Java, Tomcat, MySQL, Xvfb, ...).
- name: Install the Asqatasun Prerequisites
  apt:
    name: "{{ item }}"
    update_cache: yes
    # "installed" is a deprecated alias; "present" is the canonical state.
    state: present
  # Bare variable names in with_items are deprecated; use the full Jinja
  # expression, consistent with the rest of this repository.
  with_items: "{{ asqatasun_debian_pkgs }}"
  tags:
    - install
    - install:base
# Drop MySQL character-set tuning for Asqatasun, but only when the database
# runs on this host.
- name: Copy the asqatasun.cnf template to /etc/mysql/conf.d
  template:
    dest: /etc/mysql/conf.d/asqatasun.cnf
    src: etc/mysql/conf.d/asqatasun.cnf.j2
    owner: root
    group: root
  # "when" already evaluates Jinja: use a bare expression instead of "{{ }}"
  # interpolation inside a quoted string (deprecated and fragile).
  when: asqatasun_parameters.db_host == 'localhost'
  register: my_cnf
  tags:
    - install
    - install:configuration
# Pick up the asqatasun.cnf written above; only runs when that file changed.
- name: Restart MySQL
  service:
    name: mysql
    state: restarted
  when: my_cnf.changed
# Expose the distro-packaged Spring instrument and MySQL connector jars on
# Tomcat's classpath.
- name: Create a soft link for tomcat jar and mysql connector
  file:
    dest: "{{ item.dest }}"
    src: "{{ item.src }}"
    state: link
  with_items:
    - { src: '/usr/share/java/spring3-instrument-tomcat.jar', dest: '/usr/share/tomcat7/lib/spring3-instrument-tomcat.jar' }
    - { src: '/usr/share/java/mysql-connector-java.jar', dest: '/usr/share/tomcat7/lib/mysql-connector-java.jar'}
  tags:
    - install
    - install:configuration
# Install the Xvfb init script that provides the virtual display Asqatasun
# uses for headless browsing.
- name: Copy the xvfb template to /etc/init.d
  template:
    dest: /etc/init.d/xvfb
    src: etc/init.d/xvfb.j2
    owner: root
    group: root
    # Quoted octal string: a bare integer 755 is read as *decimal* by Ansible
    # and yields the wrong permission bits, leaving the script non-executable.
    mode: "0755"
  register: xvfb
  tags:
    - install
    # Align with the "install:configuration" tag used by the rest of this role.
    - install:configuration
# (Re)start the virtual display whenever its init script changed, and make
# sure it comes up on boot.
- name: Restart xvfb
  service:
    name: xvfb
    pattern: /etc/init.d/xvfb
    state: restarted
    enabled: yes
  when: xvfb.changed
  tags:
    - install
    # Align with the "install:configuration" tag used by the rest of this role.
    - install:configuration
# Fetch and unpack the pinned Firefox ESR build Asqatasun drives via Xvfb.
# NOTE(review): the source variable is named "fixfox_esr_link" (typo) in the
# role defaults; the name is kept so this task matches its definition.
- name: Download the latest ESR Firefox
  get_url:
    url: "{{ fixfox_esr_link }}"
    dest: "/tmp/{{ fixfox_esr_link | basename }}"
  tags:
    - install
    - install:base
- name: Unzip the downloaded Firefox zipped file
  unarchive:
    src: "/tmp/{{ fixfox_esr_link | basename }}"
    dest: /opt
    copy: no
  tags:
    - install
    - install:base
# Fetch the Asqatasun release tarball and unpack it into /tmp, where the
# installer task below expects to find it.
- name: Download the latest Asqatasun tarball
  get_url:
    url: "{{ asqatasun_download_link }}"
    dest: "/tmp/{{ asqatasun_download_link | basename }}"
  tags:
    - install
    - install:base
- name: Unzip the downloaded Asqatasun tarball
  unarchive:
    src: "/tmp/{{ asqatasun_download_link | basename }}"
    dest: "/tmp/"
    copy: no
  tags:
    - install
    - install:base
# Create the Asqatasun schema and grant its user full rights on it.
# NOTE(review): no login credentials are passed, so these modules rely on
# passwordless local root access to MySQL — confirm this holds for remote
# database hosts.
- name: Create MySQL database for Asqatasun
  mysql_db:
    name: "{{ asqatasun_parameters.db_name }}"
    state: present
    encoding: "{{ asqatasun_parameters.db_encoding }}"
    collation: "{{ asqatasun_parameters.db_collation }}"
  tags:
    - migrate
    - migrate:db
- name: Create MySQL user for Asqatasun
  mysql_user:
    name: "{{ asqatasun_parameters.db_user }}"
    password: "{{ asqatasun_parameters.db_password }}"
    host: "{{ asqatasun_parameters.db_host }}"
    priv: "{{ asqatasun_parameters.db_name }}.*:ALL"
    state: present
  tags:
    - migrate
    - migrate:db
# Heuristic install guard: if the process list mentions /etc/asqatasun the
# app is assumed to be installed already and the installer below is skipped.
- name: Check that asqatasun app is running
  shell: >
    /bin/ps aux | grep -i asqatasun
  register: asqatasun_app
  changed_when: no
  tags:
    - install
    - install:base
# Run the vendored install.sh non-interactively with the DB/admin settings
# from asqatasun_parameters and the Firefox ESR unpacked earlier.
# NOTE(review): inside a folded (>) scalar the trailing backslashes are kept
# literally and fold into "\ " sequences; verify the rendered command line
# still parses as intended before relying on this in new code.
- name: Install the Asqatasun
  shell: >
    /bin/echo "yes" | ./install.sh --database-user "{{ asqatasun_parameters.db_user }}" \
    --database-passwd "{{ asqatasun_parameters.db_password }}" \
    --database-db "{{ asqatasun_parameters.db_name }}" \
    --database-host "{{ asqatasun_parameters.db_host }}" \
    --asqatasun-url http://localhost:8080/asqatasun/ \
    --tomcat-webapps /var/lib/tomcat7/webapps/ \
    --tomcat-user tomcat7 \
    --asqa-admin-email "{{ asqatasun_parameters.admin_email }}" \
    --asqa-admin-passwd "{{ asqatasun_parameters.admin_passwd }}" \
    --firefox-esr-binary-path /opt/firefox-esr/firefox
    --display-port ":99"
  args:
    chdir: "/tmp/{{ asqatasun_version }}.i386"
  when: "asqatasun_app.stdout.find('/etc/asqatasun') == -1"
  register: asqatasun_install
  tags:
    - install
    - install:base
# Reload the webapp after a fresh install.
- name: Restart tomcat7
  service:
    name: tomcat7
    state: restarted
  when: asqatasun_install.changed
#!/bin/sh
### BEGIN INIT INFO
# Provides:          xvfb
# Required-Start:    $remote_fs $syslog
# Required-Stop:     $remote_fs $syslog
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: XVFB - Virtual X server display
# Description:       XVFB - Virtual X server display
### END INIT INFO
# Author: Matthieu Faure <mfaure@asqatasun.org>
# Do NOT "set -e"
# TODO: improve with help from /etc/init.d/skeleton
# Account the virtual X server runs as (Tomcat hosts Asqatasun).
RUN_AS_USER=tomcat7
# Display :99, a single 1024x768x24 screen, no TCP listener.
OPTS=":99 -screen 1 1024x768x24 -nolisten tcp"
XVFB_DIR=/usr/bin
PIDFILE=/var/run/xvfb
case $1 in
  start)
    # -b backgrounds the daemon; --make-pidfile records it for stop/restart.
    start-stop-daemon --chuid $RUN_AS_USER -b --start --exec $XVFB_DIR/Xvfb --make-pidfile --pidfile $PIDFILE -- $OPTS &
    ;;
  stop)
    start-stop-daemon --stop --user $RUN_AS_USER --pidfile $PIDFILE
    rm -f $PIDFILE
    ;;
  restart)
    # Only stop first if a matching process is actually running (--test).
    if start-stop-daemon --test --stop --user $RUN_AS_USER --pidfile $PIDFILE >/dev/null; then
      $0 stop
    fi;
    $0 start
    ;;
  *)
    echo "Usage: $0 (start|restart|stop)"
    exit 1
    ;;
esac
exit 0
# MySQL character-set and collation overrides for Asqatasun, rendered from
# the role's default_character_set / collation_server / init_connect /
# character_set_server / mysql_max_allowed_packet variables.
[client]
default-character-set={{ default_character_set }}
[mysql]
default-character-set={{ default_character_set }}
[mysqld]
collation-server = {{ collation_server }}
init-connect={{ "\'" + init_connect + "\'" }}
character-set-server = {{ character_set_server }}
max_allowed_packet = {{ mysql_max_allowed_packet }}
......@@ -102,8 +102,5 @@
file:
path: "{{ item.item }}"
mode: "0644"
when: >
vagrant_home_dir.stat.exists == false and
ansible_distribution in common_debian_variants and
item.stat.exists
with_items: motd_files_exist.results
when: vagrant_home_dir.stat.exists == False and ansible_distribution in common_debian_variants and item.stat.exists
with_items: "{{ motd_files_exist.results }}"
# Install browsermob-proxy, which is used for page performance testing with bok-choy
---
- name: get zip file
get_url: >
url={{ browsermob_proxy_url }}
dest=/var/tmp/browsermob-proxy-{{ browsermob_proxy_version }}.zip
get_url:
url: "{{ browsermob_proxy_url }}"
dest: "/var/tmp/browsermob-proxy-{{ browsermob_proxy_version }}.zip"
register: download_browsermob_proxy
- name: unzip into /var/tmp/
shell: >
unzip /var/tmp/browsermob-proxy-{{ browsermob_proxy_version }}.zip
chdir=/var/tmp
shell: "unzip /var/tmp/browsermob-proxy-{{ browsermob_proxy_version }}.zip"
args:
chdir: "/var/tmp"
when: download_browsermob_proxy.changed
- name: move to /etc/browsermob-proxy/
shell: >
mv /var/tmp/browsermob-proxy-{{ browsermob_proxy_version }} /etc/browsermob-proxy
shell: "mv /var/tmp/browsermob-proxy-{{ browsermob_proxy_version }} /etc/browsermob-proxy"
when: download_browsermob_proxy.changed
- name: change permissions of main script
file: >
path=/etc/browsermob-proxy/bin/browsermob-proxy
mode=0755
file:
path: "/etc/browsermob-proxy/bin/browsermob-proxy"
mode: 0755
when: download_browsermob_proxy.changed
- name: add wrapper script /usr/local/bin/browsermob-proxy
copy: >
src=browsermob-proxy
dest=/usr/local/bin/browsermob-proxy
copy:
src: browsermob-proxy
dest: /usr/local/bin/browsermob-proxy
when: download_browsermob_proxy.changed
- name: change permissions of wrapper script
file: >
path=/usr/local/bin/browsermob-proxy
mode=0755
file:
path: /usr/local/bin/browsermob-proxy
mode: 0755
when: download_browsermob_proxy.changed
......@@ -8,12 +8,12 @@
- name: download browser debian packages from S3
get_url: dest="/tmp/{{ item.name }}" url="{{ item.url }}"
register: download_deb
with_items: browser_s3_deb_pkgs
with_items: "{{ browser_s3_deb_pkgs }}"
- name: install browser debian packages
shell: gdebi -nq /tmp/{{ item.name }}
when: download_deb.changed
with_items: browser_s3_deb_pkgs
with_items: "{{ browser_s3_deb_pkgs }}"
# Because the source location has been deprecated, we need to
# ensure it does not interfere with subsequent apt commands
......@@ -50,15 +50,15 @@
- "chromedriver.stat.mode == '0755'"
- name: download PhantomJS
get_url: >
url={{ phantomjs_url }}
dest=/var/tmp/{{ phantomjs_tarfile }}
get_url:
url: "{{ phantomjs_url }}"
dest: "/var/tmp/{{ phantomjs_tarfile }}"
register: download_phantom_js
- name: unpack the PhantomJS tarfile
shell: >
tar -xjf /var/tmp/{{ phantomjs_tarfile }}
chdir=/var/tmp
shell: "tar -xjf /var/tmp/{{ phantomjs_tarfile }}"
args:
chdir: "/var/tmp"
when: download_phantom_js.changed
- name: move PhantomJS binary to /usr/local
......
......@@ -30,7 +30,7 @@
file:
path: "{{ cassandra_data_dir_prefix }}/{{ item }}"
state: directory
with_items: cassandra_data_dirs
with_items: "{{ cassandra_data_dirs }}"
- name: Mount ephemeral disks
mount:
......@@ -49,7 +49,7 @@
path: "{{ cassandra_data_dir_prefix }}/{{ item }}"
owner: "{{ cassandra_user }}"
group: "{{ cassandra_group }}"
with_items: cassandra_data_dirs
with_items: "{{ cassandra_data_dirs }}"
- name: Add the datastax repository apt-key
apt_key:
......
......@@ -3,10 +3,12 @@
template:
src: "{{ item.src }}"
dest: "{{ certs_app_dir }}/{{ item.dest }}"
owner: "{{ certs_user }}"
group: "{{ common_web_user }}"
mode: "0640"
with_items:
- { src: 'certs.env.json.j2', dest: 'env.json' }
- { src: 'certs.auth.json.j2', dest: 'auth.json' }
become_user: "{{ certs_user }}"
- name: Writing supervisor script for certificates
template:
......@@ -44,7 +46,7 @@
when: CERTS_GIT_IDENTITY != "none"
- name: "Checkout certificates repo into {{ certs_code_dir }}"
git_2_0_1:
git:
dest: "{{ certs_code_dir }}"
repo: "{{ CERTS_REPO }}"
version: "{{ certs_version }}"
......@@ -56,7 +58,7 @@
when: CERTS_GIT_IDENTITY != "none"
- name: Checkout certificates repo into {{ certs_code_dir }}
git_2_0_1:
git:
dest: "{{ certs_code_dir }}"
repo: "{{ CERTS_REPO }}"
version: "{{ certs_version }}"
......
......@@ -4,3 +4,4 @@
# role depends. This is to allow sharing vars without creating
# side-effects. Any vars requred by this role should be added to
# common_vars/defaults/main.yml
#
......@@ -3,7 +3,7 @@
fail:
msg: "Configuration Sources Checking (COMMON_EXTRA_CONFIGURATION_SOURCES_CHECKING) is enabled, you must define {{ item }}"
when: COMMON_EXTRA_CONFIGURATION_SOURCES_CHECKING and ({{ item }} is not defined or {{ item }} != True)
with_items: COMMON_EXTRA_CONFIGURATION_SOURCES
with_items: "{{ COMMON_EXTRA_CONFIGURATION_SOURCES }}"
tags:
- "install"
- "install:configuration"
......
......@@ -230,7 +230,6 @@ credentials_log_dir: "{{ COMMON_LOG_DIR }}/{{ credentials_service_name }}"
credentials_requirements_base: "{{ credentials_code_dir }}/requirements"
credentials_requirements:
- production.txt
- optional.txt
#
# OS packages
......
......@@ -10,13 +10,13 @@
#
#
# Tasks for role credentials
#
#
# Overview:
#
#
#
# Dependencies:
#
#
#
# Example play:
#
#
......@@ -43,9 +43,9 @@
- install:app-requirements
- name: create nodeenv
shell: >
creates={{ credentials_nodeenv_dir }}
{{ credentials_venv_dir }}/bin/nodeenv {{ credentials_nodeenv_dir }} --prebuilt
shell: "{{ credentials_venv_dir }}/bin/nodeenv {{ credentials_nodeenv_dir }} --prebuilt"
args:
creates: "{{ credentials_nodeenv_dir }}"
become_user: "{{ credentials_user }}"
tags:
- install
......@@ -74,9 +74,12 @@
# var should have more permissive permissions than the rest
- name: create credentials var dirs
file: >
path="{{ item }}" state=directory mode=0775
owner="{{ credentials_user }}" group="{{ common_web_group }}"
file:
path: "{{ item }}"
state: directory
mode: 0775
owner: "{{ credentials_user }}"
group: "{{ common_web_group }}"
with_items:
- "{{ CREDENTIALS_MEDIA_ROOT }}"
tags:
......@@ -180,7 +183,7 @@
- assets:gather
- name: restart the application
supervisorctl:
supervisorctl:
state: restarted
supervisorctl_path: "{{ supervisor_ctl }}"
config: "{{ supervisor_cfg }}"
......@@ -192,20 +195,24 @@
- manage:start
- name: Copying nginx configs for credentials
template: >
src=edx/app/nginx/sites-available/credentials.j2
dest={{ nginx_sites_available_dir }}/credentials
owner=root group={{ common_web_user }} mode=0640
template:
src: edx/app/nginx/sites-available/credentials.j2
dest: "{{ nginx_sites_available_dir }}/credentials"
owner: root
group: "{{ common_web_user }}"
mode: 0640
notify: reload nginx
tags:
- install
- install:vhosts
- name: Creating nginx config links for credentials
file: >
src={{ nginx_sites_available_dir }}/credentials
dest={{ nginx_sites_enabled_dir }}/credentials
state=link owner=root group=root
file:
src: "{{ nginx_sites_available_dir }}/credentials"
dest: "{{ nginx_sites_enabled_dir }}/credentials"
state: link
owner: root
group: root
notify: reload nginx
tags:
- install
......
---
DATADOG_API_KEY: "SPECIFY_KEY_HERE"
datadog_agent_version: '1:5.1.1-546'
datadog_agent_version: '1:5.10.1-1'
datadog_apt_key: "0x226AE980C7A7DA52"
datadog_debian_pkgs:
......
---
- name: check out the demo course
git_2_0_1: >
dest={{ demo_code_dir }} repo={{ demo_repo }} version={{ demo_version }}
accept_hostkey=yes
git:
dest: "{{ demo_code_dir }}"
repo: "{{ demo_repo }}"
version: "{{ demo_version }}"
accept_hostkey: yes
become_user: "{{ demo_edxapp_user }}"
register: demo_checkout
- name: import demo course
shell: >
{{ demo_edxapp_venv_bin }}/python ./manage.py cms --settings=aws import {{ demo_edxapp_course_data_dir }} {{ demo_code_dir }}
chdir={{ demo_edxapp_code_dir }}
shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py cms --settings=aws import {{ demo_edxapp_course_data_dir }} {{ demo_code_dir }}"
args:
chdir: "{{ demo_edxapp_code_dir }}"
become_user: "{{ common_web_user }}"
when: demo_checkout.changed
- name: create some test users
shell: >
{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms manage_user {{ item.username}} {{ item.email }} --initial-password-hash {{ item.hashed_password | quote }}
chdir={{ demo_edxapp_code_dir }}
shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms manage_user {{ item.username}} {{ item.email }} --initial-password-hash {{ item.hashed_password | quote }}"
args:
chdir: "{{ demo_edxapp_code_dir }}"
become_user: "{{ common_web_user }}"
with_items: demo_test_users
with_items: "{{ demo_test_users }}"
when: demo_checkout.changed
- name: create staff user
shell: >
{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms manage_user staff staff@example.com --initial-password-hash {{ demo_hashed_password | quote }} --staff
chdir={{ demo_edxapp_code_dir }}
shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms manage_user staff staff@example.com --initial-password-hash {{ demo_hashed_password | quote }} --staff"
args:
chdir: "{{ demo_edxapp_code_dir }}"
become_user: "{{ common_web_user }}"
when:
- demo_checkout.changed
- DEMO_CREATE_STAFF_USER
- name: enroll test users in the demo course
shell: >
{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms enroll_user_in_course -e {{ item.email }} -c {{ demo_course_id }}
chdir={{ demo_edxapp_code_dir }}
shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms enroll_user_in_course -e {{ item.email }} -c {{ demo_course_id }}"
args:
chdir: "{{ demo_edxapp_code_dir }}"
become_user: "{{ common_web_user }}"
with_items:
- "{{ demo_test_users }}"
......@@ -43,15 +45,15 @@
- name: add test users to the certificate whitelist
shell: >
{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms cert_whitelist -a {{ item.email }} -c {{ demo_course_id }}
chdir={{ demo_edxapp_code_dir }}
with_items: demo_test_users
shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms cert_whitelist -a {{ item.email }} -c {{ demo_course_id }}"
args:
chdir: "{{ demo_edxapp_code_dir }}"
with_items: "{{ demo_test_users }}"
when: demo_checkout.changed
- name: seed the forums for the demo course
shell: >
{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws seed_permissions_roles {{ demo_course_id }}
chdir={{ demo_edxapp_code_dir }}
with_items: demo_test_users
shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws seed_permissions_roles {{ demo_course_id }}"
args:
chdir: "{{ demo_edxapp_code_dir }}"
with_items: "{{ demo_test_users }}"
when: demo_checkout.changed
......@@ -31,8 +31,10 @@
# - demo
- name: create demo app and data dirs
file: >
path="{{ demo_app_dir }}" state=directory
owner="{{ demo_edxapp_user }}" group="{{ common_web_group }}"
file:
path: "{{ demo_app_dir }}"
state: directory
owner: "{{ demo_edxapp_user }}"
group: "{{ common_web_group }}"
- include: deploy.yml tags=deploy
......@@ -77,9 +77,9 @@
- devstack:install
- name: create nodeenv
shell: >
creates={{ discovery_nodeenv_dir }}
{{ discovery_venv_dir }}/bin/nodeenv {{ discovery_nodeenv_dir }} --node={{ discovery_node_version }} --prebuilt
shell: "{{ discovery_venv_dir }}/bin/nodeenv {{ discovery_nodeenv_dir }} --node={{ discovery_node_version }} --prebuilt"
args:
creates: "{{ discovery_nodeenv_dir }}"
become_user: "{{ discovery_user }}"
tags:
- install
......@@ -94,9 +94,9 @@
- install:app-requirements
- name: install bower dependencies
shell: >
chdir={{ discovery_code_dir }}
. {{ discovery_nodeenv_bin }}/activate && {{ discovery_node_bin }}/bower install --production --config.interactive=false
shell: ". {{ discovery_nodeenv_bin }}/activate && {{ discovery_node_bin }}/bower install --production --config.interactive=false"
args:
chdir: "{{ discovery_code_dir }}"
become_user: "{{ discovery_user }}"
tags:
- install
......
......@@ -7,15 +7,28 @@ COMMAND=$1
case $COMMAND in
start)
{% set discovery_venv_bin = discovery_home + "/venvs/" + discovery_service_name + "/bin" %}
{% set discovery_venv_bin = discovery_venv_dir + "/bin" %}
{{ supervisor_venv_bin }}/supervisord --configuration {{ supervisor_cfg }}
# Needed to run bower as root. See explaination around 'discovery_user=root'
echo '{ "allow_root": true }' > /root/.bowerrc
cd /edx/app/edx_ansible/edx_ansible/docker/plays
ansible-playbook discovery.yml -c local -i '127.0.0.1,' \
-t 'install:app-requirements,assets:gather,devstack,migrate,manage:start' \
/edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook discovery.yml -c local -i '127.0.0.1,' \
-t 'install:app-requirements,assets:gather,devstack,migrate' \
--extra-vars="migrate_db=yes" \
--extra-vars="@/ansible_overrides.yml"
--extra-vars="@/ansible_overrides.yml" \
--extra-vars="discovery_user=root" # Needed when sharing the volume with the host machine because node/bower drops
# everything in the code directory by default. So we get issues with permissions
# on folders owned by the developer.
# Need to start supervisord and nginx manually because systemd is hard to run on docker
# http://developers.redhat.com/blog/2014/05/05/running-systemd-within-docker-container/
# Both daemon by default
nginx
/edx/app/supervisor/venvs/supervisor/bin/supervisord --configuration /edx/app/supervisor/supervisord.conf
# Docker requires an active foreground task. Tail the logs to appease Docker and
# provide useful output for development.
......
# Seconds an apt cache is considered fresh before update_cache re-runs.
cache_valid_time: 3600
# System packages required before the Docker apt repository can be used.
docker_tools_deps_deb_pkgs:
  - apt-transport-https
  - ca-certificates
  - python-pip
# Keyserver and key id used to trust the Docker apt repository.
docker_apt_keyserver: "hkp://ha.pool.sks-keyservers.net:80"
docker_apt_key_id: "58118E89F3A912897C070ADBF76221572C52609D"
# Apt source for docker-engine (Ubuntu Xenial).
docker_repo: "deb https://apt.dockerproject.org/repo ubuntu-xenial main"
# Unix group granted access to the Docker daemon socket, and the users
# that should be added to it.
docker_group: "docker"
docker_users: []
# Install docker-engine and docker-compose
# Add users to docker group
---
- name: add docker group
  group:
    name: "{{ docker_group }}"
  tags:
    - install
    - install:base
# Membership of this group grants access to the Docker daemon socket.
- name: add users to docker group
  user:
    name: "{{ item }}"
    groups: "{{ docker_group }}"
    append: yes
  with_items: "{{ docker_users }}"
  tags:
    - install
    - install:base
# https transport and CA certificates are needed before adding Docker's repo.
- name: install package dependencies
  apt:
    name: "{{ docker_tools_deps_deb_pkgs }}"
    update_cache: yes
    cache_valid_time: "{{ cache_valid_time }}"
  tags:
    - install
    - install:system-requirements
# Trust Docker's apt signing key before adding the repository.
- name: add docker apt key
  apt_key:
    keyserver: "{{ docker_apt_keyserver }}"
    id: "{{ docker_apt_key_id }}"
  tags:
    - install
    - install:configuration
- name: add docker repo
  apt_repository:
    repo: "{{ docker_repo }}"
  tags:
    - install
    - install:configuration
- name: install docker-engine
  apt:
    name: "docker-engine"
    update_cache: yes
    cache_valid_time: "{{ cache_valid_time }}"
  tags:
    - install
    - install:system-requirements
- name: start docker service
  service:
    name: docker
    enabled: yes
    state: started
  tags:
    - install
    - install:configuration
# docker-compose is distributed on PyPI; installed system-wide via pip.
- name: install docker-compose
  pip:
    name: "docker-compose"
  tags:
    - install
    - install:system-requirements
......@@ -21,16 +21,20 @@ ECOMMERCE_NGINX_PORT: "18130"
ECOMMERCE_SSL_NGINX_PORT: 48130
ECOMMERCE_DEFAULT_DB_NAME: 'ecommerce'
ECOMMERCE_DATABASE_USER: "ecomm001"
ECOMMERCE_DATABASE_PASSWORD: "password"
ECOMMERCE_DATABASE_HOST: "localhost"
ECOMMERCE_DATABASE_PORT: 3306
ECOMMERCE_DATABASES:
# rw user
default:
ENGINE: 'django.db.backends.mysql'
NAME: '{{ ECOMMERCE_DEFAULT_DB_NAME }}'
USER: 'ecomm001'
PASSWORD: 'password'
HOST: 'localhost'
PORT: '3306'
USER: '{{ ECOMMERCE_DATABASE_USER }}'
PASSWORD: '{{ ECOMMERCE_DATABASE_PASSWORD }}'
HOST: '{{ ECOMMERCE_DATABASE_HOST }}'
PORT: '{{ ECOMMERCE_DATABASE_PORT }}'
ATOMIC_REQUESTS: true
CONN_MAX_AGE: 60
......@@ -51,7 +55,7 @@ ECOMMERCE_JWT_DECODE_HANDLER: 'ecommerce.extensions.api.handlers.jwt_decode_hand
ECOMMERCE_JWT_ISSUERS:
- '{{ ECOMMERCE_LMS_URL_ROOT }}/oauth2'
- 'ecommerce_worker' # Must match the value of JWT_ISSUER configured for the ecommerce worker.
ECOMMERCE_JWT_LEEWAY: 1
# NOTE: We have an array of keys to allow for support of multiple when, for example,
# we change keys. This will ensure we continue to operate with JWTs issued signed with the old key
# while migrating to the new key.
......@@ -149,7 +153,7 @@ ECOMMERCE_SERVICE_CONFIG:
JWT_SECRET_KEY: '{{ ECOMMERCE_JWT_SECRET_KEY }}'
JWT_ALGORITHM: '{{ ECOMMERCE_JWT_ALGORITHM }}'
JWT_VERIFY_EXPIRATION: '{{ ECOMMERCE_JWT_VERIFY_EXPIRATION }}'
JWT_LEEWAY: 1
JWT_LEEWAY: '{{ ECOMMERCE_JWT_LEEWAY }}'
JWT_DECODE_HANDLER: '{{ ECOMMERCE_JWT_DECODE_HANDLER }}'
JWT_ISSUERS: '{{ ECOMMERCE_JWT_ISSUERS }}'
JWT_SECRET_KEYS: '{{ ECOMMERCE_JWT_SECRET_KEYS }}'
......
......@@ -84,11 +84,9 @@
- migrate:db
- name: Populate countries
shell: >
chdir={{ ecommerce_code_dir }}
DB_MIGRATION_USER={{ COMMON_MYSQL_MIGRATE_USER }}
DB_MIGRATION_PASS={{ COMMON_MYSQL_MIGRATE_PASS }}
{{ ecommerce_venv_dir }}/bin/python ./manage.py oscar_populate_countries
shell: "DB_MIGRATION_USER={{ COMMON_MYSQL_MIGRATE_USER }} DB_MIGRATION_PASS={{ COMMON_MYSQL_MIGRATE_PASS }} {{ ecommerce_venv_dir }}/bin/python ./manage.py oscar_populate_countries"
args:
chdir: "{{ ecommerce_code_dir }}"
become_user: "{{ ecommerce_user }}"
environment: "{{ ecommerce_environment }}"
when: migrate_db is defined and migrate_db|lower == "yes"
......
......@@ -16,7 +16,7 @@
virtualenv: '{{ ecommerce_worker_home }}/venvs/{{ ecommerce_worker_service_name }}'
state: present
become_user: '{{ ecommerce_worker_user }}'
with_items: ecommerce_worker_requirements
with_items: "{{ ecommerce_worker_requirements }}"
- name: write out the supervisor wrapper
template:
......
---
- name: Git checkout edx_ansible repo into edx_ansible_code_dir
git_2_0_1:
git:
dest: "{{ edx_ansible_code_dir }}"
repo: "{{ edx_ansible_source_repo }}"
version: "{{ configuration_version }}"
......
......@@ -51,7 +51,7 @@
state: present
extra_args: "--exists-action w"
become_user: "{{ edx_notes_api_user }}"
with_items: edx_notes_api_requirements
with_items: "{{ edx_notes_api_requirements }}"
- name: Migrate
shell: >
......
......@@ -16,6 +16,7 @@
#
edx_service_name: edx_service
edx_service_repos: []
#
# OS packages
#
......
......@@ -99,6 +99,7 @@
tags:
- install
- install:configuration
- install:app-configuration
- name: Install a bunch of system packages on which edx_service relies
apt:
......@@ -126,18 +127,19 @@
action: ec2_facts
tags:
- to-remove
#old syntax - should be fixed
- name: Tag instance
ec2_tag_local: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
ec2_tag_local:
args:
resource: "{{ ansible_ec2_instance_id }}"
region: "{{ ansible_ec2_placement_region }}"
tags:
- Name: version:{{ edx_service_name }}
- Name: "version:{{ edx_service_name }}"
Value: "{{ item.0.DOMAIN }}/{{ item.0.PATH }}/{{ item.0.REPO }} {{ item.1.after |truncate(7,True,'') }}"
when: item.1.after is defined and COMMON_TAG_EC2_INSTANCE and edx_service_repos is defined
with_together:
- edx_service_repos
- code_checkout.results
- "{{ edx_service_repos }}"
- "{{ code_checkout.results }}"
tags:
- to-remove
......
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role edx_service_rds
#
#
# vars are namespaced with the module name.
#
edx_service_rds_role_name: edx_service_rds
E_D_C: "prod-sample-app"
EDX_SERVICE_RDS_INSTANCE_SIZE: 10
EDX_SERVICE_RDS_INSTANCE_TYPE: "db.m1.small"
EDX_SERVICE_RDS_ROOT_USER: "root"
# Avoid non-ASCII characters (e.g. c-cedilla) in passwords.
EDX_SERVICE_RDS_ROOT_PASSWORD: "plus_ca_change"
EDX_SERVICE_RDS_ENGINE: "MySQL"
EDX_SERVICE_RDS_ENGINE_VERSION: "5.6.22"
EDX_SERVICE_RDS_PARAM_GROUP_ENGINE: "mysql5.6"
# will vary depending upon engine, examples assume
# MySQL 5.6
EDX_SERVICE_RDS_PARAM_GROUP_PARAMS:
  character_set_client: "utf8"
  character_set_connection: "utf8"
  character_set_database: "utf8"
  character_set_filesystem: "utf8"
  character_set_results: "utf8"
  character_set_server: "utf8"
  collation_connection: "utf8_unicode_ci"
  collation_server: "utf8_unicode_ci"
# Use canonical lowercase booleans; "No" is a YAML 1.1 truthy literal
# that is easy to misread and flagged by yamllint.
EDX_SERVICE_RDS_MULTI_AZ: false
EDX_SERVICE_RDS_MAINT_WINDOW: "Mon:00:00-Mon:01:15"
EDX_SERVICE_RDS_BACKUP_DAYS: 30
EDX_SERVICE_RDS_BACKUP_WINDOW: "02:00-03:00"
EDX_SERVICE_RDS_SUBNET_1_AZ: "us-east-1c"
EDX_SERVICE_RDS_SUBNET_1_CIDR: "{{ vpc_class_b }}.50.0/24"
EDX_SERVICE_RDS_SUBNET_2_AZ: "us-east-1d"
EDX_SERVICE_RDS_SUBNET_2_CIDR: "{{ vpc_class_b }}.51.0/24"
# The defaults are permissive, override.
# NOTE(review): lowercase "e_d_c" is not defined in this file (only E_D_C is);
# it is presumably supplied by the edx_vpc role's generated vpc_config — confirm.
EDX_SERVICE_RDS_SECURITY_GROUP:
  name: "{{ e_d_c }}-rds-sg"
  description: "RDS ingress and egress."
  rules:
    - proto: "tcp"
      from_port: "3306"
      to_port: "3306"
      cidr_ip: "0.0.0.0/0"
  rules_egress:
    - proto: "tcp"
      from_port: "3306"
      to_port: "3306"
      cidr_ip: "0.0.0.0/0"
# The defaults are permissive, override.
EDX_SERVICE_RDS_VPC_DB_ACL:
  name: "{{ e_d_c }}-db"
  rules:
    - number: "100"
      type: "ingress"
      protocol: "tcp"
      from_port: 3306
      to_port: 3306
      cidr_block: "0.0.0.0/0"
      rule_action: "allow"
    - number: "100"
      type: "egress"
      protocol: "all"
      from_port: 0
      to_port: 65535
      cidr_block: "0.0.0.0/0"
      rule_action: "allow"
EDX_SERVICE_RDS_VPC_DB_ROUTE_TABLE:
  - cidr: "{{ vpc_class_b }}.0.0/16"
    gateway: 'local'
# typically override the all caps vars, but may
# be convenient to override the entire structure
# if you are spanning more than two subnets
edx_service_rds_vpc_db_subnets:
  - name: "{{ E_D_C }}-db-{{ EDX_SERVICE_RDS_SUBNET_1_AZ }}"
    cidr: "{{ EDX_SERVICE_RDS_SUBNET_1_CIDR }}"
    az: "{{ EDX_SERVICE_RDS_SUBNET_1_AZ }}"
  - name: "{{ E_D_C }}-db-{{ EDX_SERVICE_RDS_SUBNET_2_AZ }}"
    cidr: "{{ EDX_SERVICE_RDS_SUBNET_2_CIDR }}"
    az: "{{ EDX_SERVICE_RDS_SUBNET_2_AZ }}"
edx_service_rds_state: "present"
# Aggregated instance description consumed by the tasks in this role.
edx_service_rds_db:
  state: "{{ edx_service_rds_state }}"
  name: "{{ E_D_C }}-primary"
  size: "{{ EDX_SERVICE_RDS_INSTANCE_SIZE }}"
  instance_type: "{{ EDX_SERVICE_RDS_INSTANCE_TYPE }}"
  root_user: "{{ EDX_SERVICE_RDS_ROOT_USER }}"
  root_password: "{{ EDX_SERVICE_RDS_ROOT_PASSWORD }}"
  engine: "{{ EDX_SERVICE_RDS_ENGINE }}"
  engine_version: "{{ EDX_SERVICE_RDS_ENGINE_VERSION }}"
  multi_az: "{{ EDX_SERVICE_RDS_MULTI_AZ }}"
  maint_window: "{{ EDX_SERVICE_RDS_MAINT_WINDOW }}"
  backup_days: "{{ EDX_SERVICE_RDS_BACKUP_DAYS }}"
  backup_window: "{{ EDX_SERVICE_RDS_BACKUP_WINDOW }}"
  param_group:
    name: "{{ E_D_C }}"
    engine: "{{ EDX_SERVICE_RDS_PARAM_GROUP_ENGINE }}"
    params: "{{ EDX_SERVICE_RDS_PARAM_GROUP_PARAMS }}"
#
# OS packages
#
edx_service_rds_debian_pkgs: []
edx_service_rds_redhat_pkgs: []
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role edx_service_rds
#
# Overview:
#
# Creates a VPC RDS instance and related network infrastructure, e.g.,
# subnets, subnet groups, acls, as well as an instance specific
# parameter group.
#
# NB: When using a boto profile other than the default, you will need
# to export AWS_PROFILE because some tasks do not properly process
# the profile argument.
#
# NB: You should currently not use this play for deleting databases as
# the final snapshot functionality doesn't work properly in the ansible
# module. First, it defaults to not taking a final snapshot, and
# second, when you specify one, it throws a KeyError.
#
# Dependencies:
#
# Assumes a working VPC, ideally created via the edx_vpc role as this
# role will produce configuration output that this role requires
# like the VPC, route table and subnet IDs.
#
# Example play:
#
# export AWS_PROFILE=sandbox
# ansible-playbook -i 'localhost,' edx_service_rds.yml -e@/path/to/secure-repo/cloud_migrations/vpcs/vpc-file.yml -e@/path/to/secure-repo/cloud_migrations/dbs/e-d-c-rds.yml
#
# TODO:
# - handle db deletes and updates
# - handle DNS updates, consider that a different profile may be required for this.
#
# Route table for the DB subnets; routes come from
# EDX_SERVICE_RDS_VPC_DB_ROUTE_TABLE (local-only by default).
# NOTE(review): ec2_rt / ec2_acl appear to be project-local modules — confirm.
- name: create database route table
  ec2_rt:
    profile: "{{ profile }}"
    vpc_id: "{{ vpc_id }}"
    region: "{{ aws_region }}"
    state: "{{ edx_service_rds_state }}"
    name: "{{ e_d_c }}-db"
    routes: "{{ EDX_SERVICE_RDS_VPC_DB_ROUTE_TABLE }}"
  # Registered so the subnet task below can attach to this route table.
  register: created_db_rt
# Network ACL applied to the DB subnets; rules come from
# EDX_SERVICE_RDS_VPC_DB_ACL (permissive by default — override in secure vars).
- name: create db network acl
  ec2_acl:
    profile: "{{ profile }}"
    name: "{{ EDX_SERVICE_RDS_VPC_DB_ACL.name }}"
    vpc_id: "{{ vpc_id }}"
    state: "{{ edx_service_rds_state }}"
    region: "{{ aws_region }}"
    rules: "{{ EDX_SERVICE_RDS_VPC_DB_ACL.rules }}"
  # Registered so the subnet task below can attach to this ACL.
  register: created_db_acl
# Create one subnet per entry in edx_service_rds_vpc_db_subnets (one per AZ),
# attached to the route table and ACL created above.
# NB: the loop variable must be quoted-and-braced; bare variable names in
# with_items are deprecated and rejected by newer Ansible releases (this
# matches the same fix applied throughout this repository).
- name: create db subnets
  ec2_subnet:
    profile: "{{ profile }}"
    vpc_id: "{{ vpc_id }}"
    region: "{{ aws_region }}"
    state: "{{ edx_service_rds_state }}"
    name: "{{ item.name }}"
    cidr: "{{ item.cidr }}"
    az: "{{ item.az }}"
    route_table_id: "{{ created_db_rt.id }}"
    network_acl_id: "{{ created_db_acl.id }}"
  with_items: "{{ edx_service_rds_vpc_db_subnets }}"
  register: created_db_subnets
# NOTE(review): util_map appears to be a project-local action module;
# presumably 'zip_to_list' extracts the "subnet_id" field of each created
# subnet into a flat list for the subnet group below — confirm against the
# repository's library/util_map implementation.
- name: Apply function to subnet data
  util_map:
    function: 'zip_to_list'
    input: "{{ created_db_subnets.results }}"
    args:
      - "subnet_id"
  register: subnet_data
# Group the DB subnets so RDS can place the instance across AZs.
# (Task was previously unnamed, which produces anonymous play output and
# violates the repo's Ansible code conventions.)
- name: create RDS subnet group
  rds_subnet_group:
    state: "{{ edx_service_rds_state }}"
    profile: "{{ profile }}"
    region: "{{ aws_region }}"
    name: "{{ e_d_c }}"
    description: "{{ e_d_c }}"
    subnets: "{{ subnet_data.function_output }}"
# Security group controlling access to the RDS instance; defaults are
# world-open (see EDX_SERVICE_RDS_SECURITY_GROUP) — override in secure vars.
- name: create RDS security group
  ec2_group:
    profile: "{{ profile }}"
    vpc_id: "{{ vpc_id }}"
    state: "{{ edx_service_rds_state }}"
    region: "{{ aws_region }}"
    name: "{{ EDX_SERVICE_RDS_SECURITY_GROUP.name }}"
    rules: "{{ EDX_SERVICE_RDS_SECURITY_GROUP.rules }}"
    description: "{{ EDX_SERVICE_RDS_SECURITY_GROUP.description }}"
    rules_egress: "{{ EDX_SERVICE_RDS_SECURITY_GROUP.rules_egress }}"
  register: created_rds_security_group
# Instance-specific DB parameter group (charset/collation etc.).
- name: create instance parameter group
  rds_param_group:
    state: "{{ edx_service_rds_state }}"
    region: "{{ aws_region }}"
    name: "{{ edx_service_rds_db.param_group.name }}"
    description: "{{ edx_service_rds_db.param_group.name }}"
    engine: "{{ edx_service_rds_db.param_group.engine }}"
    params: "{{ edx_service_rds_db.param_group.params }}"
  register: created_param_group
#
# Create the database
#
# NB: unlike the tasks above, the rds module takes no "profile" argument
# here — export AWS_PROFILE instead (see the header comment of this file).
- name: Create service database
  rds:
    command: "create"
    region: "{{ aws_region }}"
    instance_name: "{{ edx_service_rds_db.name }}"
    db_engine: "{{ edx_service_rds_db.engine }}"
    engine_version: "{{ edx_service_rds_db.engine_version }}"
    size: "{{ edx_service_rds_db.size }}"
    instance_type: "{{ edx_service_rds_db.instance_type }}"
    username: "{{ edx_service_rds_db.root_user }}"
    password: "{{ edx_service_rds_db.root_password }}"
    subnet: "{{ e_d_c }}"
    vpc_security_groups: "{{ created_rds_security_group.group_id }}"
    multi_zone: "{{ edx_service_rds_db.multi_az }}"
    maint_window: "{{ edx_service_rds_db.maint_window }}"
    backup_window: "{{ edx_service_rds_db.backup_window }}"
    backup_retention: "{{ edx_service_rds_db.backup_days }}"
    parameter_group: "{{ edx_service_rds_db.param_group.name }}"
    tags:
      Environment: "{{ env }}"
      Application: "{{ deployment }}"
  when: edx_service_rds_db.state == 'present'
  register: created_db
#
# Delete the database; the module needs debugging before this
# fully works (see the final-snapshot note in the header).
#
- name: Delete service database
  rds:
    command: "delete"
    region: "{{ aws_region }}"
    instance_name: "{{ edx_service_rds_db.name }}"
    # bug in the module related to final snapshots
    #snapshot: "{{ edx_service_rds_db.name }}-final-{{ ansible_date_time.epoch }}"
    snapshot: "red-blue"
  when: edx_service_rds_db.state == 'absent'
......@@ -50,7 +50,7 @@
shell: /bin/bash
groups: "{{ themes_group }}"
append: yes
with_items: theme_users
with_items: "{{ theme_users }}"
when: theme_users is defined
- name: update .bashrc to set umask value
......
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role edx_vpc
#
#
# vars are namespaced with the module name.
#
vpc_role_name: vpc
#
# OS packages
#
vpc_debian_pkgs: []
vpc_redhat_pkgs: []
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role edx_vpc
#
# Overview:
# This role creates an opinionated vpc for containing cluster of edx services.
#
# It currently assumes that we will be multi-az, with a single NAT, and all
# traffic going over that NAT. A public subnet, and both public and private
# route tables are created by default that can be used by new services in this
# vpc. The public subnet should house ELBs and any newly created private subnets
# can use the existing private route table to be able to reach the internet from
# private machines.
#
#
# Example play:
#
# ansible-playbook -c local -i localhost, edx_vpc.yml -e@/Users/feanil/src/edx-secure/cloud_migrations/vpcs/test.yml
# DO NOT use the subnet or route table sections of this command.
# They will delete any subnets or rts not defined here which is
# probably not what you want, since other services were added
# to the vpc whose subnets and rts are not enumerated here.
# Create the VPC itself.
# NOTE(review): ec2_vpc_local appears to be a project-local module — confirm.
# internet_gateway: yes attaches an IGW so public subnets can route out.
- name: create a vpc
  local_action:
    profile: "{{ vpc_aws_profile }}"
    module: "ec2_vpc_local"
    resource_tags: "{{ vpc_tags }}"
    cidr_block: "{{ vpc_cidr }}"
    region: "{{ vpc_aws_region }}"
    state: "{{ vpc_state }}"
    internet_gateway: yes
    wait: yes
  # Registered so later tasks can reference created_vpc.vpc_id.
  register: created_vpc
# A default network acl is created when a vpc is created so each VPC
# should have one but we create one here that allows access to the
# outside world using the internet gateway.
- name: create public network acl
  ec2_acl:
    profile: "{{ vpc_aws_profile }}"
    name: "{{ vpc_public_acl.name }}"
    vpc_id: "{{ created_vpc.vpc_id }}"
    state: "present"
    region: "{{ vpc_aws_region }}"
    rules: "{{ vpc_public_acl.rules }}"
  register: created_public_acl
# Public route table shared by the public subnets below.
- name: create public route table
  ec2_rt:
    profile: "{{ vpc_aws_profile }}"
    vpc_id: "{{ created_vpc.vpc_id }}"
    region: "{{ vpc_aws_region }}"
    state: "present"
    name: "{{ vpc_name }}-public"
    routes: "{{ vpc_public_route_table }}"
  register: created_public_rt
# Create one public subnet per entry in vpc_public_subnets, attached to the
# public route table and ACL created above.
# NB: the loop variable must be quoted-and-braced; bare variable names in
# with_items are deprecated and rejected by newer Ansible releases (this
# matches the same fix applied throughout this repository).
- name: create public subnets
  ec2_subnet:
    profile: "{{ vpc_aws_profile }}"
    vpc_id: "{{ created_vpc.vpc_id }}"
    region: "{{ vpc_aws_region }}"
    state: "present"
    name: "{{ item.name }}"
    cidr: "{{ item.cidr }}"
    az: "{{ item.az }}"
    route_table_id: "{{ created_public_rt.id }}"
    network_acl_id: "{{ created_public_acl.id }}"
  with_items: "{{ vpc_public_subnets }}"
  register: created_public_subnets
# Security group for the single NAT instance all private traffic uses.
- name: create NAT security group
  ec2_group:
    profile: "{{ vpc_aws_profile }}"
    vpc_id: "{{ created_vpc.vpc_id }}"
    state: "present"
    region: "{{ vpc_aws_region }}"
    name: "{{ nat_security_group.name }}"
    rules: "{{ nat_security_group.rules }}"
    description: "{{ nat_security_group.description }}"
    rules_egress: "{{ nat_security_group.rules_egress }}"
  register: created_nat_security_group
# NOTE(review): ec2_lookup appears to be a project-local module — confirm.
# Look up by Name tag so re-runs don't create a second NAT instance.
- name: check to see if we already have a nat instance
  local_action:
    module: "ec2_lookup"
    region: "{{ vpc_aws_region }}"
    tags:
      - Name: "{{ vpc_name }}-nat-instance"
  register: nat_instance
# source_dest_check must be disabled for an instance to act as a NAT.
- name: create nat instance
  local_action:
    module: "ec2"
    state: "present"
    wait: yes
    source_dest_check: false
    region: "{{ vpc_aws_region }}"
    profile: "{{ vpc_aws_profile }}"
    group_id: "{{ created_nat_security_group.group_id }}"
    key_name: "{{ vpc_keypair }}"
    vpc_subnet_id: "{{ created_public_subnets.results[0].subnet_id }}"
    instance_type: "{{ vpc_nat_instance_type }}"
    instance_tags:
      Name: "{{ vpc_name }}-nat-instance"
    image: "{{ vpc_nat_ami_id }}"
  register: new_nat_instance
  # Only create when the lookup above found nothing.
  when: nat_instance.instances|length == 0
# We need to do this instead of registering the output of the above
# command because if the above command get skipped, the output does
# not contain information about the instance.
- name: lookup the created nat_instance
  local_action:
    module: "ec2_lookup"
    region: "{{ vpc_aws_region }}"
    tags:
      - Name: "{{ vpc_name }}-nat-instance"
  register: nat_instance
# Give the NAT a stable public IP; only needed when we just created it.
- name: assign eip to nat
  ec2_eip:
    profile: "{{ vpc_aws_profile }}"
    region: "{{ vpc_aws_region }}"
    instance_id: "{{ nat_instance.instances[0].id }}"
    in_vpc: true
    reuse_existing_ip_allowed: true
  when: new_nat_instance.changed
# Private route table; new private subnets in this VPC reuse it to
# reach the internet via the NAT (see header comment).
- name: create private route table
  ec2_rt:
    profile: "{{ vpc_aws_profile }}"
    vpc_id: "{{ created_vpc.vpc_id }}"
    region: "{{ vpc_aws_region }}"
    state: "present"
    name: "{{ vpc_name }}-private"
    routes: "{{ vpc_private_route_table }}"
  register: created_private_rt
- name: output a vpc_config for using to build services
local_action:
module: template
src: "vpc_config.yml.j2"
dest: "~/{{ e_d }}.yml"
#
# Configuration for the environment-deployment
#
profile: "{{ vpc_aws_profile }}"
vpc_id: "{{ created_vpc.vpc_id }}"
vpc_cidr: "{{ vpc_cidr }}"
vpc_class_b: "{{ vpc_class_b }}"
env: "{{ vpc_environment }}"
deployment: "{{ vpc_deployment }}"
e_d_c: "{{ vpc_environment }}-{{ vpc_deployment }}-{{ '{{' }} cluster {{ '}}' }}"
aws_region: "{{ vpc_aws_region }}"
aws_availability_zones:
{% for subnet in vpc_public_subnets %}
- {{ subnet.az }}
{% endfor %}
# Should this be service specific
ssl_cert: "{{ vpc_ssl_cert }}"
# used for ELB
public_route_table: "{{ created_public_rt.id }}"
# used for service subnet
private_route_table: "{{ created_private_rt.id }}"
instance_key_name: "{{ vpc_keypair }}"
# subject to change #TODO: provide the correct var for the eni
nat_device: "{{ nat_instance.instances[0].id }}"
public_subnet_1: "{{ vpc_public_subnets[0].cidr }}"
public_subnet_2: "{{ vpc_public_subnets[1].cidr }}"
# /28 per AZ NEEDED?
# private_subnet_1: "{{ vpc_class_b }}.110.16/28"
# private_subnet_2: "{{ vpc_class_b }}.120.16/28"
elb_subnets:
{% for subnet in created_public_subnets.results %}
- "{{ subnet.subnet_id }}"
{% endfor %}
#
# Do not use vars in policies :(
# Should be specific to the service right?
role_policies: []
# - name: "{{ '{{ ' + 'e_d_c' + '}}' }}-s3-policy"
# document: |
# {
# "Statement":[
# {
# "Effect":"Allow",
# "Action":["s3:*"],
# "Resource":["arn:aws:s3:::edx-stage-edx"]
# }
# ]
# }
# - name: "{{ '{{ ' + 'e_d_c' + '}}' }}-create-instance-tags"
# document: |
# {
# "Statement": [
# {
# "Effect": "Allow",
# "Action": ["ec2:CreateTags"],
# "Resource": ["arn:aws:ec2:us-east-1:xxxxxxxxxxxx:instance/*"]
# }
# ]
# }
# - name: "{{ '{{ ' + 'e_d_c' + '}}' }}-describe-ec2"
# document: |
# {"Statement":[
# {"Resource":"*",
# "Action":["ec2:DescribeInstances","ec2:DescribeTags","ec2:DescribeVolumes"],
# "Effect":"Allow"}]}
......@@ -44,6 +44,7 @@ EDXAPP_AWS_ACCESS_KEY_ID: "None"
EDXAPP_AWS_SECRET_ACCESS_KEY: "None"
EDXAPP_AWS_QUERYSTRING_AUTH: false
EDXAPP_AWS_STORAGE_BUCKET_NAME: "SET-ME-PLEASE (ex. bucket-name)"
EDXAPP_IMPORT_EXPORT_BUCKET: "SET-ME-PLEASE (ex. bucket-name)"
EDXAPP_AWS_S3_CUSTOM_DOMAIN: "SET-ME-PLEASE (ex. bucket-name.s3.amazonaws.com)"
EDXAPP_SWIFT_USERNAME: "None"
EDXAPP_SWIFT_KEY: "None"
......@@ -55,7 +56,6 @@ EDXAPP_SWIFT_REGION_NAME: "None"
EDXAPP_SWIFT_USE_TEMP_URLS: false
EDXAPP_SWIFT_TEMP_URL_KEY: "None"
EDXAPP_SWIFT_TEMP_URL_DURATION: 1800 # seconds
EDXAPP_USE_SWIFT_STORAGE: false
EDXAPP_DEFAULT_FILE_STORAGE: "django.core.files.storage.FileSystemStorage"
EDXAPP_XQUEUE_BASIC_AUTH: [ "{{ COMMON_HTPASSWD_USER }}", "{{ COMMON_HTPASSWD_PASS }}" ]
EDXAPP_XQUEUE_DJANGO_AUTH:
......@@ -134,6 +134,7 @@ EDXAPP_ZENDESK_API_KEY: ""
EDXAPP_CELERY_USER: 'celery'
EDXAPP_CELERY_PASSWORD: 'celery'
EDXAPP_CELERY_BROKER_VHOST: ""
EDXAPP_CELERY_BROKER_USE_SSL: false
EDXAPP_VIDEO_CDN_URLS:
EXAMPLE_COUNTRY_CODE: "http://example.com/edx/video?s3_url="
......@@ -498,8 +499,8 @@ EDXAPP_CELERY_WORKERS:
monitor: False
max_tasks_per_child: 1
EDXAPP_RECALCULATE_GRADES_ROUTING_KEY: 'edx.lms.core.default'
EDXAPP_LMS_CELERY_QUEUES: "{{ edxapp_workers|selectattr('service_variant', 'equalto', 'lms')|map(attribute='queue')|map('regex_replace', '(.*)', 'edx.lms.core.\\\\1')|list }}"
EDXAPP_CMS_CELERY_QUEUES: "{{ edxapp_workers|selectattr('service_variant', 'equalto', 'cms')|map(attribute='queue')|map('regex_replace', '(.*)', 'edx.cms.core.\\\\1')|list }}"
EDXAPP_LMS_CELERY_QUEUES: "{{ edxapp_workers|selectattr('service_variant', 'equalto', 'lms')|map(attribute='queue')|map('regex_replace', '(.*)', 'edx.lms.core.\\1')|list }}"
EDXAPP_CMS_CELERY_QUEUES: "{{ edxapp_workers|selectattr('service_variant', 'equalto', 'cms')|map(attribute='queue')|map('regex_replace', '(.*)', 'edx.cms.core.\\1')|list }}"
EDXAPP_DEFAULT_CACHE_VERSION: "1"
EDXAPP_OAUTH_ENFORCE_SECURE: True
......@@ -639,10 +640,12 @@ edxapp_venvs_dir: "{{ edxapp_app_dir }}/venvs"
edxapp_venv_dir: "{{ edxapp_venvs_dir }}/edxapp"
edxapp_venv_bin: "{{ edxapp_venv_dir }}/bin"
edxapp_nodeenv_dir: "{{ edxapp_app_dir }}/nodeenvs/edxapp"
edxapp_node_bin: "{{ edxapp_nodeenv_dir }}/bin"
edxapp_node_version: "0.10.37"
edxapp_nodeenv_bin: "{{ edxapp_nodeenv_dir }}/bin"
edxapp_node_version: "6.9.2"
# This is where node installs modules, not node itself
edxapp_node_bin: "{{ edxapp_code_dir }}/node_modules/.bin"
edxapp_user: edxapp
edxapp_deploy_path: "{{ edxapp_venv_bin }}:{{ edxapp_code_dir }}/bin:{{ edxapp_node_bin }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
edxapp_deploy_path: "{{ edxapp_venv_bin }}:{{ edxapp_code_dir }}/bin:{{ edxapp_node_bin }}:{{ edxapp_nodeenv_bin }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
edxapp_staticfile_dir: "{{ edxapp_data_dir }}/staticfiles"
edxapp_media_dir: "{{ edxapp_data_dir }}/media"
edxapp_course_static_dir: "{{ edxapp_data_dir }}/course_static"
......@@ -799,8 +802,6 @@ edxapp_generic_auth_config: &edxapp_generic_auth
generic_cache_config: &default_generic_cache
BACKEND: 'django.core.cache.backends.memcached.MemcachedCache'
KEY_FUNCTION: 'util.memcache.safe_key'
KEY_PREFIX: 'default'
LOCATION: "{{ EDXAPP_MEMCACHE }}"
generic_env_config: &edxapp_generic_env
ECOMMERCE_PUBLIC_URL_ROOT: "{{ EDXAPP_ECOMMERCE_PUBLIC_URL_ROOT }}"
......@@ -821,6 +822,7 @@ generic_env_config: &edxapp_generic_env
ANALYTICS_DATA_URL: "{{ EDXAPP_ANALYTICS_DATA_URL }}"
ANALYTICS_DASHBOARD_URL: '{{ EDXAPP_ANALYTICS_DASHBOARD_URL }}'
CELERY_BROKER_VHOST: "{{ EDXAPP_CELERY_BROKER_VHOST }}"
CELERY_BROKER_USE_SSL: "{{ EDXAPP_CELERY_BROKER_USE_SSL }}"
PAYMENT_SUPPORT_EMAIL: "{{ EDXAPP_PAYMENT_SUPPORT_EMAIL }}"
ZENDESK_URL: "{{ EDXAPP_ZENDESK_URL }}"
COURSES_WITH_UNSAFE_CODE: "{{ EDXAPP_COURSES_WITH_UNSAFE_CODE }}"
......@@ -884,23 +886,29 @@ generic_env_config: &edxapp_generic_env
default:
<<: *default_generic_cache
KEY_PREFIX: 'default'
LOCATION: "{{ EDXAPP_MEMCACHE }}"
VERSION: "{{ EDXAPP_DEFAULT_CACHE_VERSION }}"
general:
<<: *default_generic_cache
KEY_PREFIX: 'general'
LOCATION: "{{ EDXAPP_MEMCACHE }}"
mongo_metadata_inheritance:
<<: *default_generic_cache
KEY_PREFIX: 'mongo_metadata_inheritance'
TIMEOUT: 300
LOCATION: "{{ EDXAPP_MEMCACHE }}"
staticfiles:
<<: *default_generic_cache
KEY_PREFIX: "{{ ansible_hostname|default('staticfiles') }}_general"
LOCATION: "{{ EDXAPP_MEMCACHE }}"
configuration:
<<: *default_generic_cache
KEY_PREFIX: "{{ ansible_hostname|default('configuration') }}"
LOCATION: "{{ EDXAPP_MEMCACHE }}"
celery:
<<: *default_generic_cache
KEY_PREFIX: 'celery'
LOCATION: "{{ EDXAPP_MEMCACHE }}"
TIMEOUT: "7200"
course_structure_cache:
<<: *default_generic_cache
......@@ -1029,6 +1037,7 @@ lms_env_config:
DOC_LINK_BASE_URL: "{{ EDXAPP_LMS_DOC_LINK_BASE_URL }}"
RECALCULATE_GRADES_ROUTING_KEY: "{{ EDXAPP_RECALCULATE_GRADES_ROUTING_KEY }}"
CELERY_QUEUES: "{{ EDXAPP_LMS_CELERY_QUEUES }}"
ALTERNATE_WORKER_QUEUES: "cms"
cms_auth_config:
<<: *edxapp_generic_auth
......@@ -1060,6 +1069,8 @@ cms_env_config:
GIT_REPO_EXPORT_DIR: "{{ EDXAPP_GIT_REPO_EXPORT_DIR }}"
DOC_LINK_BASE_URL: "{{ EDXAPP_CMS_DOC_LINK_BASE_URL }}"
CELERY_QUEUES: "{{ EDXAPP_CMS_CELERY_QUEUES }}"
ALTERNATE_WORKER_QUEUES: "lms"
COURSE_IMPORT_EXPORT_BUCKET: "{{ EDXAPP_IMPORT_EXPORT_BUCKET }}"
# install dir for the edx-platform repo
edxapp_code_dir: "{{ edxapp_app_dir }}/edx-platform"
......
......@@ -8,5 +8,3 @@ dependencies:
theme_users:
- "{{ edxapp_user }}"
when: "{{ EDXAPP_ENABLE_COMPREHENSIVE_THEMING }}"
- role: openstack
when: "{{ EDXAPP_USE_SWIFT_STORAGE }}"
......@@ -45,7 +45,7 @@
# Do A Checkout
- name: checkout edx-platform repo into {{ edxapp_code_dir }}
git_2_0_1:
git:
dest: "{{ edxapp_code_dir }}"
repo: "{{ edx_platform_repo }}"
version: "{{ edx_platform_version }}"
......@@ -72,7 +72,7 @@
# (yes, lowercase) to a Stanford-style theme and set
# edxapp_theme_name (again, lowercase) to its name.
- name: checkout Stanford-style theme
git_2_0_1:
git:
dest: "{{ edxapp_app_dir }}/themes/{{ edxapp_theme_name }}"
repo: "{{ edxapp_theme_source_repo }}"
version: "{{ edxapp_theme_version }}"
......@@ -110,10 +110,10 @@
- install:app-requirements
- name: Create the virtualenv to install the Python requirements
command: >
virtualenv {{ edxapp_venv_dir }}
chdir={{ edxapp_code_dir }}
creates={{ edxapp_venv_dir }}/bin/pip
command: "virtualenv {{ edxapp_venv_dir }}"
args:
chdir: "{{ edxapp_code_dir }}"
creates: "{{ edxapp_venv_dir }}/bin/pip"
become_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}"
tags:
......@@ -134,9 +134,9 @@
# Need to use command rather than pip so that we can maintain the context of our current working directory; some
# requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
# installs everything into that virtual environment.
command: >
{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item.item }}
chdir={{ edxapp_code_dir }}
command: "{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item.item }}"
args:
chdir: "{{ edxapp_code_dir }}"
become_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}"
when: item.stat.exists
......@@ -151,9 +151,9 @@
# Need to use shell rather than pip so that we can maintain the context of our current working directory; some
# requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
# installs everything into that virtual environment.
shell: >
{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item }}
chdir={{ edxapp_code_dir }}
shell: "{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item }}"
args:
chdir: "{{ edxapp_code_dir }}"
with_items:
- "{{ private_requirements_file }}"
become_user: "{{ edxapp_user }}"
......@@ -172,7 +172,7 @@
extra_args: "--exists-action w {{ item.extra_args|default('') }}"
virtualenv: "{{ edxapp_venv_dir }}"
state: present
with_items: EDXAPP_EXTRA_REQUIREMENTS
with_items: "{{ EDXAPP_EXTRA_REQUIREMENTS }}"
become_user: "{{ edxapp_user }}"
tags:
- install
......@@ -197,9 +197,9 @@
# Need to use shell rather than pip so that we can maintain the context of our current working directory; some
# requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
# installs everything into that virtual environment.
shell: >
{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item }}
chdir={{ edxapp_code_dir }}
shell: "{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item }}"
args:
chdir: "{{ edxapp_code_dir }}"
with_items:
- "{{ sandbox_base_requirements }}"
- "{{ sandbox_local_requirements }}"
......@@ -211,8 +211,7 @@
- install:app-requirements
- name: create nodeenv
shell: >
{{ edxapp_venv_dir }}/bin/nodeenv {{ edxapp_nodeenv_dir }} --node={{ edxapp_node_version }} --prebuilt
shell: "{{ edxapp_venv_dir }}/bin/nodeenv {{ edxapp_nodeenv_dir }} --node={{ edxapp_node_version }} --prebuilt"
args:
creates: "{{ edxapp_nodeenv_dir }}"
tags:
......@@ -223,8 +222,7 @@
# This needs to be done as root since npm is weird about
# chown - https://github.com/npm/npm/issues/3565
- name: Set the npm registry
shell: >
npm config set registry '{{ COMMON_NPM_MIRROR_URL }}'
shell: "npm config set registry '{{ COMMON_NPM_MIRROR_URL }}'"
args:
creates: "{{ edxapp_app_dir }}/.npmrc"
environment: "{{ edxapp_environment }}"
......@@ -244,7 +242,7 @@
- name: install node dependencies
npm:
executable: "{{ edxapp_node_bin }}/npm"
executable: "{{ edxapp_nodeenv_bin }}/npm"
path: "{{ edxapp_code_dir }}"
production: yes
environment: "{{ edxapp_environment }}"
......@@ -279,9 +277,9 @@
- install:app-requirements
- name: code sandbox | Install sandbox requirements into sandbox venv
shell: >
{{ edxapp_sandbox_venv_dir }}/bin/pip install -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item }}
chdir={{ edxapp_code_dir }}
shell: "{{ edxapp_sandbox_venv_dir }}/bin/pip install -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item }}"
args:
chdir: "{{ edxapp_code_dir }}"
with_items:
- "{{ sandbox_local_requirements }}"
- "{{ sandbox_post_requirements }}"
......
......@@ -3,27 +3,35 @@
template:
src: "{{ item[0] }}.{{ item[1] }}.json.j2"
dest: "{{ edxapp_app_dir }}/{{ item[0] }}.{{ item[1] }}.json"
become_user: "{{ edxapp_user }}"
with_nested:
owner: "{{ edxapp_user }}"
group: "{{ common_web_group }}"
mode: 0640
become: true
with_nested:
- "{{ service_variants_enabled }}"
- [ 'env', 'auth' ]
tags:
- install
- install:configuration
- edxapp_cfg
- install:app-configuration
- edxapp_cfg # Old deprecated tag, will remove when possible
- name: create auth and application yaml config
template:
src: "{{ item[0] }}.{{ item[1] }}.yaml.j2"
dest: "{{ EDXAPP_CFG_DIR }}/{{ item[0] }}.{{ item[1] }}.yaml"
become_user: "{{ edxapp_user }}"
owner: "{{ edxapp_user }}"
group: "{{ common_web_group }}"
mode: 0640
become: true
with_nested:
- "{{ service_variants_enabled }}"
- [ 'env', 'auth' ]
tags:
- install
- install:configuration
- edxapp_cfg
- install:app-configuration
- edxapp_cfg # Old deprecated tag, will remove when possible
# write the supervisor scripts for the service variants
- name: "writing {{ item }} supervisor script"
......@@ -32,6 +40,7 @@
dest: "{{ supervisor_available_dir }}/{{ item }}.conf"
owner: "{{ supervisor_user }}"
group: "{{ supervisor_user }}"
mode: 0644
become_user: "{{ supervisor_user }}"
with_items: "{{ service_variants_enabled }}"
tags:
......@@ -45,6 +54,7 @@
dest: "{{ supervisor_available_dir }}/{{ item }}"
owner: "{{ supervisor_user }}"
group: "{{ supervisor_user }}"
mode: 0644
become_user: "{{ supervisor_user }}"
with_items:
- edxapp.conf
......@@ -57,6 +67,7 @@
template:
src: "{{ item }}_gunicorn.py.j2"
dest: "{{ edxapp_app_dir }}/{{ item }}_gunicorn.py"
mode: 0644
become_user: "{{ edxapp_user }}"
with_items: "{{ service_variants_enabled }}"
tags:
......
......@@ -10,7 +10,7 @@ command={{ executable }} -c {{ edxapp_app_dir }}/cms_gunicorn.py {{ EDXAPP_CMS_G
user={{ common_web_user }}
directory={{ edxapp_code_dir }}
environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_CMS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}PORT={{ edxapp_cms_gunicorn_port }},ADDRESS={{ edxapp_cms_gunicorn_host }},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ EDXAPP_CMS_ENV }},SERVICE_VARIANT="cms",ALTERNATE_WORKER_QUEUES="lms"
environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_CMS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}PORT={{ edxapp_cms_gunicorn_port }},ADDRESS={{ edxapp_cms_gunicorn_host }},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ EDXAPP_CMS_ENV }},SERVICE_VARIANT="cms"
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
killasgroup=true
......
......@@ -10,7 +10,7 @@ command={{ executable }} -c {{ edxapp_app_dir }}/lms_gunicorn.py lms.wsgi
user={{ common_web_user }}
directory={{ edxapp_code_dir }}
environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_LMS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},NEW_RELIC_CONFIG_FILE={{ edxapp_app_dir }}/newrelic.ini,{% endif -%} PORT={{ edxapp_lms_gunicorn_port }},ADDRESS={{ edxapp_lms_gunicorn_host }},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ EDXAPP_LMS_ENV }},SERVICE_VARIANT="lms",ALTERNATE_WORKER_QUEUES="cms",PATH="{{ edxapp_deploy_path }}"
environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_LMS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},NEW_RELIC_CONFIG_FILE={{ edxapp_app_dir }}/newrelic.ini,{% endif -%} PORT={{ edxapp_lms_gunicorn_port }},ADDRESS={{ edxapp_lms_gunicorn_host }},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ EDXAPP_LMS_ENV }},SERVICE_VARIANT="lms",PATH="{{ edxapp_deploy_path }}"
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
killasgroup=true
......
......@@ -19,13 +19,13 @@ edxlocal_databases:
edxlocal_database_users:
- {
db: "{{ ECOMMERCE_DEFAULT_DB_NAME | default(None) }}",
user: "{{ ECOMMERCE_DATABASES.default.USER | default(None) }}",
pass: "{{ ECOMMERCE_DATABASES.default.PASSWORD | default(None) }}"
user: "{{ ECOMMERCE_DATABASE_USER | default(None) }}",
pass: "{{ ECOMMERCE_DATABASE_PASSWORD | default(None) }}"
}
- {
db: "{{ INSIGHTS_DATABASE_NAME | default(None) }}",
user: "{{ INSIGHTS_DATABASES.default.USER | default(None) }}",
pass: "{{ INSIGHTS_DATABASES.default.PASSWORD | default(None) }}"
user: "{{ INSIGHTS_MYSQL_USER | default(None) }}",
pass: "{{ INSIGHTS_MYSQL_USER | default(None) }}"
}
- {
db: "{{ XQUEUE_MYSQL_DB_NAME | default(None) }}",
......@@ -44,18 +44,18 @@ edxlocal_database_users:
}
- {
db: "{{ PROGRAMS_DEFAULT_DB_NAME | default(None) }}",
user: "{{ PROGRAMS_DATABASES.default.USER | default(None) }}",
pass: "{{ PROGRAMS_DATABASES.default.PASSWORD | default(None) }}"
user: "{{ PROGRAMS_DATABASE_USER | default(None) }}",
pass: "{{ PROGRAMS_DATABASE_PASSWORD | default(None) }}"
}
- {
db: "{{ ANALYTICS_PIPELINE_OUTPUT_DATABASE_NAME | default(None) }}",
user: "{{ ANALYTICS_PIPELINE_OUTPUT_DATABASE.username }}",
pass: "{{ ANALYTICS_PIPELINE_OUTPUT_DATABASE.password }}"
user: "{{ ANALYTICS_PIPELINE_OUTPUT_DATABASE_USER | default(None) }}",
pass: "{{ ANALYTICS_PIPELINE_OUTPUT_DATABASE_PASSWORD | default(None) }}"
}
- {
db: "{{ HIVE_METASTORE_DATABASE_NAME | default(None) }}",
user: "{{ HIVE_METASTORE_DATABASE.user | default(None) }}",
pass: "{{ HIVE_METASTORE_DATABASE.password | default(None) }}"
user: "{{ HIVE_METASTORE_DATABASE_USER | default(None) }}",
pass: "{{ HIVE_METASTORE_DATABASE_PASSWORD | default(None) }}"
}
- {
db: "{{ CREDENTIALS_DEFAULT_DB_NAME | default(None) }}",
......
......@@ -21,30 +21,27 @@
#
#
- name: download elasticsearch plugin
shell: >
./npi fetch {{ ELASTICSEARCH_MONITOR_PLUGIN }} -y
shell: "./npi fetch {{ ELASTICSEARCH_MONITOR_PLUGIN }} -y"
args:
chdir: "{{ NEWRELIC_NPI_PREFIX }}"
creates: "{{ NEWRELIC_NPI_PREFIX }}/plugins/{{ ELASTICSEARCH_MONITOR_PLUGIN }}.compressed"
become_user: "{{ NEWRELIC_USER }}"
- name: prepare elasticsearch plugin
shell: >
./npi prepare {{ ELASTICSEARCH_MONITOR_PLUGIN }} -n
shell: "./npi prepare {{ ELASTICSEARCH_MONITOR_PLUGIN }} -n"
args:
chdir: "{{ NEWRELIC_NPI_PREFIX }}"
become_user: "{{ NEWRELIC_USER }}"
- name: configure elasticsearch plugin
template: >
src=plugins/me.snov.newrelic-elasticsearch/newrelic-elasticsearch-plugin-1.4.1/config/plugin.json.j2
dest={{ NEWRELIC_NPI_PREFIX }}/plugins/{{ ELASTICSEARCH_MONITOR_PLUGIN }}/newrelic-elasticsearch-plugin-1.4.1/config/plugin.json
owner={{ NEWRELIC_USER }}
mode=0644
template:
src: "plugins/me.snov.newrelic-elasticsearch/newrelic-elasticsearch-plugin-1.4.1/config/plugin.json.j2"
dest: "{{ NEWRELIC_NPI_PREFIX }}/plugins/{{ ELASTICSEARCH_MONITOR_PLUGIN }}/newrelic-elasticsearch-plugin-1.4.1/config/plugin.json"
owner: "{{ NEWRELIC_USER }}"
mode: 0644
- name: register/start elasticsearch plugin
shell: >
./npi add-service {{ ELASTICSEARCH_MONITOR_PLUGIN }} --start --user={{ NEWRELIC_USER }}
shell: "./npi add-service {{ ELASTICSEARCH_MONITOR_PLUGIN }} --start --user={{ NEWRELIC_USER }}"
args:
chdir: "{{ NEWRELIC_NPI_PREFIX }}"
become_user: "root"
......
......@@ -27,7 +27,7 @@
- name: Test for enhanced networking
local_action:
module: shell aws --profile {{ profile }} ec2 describe-instance-attribute --instance-id {{ ansible_ec2_instance_id }} --attribute sriovNetSupport
module: shell aws ec2 describe-instance-attribute --instance-id {{ ansible_ec2_instance_id }} --attribute sriovNetSupport
changed_when: False
become: False
register: enhanced_networking_raw
......@@ -56,7 +56,7 @@
- name: Set enhanced networking instance attribute
local_action:
module: shell aws --profile {{ profile }} ec2 modify-instance-attribute --instance-id {{ ansible_ec2_instance_id }} --sriov-net-support simple
module: shell aws ec2 modify-instance-attribute --instance-id {{ ansible_ec2_instance_id }} --sriov-net-support simple
when: supports_enhanced_networking and has_ixgbevf_kernel_module and not enhanced_networking_already_on
- name: Start instances
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment