Commit 9e11e5b8 by Feanil Patel, committed by GitHub

Merge pull request #3108 from edx/feanil/ansible_2_x

Update to ansible 2.x
parents b338caa4 74bb95fa
# Travis CI configuration file for running tests
language: python
branches:
only:
- master
python:
- "2.7"
......
......@@ -26,7 +26,9 @@ test: docker.test
pkg: docker.pkg
clean:
clean: docker.clean
docker.clean:
rm -rf .build
docker.test.shard: $(foreach image,$(shell echo $(images) | python util/balancecontainers.py $(SHARDS) | awk 'NR%$(SHARDS)==$(SHARD)'),$(docker_test)$(image))
......
......@@ -42,8 +42,8 @@ RUN apt-get update && apt-get install -y \
php5-common \
php5-cli
# Install libffi-dev - a dependency for Ansible 2.x
RUN apt-get update && apt-get install -y libffi-dev
# Install dependencies needed for Ansible 2.x
RUN apt-get update && apt-get install -y libffi-dev libssl-dev
# Install drush (drupal shell) for access to Drupal commands/Acquia
RUN php -r "readfile('http://files.drush.org/drush.phar');" > drush && \
......@@ -59,7 +59,19 @@ RUN /bin/bash /tmp/docker/docker_install.sh
RUN usermod -aG docker go
# Assign the go user root privileges
RUN printf "\ngo ALL=(ALL:ALL) NOPASSWD: /usr/bin/pip\n" >> /etc/sudoers
RUN printf "\ngo ALL=(ALL:ALL) NOPASSWD: /usr/bin/pip, /usr/local/bin/pip\n" >> /etc/sudoers
# Upgrade pip and setup tools. Needed for Ansible 2.x
# Must upgrade to latest before pinning to work around bug
# https://github.com/pypa/pip/issues/3862
RUN \
pip install --upgrade pip && \
# pip may have moved from /usr/bin/ to /usr/local/bin/. This clears bash's path cache.
hash -r && \
pip install --upgrade pip==8.1.2 && \
# Upgrade setuptools early to avoid "no distribution found" errors
pip install --upgrade setuptools==24.0.3
# Install AWS command-line interface - for AWS operations in a go-agent task.
RUN pip install awscli
......
......@@ -9,9 +9,10 @@ try:
import hipchat
except ImportError:
hipchat = None
from ansible.plugins.callback import CallbackBase
class CallbackModule(object):
class CallbackModule(CallbackBase):
"""Send status updates to a HipChat channel during playbook execution.
This plugin makes use of the following environment variables:
......
......@@ -28,9 +28,10 @@ except ImportError:
else:
import boto.sqs
from boto.exception import NoAuthHandlerFound
from ansible.plugins.callback import CallbackBase
class CallbackModule(object):
class CallbackModule(CallbackBase):
"""
This Ansible callback plugin sends task events
to SQS.
......
......@@ -238,7 +238,7 @@ class CallbackModule(CallbackBase):
Record the start of a play.
"""
self.playbook_name, _ = splitext(
basename(self.play.playbook.filename)
basename(self.play.get_name())
)
self.playbook_timestamp = Timestamp()
......
......@@ -61,7 +61,7 @@
shell: '{{ COMMAND_PREFIX }} {{ SUB_APPLICATION_NAME }} show_unapplied_migrations --database "{{ item }}" --output_file "{{ temp_output_dir.stdout }}/{{ item }}_{{ migration_plan }}" --settings "{{ EDX_PLATFORM_SETTINGS }}"'
become_user: "{{ APPLICATION_USER }}"
when: APPLICATION_NAME == "edxapp" and item != "read_replica"
with_items: edxapp_databases.keys()
with_items: "{{ edxapp_databases.keys() }}"
- name: migrate to apply any unapplied migrations
shell: '{{ COMMAND_PREFIX }} run_migrations --output_file "{{ temp_output_dir.stdout }}/{{ migration_result }}"'
......@@ -72,7 +72,7 @@
shell: '{{ COMMAND_PREFIX }} {{ SUB_APPLICATION_NAME }} run_migrations --database "{{ item }}" --settings "{{ EDX_PLATFORM_SETTINGS }}" --output_file "{{ temp_output_dir.stdout }}/{{ migration_result }}"'
become_user: "{{ APPLICATION_USER }}"
when: APPLICATION_NAME == "edxapp" and item != "read_replica"
with_items: edxapp_databases.keys()
with_items: "{{ edxapp_databases.keys() }}"
- name: List all migration files
action: "command ls -1 {{ temp_output_dir.stdout }}"
......
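The `with_items` edits here, repeated throughout this commit, follow Ansible 2.x deprecating bare variable references in loop directives: the loop source must now be a full Jinja expression. A minimal sketch (hypothetical play, not from this repo):

    - hosts: localhost
      gather_facts: no
      vars:
        edxapp_databases:
          default: {}
          read_replica: {}
      tasks:
        # pre-2.x bare form, now deprecated: with_items: edxapp_databases.keys()
        - debug:
            msg: "would check {{ item }}"
          with_items: "{{ edxapp_databases.keys() }}"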
......@@ -13,25 +13,27 @@
keyfile: "/home/{{ owner }}/.ssh/authorized_keys"
serial: "{{ serial_count }}"
tasks:
- fail: msg="You must pass in a public_key"
- fail:
msg: "You must pass in a public_key"
when: public_key is not defined
- fail: msg="public does not exist in secrets"
- fail:
msg: "public does not exist in secrets"
when: ubuntu_public_keys[public_key] is not defined
- command: mktemp
register: mktemp
- name: Validate the public key before we add it to authorized_keys
copy: >
content="{{ ubuntu_public_keys[public_key] }}"
dest={{ mktemp.stdout }}
copy:
content: "{{ ubuntu_public_keys[public_key] }}"
dest: "{{ mktemp.stdout }}"
# This tests the public key and will not continue if it does not look valid
- command: ssh-keygen -l -f {{ mktemp.stdout }}
- file: >
path={{ mktemp.stdout }}
state=absent
- lineinfile: >
dest={{ keyfile }}
line="{{ ubuntu_public_keys[public_key] }}"
- file: >
path={{ keyfile }}
owner={{ owner }}
mode=0600
- file:
path: "{{ mktemp.stdout }}"
state: absent
- lineinfile:
dest: "{{ keyfile }}"
line: "{{ ubuntu_public_keys[public_key] }}"
- file:
path: "{{ keyfile }}"
owner: "{{ owner }}"
mode: 0600
......@@ -14,7 +14,8 @@
serial: "{{ serial_count }}"
pre_tasks:
- action: ec2_facts
- debug: var="{{ ansible_ec2_instance_id }}"
- debug:
var: "{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb
......@@ -26,8 +27,9 @@
become: False
when: elb_pre_post
tasks:
- debug: msg="{{ ansible_ec2_local_ipv4 }}"
with_items: list.results
- debug:
var: "{{ ansible_ec2_local_ipv4 }}"
with_items: "{{ list.results }}"
- command: rabbitmqctl stop_app
- command: rabbitmqctl join_cluster rabbit@ip-{{ hostvars.keys()[0]|replace('.', '-') }}
when: hostvars.keys()[0] != ansible_ec2_local_ipv4
......@@ -39,10 +41,9 @@
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
ec2_elbs: "{{ ec2_elbs }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
become: False
when: elb_pre_post
......@@ -47,11 +47,10 @@
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
ec2_elbs: "{{ ec2_elbs }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
become: False
when: elb_pre_post
#
......
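These ELB hunks drop the per-item loop and hand the module the whole list: `ec2_elbs` accepts a list of load balancer names, and the bare `with_items: ec2_elbs` form is deprecated under 2.x anyway. A reduced sketch of the resulting task (names as in the plays above):

    - name: Register instance in the elb
      local_action: ec2_elb
      args:
        instance_id: "{{ ansible_ec2_instance_id }}"
        ec2_elbs: "{{ ec2_elbs }}"  # a list; the module registers against each ELB itself
        region: us-east-1
        state: present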
......@@ -13,9 +13,9 @@
# is called it will use the new MYSQL connection
# info.
- name: Update RDS to point to the sandbox clone
lineinfile: >
dest=/edx/app/edx_ansible/server-vars.yml
line="{{ item }}"
lineinfile:
dest: /edx/app/edx_ansible/server-vars.yml
line: "{{ item }}"
with_items:
- "EDXAPP_MYSQL_HOST: {{ EDXAPP_MYSQL_HOST }}"
- "EDXAPP_MYSQL_DB_NAME: {{ EDXAPP_MYSQL_DB_NAME }}"
......@@ -24,9 +24,9 @@
tags: update_edxapp_mysql_host
- name: Update mongo to point to the sandbox mongo clone
lineinfile: >
dest=/edx/app/edx_ansible/server-vars.yml
line="{{ item }}"
lineinfile:
dest: /edx/app/edx_ansible/server-vars.yml
line: "{{ item }}"
with_items:
- "EDXAPP_MONGO_HOSTS: {{ EDXAPP_MONGO_HOSTS }}"
- "EDXAPP_MONGO_DB_NAME: {{ EDXAPP_MONGO_DB_NAME }}"
......@@ -35,6 +35,5 @@
tags: update_edxapp_mysql_host
- name: call update on edx-platform
shell: >
/edx/bin/update edx-platform {{ edxapp_version }}
shell: "/edx/bin/update edx-platform {{ edxapp_version }}"
tags: update_edxapp_mysql_host
......@@ -53,27 +53,27 @@
- MySQL-python
- name: create mysql databases
mysql_db: >
db={{ item.name}}
state={{ item.state }}
encoding={{ item.encoding }}
login_host={{ item.login_host }}
login_user={{ item.login_user }}
login_password={{ item.login_password }}
with_items: databases
mysql_db:
db: "{{ item.name}}"
state: "{{ item.state }}"
encoding: "{{ item.encoding }}"
login_host: "{{ item.login_host }}"
login_user: "{{ item.login_user }}"
login_password: "{{ item.login_password }}"
with_items: "{{ databases }}"
tags:
- dbs
- name: create mysql users and assign privileges
mysql_user: >
name="{{ item.name }}"
priv="{{ '/'.join(item.privileges) }}"
password="{{ item.password }}"
host={{ item.host }}
login_host={{ item.login_host }}
login_user={{ item.login_user }}
login_password={{ item.login_password }}
append_privs=yes
with_items: database_users
mysql_user:
name: "{{ item.name }}"
priv: "{{ '/'.join(item.privileges) }}"
password: "{{ item.password }}"
host: "{{ item.host }}"
login_host: "{{ item.login_host }}"
login_user: "{{ item.login_user }}"
login_password: "{{ item.login_password }}"
append_privs: yes
with_items: "{{ database_users }}"
tags:
- users
......@@ -41,4 +41,4 @@
roles: "{{ item.roles }}"
state: present
replica_set: "{{ repl_set }}"
with_items: MONGO_USERS
with_items: "{{ MONGO_USERS }}"
......@@ -21,7 +21,7 @@
dns_zone: sandbox.edx.org
name_tag: sandbox-temp
elb: false
vpc_subnet_id: subnet-cd867aba
ec2_vpc_subnet_id: subnet-cd867aba
roles:
- role: launch_ec2
keypair: "{{ keypair }}"
......@@ -34,7 +34,7 @@
dns_name: "{{ dns_name }}"
dns_zone: "{{ dns_zone }}"
zone: "{{ zone }}"
vpc_subnet_id: "{{ vpc_subnet_id }}"
vpc_subnet_id: "{{ ec2_vpc_subnet_id }}"
assign_public_ip: yes
terminate_instance: true
instance_profile_name: sandbox
......@@ -47,10 +47,10 @@
elb: false
pre_tasks:
- name: Wait for cloud-init to finish
wait_for: >
path=/var/log/cloud-init.log
timeout=15
search_regex="final-message"
wait_for:
path: /var/log/cloud-init.log
timeout: 15
search_regex: "final-message"
vars_files:
- roles/edxapp/defaults/main.yml
- roles/xqueue/defaults/main.yml
......
......@@ -8,9 +8,9 @@
- edxapp
tasks:
- name: migrate lms
shell: >
chdir={{ edxapp_code_dir }}
python manage.py lms migrate --database {{ item }} --noinput {{ db_dry_run }} --settings=aws
shell: "python manage.py lms migrate --database {{ item }} --noinput {{ db_dry_run }} --settings=aws"
args:
chdir: "{{ edxapp_code_dir }}"
environment:
DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}"
DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
......@@ -21,9 +21,9 @@
tags:
- always
- name: migrate cms
shell: >
chdir={{ edxapp_code_dir }}
python manage.py cms migrate --database {{ item }} --noinput {{ db_dry_run }} --settings=aws
shell: "python manage.py cms migrate --database {{ item }} --noinput {{ db_dry_run }} --settings=aws"
args:
chdir: "{{ edxapp_code_dir }}"
environment:
DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}"
DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
......
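Another rewrite repeated across this commit: multi-line `shell: >` blocks that folded `chdir=` into the command string become a single-line command plus an `args:` dictionary, so directives such as `chdir`, `creates`, and `executable` are parsed as task arguments instead of shell text. A minimal sketch (hypothetical command):

    # old: directive folded into the command string
    - shell: >
        chdir={{ edxapp_code_dir }}
        python manage.py lms migrate --noinput

    # new: directive moved to args, command stays a single string
    - shell: "python manage.py lms migrate --noinput"
      args:
        chdir: "{{ edxapp_code_dir }}"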
......@@ -12,7 +12,8 @@
pre_tasks:
- action: ec2_facts
when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}"
- debug:
var: ansible_ec2_instance_id
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb
......@@ -29,16 +30,16 @@
- oraclejdk
- elasticsearch
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
- debug:
var: ansible_ec2_instance_id
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
ec2_elbs: "{{ ec2_elbs }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
become: False
when: elb_pre_post
......@@ -14,11 +14,11 @@
- name: stop certs service
service: name="certificates" state="stopped"
- name: checkout code
git_2_0_1: >
repo="{{ repo_url }}"
dest="{{ repo_path }}"
version="{{ certificates_version }}"
accept_hostkey=yes
git_2_0_1:
repo: "{{ repo_url }}"
dest: "{{ repo_path }}"
version: "{{ certificates_version }}"
accept_hostkey: yes
environment:
GIT_SSH: "{{ git_ssh_script }}"
- name: install requirements
......@@ -29,11 +29,11 @@
# Need to do this because the www-data user is not properly set up
# and can't run ssh.
- name: change owner to www-data
file: >
path="{{ repo_path }}"
owner="www-data"
group="www-data"
recurse=yes
state="directory"
file:
path: "{{ repo_path }}"
owner: "www-data"
group: "www-data"
recurse: yes
state: "directory"
- name: start certs service
service: name="certificates" state="started"
......@@ -86,7 +86,7 @@
manage_group {{ item.name | quote }}
{% if item.get('permissions', []) | length %}--permissions {{ item.permissions | default([]) | map('quote') | join(' ') }}{% endif %}
{% if item.get('remove') %}--remove{% endif %}
with_items: django_groups
with_items: "{{ django_groups }}"
- name: Manage users
shell: >
......@@ -98,6 +98,6 @@
{% if item.get('staff') %}--staff{% endif %}
{% if item.get('unusable_password') %}--unusable-password{% endif %}
{% if item.get('initial_password_hash') %}--initial-password-hash {{ item.initial_password_hash | quote }}{% endif %}
with_items: django_users
with_items: "{{ django_users }}"
register: manage_users_result
failed_when: (manage_users_result | failed) and not (ignore_user_creation_errors | bool)
......@@ -72,7 +72,7 @@
install_recommends: yes
force: yes
update_cache: yes
with_items: mongodb_debian_pkgs
with_items: "{{ mongodb_debian_pkgs }}"
- name: wait for mongo server to start
wait_for:
port: 27017
......
......@@ -48,7 +48,7 @@
install_recommends: yes
force: yes
update_cache: yes
with_items: mongodb_debian_pkgs
with_items: "{{ mongodb_debian_pkgs }}"
- name: wait for mongo server to start
wait_for:
port: 27017
......
......@@ -9,5 +9,6 @@
- "roles/ecommerce/defaults/main.yml"
- "roles/programs/defaults/main.yml"
- "roles/credentials/defaults/main.yml"
- "roles/discovery/defaults/main.yml"
roles:
- oauth_client_setup
......@@ -46,9 +46,7 @@
dest: "{{ xblock_config_temp_directory.stdout }}/{{ file | basename }}"
register: xblock_config_file
- name: Manage xblock configurations
shell: >
{{ python_path }} {{ manage_path }} lms --settings=aws
populate_model -f {{ xblock_config_file.dest | quote }} -u {{ user }}
shell: "{{ python_path }} {{ manage_path }} lms --settings=aws populate_model -f {{ xblock_config_file.dest | quote }} -u {{ user }}"
register: command_result
changed_when: "'Import complete, 0 new entries created' not in command_result.stdout"
- debug: msg="{{ command_result.stdout }}"
......
......@@ -17,7 +17,8 @@
pre_tasks:
- action: ec2_facts
when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}"
- debug:
var: ansible_ec2_instance_id
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb
......@@ -32,16 +33,16 @@
- aws
- rabbitmq
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
- debug:
var: ansible_ec2_instance_id
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
ec2_elbs: "{{ ec2_elbs }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
become: False
when: elb_pre_post
......@@ -17,22 +17,21 @@
register: mktemp
# This command will fail if grep returns zero lines, which prevents
# the last key from being removed
- shell: >
grep -Fv '{{ ubuntu_public_keys[public_key] }}' {{ keyfile }} > {{ mktemp.stdout }}
- shell: >
while read line; do ssh-keygen -lf /dev/stdin <<<$line; done <{{ mktemp.stdout }}
executable=/bin/bash
- shell: "grep -Fv '{{ ubuntu_public_keys[public_key] }}' {{ keyfile }} > {{ mktemp.stdout }}"
- shell: "while read line; do ssh-keygen -lf /dev/stdin <<<$line; done <{{ mktemp.stdout }}"
args:
executable: /bin/bash
register: keycheck
- fail: msg="public key check failed!"
when: keycheck.stderr != ""
- command: cp {{ mktemp.stdout }} {{ keyfile }}
- file: >
path={{ keyfile }}
owner={{ owner }}
mode=0600
- file: >
path={{ mktemp.stdout }}
state=absent
- file:
path: "{{ keyfile }}"
owner: "{{ owner }}"
mode: 0600
- file:
path: "{{ mktemp.stdout }}"
state: absent
- shell: wc -l < {{ keyfile }}
register: line_count
- fail: msg="There should only be one line in ubuntu's authorized_keys"
......
......@@ -7,6 +7,6 @@
- roles/supervisor/defaults/main.yml
tasks:
- name: supervisor | restart supervisor
service: >
name={{ supervisor_service }}
state=restarted
service:
name: "{{ supervisor_service }}"
state: restarted
......@@ -12,8 +12,8 @@
- name: Set hostname
hostname: name={{ hostname_fqdn.split('.')[0] }}
- name: Update /etc/hosts
lineinfile: >
dest=/etc/hosts
regexp="^127\.0\.1\.1"
line="127.0.1.1{{'\t'}}{{ hostname_fqdn.split('.')[0] }}{{'\t'}}{{ hostname_fqdn }}{{'\t'}}localhost"
state=present
lineinfile:
dest: /etc/hosts
regexp: "^127\\.0\\.1\\.1"
line: "127.0.1.1{{ '\t' }}{{ hostname_fqdn.split('.')[0] }}{{ '\t' }}{{ hostname_fqdn }}{{ '\t' }}localhost"
state: present
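The /etc/hosts hunk shows one subtlety of moving to YAML dict syntax: inside double-quoted YAML scalars a backslash is an escape character, so regex backslashes must be doubled (or the value single-quoted to keep them literal). A sketch with a hypothetical line value:

    - lineinfile:
        dest: /etc/hosts
        regexp: "^127\\.0\\.1\\.1"    # double-quoted: \\ yields one literal backslash
        line: "127.0.1.1 example-host"  # hypothetical replacement line
        state: present
    # single quotes avoid the doubling:  regexp: '^127\.0\.1\.1'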
......@@ -11,7 +11,8 @@
pre_tasks:
- action: ec2_facts
when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}"
- debug:
var: "{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb
......@@ -25,16 +26,16 @@
tasks:
- shell: echo "test"
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
- debug:
var: "{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
ec2_elbs: "{{ ec2_elbs }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
become: False
when: elb_pre_post
......@@ -14,7 +14,8 @@
pre_tasks:
- action: ec2_facts
when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}"
- debug:
var: "{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb
......@@ -38,16 +39,16 @@
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
- debug:
var: "{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
ec2_elbs: "{{ ec2_elbs }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
become: False
when: elb_pre_post
......@@ -96,22 +96,10 @@ def main():
aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'],
no_log=True),
aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
tags=dict(default=None),
tags=dict(default=None, type='dict'),
)
)
tags_param = module.params.get('tags')
tags = {}
if isinstance(tags_param, list):
for item in module.params.get('tags'):
for k,v in item.iteritems():
tags[k] = v
elif isinstance(tags_param, dict):
tags = tags_param
else:
module.fail_json(msg="Invalid format for tags")
aws_secret_key = module.params.get('aws_secret_key')
aws_access_key = module.params.get('aws_access_key')
region = module.params.get('region')
......@@ -137,7 +125,7 @@ def main():
instances = []
instance_ids = []
for res in ec2.get_all_instances(filters={'tag:' + tag: value
for tag, value in tags.iteritems()}):
for tag, value in module.params.get('tags').iteritems()}):
for inst in res.instances:
if inst.state == "running":
instances.append({k: v for k, v in inst.__dict__.iteritems()
......
......@@ -66,7 +66,7 @@ tasks:
- name: tag my launched instances
local_action: ec2_tag resource={{ item.id }} region=eu-west-1 state=present
with_items: ec2.instances
with_items: "{{ ec2.instances }}"
args:
tags:
Name: webserver
......@@ -76,7 +76,7 @@ tasks:
tasks:
- name: tag my instance
local_action: ec2_ntag resource={{ item.id }} region=us-east-1 state=present
with_items: ec2.instances
with_items: "{{ ec2.instances }}"
args:
tags:
- Name: "{{ some_variable }}"
......@@ -101,7 +101,7 @@ def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
resource = dict(required=True),
tags = dict(),
tags = dict(required=False, type='list'),
state = dict(default='present', choices=['present', 'absent', 'list']),
)
)
......
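Declaring `type='dict'` (and `type='list'` for the `ec2_ntag` variant) in the module's `argument_spec` lets Ansible's argument parsing validate and convert the `tags` parameter, which is why the hand-rolled list-vs-dict normalization above could be deleted. A usage sketch with hypothetical values:

    - name: tag an instance
      ec2_tag_local:
        region: us-east-1
        resource: i-0123456789abcdef0   # hypothetical instance id
        tags:                           # arrives as a dict, no conversion code needed
          Name: webserver
          environment: stage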
......@@ -24,7 +24,7 @@
apt:
name: "{{ item }}"
state: present
with_items: ad_hoc_reporting_debian_pkgs
with_items: "{{ ad_hoc_reporting_debian_pkgs }}"
tags:
- install:system-requirements
......@@ -58,7 +58,7 @@
name: "{{ item }}"
state: present
extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}"
with_items: ad_hoc_reporting_pip_pkgs
with_items: "{{ ad_hoc_reporting_pip_pkgs }}"
tags:
- install:app-requirements
......@@ -92,7 +92,7 @@
- scripts
- scripts:mysql
- install:code
with_items: AD_HOC_REPORTING_REPLICA_DB_HOSTS
with_items: "{{ AD_HOC_REPORTING_REPLICA_DB_HOSTS }}"
# These templates rely on there being a global
# read_only mongo user; you must override the default
......
......@@ -27,3 +27,6 @@
##
# Defaults for role add_user
#
#
#
dirs: []
......@@ -65,8 +65,7 @@
owner: "{{ item.owner }}"
group: "{{ item.group }}"
mode: "{{ item.mode | default('0755') }}"
with_items: dirs
when: dirs is defined
with_items: "{{ dirs }}"
tags:
- install
- install:base
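Defaulting `dirs` to an empty list replaces the old `when: dirs is defined` guard: looping over `[]` simply runs zero times, whereas under 2.x a bare `with_items: dirs` with an undefined variable is an error. A sketch:

    # defaults/main.yml
    dirs: []

    # tasks/main.yml -- no definedness guard required
    - file:
        path: "{{ item.path }}"   # hypothetical item shape
        state: directory
      with_items: "{{ dirs }}"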
......@@ -33,42 +33,40 @@
#
- name: setup the analytics_api env file
template: >
src="edx/app/analytics_api/analytics_api_env.j2"
dest="{{ analytics_api_home }}/analytics_api_env"
owner={{ analytics_api_user }}
group={{ analytics_api_user }}
mode=0644
template:
src: "edx/app/analytics_api/analytics_api_env.j2"
dest: "{{ analytics_api_home }}/analytics_api_env"
owner: "{{ analytics_api_user }}"
group: "{{ analytics_api_user }}"
mode: 0644
tags:
- install
- install:configuration
- name: "add gunicorn configuration file"
template: >
src=edx/app/analytics_api/analytics_api_gunicorn.py.j2
dest={{ analytics_api_home }}/analytics_api_gunicorn.py
template:
src: edx/app/analytics_api/analytics_api_gunicorn.py.j2
dest: "{{ analytics_api_home }}/analytics_api_gunicorn.py"
become_user: "{{ analytics_api_user }}"
tags:
- install
- install:configuration
- name: install application requirements
pip: >
requirements="{{ analytics_api_requirements_base }}/{{ item }}"
virtualenv="{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}"
state=present
pip:
requirements: "{{ analytics_api_requirements_base }}/{{ item }}"
virtualenv: "{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}"
state: present
become_user: "{{ analytics_api_user }}"
with_items: analytics_api_requirements
with_items: "{{ analytics_api_requirements }}"
tags:
- install
- install:app-requirements
- name: migrate
shell: >
chdir={{ analytics_api_code_dir }}
DB_MIGRATION_USER='{{ COMMON_MYSQL_MIGRATE_USER }}'
DB_MIGRATION_PASS='{{ COMMON_MYSQL_MIGRATE_PASS }}'
{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/python ./manage.py migrate --noinput
shell: "DB_MIGRATION_USER='{{ COMMON_MYSQL_MIGRATE_USER }}' DB_MIGRATION_PASS='{{ COMMON_MYSQL_MIGRATE_PASS }}' {{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/python ./manage.py migrate --noinput"
args:
chdir: "{{ analytics_api_code_dir }}"
become_user: "{{ analytics_api_user }}"
environment: "{{ analytics_api_environment }}"
when: migrate_db is defined and migrate_db|lower == "yes"
......@@ -77,9 +75,9 @@
- migrate:db
- name: run collectstatic
shell: >
chdir={{ analytics_api_code_dir }}
{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/python manage.py collectstatic --noinput
shell: "{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/python manage.py collectstatic --noinput"
args:
chdir: "{{ analytics_api_code_dir }}"
become_user: "{{ analytics_api_user }}"
environment: "{{ analytics_api_environment }}"
tags:
......@@ -87,40 +85,44 @@
- assets:gather
- name: create api users
shell: >
chdir={{ analytics_api_code_dir }}
{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/python manage.py set_api_key {{ item.key }} {{ item.value }}
shell: "{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/python manage.py set_api_key {{ item.key }} {{ item.value }}"
args:
chdir: "{{ analytics_api_code_dir }}"
become_user: "{{ analytics_api_user }}"
environment: "{{ analytics_api_environment }}"
with_dict: ANALYTICS_API_USERS
with_dict: "{{ ANALYTICS_API_USERS }}"
tags:
- manage
- manage:app-users
- name: write out the supervisor wrapper
template: >
src=edx/app/analytics_api/analytics_api.sh.j2
dest={{ analytics_api_home }}/{{ analytics_api_service_name }}.sh
mode=0650 owner={{ supervisor_user }} group={{ common_web_user }}
template:
src: edx/app/analytics_api/analytics_api.sh.j2
dest: "{{ analytics_api_home }}/{{ analytics_api_service_name }}.sh"
mode: 0650
owner: "{{ supervisor_user }}"
group: "{{ common_web_user }}"
tags:
- install
- install:configuration
- name: write supervisord config
template: >
src=edx/app/supervisor/conf.d.available/analytics_api.conf.j2
dest="{{ supervisor_available_dir }}/{{ analytics_api_service_name }}.conf"
owner={{ supervisor_user }} group={{ common_web_user }} mode=0644
template:
src: edx/app/supervisor/conf.d.available/analytics_api.conf.j2
dest: "{{ supervisor_available_dir }}/{{ analytics_api_service_name }}.conf"
owner: "{{ supervisor_user }}"
group: "{{ common_web_user }}"
mode: 0644
tags:
- install
- install:configuration
- name: enable supervisor script
file: >
src={{ supervisor_available_dir }}/{{ analytics_api_service_name }}.conf
dest={{ supervisor_cfg_dir }}/{{ analytics_api_service_name }}.conf
state=link
force=yes
file:
src: "{{ supervisor_available_dir }}/{{ analytics_api_service_name }}.conf"
dest: "{{ supervisor_cfg_dir }}/{{ analytics_api_service_name }}.conf"
state: link
force: yes
when: not disable_edx_services
tags:
- install
......@@ -134,10 +136,10 @@
- manage:start
- name: create symlinks from the venv bin dir
file: >
src="{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/{{ item }}"
dest="{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.analytics_api"
state=link
file:
src: "{{ analytics_api_home }}/venvs/{{ analytics_api_service_name }}/bin/{{ item }}"
dest: "{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.analytics_api"
state: link
with_items:
- python
- pip
......@@ -147,10 +149,10 @@
- install:base
- name: create symlinks from the repo dir
file: >
src="{{ analytics_api_code_dir }}/{{ item }}"
dest="{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.analytics_api"
state=link
file:
src: "{{ analytics_api_code_dir }}/{{ item }}"
dest: "{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.analytics_api"
state: link
with_items:
- manage.py
tags:
......@@ -158,11 +160,11 @@
- install:base
- name: restart analytics_api
supervisorctl: >
state=restarted
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
name={{ analytics_api_service_name }}
supervisorctl:
state: restarted
supervisorctl_path: "{{ supervisor_ctl }}"
config: "{{ supervisor_cfg }}"
name: "{{ analytics_api_service_name }}"
when: not disable_edx_services
become_user: "{{ supervisor_service_user }}"
tags:
......
......@@ -3,13 +3,13 @@
#
# Tasks for role {{ role_name }}
#
#
# Overview:
#
#
#
# Dependencies:
#
#
#
# Example play:
#
#
......@@ -149,7 +149,7 @@
tags:
- install
- install:app-requirements
- name: run collectstatic
command: make static
args:
......@@ -161,7 +161,7 @@
- assets:gather
- name: restart the application
supervisorctl:
supervisorctl:
state: restarted
supervisorctl_path: "{{ '{{' }} supervisor_ctl }}"
config: "{{ '{{' }} supervisor_cfg }}"
......@@ -173,20 +173,24 @@
- manage:start
- name: Copying nginx configs for {{ role_name }}
template: >
src=edx/app/nginx/sites-available/{{ role_name }}.j2
dest={{ '{{' }} nginx_sites_available_dir }}/{{ role_name }}
owner=root group={{ '{{' }} common_web_user }} mode=0640
template:
src: "edx/app/nginx/sites-available/{{ role_name }}.j2"
dest: "{{ '{{' }} nginx_sites_available_dir }}/{{ role_name }}"
owner: root
group: "{{ '{{' }} common_web_user }}"
mode: 0640
notify: reload nginx
tags:
- install
- install:vhosts
- name: Creating nginx config links for {{ role_name }}
file: >
src={{ '{{' }} nginx_sites_available_dir }}/{{ role_name }}
dest={{ '{{' }} nginx_sites_enabled_dir }}/{{ role_name }}
state=link owner=root group=root
file:
src: "{{ '{{' }} nginx_sites_available_dir }}/{{ role_name }}"
dest: "{{ '{{' }} nginx_sites_enabled_dir }}/{{ role_name }}"
state: link
owner: root
group: root
notify: reload nginx
tags:
- install
......
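This hunk edits a role skeleton: the file is itself rendered by Jinja when a new role is generated, so braces that must survive into the generated YAML are produced with `{{ '{{' }}`, a string expression that renders a literal `{{`. How one line renders, assuming a hypothetical role name:

    # template source (role skeleton):
    #   dest: "{{ '{{' }} nginx_sites_enabled_dir }}/{{ role_name }}"
    # rendered output with role_name=myrole:
    #   dest: "{{ nginx_sites_enabled_dir }}/myrole"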
......@@ -23,41 +23,41 @@
- name: install antivirus system packages
apt: pkg={{ item }} install_recommends=yes state=present
with_items: antivirus_debian_pkgs
with_items: "{{ antivirus_debian_pkgs }}"
- name: create antivirus scanner user
user: >
name="{{ antivirus_user }}"
home="{{ antivirus_app_dir }}"
createhome=no
shell=/bin/false
user:
name: "{{ antivirus_user }}"
home: "{{ antivirus_app_dir }}"
createhome: no
shell: /bin/false
- name: create antivirus app and data dirs
file: >
path="{{ item }}"
state=directory
owner="{{ antivirus_user }}"
group="{{ antivirus_user }}"
file:
path: "{{ item }}"
state: directory
owner: "{{ antivirus_user }}"
group: "{{ antivirus_user }}"
with_items:
- "{{ antivirus_app_dir }}"
- "{{ antivirus_app_dir }}/data"
- name: install antivirus s3 scanner script
template: >
src=s3_bucket_virus_scan.sh.j2
dest={{ antivirus_app_dir }}/s3_bucket_virus_scan.sh
mode=0555
owner={{ antivirus_user }}
group={{ antivirus_user }}
template:
src: s3_bucket_virus_scan.sh.j2
dest: "{{ antivirus_app_dir }}/s3_bucket_virus_scan.sh"
mode: "0555"
owner: "{{ antivirus_user }}"
group: "{{ antivirus_user }}"
- name: install antivirus s3 scanner cronjob
cron: >
name="antivirus-{{ item }}"
job="{{ antivirus_app_dir }}/s3_bucket_virus_scan.sh -b '{{ item }}' -m '{{ ANTIVIRUS_MAILTO }}' -f '{{ ANTIVIRUS_MAILFROM }}'"
backup=yes
cron_file=antivirus-{{ item }}
user={{ antivirus_user }}
hour="*"
minute="0"
day="*"
with_items: ANTIVIRUS_BUCKETS
cron:
name: "antivirus-{{ item }}"
job: "{{ antivirus_app_dir }}/s3_bucket_virus_scan.sh -b '{{ item }}' -m '{{ ANTIVIRUS_MAILTO }}' -f '{{ ANTIVIRUS_MAILFROM }}'"
backup: yes
cron_file: "antivirus-{{ item }}"
user: "{{ antivirus_user }}"
hour: "*"
minute: "0"
day: "*"
with_items: "{{ ANTIVIRUS_BUCKETS }}"
......@@ -102,8 +102,8 @@
file:
path: "{{ item.item }}"
mode: "0644"
when: >
when:
vagrant_home_dir.stat.exists == false and
ansible_distribution in common_debian_variants and
item.stat.exists
with_items: motd_files_exist.results
with_items: "{{ motd_files_exist.results }}"
# Install browsermob-proxy, which is used for page performance testing with bok-choy
---
- name: get zip file
get_url: >
url={{ browsermob_proxy_url }}
dest=/var/tmp/browsermob-proxy-{{ browsermob_proxy_version }}.zip
get_url:
url: "{{ browsermob_proxy_url }}"
dest: "/var/tmp/browsermob-proxy-{{ browsermob_proxy_version }}.zip"
register: download_browsermob_proxy
- name: unzip into /var/tmp/
shell: >
unzip /var/tmp/browsermob-proxy-{{ browsermob_proxy_version }}.zip
chdir=/var/tmp
shell: "unzip /var/tmp/browsermob-proxy-{{ browsermob_proxy_version }}.zip"
args:
chdir: "/var/tmp"
when: download_browsermob_proxy.changed
- name: move to /etc/browsermob-proxy/
shell: >
mv /var/tmp/browsermob-proxy-{{ browsermob_proxy_version }} /etc/browsermob-proxy
shell: "mv /var/tmp/browsermob-proxy-{{ browsermob_proxy_version }} /etc/browsermob-proxy"
when: download_browsermob_proxy.changed
- name: change permissions of main script
file: >
path=/etc/browsermob-proxy/bin/browsermob-proxy
mode=0755
file:
path: "/etc/browsermob-proxy/bin/browsermob-proxy"
mode: 0755
when: download_browsermob_proxy.changed
- name: add wrapper script /usr/local/bin/browsermob-proxy
copy: >
src=browsermob-proxy
dest=/usr/local/bin/browsermob-proxy
copy:
src: browsermob-proxy
dest: /usr/local/bin/browsermob-proxy
when: download_browsermob_proxy.changed
- name: change permissions of wrapper script
file: >
path=/usr/local/bin/browsermob-proxy
mode=0755
file:
path: /usr/local/bin/browsermob-proxy
mode: 0755
when: download_browsermob_proxy.changed
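The browsermob-proxy tasks keep a common idempotence pattern through the conversion: `get_url` reports changed only when it actually downloads, and the registered result gates the one-shot follow-up steps. A minimal sketch (hypothetical URL):

    - get_url:
        url: "https://example.com/tool.zip"   # hypothetical
        dest: /var/tmp/tool.zip
      register: download_tool

    - shell: "unzip /var/tmp/tool.zip"
      args:
        chdir: /var/tmp
      when: download_tool.changed            # skipped when the file was already present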
......@@ -8,12 +8,12 @@
- name: download browser debian packages from S3
get_url: dest="/tmp/{{ item.name }}" url="{{ item.url }}"
register: download_deb
with_items: browser_s3_deb_pkgs
with_items: "{{ browser_s3_deb_pkgs }}"
- name: install browser debian packages
shell: gdebi -nq /tmp/{{ item.name }}
when: download_deb.changed
with_items: browser_s3_deb_pkgs
with_items: "{{ browser_s3_deb_pkgs }}"
# Because the source location has been deprecated, we need to
# ensure it does not interfere with subsequent apt commands
......@@ -50,15 +50,15 @@
- "chromedriver.stat.mode == '0755'"
- name: download PhantomJS
get_url: >
url={{ phantomjs_url }}
dest=/var/tmp/{{ phantomjs_tarfile }}
get_url:
url: "{{ phantomjs_url }}"
dest: "/var/tmp/{{ phantomjs_tarfile }}"
register: download_phantom_js
- name: unpack the PhantomJS tarfile
shell: >
tar -xjf /var/tmp/{{ phantomjs_tarfile }}
chdir=/var/tmp
shell: "tar -xjf /var/tmp/{{ phantomjs_tarfile }}"
args:
chdir: "/var/tmp"
when: download_phantom_js.changed
- name: move PhantomJS binary to /usr/local
......
......@@ -30,7 +30,7 @@
file:
path: "{{ cassandra_data_dir_prefix }}/{{ item }}"
state: directory
with_items: cassandra_data_dirs
with_items: "{{ cassandra_data_dirs }}"
- name: Mount ephemeral disks
mount:
......@@ -49,7 +49,7 @@
path: "{{ cassandra_data_dir_prefix }}/{{ item }}"
owner: "{{ cassandra_user }}"
group: "{{ cassandra_group }}"
with_items: cassandra_data_dirs
with_items: "{{ cassandra_data_dirs }}"
- name: Add the datastax repository apt-key
apt_key:
......
......@@ -4,3 +4,4 @@
# role depends. This is to allow sharing vars without creating
# side-effects. Any vars required by this role should be added to
# common_vars/defaults/main.yml
#
......@@ -3,7 +3,7 @@
fail:
msg: "Configuration Sources Checking (COMMON_EXTRA_CONFIGURATION_SOURCES_CHECKING) is enabled, you must define {{ item }}"
when: COMMON_EXTRA_CONFIGURATION_SOURCES_CHECKING and ({{ item }} is not defined or {{ item }} != True)
with_items: COMMON_EXTRA_CONFIGURATION_SOURCES
with_items: "{{ COMMON_EXTRA_CONFIGURATION_SOURCES }}"
tags:
- "install"
- "install:configuration"
......
......@@ -10,13 +10,13 @@
#
#
# Tasks for role credentials
#
#
# Overview:
#
#
#
# Dependencies:
#
#
#
# Example play:
#
#
......@@ -43,9 +43,9 @@
- install:app-requirements
- name: create nodeenv
shell: >
creates={{ credentials_nodeenv_dir }}
{{ credentials_venv_dir }}/bin/nodeenv {{ credentials_nodeenv_dir }} --prebuilt
shell: "{{ credentials_venv_dir }}/bin/nodeenv {{ credentials_nodeenv_dir }} --prebuilt"
args:
creates: "{{ credentials_nodeenv_dir }}"
become_user: "{{ credentials_user }}"
tags:
- install
......@@ -74,9 +74,12 @@
# var should have more permissive permissions than the rest
- name: create credentials var dirs
file: >
path="{{ item }}" state=directory mode=0775
owner="{{ credentials_user }}" group="{{ common_web_group }}"
file:
path: "{{ item }}"
state: directory
mode: 0775
owner: "{{ credentials_user }}"
group: "{{ common_web_group }}"
with_items:
- "{{ CREDENTIALS_MEDIA_ROOT }}"
tags:
......@@ -180,7 +183,7 @@
- assets:gather
- name: restart the application
supervisorctl:
supervisorctl:
state: restarted
supervisorctl_path: "{{ supervisor_ctl }}"
config: "{{ supervisor_cfg }}"
......@@ -192,20 +195,24 @@
- manage:start
- name: Copying nginx configs for credentials
template: >
src=edx/app/nginx/sites-available/credentials.j2
dest={{ nginx_sites_available_dir }}/credentials
owner=root group={{ common_web_user }} mode=0640
template:
src: edx/app/nginx/sites-available/credentials.j2
dest: "{{ nginx_sites_available_dir }}/credentials"
owner: root
group: "{{ common_web_user }}"
mode: 0640
notify: reload nginx
tags:
- install
- install:vhosts
- name: Creating nginx config links for credentials
file: >
src={{ nginx_sites_available_dir }}/credentials
dest={{ nginx_sites_enabled_dir }}/credentials
state=link owner=root group=root
file:
src: "{{ nginx_sites_available_dir }}/credentials"
dest: "{{ nginx_sites_enabled_dir }}/credentials"
state: link
owner: root
group: root
notify: reload nginx
tags:
- install
......
---
DATADOG_API_KEY: "SPECIFY_KEY_HERE"
datadog_agent_version: '1:5.1.1-546'
datadog_agent_version: '1:5.10.1-1'
datadog_apt_key: "0x226AE980C7A7DA52"
datadog_debian_pkgs:
......
---
- name: check out the demo course
git_2_0_1: >
dest={{ demo_code_dir }} repo={{ demo_repo }} version={{ demo_version }}
accept_hostkey=yes
git_2_0_1:
dest: "{{ demo_code_dir }}"
repo: "{{ demo_repo }}"
version: "{{ demo_version }}"
accept_hostkey: yes
become_user: "{{ demo_edxapp_user }}"
register: demo_checkout
- name: import demo course
shell: >
{{ demo_edxapp_venv_bin }}/python ./manage.py cms --settings=aws import {{ demo_edxapp_course_data_dir }} {{ demo_code_dir }}
chdir={{ demo_edxapp_code_dir }}
shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py cms --settings=aws import {{ demo_edxapp_course_data_dir }} {{ demo_code_dir }}"
args:
chdir: "{{ demo_edxapp_code_dir }}"
become_user: "{{ common_web_user }}"
when: demo_checkout.changed
- name: create some test users
shell: >
{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms manage_user {{ item.username}} {{ item.email }} --initial-password-hash {{ item.hashed_password | quote }}
chdir={{ demo_edxapp_code_dir }}
shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms manage_user {{ item.username}} {{ item.email }} --initial-password-hash {{ item.hashed_password | quote }}"
args:
chdir: "{{ demo_edxapp_code_dir }}"
become_user: "{{ common_web_user }}"
with_items: demo_test_users
with_items: "{{ demo_test_users }}"
when: demo_checkout.changed
- name: create staff user
shell: >
{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms manage_user staff staff@example.com --initial-password-hash {{ demo_hashed_password | quote }} --staff
chdir={{ demo_edxapp_code_dir }}
shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms manage_user staff staff@example.com --initial-password-hash {{ demo_hashed_password | quote }} --staff"
args:
chdir: "{{ demo_edxapp_code_dir }}"
become_user: "{{ common_web_user }}"
when:
- demo_checkout.changed
- DEMO_CREATE_STAFF_USER
- name: enroll test users in the demo course
shell: >
{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms enroll_user_in_course -e {{ item.email }} -c {{ demo_course_id }}
chdir={{ demo_edxapp_code_dir }}
shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms enroll_user_in_course -e {{ item.email }} -c {{ demo_course_id }}"
args:
chdir: "{{ demo_edxapp_code_dir }}"
become_user: "{{ common_web_user }}"
with_items:
- "{{ demo_test_users }}"
......@@ -43,15 +45,15 @@
- name: add test users to the certificate whitelist
shell: >
{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms cert_whitelist -a {{ item.email }} -c {{ demo_course_id }}
chdir={{ demo_edxapp_code_dir }}
with_items: demo_test_users
shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms cert_whitelist -a {{ item.email }} -c {{ demo_course_id }}"
args:
chdir: "{{ demo_edxapp_code_dir }}"
with_items: "{{ demo_test_users }}"
when: demo_checkout.changed
- name: seed the forums for the demo course
shell: >
{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws seed_permissions_roles {{ demo_course_id }}
chdir={{ demo_edxapp_code_dir }}
with_items: demo_test_users
shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws seed_permissions_roles {{ demo_course_id }}"
args:
chdir: "{{ demo_edxapp_code_dir }}"
with_items: "{{ demo_test_users }}"
when: demo_checkout.changed
......@@ -31,8 +31,10 @@
# - demo
- name: create demo app and data dirs
file: >
path="{{ demo_app_dir }}" state=directory
owner="{{ demo_edxapp_user }}" group="{{ common_web_group }}"
file:
path: "{{ demo_app_dir }}"
state: directory
owner: "{{ demo_edxapp_user }}"
group: "{{ common_web_group }}"
- include: deploy.yml tags=deploy
......@@ -77,9 +77,9 @@
- devstack:install
- name: create nodeenv
shell: >
creates={{ discovery_nodeenv_dir }}
{{ discovery_venv_dir }}/bin/nodeenv {{ discovery_nodeenv_dir }} --node={{ discovery_node_version }} --prebuilt
shell: "{{ discovery_venv_dir }}/bin/nodeenv {{ discovery_nodeenv_dir }} --node={{ discovery_node_version }} --prebuilt"
args:
creates: "{{ discovery_nodeenv_dir }}"
become_user: "{{ discovery_user }}"
tags:
- install
......@@ -94,9 +94,9 @@
- install:app-requirements
- name: install bower dependencies
shell: >
chdir={{ discovery_code_dir }}
. {{ discovery_nodeenv_bin }}/activate && {{ discovery_node_bin }}/bower install --production --config.interactive=false
shell: ". {{ discovery_nodeenv_bin }}/activate && {{ discovery_node_bin }}/bower install --production --config.interactive=false"
args:
chdir: "{{ discovery_code_dir }}"
become_user: "{{ discovery_user }}"
tags:
- install
......
......@@ -21,16 +21,20 @@ ECOMMERCE_NGINX_PORT: "18130"
ECOMMERCE_SSL_NGINX_PORT: 48130
ECOMMERCE_DEFAULT_DB_NAME: 'ecommerce'
ECOMMERCE_DATABASE_USER: "ecomm001"
ECOMMERCE_DATABASE_PASSWORD: "password"
ECOMMERCE_DATABASE_HOST: "localhost"
ECOMMERCE_DATABASE_PORT: 3306
ECOMMERCE_DATABASES:
# rw user
default:
ENGINE: 'django.db.backends.mysql'
NAME: '{{ ECOMMERCE_DEFAULT_DB_NAME }}'
USER: 'ecomm001'
PASSWORD: 'password'
HOST: 'localhost'
PORT: '3306'
USER: '{{ ECOMMERCE_DATABASE_USER }}'
PASSWORD: '{{ ECOMMERCE_DATABASE_PASSWORD }}'
HOST: '{{ ECOMMERCE_DATABASE_HOST }}'
PORT: '{{ ECOMMERCE_DATABASE_PORT }}'
ATOMIC_REQUESTS: true
CONN_MAX_AGE: 60
......
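Promoting the ecommerce database credentials to standalone variables lets a single override feed both the nested `ECOMMERCE_DATABASES` setting and the `edxlocal` provisioning defaults updated later in this commit. A sketch of an override file (hypothetical values):

    ECOMMERCE_DATABASE_USER: "ecomm_prod"
    ECOMMERCE_DATABASE_PASSWORD: "{{ ECOMMERCE_VAULT_DB_PASSWORD }}"   # hypothetical source var
    ECOMMERCE_DATABASE_HOST: "db.internal.example.com"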
......@@ -84,11 +84,9 @@
- migrate:db
- name: Populate countries
shell: >
chdir={{ ecommerce_code_dir }}
DB_MIGRATION_USER={{ COMMON_MYSQL_MIGRATE_USER }}
DB_MIGRATION_PASS={{ COMMON_MYSQL_MIGRATE_PASS }}
{{ ecommerce_venv_dir }}/bin/python ./manage.py oscar_populate_countries
shell: "DB_MIGRATION_USER={{ COMMON_MYSQL_MIGRATE_USER }} DB_MIGRATION_PASS={{ COMMON_MYSQL_MIGRATE_PASS }} {{ ecommerce_venv_dir }}/bin/python ./manage.py oscar_populate_countries"
args:
chdir: "{{ ecommerce_code_dir }}"
become_user: "{{ ecommerce_user }}"
environment: "{{ ecommerce_environment }}"
when: migrate_db is defined and migrate_db|lower == "yes"
......
......@@ -16,7 +16,7 @@
virtualenv: '{{ ecommerce_worker_home }}/venvs/{{ ecommerce_worker_service_name }}'
state: present
become_user: '{{ ecommerce_worker_user }}'
with_items: ecommerce_worker_requirements
with_items: "{{ ecommerce_worker_requirements }}"
- name: write out the supervisor wrapper
template:
......
......@@ -51,7 +51,7 @@
state: present
extra_args: "--exists-action w"
become_user: "{{ edx_notes_api_user }}"
with_items: edx_notes_api_requirements
with_items: "{{ edx_notes_api_requirements }}"
- name: Migrate
shell: >
......
......@@ -16,6 +16,7 @@
#
edx_service_name: edx_service
edx_service_repos: []
#
# OS packages
#
......
......@@ -127,18 +127,19 @@
action: ec2_facts
tags:
- to-remove
#old syntax - should be fixed
- name: Tag instance
ec2_tag_local: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
ec2_tag_local:
args:
resource: "{{ ansible_ec2_instance_id }}"
region: "{{ ansible_ec2_placement_region }}"
tags:
- Name: version:{{ edx_service_name }}
- Name: "version:{{ edx_service_name }}"
Value: "{{ item.0.DOMAIN }}/{{ item.0.PATH }}/{{ item.0.REPO }} {{ item.1.after |truncate(7,True,'') }}"
when: item.1.after is defined and COMMON_TAG_EC2_INSTANCE and edx_service_repos is defined
with_together:
- edx_service_repos
- code_checkout.results
- "{{ edx_service_repos }}"
- "{{ code_checkout.results }}"
tags:
- to-remove
......
......@@ -50,7 +50,7 @@
shell: /bin/bash
groups: "{{ themes_group }}"
append: yes
with_items: theme_users
with_items: "{{ theme_users }}"
when: theme_users is defined
- name: update .bashrc to set umask value
......
......@@ -499,8 +499,8 @@ EDXAPP_CELERY_WORKERS:
monitor: False
max_tasks_per_child: 1
EDXAPP_RECALCULATE_GRADES_ROUTING_KEY: 'edx.lms.core.default'
EDXAPP_LMS_CELERY_QUEUES: "{{ edxapp_workers|selectattr('service_variant', 'equalto', 'lms')|map(attribute='queue')|map('regex_replace', '(.*)', 'edx.lms.core.\\\\1')|list }}"
EDXAPP_CMS_CELERY_QUEUES: "{{ edxapp_workers|selectattr('service_variant', 'equalto', 'cms')|map(attribute='queue')|map('regex_replace', '(.*)', 'edx.cms.core.\\\\1')|list }}"
EDXAPP_LMS_CELERY_QUEUES: "{{ edxapp_workers|selectattr('service_variant', 'equalto', 'lms')|map(attribute='queue')|map('regex_replace', '(.*)', 'edx.lms.core.\\1')|list }}"
EDXAPP_CMS_CELERY_QUEUES: "{{ edxapp_workers|selectattr('service_variant', 'equalto', 'cms')|map(attribute='queue')|map('regex_replace', '(.*)', 'edx.cms.core.\\1')|list }}"
EDXAPP_DEFAULT_CACHE_VERSION: "1"
EDXAPP_OAUTH_ENFORCE_SECURE: True
......@@ -802,8 +802,6 @@ edxapp_generic_auth_config: &edxapp_generic_auth
generic_cache_config: &default_generic_cache
BACKEND: 'django.core.cache.backends.memcached.MemcachedCache'
KEY_FUNCTION: 'util.memcache.safe_key'
KEY_PREFIX: 'default'
LOCATION: "{{ EDXAPP_MEMCACHE }}"
generic_env_config: &edxapp_generic_env
ECOMMERCE_PUBLIC_URL_ROOT: "{{ EDXAPP_ECOMMERCE_PUBLIC_URL_ROOT }}"
......@@ -888,23 +886,29 @@ generic_env_config: &edxapp_generic_env
default:
<<: *default_generic_cache
KEY_PREFIX: 'default'
LOCATION: "{{ EDXAPP_MEMCACHE }}"
VERSION: "{{ EDXAPP_DEFAULT_CACHE_VERSION }}"
general:
<<: *default_generic_cache
KEY_PREFIX: 'general'
LOCATION: "{{ EDXAPP_MEMCACHE }}"
mongo_metadata_inheritance:
<<: *default_generic_cache
KEY_PREFIX: 'mongo_metadata_inheritance'
TIMEOUT: 300
LOCATION: "{{ EDXAPP_MEMCACHE }}"
staticfiles:
<<: *default_generic_cache
KEY_PREFIX: "{{ ansible_hostname|default('staticfiles') }}_general"
LOCATION: "{{ EDXAPP_MEMCACHE }}"
configuration:
<<: *default_generic_cache
KEY_PREFIX: "{{ ansible_hostname|default('configuration') }}"
LOCATION: "{{ EDXAPP_MEMCACHE }}"
celery:
<<: *default_generic_cache
KEY_PREFIX: 'celery'
LOCATION: "{{ EDXAPP_MEMCACHE }}"
TIMEOUT: "7200"
course_structure_cache:
<<: *default_generic_cache
......
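The cache hunk leans on a YAML feature already in use here: `&default_generic_cache` defines an anchor and `<<: *default_generic_cache` merges its keys into each cache entry. With `KEY_PREFIX` and `LOCATION` removed from the anchor, every cache now declares its own copies, as the added lines show. A reduced sketch:

    generic_cache_config: &default_generic_cache
      BACKEND: 'django.core.cache.backends.memcached.MemcachedCache'
      KEY_FUNCTION: 'util.memcache.safe_key'

    CACHES:
      default:
        <<: *default_generic_cache        # merge the shared keys
        KEY_PREFIX: 'default'
        LOCATION: "{{ EDXAPP_MEMCACHE }}"
      celery:
        <<: *default_generic_cache
        KEY_PREFIX: 'celery'
        LOCATION: "{{ EDXAPP_MEMCACHE }}"
        TIMEOUT: "7200"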
......@@ -110,10 +110,10 @@
- install:app-requirements
- name: Create the virtualenv to install the Python requirements
command: >
virtualenv {{ edxapp_venv_dir }}
chdir={{ edxapp_code_dir }}
creates={{ edxapp_venv_dir }}/bin/pip
command: "virtualenv {{ edxapp_venv_dir }}"
args:
chdir: "{{ edxapp_code_dir }}"
creates: "{{ edxapp_venv_dir }}/bin/pip"
become_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}"
tags:
......@@ -134,9 +134,9 @@
# Need to use command rather than pip so that we can maintain the context of our current working directory; some
# requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
# installs everything into that virtual environment.
command: >
{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item.item }}
chdir={{ edxapp_code_dir }}
command: "{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item.item }}"
args:
chdir: "{{ edxapp_code_dir }}"
become_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}"
when: item.stat.exists
......@@ -151,9 +151,9 @@
# Need to use shell rather than pip so that we can maintain the context of our current working directory; some
# requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
# installs everything into that virtual environment.
shell: >
{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item }}
chdir={{ edxapp_code_dir }}
shell: "{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item }}"
args:
chdir: "{{ edxapp_code_dir }}"
with_items:
- "{{ private_requirements_file }}"
become_user: "{{ edxapp_user }}"
......@@ -172,7 +172,7 @@
extra_args: "--exists-action w {{ item.extra_args|default('') }}"
virtualenv: "{{ edxapp_venv_dir }}"
state: present
with_items: EDXAPP_EXTRA_REQUIREMENTS
with_items: "{{ EDXAPP_EXTRA_REQUIREMENTS }}"
become_user: "{{ edxapp_user }}"
tags:
- install
......@@ -197,9 +197,9 @@
# Need to use shell rather than pip so that we can maintain the context of our current working directory; some
# requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
# installs everything into that virtual environment.
shell: >
{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item }}
chdir={{ edxapp_code_dir }}
shell: "{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item }}"
args:
chdir: "{{ edxapp_code_dir }}"
with_items:
- "{{ sandbox_base_requirements }}"
- "{{ sandbox_local_requirements }}"
......@@ -211,8 +211,7 @@
- install:app-requirements
- name: create nodeenv
shell: >
{{ edxapp_venv_dir }}/bin/nodeenv {{ edxapp_nodeenv_dir }} --node={{ edxapp_node_version }} --prebuilt
shell: "{{ edxapp_venv_dir }}/bin/nodeenv {{ edxapp_nodeenv_dir }} --node={{ edxapp_node_version }} --prebuilt"
args:
creates: "{{ edxapp_nodeenv_dir }}"
tags:
......@@ -223,8 +222,7 @@
# This needs to be done as root since npm is weird about
# chown - https://github.com/npm/npm/issues/3565
- name: Set the npm registry
shell: >
npm config set registry '{{ COMMON_NPM_MIRROR_URL }}'
shell: "npm config set registry '{{ COMMON_NPM_MIRROR_URL }}'"
args:
creates: "{{ edxapp_app_dir }}/.npmrc"
environment: "{{ edxapp_environment }}"
......@@ -279,9 +277,9 @@
- install:app-requirements
- name: code sandbox | Install sandbox requirements into sandbox venv
shell: >
{{ edxapp_sandbox_venv_dir }}/bin/pip install -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item }}
chdir={{ edxapp_code_dir }}
shell: "{{ edxapp_sandbox_venv_dir }}/bin/pip install -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ item }}"
args:
chdir: "{{ edxapp_code_dir }}"
with_items:
- "{{ sandbox_local_requirements }}"
- "{{ sandbox_post_requirements }}"
......
......@@ -3,8 +3,11 @@
template:
src: "{{ item[0] }}.{{ item[1] }}.json.j2"
dest: "{{ edxapp_app_dir }}/{{ item[0] }}.{{ item[1] }}.json"
become_user: "{{ edxapp_user }}"
with_nested:
owner: "{{ edxapp_user }}"
group: "{{ common_web_group }}"
mode: 0640
become: true
with_nested:
- "{{ service_variants_enabled }}"
- [ 'env', 'auth' ]
tags:
......@@ -17,7 +20,10 @@
template:
src: "{{ item[0] }}.{{ item[1] }}.yaml.j2"
dest: "{{ EDXAPP_CFG_DIR }}/{{ item[0] }}.{{ item[1] }}.yaml"
become_user: "{{ edxapp_user }}"
owner: "{{ edxapp_user }}"
group: "{{ common_web_group }}"
mode: 0640
become: true
with_nested:
- "{{ service_variants_enabled }}"
- [ 'env', 'auth' ]
......@@ -34,6 +40,7 @@
dest: "{{ supervisor_available_dir }}/{{ item }}.conf"
owner: "{{ supervisor_user }}"
group: "{{ supervisor_user }}"
mode: 0644
become_user: "{{ supervisor_user }}"
with_items: "{{ service_variants_enabled }}"
tags:
......@@ -47,6 +54,7 @@
dest: "{{ supervisor_available_dir }}/{{ item }}"
owner: "{{ supervisor_user }}"
group: "{{ supervisor_user }}"
mode: 0644
become_user: "{{ supervisor_user }}"
with_items:
- edxapp.conf
......@@ -59,6 +67,7 @@
template:
src: "{{ item }}_gunicorn.py.j2"
dest: "{{ edxapp_app_dir }}/{{ item }}_gunicorn.py"
mode: 0644
become_user: "{{ edxapp_user }}"
with_items: "{{ service_variants_enabled }}"
tags:
......
......@@ -19,8 +19,8 @@ edxlocal_databases:
edxlocal_database_users:
- {
db: "{{ ECOMMERCE_DEFAULT_DB_NAME | default(None) }}",
user: "{{ ECOMMERCE_DATABASES.default.USER | default(None) }}",
pass: "{{ ECOMMERCE_DATABASES.default.PASSWORD | default(None) }}"
user: "{{ ECOMMERCE_DATABASE_USER | default(None) }}",
pass: "{{ ECOMMERCE_DATABASE_PASSWORD | default(None) }}"
}
- {
db: "{{ INSIGHTS_DATABASE_NAME | default(None) }}",
......@@ -44,8 +44,8 @@ edxlocal_database_users:
}
- {
db: "{{ PROGRAMS_DEFAULT_DB_NAME | default(None) }}",
user: "{{ PROGRAMS_DATABASES.default.USER | default(None) }}",
pass: "{{ PROGRAMS_DATABASES.default.PASSWORD | default(None) }}"
user: "{{ PROGRAMS_DATABASE_USER | default(None) }}",
pass: "{{ PROGRAMS_DATABASE_PASSWORD | default(None) }}"
}
- {
db: "{{ ANALYTICS_PIPELINE_OUTPUT_DATABASE_NAME | default(None) }}",
......
......@@ -21,30 +21,27 @@
#
#
- name: download elasticsearch plugin
shell: >
./npi fetch {{ ELASTICSEARCH_MONITOR_PLUGIN }} -y
shell: "./npi fetch {{ ELASTICSEARCH_MONITOR_PLUGIN }} -y"
args:
chdir: "{{ NEWRELIC_NPI_PREFIX }}"
creates: "{{ NEWRELIC_NPI_PREFIX }}/plugins/{{ ELASTICSEARCH_MONITOR_PLUGIN }}.compressed"
become_user: "{{ NEWRELIC_USER }}"
- name: prepare elasticsearch plugin
shell: >
./npi prepare {{ ELASTICSEARCH_MONITOR_PLUGIN }} -n
shell: "./npi prepare {{ ELASTICSEARCH_MONITOR_PLUGIN }} -n"
args:
chdir: "{{ NEWRELIC_NPI_PREFIX }}"
become_user: "{{ NEWRELIC_USER }}"
- name: configure elasticsearch plugin
template: >
src=plugins/me.snov.newrelic-elasticsearch/newrelic-elasticsearch-plugin-1.4.1/config/plugin.json.j2
dest={{ NEWRELIC_NPI_PREFIX }}/plugins/{{ ELASTICSEARCH_MONITOR_PLUGIN }}/newrelic-elasticsearch-plugin-1.4.1/config/plugin.json
owner={{ NEWRELIC_USER }}
mode=0644
template:
src: "plugins/me.snov.newrelic-elasticsearch/newrelic-elasticsearch-plugin-1.4.1/config/plugin.json.j2"
dest: "{{ NEWRELIC_NPI_PREFIX }}/plugins/{{ ELASTICSEARCH_MONITOR_PLUGIN }}/newrelic-elasticsearch-plugin-1.4.1/config/plugin.json"
owner: "{{ NEWRELIC_USER }}"
mode: 0644
- name: register/start elasticsearch plugin
shell: >
./npi add-service {{ ELASTICSEARCH_MONITOR_PLUGIN }} --start --user={{ NEWRELIC_USER }}
shell: "./npi add-service {{ ELASTICSEARCH_MONITOR_PLUGIN }} --start --user={{ NEWRELIC_USER }}"
args:
chdir: "{{ NEWRELIC_NPI_PREFIX }}"
become_user: "root"
......
......@@ -30,37 +30,39 @@
---
- name: install pip packages
pip: name={{ item }} state=present
with_items: gh_mirror_pip_pkgs
with_items: "{{ gh_mirror_pip_pkgs }}"
- name: install debian packages
apt: >
pkg={{ ",".join(gh_mirror_debian_pkgs) }}
state=present
update_cache=yes
apt:
pkg: '{{ ",".join(gh_mirror_debian_pkgs) }}'
state: present
update_cache: yes
- name: create gh_mirror user
user: >
name={{ gh_mirror_user }}
state=present
user:
name: "{{ gh_mirror_user }}"
state: present
- name: create the gh_mirror data directory
file: >
path={{ gh_mirror_data_dir }}
state=directory
owner={{ gh_mirror_user }}
group={{ gh_mirror_group }}
file:
path: "{{ gh_mirror_data_dir }}"
state: directory
owner: "{{ gh_mirror_user }}"
group: "{{ gh_mirror_group }}"
- name: create the gh_mirror app directory
file: >
path={{ gh_mirror_app_dir }}
state=directory
file:
path: "{{ gh_mirror_app_dir }}"
state: directory
- name: create org config
template: src=orgs.yml.j2 dest={{ gh_mirror_app_dir }}/orgs.yml
- name: copying sync scripts
copy: src={{ item }} dest={{ gh_mirror_app_dir }}/{{ item }}
with_items: gh_mirror_app_files
copy:
src: "{{ item }}"
dest: "{{ gh_mirror_app_dir }}/{{ item }}"
with_items: "{{ gh_mirror_app_files }}"
- name: creating cron job to update repos
cron:
......
......@@ -23,8 +23,8 @@
- name: Set git fetch.prune to ignore deleted remote refs
shell: git config --global fetch.prune true
become_user: "{{ repo_owner }}"
when: GIT_REPOS is defined
no_log: true
when: repo_owner is defined and GIT_REPOS|length > 0
tags:
- install
- install:code
......@@ -33,7 +33,7 @@
fail:
msg: '{{ GIT_REPOS.PROTOCOL }} must be "https" or "ssh"'
when: (item.PROTOCOL != "https") and (item.PROTOCOL != "ssh") and GIT_REPOS is defined
with_items: GIT_REPOS
with_items: "{{ GIT_REPOS }}"
no_log: true
tags:
- install
......@@ -48,7 +48,7 @@
group: "{{ repo_group }}"
mode: "0600"
when: item.PROTOCOL == "ssh" and GIT_REPOS is defined
with_items: GIT_REPOS
with_items: "{{ GIT_REPOS }}"
no_log: true
tags:
- install
......@@ -64,7 +64,7 @@
become_user: "{{ repo_owner }}"
register: code_checkout
when: item.PROTOCOL == "ssh" and GIT_REPOS is defined
with_items: GIT_REPOS
with_items: "{{ GIT_REPOS }}"
no_log: true
tags:
- install
......@@ -78,7 +78,7 @@
become_user: "{{ repo_owner }}"
register: code_checkout
when: item.PROTOCOL == "https" and GIT_REPOS is defined
with_items: GIT_REPOS
with_items: "{{ GIT_REPOS }}"
no_log: true
tags:
- install
......@@ -89,7 +89,7 @@
dest: "{{ git_home }}/.ssh/{{ item.REPO }}"
state: absent
when: item.PROTOCOL == "ssh" and GIT_REPOS is defined
with_items: GIT_REPOS
with_items: "{{ GIT_REPOS }}"
no_log: true
tags:
- install
......
......@@ -15,9 +15,9 @@
#
#
- name: restart gitreload
supervisorctl: >
name=gitreload
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
supervisorctl:
name: gitreload
supervisorctl_path: "{{ supervisor_ctl }}"
config: "{{ supervisor_cfg }}"
state: restarted
when: not disable_edx_services
......@@ -3,32 +3,32 @@
- name: clone all course repos
git_2_0_1: dest={{ GITRELOAD_REPODIR }}/{{ item.name }} repo={{ item.url }} version={{ item.commit }}
become_user: "{{ common_web_user }}"
with_items: GITRELOAD_REPOS
with_items: "{{ GITRELOAD_REPOS }}"
- name: do import of courses
shell: >
executable=/bin/bash
chdir="{{ edxapp_code_dir }}"
SERVICE_VARIANT=lms {{ edxapp_venv_bin }}/python manage.py lms --settings=aws git_add_course {{ item.url }} {{ GITRELOAD_REPODIR }}/{{ item.name }}
shell: "SERVICE_VARIANT=lms {{ edxapp_venv_bin }}/python manage.py lms --settings=aws git_add_course {{ item.url }} {{ GITRELOAD_REPODIR }}/{{ item.name }}"
args:
executable: "/bin/bash"
chdir: "{{ edxapp_code_dir }}"
become_user: "{{ common_web_user }}"
with_items: GITRELOAD_REPOS
with_items: "{{ GITRELOAD_REPOS }}"
- name: change ownership on repos for access by edxapp and www-data
file: >
path={{ GITRELOAD_REPODIR }}
state=directory
owner={{ common_web_user }}
owner={{ common_web_group }}
recurse=yes
file:
path: "{{ GITRELOAD_REPODIR }}"
state: directory
owner: "{{ common_web_user }}"
owner: "{{ common_web_group }}"
recurse: yes
- name: change group on repos if using devstack
file: >
path={{ GITRELOAD_REPODIR }}
state=directory
group={{ edxapp_user }}
recurse=yes
file:
path: "{{ GITRELOAD_REPODIR }}"
state: directory
group: "{{ edxapp_user }}"
recurse: yes
when: devstack
- name: change mode on repos when using devstack
command: chmod -R o=rwX,g=srwX,o=rX {{ GITRELOAD_REPODIR }}
command: "chmod -R u=rwX,g=srwX,o=rX {{ GITRELOAD_REPODIR }}"
when: devstack
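# (in the symbolic mode above, "s" preserves the setgid bit so new files
# inherit the directory's group, and the capital "X" grants execute only on
# directories and files that are already executable)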
- name: create ssh dir for the content repos key
file: path=~/.ssh state=directory mode=0700
file:
path: "~/.ssh"
state: "directory"
mode: "0700"
become_user: "{{ common_web_user }}"
- name: install ssh key for the content repos
copy: content="{{ GITRELOAD_GIT_IDENTITY }}" dest=~/.ssh/id_rsa mode=0600
copy:
content: "{{ GITRELOAD_GIT_IDENTITY }}"
dest: "~/.ssh/id_rsa"
mode: "0600"
become_user: "{{ common_web_user }}"
- include: course_pull.yml
......@@ -11,35 +17,44 @@
tags: course_pull
- name: install gitreload
pip: >
name=git+{{ gitreload_repo }}@{{ gitreload_version }}#egg=gitreload
virtualenv={{ gitreload_venv }}
extra_args="--exists-action w"
pip:
name: "git+{{ gitreload_repo }}@{{ gitreload_version }}#egg=gitreload"
virtualenv: "{{ gitreload_venv }}"
extra_args: "--exists-action w"
become_user: "{{ gitreload_user }}"
notify: restart gitreload
- name: copy configuration
template: src=edx/app/gitreload/gr.env.json.j2 dest={{ gitreload_dir }}/gr.env.json
template:
src: "edx/app/gitreload/gr.env.json.j2"
dest: "{{ gitreload_dir }}/gr.env.json"
become_user: "{{ gitreload_user }}"
notify: restart gitreload
- name: "add gunicorn configuration file"
template: >
src=edx/app/gitreload/gitreload_gunicorn.py.j2 dest={{ gitreload_dir }}/gitreload_gunicorn.py
template:
src: "edx/app/gitreload/gitreload_gunicorn.py.j2"
dest: "{{ gitreload_dir }}/gitreload_gunicorn.py"
become_user: "{{ gitreload_user }}"
notify: restart gitreload
- name: "writing supervisor script"
template: >
src=edx/app/supervisor/conf.available.d/gitreload.conf.j2 dest={{ supervisor_available_dir }}/gitreload.conf
owner={{ supervisor_user }} group={{ common_web_user }} mode=0644
template:
src: "edx/app/supervisor/conf.available.d/gitreload.conf.j2"
dest: "{{ supervisor_available_dir }}/gitreload.conf"
owner: "{{ supervisor_user }}"
group: "{{ common_web_user }}"
mode: "0644"
- name: "enable supervisor script"
file: >
src={{ supervisor_available_dir }}/gitreload.conf
dest={{ supervisor_cfg_dir }}/gitreload.conf
owner={{ supervisor_user }} group={{ common_web_user }} mode=0644
state=link force=yes
file:
src: "{{ supervisor_available_dir }}/gitreload.conf"
dest: "{{ supervisor_cfg_dir }}/gitreload.conf"
owner: "{{ supervisor_user }}"
group: "{{ common_web_user }}"
mode: "0644"
state: link
force: "yes"
when: not disable_edx_services
# call supervisorctl update. this reloads
......@@ -54,9 +69,9 @@
when: not disable_edx_services
- name: ensure gitreload is started
supervisorctl: >
name=gitreload
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=started
supervisorctl:
name: gitreload
supervisorctl_path: "{{ supervisor_ctl }}"
config: "{{ supervisor_cfg }}"
state: started
when: not disable_edx_services
......@@ -38,45 +38,45 @@
- deploy
- name: create gitreload user
user: >
name="{{ gitreload_user }}"
home="{{ gitreload_dir }}"
createhome=no
shell=/bin/false
user:
name: "{{ gitreload_user }}"
home: "{{ gitreload_dir }}"
createhome: no
shell: /bin/false
- name: ensure home folder exists
file: >
path={{ gitreload_dir }}
state=directory
owner={{ gitreload_user }}
group={{ gitreload_user }}
file:
path: "{{ gitreload_dir }}"
state: directory
owner: "{{ gitreload_user }}"
group: "{{ gitreload_user }}"
- name: ensure repo dir exists
file: >
path={{ GITRELOAD_REPODIR }}
state=directory
owner={{ common_web_user }}
group={{ common_web_group }}
file:
path: "{{ GITRELOAD_REPODIR }}"
state: directory
owner: "{{ common_web_user }}"
group: "{{ common_web_group }}"
- name: grab ssh host keys
shell: ssh-keyscan {{ item }}
become_user: "{{ common_web_user }}"
with_items: GITRELOAD_HOSTS
with_items: "{{ GITRELOAD_HOSTS }}"
register: gitreload_repo_host_keys
- name: add host keys if needed to known_hosts
lineinfile: >
create=yes
dest=~/.ssh/known_hosts
line="{{ item.stdout }}"
lineinfile:
create: yes
dest: ~/.ssh/known_hosts
line: "{{ item.stdout }}"
become_user: "{{ common_web_user }}"
with_items: gitreload_repo_host_keys.results
with_items: "{{ gitreload_repo_host_keys.results }}"
- name: create a symlink for venv python
file: >
src="{{ gitreload_venv_bin }}/{{ item }}"
dest={{ COMMON_BIN_DIR }}/{{ item }}.gitreload
state=link
file:
src: "{{ gitreload_venv_bin }}/{{ item }}"
dest: "{{ COMMON_BIN_DIR }}/{{ item }}.gitreload"
state: "link"
with_items:
- python
- pip
......
......@@ -24,7 +24,7 @@
# Ignoring error below so that we can move the data folder and have it be a link
- name: all | create folders
file: path={{ item.path }} state=directory
with_items: gluster_volumes
with_items: "{{ gluster_volumes }}"
when: ansible_default_ipv4.address in gluster_peers | join(' ')
ignore_errors: yes
......@@ -32,52 +32,52 @@
- name: primary | create peers
command: gluster peer probe {{ item }}
with_items: gluster_peers
with_items: "{{ gluster_peers }}"
when: ansible_default_ipv4.address == gluster_primary_ip
tags: gluster
- name: primary | create volumes
command: gluster volume create {{ item.name }} replica {{ item.replicas }} transport tcp {% for server in gluster_peers %}{{ server }}:{{ item.path }} {% endfor %}
with_items: gluster_volumes
with_items: "{{ gluster_volumes }}"
when: ansible_default_ipv4.address == gluster_primary_ip
ignore_errors: yes # There should be better error checking here
tags: gluster
- name: primary | start volumes
command: gluster volume start {{ item.name }}
with_items: gluster_volumes
with_items: "{{ gluster_volumes }}"
when: ansible_default_ipv4.address == gluster_primary_ip
ignore_errors: yes # There should be better error checking here
tags: gluster
- name: primary | set security
command: gluster volume set {{ item.name }} auth.allow {{ item.security }}
with_items: gluster_volumes
with_items: "{{ gluster_volumes }}"
when: ansible_default_ipv4.address == gluster_primary_ip
tags: gluster
- name: primary | set performance cache
command: gluster volume set {{ item.name }} performance.cache-size {{ item.cache_size }}
with_items: gluster_volumes
with_items: "{{ gluster_volumes }}"
when: ansible_default_ipv4.address == gluster_primary_ip
tags: gluster
- name: all | mount volume
mount: >
name={{ item.mount_location }}
src={{ gluster_primary_ip }}:{{ item.name }}
fstype=glusterfs
state=mounted
opts=defaults,_netdev
with_items: gluster_volumes
mount:
name: "{{ item.mount_location }}"
src: "{{ gluster_primary_ip }}:{{ item.name }}"
fstype: glusterfs
state: mounted
opts: defaults,_netdev
with_items: "{{ gluster_volumes }}"
tags: gluster
# This is required due to an annoying bug in Ubuntu and gluster where it tries to mount the system
# before the network stack is up and can't look up 127.0.0.1
- name: all | sleep mount
lineinfile: >
dest=/etc/rc.local
line='sleep 5; /bin/mount -a'
regexp='sleep 5; /bin/mount -a'
insertbefore='exit 0'
lineinfile:
dest: /etc/rc.local
line: 'sleep 5; /bin/mount -a'
regexp: 'sleep 5; /bin/mount -a'
insertbefore: 'exit 0'
tags: gluster
......@@ -37,13 +37,13 @@
state: present
update_cache: true
cache_valid_time: 3600
with_items: GO_SERVER_BACKUP_APT_PKGS
with_items: "{{ GO_SERVER_BACKUP_APT_PKGS }}"
- name: install required python packages
pip:
name: "{{ item }}"
state: present
with_items: GO_SERVER_BACKUP_PIP_PKGS
with_items: "{{ GO_SERVER_BACKUP_PIP_PKGS }}"
- name: create the temp directory
file:
......
......@@ -52,7 +52,7 @@
state: present
update_cache: true
cache_valid_time: 3600
with_items: GO_SERVER_APT_PKGS
with_items: "{{ GO_SERVER_APT_PKGS }}"
- name: create go-server plugin directory
file:
......@@ -76,20 +76,17 @@
- { url: "{{ GO_SERVER_GITHUB_PR_PLUGIN_JAR_URL }}", md5: "{{ GO_SERVER_GITHUB_PR_PLUGIN_MD5 }}" }
- name: generate line for go-server password file for admin user
command: >
/usr/bin/htpasswd -nbs "{{ GO_SERVER_ADMIN_USERNAME }}" "{{ GO_SERVER_ADMIN_PASSWORD }}"
command: "/usr/bin/htpasswd -nbs \"{{ GO_SERVER_ADMIN_USERNAME }}\" \"{{ GO_SERVER_ADMIN_PASSWORD }}\""
register: admin_user_password_line
when: GO_SERVER_ADMIN_USERNAME and GO_SERVER_ADMIN_PASSWORD
- name: generate line for go-server password file for backup user
command: >
/usr/bin/htpasswd -nbs "{{ GO_SERVER_BACKUP_USERNAME }}" "{{ GO_SERVER_BACKUP_PASSWORD }}"
command: "/usr/bin/htpasswd -nbs \"{{ GO_SERVER_BACKUP_USERNAME }}\" \"{{ GO_SERVER_BACKUP_PASSWORD }}\""
register: backup_user_password_line
when: GO_SERVER_BACKUP_USERNAME and GO_SERVER_BACKUP_PASSWORD
- name: generate line for go-server password file for gomatic user
command: >
/usr/bin/htpasswd -nbs "{{ GO_SERVER_GOMATIC_USERNAME }}" "{{ GO_SERVER_GOMATIC_PASSWORD }}"
command: "/usr/bin/htpasswd -nbs \"{{ GO_SERVER_GOMATIC_USERNAME }}\" \"{{ GO_SERVER_GOMATIC_PASSWORD }}\""
register: gomatic_user_password_line
when: GO_SERVER_GOMATIC_USERNAME and GO_SERVER_GOMATIC_PASSWORD
......
......@@ -23,68 +23,84 @@
#
- name: install system packages
apt: >
pkg={{ item }}
state=present
with_items: hadoop_common_debian_pkgs
apt:
pkg: "{{ item }}"
state: present
with_items: "{{ hadoop_common_debian_pkgs }}"
- name: ensure group exists
group: name={{ hadoop_common_group }} system=yes state=present
group:
name: "{{ hadoop_common_group }}"
system: yes
state: present
- name: ensure user exists
user: >
name={{ hadoop_common_user }}
group={{ hadoop_common_group }}
home={{ HADOOP_COMMON_USER_HOME }} createhome=yes
shell=/bin/bash system=yes generate_ssh_key=yes
state=present
user:
name: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
home: "{{ HADOOP_COMMON_USER_HOME }}"
createhome: yes
shell: /bin/bash
system: yes
generate_ssh_key: yes
state: present
- name: own key authorized
file: >
src={{ HADOOP_COMMON_USER_HOME }}/.ssh/id_rsa.pub
dest={{ HADOOP_COMMON_USER_HOME }}/.ssh/authorized_keys
owner={{ hadoop_common_user }} group={{ hadoop_common_group }} state=link
file:
src: "{{ HADOOP_COMMON_USER_HOME }}/.ssh/id_rsa.pub"
dest: "{{ HADOOP_COMMON_USER_HOME }}/.ssh/authorized_keys"
owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
state: link
- name: ssh configured
template: >
src=hadoop_user_ssh_config.j2
dest={{ HADOOP_COMMON_USER_HOME }}/.ssh/config
mode=0600 owner={{ hadoop_common_user }} group={{ hadoop_common_group }}
template:
src: hadoop_user_ssh_config.j2
dest: "{{ HADOOP_COMMON_USER_HOME }}/.ssh/config"
mode: 0600
owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
- name: ensure user is in sudoers
lineinfile: >
dest=/etc/sudoers state=present
regexp='^%hadoop ALL\=' line='%hadoop ALL=(ALL) NOPASSWD:ALL'
validate='visudo -cf %s'
lineinfile:
dest: /etc/sudoers
state: present
regexp: '^%hadoop ALL\='
line: '%hadoop ALL=(ALL) NOPASSWD:ALL'
validate: 'visudo -cf %s'
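# (lineinfile writes the change to a temporary copy first and only installs it
# if "visudo -cf" accepts the syntax, so a typo here cannot break sudo for the
# whole machine)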
- name: check if downloaded and extracted
stat: path={{ HADOOP_COMMON_HOME }}
register: extracted_hadoop_dir
- name: distribution downloaded
get_url: >
url={{ hadoop_common_dist.url }}
sha256sum={{ hadoop_common_dist.sha256sum }}
dest={{ hadoop_common_temporary_dir }}
get_url:
url: "{{ hadoop_common_dist.url }}"
sha256sum: "{{ hadoop_common_dist.sha256sum }}"
dest: "{{ hadoop_common_temporary_dir }}"
when: not extracted_hadoop_dir.stat.exists
- name: distribution extracted
shell: >
chdir={{ HADOOP_COMMON_USER_HOME }}
tar -xzf {{ hadoop_common_temporary_dir }}/{{ hadoop_common_dist.filename }} && chown -R {{ hadoop_common_user }}:{{ hadoop_common_group }} hadoop-{{ HADOOP_COMMON_VERSION }}
shell: "tar -xzf {{ hadoop_common_temporary_dir }}/{{ hadoop_common_dist.filename }} && chown -R {{ hadoop_common_user }}:{{ hadoop_common_group }} hadoop-{{ HADOOP_COMMON_VERSION }}"
args:
chdir: "{{ HADOOP_COMMON_USER_HOME }}"
when: not extracted_hadoop_dir.stat.exists
- name: versioned directory symlink created
file: >
src={{ HADOOP_COMMON_USER_HOME }}/hadoop-{{ HADOOP_COMMON_VERSION }}
dest={{ HADOOP_COMMON_HOME }}
owner={{ hadoop_common_user }} group={{ hadoop_common_group }} state=link
file:
src: "{{ HADOOP_COMMON_USER_HOME }}/hadoop-{{ HADOOP_COMMON_VERSION }}"
dest: "{{ HADOOP_COMMON_HOME }}"
owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
state: link
- name: configuration installed
template: >
src={{ item }}.j2
dest={{ HADOOP_COMMON_CONF_DIR }}/{{ item }}
mode=0640 owner={{ hadoop_common_user }} group={{ hadoop_common_group }}
template:
src: "{{ item }}.j2"
dest: "{{ HADOOP_COMMON_CONF_DIR }}/{{ item }}"
mode: 0640
owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
with_items:
- hadoop-env.sh
- mapred-site.xml
......@@ -93,79 +109,84 @@
- yarn-site.xml
- name: upstart scripts installed
template: >
src={{ item }}.j2
dest=/etc/init/{{ item }}
mode=0640 owner=root group=root
template:
src: "{{ item }}.j2"
dest: "/etc/init/{{ item }}"
mode: 0640
owner: root
group: root
with_items:
- hdfs.conf
- yarn.conf
- name: hadoop env file exists
file: >
path={{ hadoop_common_env }} state=touch
owner={{ hadoop_common_user }} group={{ hadoop_common_group }}
file:
path: "{{ hadoop_common_env }}"
state: touch
owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
- name: env vars sourced in bashrc
lineinfile: >
dest={{ HADOOP_COMMON_USER_HOME }}/.bashrc
state=present
regexp="^. {{ hadoop_common_env }}"
line=". {{ hadoop_common_env }}"
insertbefore=BOF
lineinfile:
dest: "{{ HADOOP_COMMON_USER_HOME }}/.bashrc"
state: present
regexp: "^. {{ hadoop_common_env }}"
line: ". {{ hadoop_common_env }}"
insertbefore: BOF
- name: env vars sourced in hadoop env
lineinfile: >
dest={{ hadoop_common_env }} state=present
regexp="^. {{ HADOOP_COMMON_CONF_DIR }}/hadoop-env.sh" line=". {{ HADOOP_COMMON_CONF_DIR }}/hadoop-env.sh"
lineinfile:
dest: "{{ hadoop_common_env }}"
state: present
regexp: "^. {{ HADOOP_COMMON_CONF_DIR }}/hadoop-env.sh"
line: ". {{ HADOOP_COMMON_CONF_DIR }}/hadoop-env.sh"
- name: check if native libraries need to be built
stat: path={{ HADOOP_COMMON_USER_HOME }}/.native_libs_built
register: native_libs_built
- name: protobuf downloaded
get_url: >
url={{ hadoop_common_protobuf_dist.url }}
sha256sum={{ hadoop_common_protobuf_dist.sha256sum }}
dest={{ hadoop_common_temporary_dir }}
get_url:
url: "{{ hadoop_common_protobuf_dist.url }}"
sha256sum: "{{ hadoop_common_protobuf_dist.sha256sum }}"
dest: "{{ hadoop_common_temporary_dir }}"
when: not native_libs_built.stat.exists
- name: protobuf extracted
shell: >
chdir={{ hadoop_common_temporary_dir }}
tar -xzf {{ hadoop_common_protobuf_dist.filename }}
shell: "tar -xzf {{ hadoop_common_protobuf_dist.filename }}"
args:
chdir: "{{ hadoop_common_temporary_dir }}"
when: not native_libs_built.stat.exists
- name: protobuf installed
shell: >
chdir={{ hadoop_common_temporary_dir }}/protobuf-{{ HADOOP_COMMON_PROTOBUF_VERSION }}
./configure --prefix=/usr/local && make && make install
shell: "./configure --prefix=/usr/local && make && make install"
args:
chdir: "{{ hadoop_common_temporary_dir }}/protobuf-{{ HADOOP_COMMON_PROTOBUF_VERSION }}"
when: not native_libs_built.stat.exists
- name: native lib source downloaded
get_url: >
url={{ hadoop_common_native_dist.url }}
sha256sum={{ hadoop_common_native_dist.sha256sum }}
dest={{ hadoop_common_temporary_dir }}/{{ hadoop_common_native_dist.filename }}
get_url:
url: "{{ hadoop_common_native_dist.url }}"
sha256sum: "{{ hadoop_common_native_dist.sha256sum }}"
dest: "{{ hadoop_common_temporary_dir }}/{{ hadoop_common_native_dist.filename }}"
when: not native_libs_built.stat.exists
- name: native lib source extracted
shell: >
chdir={{ hadoop_common_temporary_dir }}
tar -xzf {{ hadoop_common_native_dist.filename }}
shell: "tar -xzf {{ hadoop_common_native_dist.filename }}"
args:
chdir: "{{ hadoop_common_temporary_dir }}"
when: not native_libs_built.stat.exists
- name: native lib built
shell: >
chdir={{ hadoop_common_temporary_dir }}/hadoop-common-release-{{ HADOOP_COMMON_VERSION }}/hadoop-common-project
mvn package -X -Pnative -DskipTests
shell: "mvn package -X -Pnative -DskipTests"
args:
chdir: "{{ hadoop_common_temporary_dir }}/hadoop-common-release-{{ HADOOP_COMMON_VERSION }}/hadoop-common-project"
environment:
LD_LIBRARY_PATH: /usr/local/lib
when: not native_libs_built.stat.exists
- name: old native libs renamed
shell: >
mv {{ HADOOP_COMMON_HOME }}/lib/native/{{ item.name }} {{ HADOOP_COMMON_HOME }}/lib/native/{{ item.new_name }}
shell: "mv {{ HADOOP_COMMON_HOME }}/lib/native/{{ item.name }} {{ HADOOP_COMMON_HOME }}/lib/native/{{ item.new_name }}"
with_items:
- { name: libhadoop.a, new_name: libhadoop32.a }
- { name: libhadoop.so, new_name: libhadoop32.so }
......@@ -173,9 +194,9 @@
when: not native_libs_built.stat.exists
- name: new native libs installed
shell: >
chdir={{ hadoop_common_temporary_dir }}/hadoop-common-release-{{ HADOOP_COMMON_VERSION }}/hadoop-common-project/hadoop-common/target/native/target/usr/local/lib
chown {{ hadoop_common_user }}:{{ hadoop_common_group }} {{ item }} && cp {{ item }} {{ HADOOP_COMMON_HOME }}/lib/native/{{ item }}
shell: "chown {{ hadoop_common_user }}:{{ hadoop_common_group }} {{ item }} && cp {{ item }} {{ HADOOP_COMMON_HOME }}/lib/native/{{ item }}"
args:
chdir: "{{ hadoop_common_temporary_dir }}/hadoop-common-release-{{ HADOOP_COMMON_VERSION }}/hadoop-common-project/hadoop-common/target/native/target/usr/local/lib"
with_items:
- libhadoop.a
- libhadoop.so
......@@ -183,13 +204,17 @@
when: not native_libs_built.stat.exists
- name: native lib marker touched
file: >
path={{ HADOOP_COMMON_USER_HOME }}/.native_libs_built
owner={{ hadoop_common_user }} group={{ hadoop_common_group }} state=touch
file:
path: "{{ HADOOP_COMMON_USER_HOME }}/.native_libs_built"
owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
state: touch
when: not native_libs_built.stat.exists
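# (this marker is what the "check if native libraries need to be built" stat
# task looks for, so the entire protobuf/native-lib build sequence is skipped
# on subsequent runs)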
- name: service directory exists
file: >
path={{ HADOOP_COMMON_SERVICES_DIR }}
mode=0750 owner={{ hadoop_common_user }} group={{ hadoop_common_group }}
state=directory
file:
path: "{{ HADOOP_COMMON_SERVICES_DIR }}"
mode: "0750"
owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
state: directory
......@@ -22,9 +22,11 @@
notify: restart haproxy
- name: Server configuration file
template: >
src={{ haproxy_template_dir }}/haproxy.cfg.j2 dest=/etc/haproxy/haproxy.cfg
owner=root group=root mode=0644
template:
src: "{{ haproxy_template_dir }}/haproxy.cfg.j2"
dest: /etc/haproxy/haproxy.cfg
owner: root
group: root
mode: 0644
notify: reload haproxy
- name: Enabled in default
......
---
# Installs the harprofiler
- name: create harprofiler user
user: >
name="{{ harprofiler_user }}"
createhome=no
home={{ harprofiler_dir }}
shell=/bin/bash
user:
name: "{{ harprofiler_user }}"
createhome: no
home: "{{ harprofiler_dir }}"
shell: /bin/bash
- name: create harprofiler repo
file: >
path={{ harprofiler_dir }} state=directory
owner="{{ harprofiler_user }}" group="{{ common_web_group }}"
mode=0755
file:
path: "{{ harprofiler_dir }}"
state: directory
owner: "{{ harprofiler_user }}"
group: "{{ common_web_group }}"
mode: 0755
- name: check out the harprofiler
git_2_0_1: >
dest={{ harprofiler_dir }}
repo={{ harprofiler_github_url }} version={{ harprofiler_version }}
accept_hostkey=yes
git_2_0_1:
dest: "{{ harprofiler_dir }}"
repo: "{{ harprofiler_github_url }}"
version: "{{ harprofiler_version }}"
accept_hostkey: yes
become_user: "{{ harprofiler_user }}"
- name: set bashrc for harprofiler user
template: >
src=bashrc.j2 dest="{{ harprofiler_dir }}/.bashrc" owner="{{ harprofiler_user }}"
mode=0755
template:
src: bashrc.j2
dest: "{{ harprofiler_dir }}/.bashrc"
owner: "{{ harprofiler_user }}"
mode: 0755
- name: install requirements
pip: >
requirements="{{ harprofiler_dir }}/requirements.txt" virtualenv="{{ harprofiler_venv_dir }}"
pip:
requirements: "{{ harprofiler_dir }}/requirements.txt"
virtualenv: "{{ harprofiler_venv_dir }}"
become_user: "{{ harprofiler_user }}"
- name: update config file
# harprofiler ships with a default config file. Do a line-replace for the default
# configuration value that does not match what this machine will have
lineinfile: >
dest={{ harprofiler_dir }}/config.yaml
regexp="browsermob_dir"
line="browsermob_dir: /usr/local"
state=present
lineinfile:
dest: "{{ harprofiler_dir }}/config.yaml"
regexp: "browsermob_dir"
line: "browsermob_dir: /usr/local"
state: present
- name: create validation shell script
template:
......@@ -47,8 +53,8 @@
mode: 0755
become_user: "{{ harprofiler_user }}"
- name: test install
shell: >
./{{ harprofiler_validation_script }} chdir={{ harprofiler_dir }}
shell: "./{{ harprofiler_validation_script }}"
args:
chdir: "{{ harprofiler_dir }}"
become_user: "{{ harprofiler_user }}"
......@@ -31,7 +31,7 @@
- install
- install:app-requirements
become_user: "{{ harstorage_user }}"
with_items: harstorage_python_pkgs
with_items: "{{ harstorage_python_pkgs }}"
- name: create directories
file:
......
......@@ -21,63 +21,71 @@
- name: check if downloaded and extracted
stat: path={{ HIVE_HOME }}
stat:
path: "{{ HIVE_HOME }}"
register: extracted_dir
- name: distribution downloaded
get_url: >
url={{ hive_dist.url }}
sha256sum={{ hive_dist.sha256sum }}
dest={{ hive_temporary_dir }}
get_url:
url: "{{ hive_dist.url }}"
sha256sum: "{{ hive_dist.sha256sum }}"
dest: "{{ hive_temporary_dir }}"
when: not extracted_dir.stat.exists
- name: distribution extracted
shell: >
chdir={{ HADOOP_COMMON_USER_HOME }}
tar -xzf {{ hive_temporary_dir }}/{{ hive_dist.filename }} && chown -R {{ hadoop_common_user }}:{{ hadoop_common_group }} hive-{{ HIVE_VERSION }}-bin
shell: "tar -xzf {{ hive_temporary_dir }}/{{ hive_dist.filename }} && chown -R {{ hadoop_common_user }}:{{ hadoop_common_group }} hive-{{ HIVE_VERSION }}-bin"
args:
chdir: "{{ HADOOP_COMMON_USER_HOME }}"
when: not extracted_dir.stat.exists
- name: versioned directory symlink created
file: >
src={{ HADOOP_COMMON_USER_HOME }}/hive-{{ HIVE_VERSION }}-bin
dest={{ HIVE_HOME }}
owner={{ hadoop_common_user }} group={{ hadoop_common_group }} state=link
file:
src: "{{ HADOOP_COMMON_USER_HOME }}/hive-{{ HIVE_VERSION }}-bin"
dest: "{{ HIVE_HOME }}"
owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
state: link
- name: hive mysql connector distribution downloaded
get_url: >
url={{ hive_mysql_connector_dist.url }}
sha256sum={{ hive_mysql_connector_dist.sha256sum }}
dest={{ hive_temporary_dir }}
get_url:
url: "{{ hive_mysql_connector_dist.url }}"
sha256sum: "{{ hive_mysql_connector_dist.sha256sum }}"
dest: "{{ hive_temporary_dir }}"
when: not extracted_dir.stat.exists
- name: hive mysql connector distribution extracted
shell: >
chdir={{ hive_temporary_dir }}
tar -xzf {{ hive_temporary_dir }}/{{ hive_mysql_connector_dist.filename }}
shell: "tar -xzf {{ hive_temporary_dir }}/{{ hive_mysql_connector_dist.filename }}"
args:
chdir: "{{ hive_temporary_dir }}"
when: not extracted_dir.stat.exists
- name: hive lib exists
file: >
path={{ HIVE_LIB }}
owner={{ hadoop_common_user }} group={{ hadoop_common_group }} state=directory
file:
path: "{{ HIVE_LIB }}"
owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
state: directory
- name: hive mysql connector installed
shell: >
chdir=/{{ hive_temporary_dir }}/mysql-connector-java-{{ HIVE_MYSQL_CONNECTOR_VERSION }}
cp mysql-connector-java-{{ HIVE_MYSQL_CONNECTOR_VERSION }}-bin.jar {{ HIVE_LIB }} &&
chown {{ hadoop_common_user }}:{{ hadoop_common_group }} {{ HIVE_LIB }}/mysql-connector-java-{{ HIVE_MYSQL_CONNECTOR_VERSION }}-bin.jar
shell: "cp mysql-connector-java-{{ HIVE_MYSQL_CONNECTOR_VERSION }}-bin.jar {{ HIVE_LIB }} && chown {{ hadoop_common_user }}:{{ hadoop_common_group }} {{ HIVE_LIB }}/mysql-connector-java-{{ HIVE_MYSQL_CONNECTOR_VERSION }}-bin.jar"
args:
chdir: "/{{ hive_temporary_dir }}/mysql-connector-java-{{ HIVE_MYSQL_CONNECTOR_VERSION }}"
when: not extracted_dir.stat.exists
- name: configuration installed
template: >
src={{ item }}.j2
dest={{ HIVE_CONF }}/{{ item }}
mode=0640 owner={{ hadoop_common_user }} group={{ hadoop_common_group }}
template:
src: "{{ item }}.j2"
dest: "{{ HIVE_CONF }}/{{ item }}"
mode: 0640
owner: "{{ hadoop_common_user }}"
group: "{{ hadoop_common_group }}"
with_items:
- hive-env.sh
- hive-site.xml
- name: env vars sourced in hadoop env
lineinfile: >
dest={{ hadoop_common_env }} state=present
regexp="^. {{ HIVE_CONF }}/hive-env.sh" line=". {{ HIVE_CONF }}/hive-env.sh"
lineinfile:
dest: "{{ hadoop_common_env }}"
state: present
regexp: "^. {{ HIVE_CONF }}/hive-env.sh"
line: ". {{ HIVE_CONF }}/hive-env.sh"
......@@ -10,43 +10,44 @@
#
#
# Tasks for role insights
#
#
# Overview:
#
#
#
# Dependencies:
#
#
#
# Example play:
#
#
- name: setup the insights env file
template: >
src="edx/app/insights/insights_env.j2"
dest="{{ insights_app_dir }}/insights_env"
owner={{ insights_user }}
group={{ insights_user }}
mode=0644
template:
src: "edx/app/insights/insights_env.j2"
dest: "{{ insights_app_dir }}/insights_env"
owner: "{{ insights_user }}"
group: "{{ insights_user }}"
mode: 0644
tags:
- install
- install:configuration
- name: install application requirements
pip: >
requirements="{{ insights_requirements_base }}/{{ item }}"
virtualenv="{{ insights_venv_dir }}"
state=present extra_args="--exists-action w"
pip:
requirements: "{{ insights_requirements_base }}/{{ item }}"
virtualenv: "{{ insights_venv_dir }}"
state: present
extra_args: "--exists-action w"
become_user: "{{ insights_user }}"
with_items: insights_requirements
with_items: "{{ insights_requirements }}"
tags:
- install
- install:app-requirements
- name: create nodeenv
shell: >
creates={{ insights_nodeenv_dir }}
{{ insights_venv_dir }}/bin/nodeenv {{ insights_nodeenv_dir }} --prebuilt
shell: "{{ insights_venv_dir }}/bin/nodeenv {{ insights_nodeenv_dir }} --prebuilt"
args:
creates: "{{ insights_nodeenv_dir }}"
become_user: "{{ insights_user }}"
tags:
- install
......@@ -61,21 +62,19 @@
environment: "{{ insights_environment }}"
- name: install bower dependencies
shell: >
chdir={{ insights_code_dir }}
. {{ insights_venv_dir }}/bin/activate &&
. {{ insights_nodeenv_bin }}/activate && {{ insights_node_bin }}/bower install --production --config.interactive=false
shell: ". {{ insights_venv_dir }}/bin/activate && . {{ insights_nodeenv_bin }}/activate && {{ insights_node_bin }}/bower install --production --config.interactive=false"
args:
chdir: "{{ insights_code_dir }}"
become_user: "{{ insights_user }}"
tags:
- install
- install:app-requirements
- name: migrate
shell: >
chdir={{ insights_code_dir }}
DB_MIGRATION_USER='{{ COMMON_MYSQL_MIGRATE_USER }}'
DB_MIGRATION_PASS='{{ COMMON_MYSQL_MIGRATE_PASS }}'
{{ insights_venv_dir }}/bin/python {{ insights_manage }} migrate --noinput
shell: "DB_MIGRATION_USER='{{ COMMON_MYSQL_MIGRATE_USER }}' DB_MIGRATION_PASS='{{ COMMON_MYSQL_MIGRATE_PASS }}' {{ insights_venv_dir }}/bin/python {{ insights_manage }} migrate --noinput"
args:
chdir: "{{ insights_code_dir }}"
become_user: "{{ insights_user }}"
environment: "{{ insights_environment }}"
when: migrate_db is defined and migrate_db|lower == "yes"
......@@ -84,18 +83,18 @@
- migrate:db
- name: run r.js optimizer
shell: >
chdir={{ insights_code_dir }}
. {{ insights_nodeenv_bin }}/activate && {{ insights_node_bin }}/r.js -o build.js
shell: ". {{ insights_nodeenv_bin }}/activate && {{ insights_node_bin }}/r.js -o build.js"
args:
chdir: "{{ insights_code_dir }}"
become_user: "{{ insights_user }}"
tags:
- assets
- assets:gather
- name: run collectstatic
shell: >
chdir={{ insights_code_dir }}
{{ insights_venv_dir }}/bin/python {{ insights_manage }} {{ item }}
shell: "{{ insights_venv_dir }}/bin/python {{ insights_manage }} {{ item }}"
args:
chdir: "{{ insights_code_dir }}"
become_user: "{{ insights_user }}"
environment: "{{ insights_environment }}"
with_items:
......@@ -106,38 +105,42 @@
- assets:gather
- name: compile translations
shell: >
chdir={{ insights_code_dir }}/analytics_dashboard
. {{ insights_venv_dir }}/bin/activate && i18n_tool generate -v
shell: ". {{ insights_venv_dir }}/bin/activate && i18n_tool generate -v"
args:
chdir: "{{ insights_code_dir }}/analytics_dashboard"
become_user: "{{ insights_user }}"
tags:
- assets
- assets:gather
- name: write out the supervisor wrapper
template: >
src=edx/app/insights/insights.sh.j2
dest={{ insights_app_dir }}/{{ insights_service_name }}.sh
mode=0650 owner={{ supervisor_user }} group={{ common_web_user }}
template:
src: "edx/app/insights/insights.sh.j2"
dest: "{{ insights_app_dir }}/{{ insights_service_name }}.sh"
mode: 0650
owner: "{{ supervisor_user }}"
group: "{{ common_web_user }}"
tags:
- install
- install:configuration
- name: write supervisord config
template: >
src=edx/app/supervisor/conf.d.available/insights.conf.j2
dest="{{ supervisor_available_dir }}/{{ insights_service_name }}.conf"
owner={{ supervisor_user }} group={{ common_web_user }} mode=0644
template:
src: edx/app/supervisor/conf.d.available/insights.conf.j2
dest: "{{ supervisor_available_dir }}/{{ insights_service_name }}.conf"
owner: "{{ supervisor_user }}"
group: "{{ common_web_user }}"
mode: 0644
tags:
- install
- install:configuration
- name: enable supervisor script
file: >
src={{ supervisor_available_dir }}/{{ insights_service_name }}.conf
dest={{ supervisor_cfg_dir }}/{{ insights_service_name }}.conf
state=link
force=yes
file:
src: "{{ supervisor_available_dir }}/{{ insights_service_name }}.conf"
dest: "{{ supervisor_cfg_dir }}/{{ insights_service_name }}.conf"
state: link
force: yes
when: not disable_edx_services
tags:
- install
......@@ -151,10 +154,10 @@
- manage:start
- name: create symlinks from the venv bin dir
file: >
src="{{ insights_venv_dir }}/bin/{{ item }}"
dest="{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.{{ insights_service_name }}"
state=link
file:
src: "{{ insights_venv_dir }}/bin/{{ item }}"
dest: "{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.{{ insights_service_name }}"
state: link
with_items:
- python
- pip
......@@ -164,20 +167,20 @@
- install:base
- name: create manage.py symlink
file: >
src="{{ insights_manage }}"
dest="{{ COMMON_BIN_DIR }}/manage.{{ insights_service_name }}"
state=link
file:
src: "{{ insights_manage }}"
dest: "{{ COMMON_BIN_DIR }}/manage.{{ insights_service_name }}"
state: link
tags:
- install
- install:base
- name: restart insights
supervisorctl: >
state=restarted
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
name={{ insights_service_name }}
supervisorctl:
state: restarted
supervisorctl_path: "{{ supervisor_ctl }}"
config: "{{ supervisor_cfg }}"
name: "{{ insights_service_name }}"
when: not disable_edx_services
become_user: "{{ supervisor_service_user }}"
tags:
......
......@@ -34,108 +34,125 @@
when: JENKINS_ADMIN_S3_PROFILE.secret_key is not defined
- name: add admin specific apt repositories
apt_repository: repo="{{ item }}" state=present update_cache=yes
with_items: jenkins_admin_debian_repos
apt_repository:
repo: "{{ item }}"
state: "present"
update_cache: "yes"
with_items: "{{ jenkins_admin_debian_repos }}"
- name: create the scripts directory
file: path={{ jenkins_admin_scripts_dir }} state=directory
owner={{ jenkins_user }} group={{ jenkins_group }} mode=755
file:
path: "{{ jenkins_admin_scripts_dir }}"
state: "directory"
owner: "{{ jenkins_user }}"
group: "{{ jenkins_group }}"
mode: 0755
- name: configure s3 plugin
template: >
src="./{{ jenkins_home }}/hudson.plugins.s3.S3BucketPublisher.xml.j2"
dest="{{ jenkins_home }}/hudson.plugins.s3.S3BucketPublisher.xml"
owner={{ jenkins_user }}
group={{ jenkins_group }}
mode=0644
template:
src: "./{{ jenkins_home }}/hudson.plugins.s3.S3BucketPublisher.xml.j2"
dest: "{{ jenkins_home }}/hudson.plugins.s3.S3BucketPublisher.xml"
owner: "{{ jenkins_user }}"
group: "{{ jenkins_group }}"
mode: 0644
- name: configure the boto profiles for jenkins
template: >
src="./{{ jenkins_home }}/boto.j2"
dest="{{ jenkins_home }}/.boto"
owner="{{ jenkins_user }}"
group="{{ jenkins_group }}"
mode="0600"
template:
src: "./{{ jenkins_home }}/boto.j2"
dest: "{{ jenkins_home }}/.boto"
owner: "{{ jenkins_user }}"
group: "{{ jenkins_group }}"
mode: 0600
tags:
- aws-config
- name: create the .aws directory
file: path={{ jenkins_home }}/.aws state=directory
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
file:
path: "{{ jenkins_home }}/.aws"
state: "directory"
owner: "{{ jenkins_user }}"
group: "{{ jenkins_group }}"
mode: 0700
tags:
- aws-config
- name: configure the awscli profiles for jenkins
template: >
src="./{{ jenkins_home }}/aws_config.j2"
dest="{{ jenkins_home }}/.aws/config"
owner="{{ jenkins_user }}"
group="{{ jenkins_group }}"
mode="0600"
template:
src: "./{{ jenkins_home }}/aws_config.j2"
dest: "{{ jenkins_home }}/.aws/config"
owner: "{{ jenkins_user }}"
group: "{{ jenkins_group }}"
mode: 0600
tags:
- aws-config
- name: create the ssh directory
file: >
path={{ jenkins_home }}/.ssh
owner={{ jenkins_user }}
group={{ jenkins_group }}
mode=0700
state=directory
file:
path: "{{ jenkins_home }}/.ssh"
owner: "{{ jenkins_user }}"
group: "{{ jenkins_group }}"
mode: 0700
state: directory
# Need to add Github to known_hosts to avoid
# being prompted when using git through ssh
- name: Add github.com to known_hosts if it does not exist
shell: >
ssh-keygen -f {{ jenkins_home }}/.ssh/known_hosts -H -F github.com | grep -q found || ssh-keyscan -H github.com > {{ jenkins_home }}/.ssh/known_hosts
shell: "ssh-keygen -f {{ jenkins_home }}/.ssh/known_hosts -H -F github.com | grep -q found || ssh-keyscan -H github.com > {{ jenkins_home }}/.ssh/known_hosts"
- name: create job directory
file: >
path="{{ jenkins_home }}/jobs"
owner="{{ jenkins_user }}"
group="{{ jenkins_group }}"
mode=0755
state=directory
file:
path: "{{ jenkins_home }}/jobs"
owner: "{{ jenkins_user }}"
group: "{{ jenkins_group }}"
mode: 0755
state: directory
- name: create admin job directories
file: >
path="{{ jenkins_home }}/jobs/{{ item }}"
owner={{ jenkins_user }}
group={{ jenkins_group }}
mode=0755
state=directory
file:
path: "{{ jenkins_home }}/jobs/{{ item }}"
owner: "{{ jenkins_user }}"
group: "{{ jenkins_group }}"
mode: 0755
state: directory
with_items: "{{ jenkins_admin_jobs }}"
- name: create admin job config files
template: >
src="./{{ jenkins_home }}/jobs/{{ item }}/config.xml.j2"
dest="{{ jenkins_home }}/jobs/{{ item }}/config.xml"
owner={{ jenkins_user }}
group={{ jenkins_group }}
mode=0644
template:
src: "./{{ jenkins_home }}/jobs/{{ item }}/config.xml.j2"
dest: "{{ jenkins_home }}/jobs/{{ item }}/config.xml"
owner: "{{ jenkins_user }}"
group: "{{ jenkins_group }}"
mode: 0644
with_items: "{{ jenkins_admin_jobs }}"
# adding chris-lea nodejs repo
- name: add ppas for current versions of nodejs
apt_repository: repo="{{ jenkins_chrislea_ppa }}"
apt_repository:
repo: "{{ jenkins_chrislea_ppa }}"
- name: install system packages for edxapp virtualenvs
apt: pkg={{','.join(jenkins_admin_debian_pkgs)}} state=present update_cache=yes
apt:
pkg: "{{ ','.join(jenkins_admin_debian_pkgs) }}"
state: "present"
update_cache: yes
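# (joining the list installs every package in a single apt transaction; the
# equivalent Jinja filter form would be "{{ jenkins_admin_debian_pkgs | join(',') }}")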
# This is necessary so that ansible can run with
# sudo set to True (as the jenkins user) on jenkins
- name: grant sudo access to the jenkins user
copy: >
content="{{ jenkins_user }} ALL=({{ jenkins_user }}) NOPASSWD:ALL"
dest=/etc/sudoers.d/99-jenkins owner=root group=root
mode=0440 validate='visudo -cf %s'
copy:
content: "{{ jenkins_user }} ALL=({{ jenkins_user }}) NOPASSWD:ALL"
dest: "/etc/sudoers.d/99-jenkins"
owner: "root"
group: "root"
mode: 0440
validate: "visudo -cf %s"
- name: install global gem dependencies
gem: >
name={{ item.name }}
state=present
version={{ item.version }}
user_install=no
gem:
name: "{{ item.name }}"
state: present
version: "{{ item.version }}"
user_install: no
with_items: "{{ jenkins_admin_gem_pkgs }}"
- name: get s3 one time url
......@@ -152,7 +169,7 @@
get_url:
url: "{{ s3_one_time_url.url }}"
dest: "/tmp/{{ JENKINS_ADMIN_BACKUP_S3_KEY | basename }}"
mode: "0644"
mode: 0644
owner: "{{ jenkins_user }}"
when: JENKINS_ADMIN_BACKUP_BUCKET is defined and JENKINS_ADMIN_BACKUP_S3_KEY is defined
......
......@@ -26,15 +26,14 @@
dest: "{{ jenkins_cli_jar }}"
- name: execute command
shell: >
{{ jenkins_command_prefix|default('') }} java -jar {{ jenkins_cli_jar }} -s http://localhost:{{ jenkins_port }}
{{ jenkins_auth_realm.cli_auth }}
{{ jenkins_command_string }}
shell: "{{ jenkins_command_prefix|default('') }} java -jar {{ jenkins_cli_jar }} -s http://localhost:{{ jenkins_port }} {{ jenkins_auth_realm.cli_auth }} {{ jenkins_command_string }}"
register: jenkins_command_output
ignore_errors: "{{ jenkins_ignore_cli_errors|default(False) }}"
- name: "clean up --- remove the credentials dir"
file: name=jenkins_cli_root state=absent
file:
name: "{{ jenkins_cli_root }}"
state: absent
- name: "clean up --- remove cached Jenkins credentials"
command: rm -rf $HOME/.jenkins
......@@ -3,7 +3,7 @@
- name: install jenkins analytics extra system packages
apt:
pkg={{ item }} state=present update_cache=yes
with_items: JENKINS_ANALYTICS_EXTRA_PKGS
with_items: "{{ JENKINS_ANALYTICS_EXTRA_PKGS }}"
tags:
- jenkins
......@@ -170,9 +170,9 @@
- jenkins-seed-job
- name: generate seed job xml
shell: >
cd {{ jenkins_seed_job_root }} &&
GRADLE_OPTS="-Dorg.gradle.daemon=true" ./gradlew run -Pargs={{ jenkins_seed_job_script }}
shell: "GRADLE_OPTS=\"-Dorg.gradle.daemon=true\" ./gradlew run -Pargs={{ jenkins_seed_job_script }}"
args:
chdir: "{{ jenkins_seed_job_root }}"
become: yes
become_user: "{{ jenkins_user }}"
tags:
......
......@@ -6,22 +6,20 @@
# refers to the --depth-setting of git clone. A value of 1
# will truncate all history prior to the last revision.
- name: Create shallow clone of edx-platform
git_2_0_1: >
repo=https://github.com/edx/edx-platform.git
dest={{ jenkins_home }}/shallow-clone
version={{ jenkins_edx_platform_version }}
depth=1
git_2_0_1:
repo: https://github.com/edx/edx-platform.git
dest: "{{ jenkins_home }}/shallow-clone"
version: "{{ jenkins_edx_platform_version }}"
depth: 1
become_user: "{{ jenkins_user }}"
# Install the platform requirements using pip.
- name: Install edx-platform requirements using pip
pip: >
requirements={{ jenkins_home }}/shallow-clone/requirements/edx/{{ item }}
extra_args="--exists-action=w"
virtualenv={{ jenkins_home }}/edx-venv
virtualenv_command=virtualenv
executable=pip
pip:
requirements: "{{ jenkins_home }}/shallow-clone/requirements/edx/{{ item }}"
extra_args: "--exists-action=w"
virtualenv: "{{ jenkins_home }}/edx-venv"
virtualenv_command: virtualenv
with_items:
- pre.txt
- github.txt
......@@ -39,12 +37,11 @@
become_user: "{{ jenkins_user }}"
- name: Install edx-platform post requirements using pip
pip: >
requirements={{ jenkins_home }}/shallow-clone/requirements/edx/{{ item }}
extra_args="--exists-action=w"
virtualenv={{ jenkins_home }}/edx-venv
virtualenv_command=virtualenv
executable=pip
pip:
requirements: "{{ jenkins_home }}/shallow-clone/requirements/edx/{{ item }}"
extra_args: "--exists-action=w"
virtualenv: "{{ jenkins_home }}/edx-venv"
virtualenv_command: virtualenv
with_items:
- post.txt
become_user: "{{ jenkins_user }}"
......@@ -55,9 +52,9 @@
# The edx-venv directory is deleted and then recreated
# cleanly from the archive by the jenkins build scripts.
- name: Create a clean virtualenv archive
command: >
tar -cpzf edx-venv_clean.tar.gz edx-venv
chdir={{ jenkins_home }}
command: "tar -cpzf edx-venv_clean.tar.gz edx-venv"
args:
chdir: "{{ jenkins_home }}"
become_user: "{{ jenkins_user }}"
# Remove the shallow-clone directory now that we are
......
......@@ -39,8 +39,7 @@
# Need to add Github to known_hosts to avoid
# being prompted when using git through ssh
- name: Add github.com to known_hosts if it does not exist
shell: >
ssh-keygen -f {{ jenkins_home }}/.ssh/known_hosts -H -F github.com | grep -q found || ssh-keyscan -H github.com > {{ jenkins_home }}/.ssh/known_hosts
shell: "ssh-keygen -f {{ jenkins_home }}/.ssh/known_hosts -H -F github.com | grep -q found || ssh-keyscan -H github.com > {{ jenkins_home }}/.ssh/known_hosts"
# Edit the /etc/hosts file so that the Preview button will work in Studio
- name: add preview.localhost to /etc/hosts
......
......@@ -12,28 +12,42 @@
- nginx
- name: Ensure {{ kibana_app_dir }} exists
file: path={{ kibana_app_dir }} state=directory owner=root group=root mode=0755
file:
path: "{{ kibana_app_dir }}"
state: directory
owner: root
group: root
mode: 0755
- name: Ensure subdirectories exist
file: path={{ kibana_app_dir }}/{{ item }} owner=root group=root mode=0755 state=directory
file:
path: "{{ kibana_app_dir }}/{{ item }}"
owner: root
group: root
mode: 0755
state: directory
with_items:
- htdocs
- share
- name: ensure we have the specified kibana release
get_url: url={{ kibana_url }} dest={{ kibana_app_dir }}/share/{{ kibana_file }}
get_url:
url: "{{ kibana_url }}"
dest: "{{ kibana_app_dir }}/share/{{ kibana_file }}"
- name: extract
shell: >
chdir={{ kibana_app_dir }}/share
tar -xzvf {{ kibana_app_dir }}/share/{{ kibana_file }}
creates={{ kibana_app_dir }}/share/{{ kibana_file|replace('.tar.gz','') }}
shell: "tar -xzvf {{ kibana_app_dir }}/share/{{ kibana_file }}"
args:
chdir: "{{ kibana_app_dir }}/share"
creates: "{{ kibana_app_dir }}/share/{{ kibana_file|replace('.tar.gz','') }}"
- name: install
shell: >
chdir={{ kibana_app_dir }}/share/{{ kibana_file|replace('.tar.gz','') }}
cp -R * {{ kibana_app_dir }}/htdocs/
shell: "cp -R * {{ kibana_app_dir }}/htdocs/"
args:
chdir: "{{ kibana_app_dir }}/share/{{ kibana_file|replace('.tar.gz','') }}"
- name: copy config
template: src=config.js.j2 dest={{ kibana_app_dir }}/htdocs/config.js
template:
src: config.js.j2
dest: "{{ kibana_app_dir }}/htdocs/config.js"
......@@ -8,7 +8,7 @@
module: ec2_lookup
region: "{{ region }}"
tags:
- Name: "{{ name_tag }}"
Name: "{{ name_tag }}"
register: tag_lookup
when: terminate_instance == true
......@@ -64,7 +64,7 @@
ttl: 300
record: "{{ dns_name }}.{{ dns_zone }}"
value: "{{ item.public_dns_name }}"
with_items: ec2.instances
with_items: "{{ ec2.instances }}"
- name: Add DNS names for services
local_action:
......@@ -77,7 +77,7 @@
record: "{{ item[1] }}-{{ dns_name }}.{{ dns_zone }}"
value: "{{ item[0].public_dns_name }}"
with_nested:
- ec2.instances
- "{{ ec2.instances }}"
- ['studio', 'ecommerce', 'preview', 'programs', 'discovery', 'credentials']
- name: Add new instance to host group
......@@ -85,7 +85,7 @@
module: add_host
hostname: "{{ item.public_ip }}"
groups: launched
with_items: ec2.instances
with_items: "{{ ec2.instances }}"
- name: Wait for SSH to come up
local_action:
......@@ -94,4 +94,4 @@
search_regex: OpenSSH
port: 22
delay: 10
with_items: ec2.instances
with_items: "{{ ec2.instances }}"
......@@ -8,63 +8,63 @@ localdev_xvfb_display: ":1"
localdev_accounts:
- {
user: "{{ edxapp_user|default('None') }}",
home: "{{ edxapp_app_dir }}",
home: "{{ edxapp_app_dir|default('None') }}",
env: "edxapp_env",
repo: "edx-platform"
}
- {
user: "{{ forum_user|default('None') }}",
home: "{{ forum_app_dir }}",
home: "{{ forum_app_dir|default('None') }}",
env: "forum_env",
repo: "cs_comments_service"
}
- {
user: "{{ notifier_user|default('None') }}",
home: "{{ notifier_app_dir }}",
home: "{{ notifier_app_dir|default('None') }}",
env: "notifier_env",
repo: ""
}
- {
user: "{{ ecommerce_user|default('None') }}",
home: "{{ ecommerce_home }}",
home: "{{ ecommerce_home|default('None') }}",
env: "ecommerce_env",
repo: "ecommerce"
}
- {
user: "{{ ecommerce_worker_user|default('None') }}",
home: "{{ ecommerce_worker_home }}",
home: "{{ ecommerce_worker_home|default('None') }}",
env: "ecommerce_worker_env",
repo: "ecommerce_worker"
}
- {
user: "{{ analytics_api_user|default('None') }}",
home: "{{ analytics_api_home }}",
home: "{{ analytics_api_home|default('None') }}",
env: "analytics_api_env",
repo: "analytics_api"
}
- {
user: "{{ insights_user|default('None') }}",
home: "{{ insights_home }}",
home: "{{ insights_home|default('None') }}",
env: "insights_env",
repo: "edx_analytics_dashboard"
}
- {
user: "{{ programs_user|default('None') }}",
home: "{{ programs_home }}",
home: "{{ programs_home|default('None') }}",
env: "programs_env",
repo: "programs"
}
- {
user: "{{ credentials_user|default('None') }}",
home: "{{ credentials_home }}",
home: "{{ credentials_home|default('None') }}",
env: "credentials_env",
repo: "credentials"
}
......
......@@ -36,7 +36,7 @@
state: "present"
update_cache: true
cache_valid_time: 3600
with_items: locust_debian_pkgs
with_items: "{{ locust_debian_pkgs }}"
- name: Install application requirements
pip:
......
......@@ -49,26 +49,26 @@
- name: Install python requirements
pip: name={{ item }} state=present
with_items: logstash_python_requirements
with_items: "{{ logstash_python_requirements }}"
- name: Checkout logstash rotation scripts
git: repo={{ logstash_scripts_repo }} dest={{ logstash_app_dir }}/share/logstash-elasticsearch-scripts
when: LOGSTASH_ROTATE|bool
- name: Setup cron to run rotation
cron: >
user=root
name="Elasticsearch logstash index rotation"
hour={{ logstash_rotate_cron.hour }}
minute={{ logstash_rotate_cron.minute }}
job="/usr/bin/python {{ logstash_app_dir }}/share/logstash-elasticsearch-scripts/logstash_index_cleaner.py -d {{ LOGSTASH_DAYS_TO_KEEP }} > {{ logstash_log_dir }}/rotation_cron"
cron:
user: root
name: "Elasticsearch logstash index rotation"
hour: "{{ logstash_rotate_cron.hour }}"
minute: "{{ logstash_rotate_cron.minute }}"
job: "/usr/bin/python {{ logstash_app_dir }}/share/logstash-elasticsearch-scripts/logstash_index_cleaner.py -d {{ LOGSTASH_DAYS_TO_KEEP }} > {{ logstash_log_dir }}/rotation_cron"
when: LOGSTASH_ROTATE|bool
- name: Setup cron to run optimization
cron: >
user=root
name="Elasticsearch logstash index optimization"
hour={{ logstash_optimize_cron.hour }}
minute={{ logstash_optimize_cron.minute }}
job="/usr/bin/python {{ logstash_app_dir }}/share/logstash-elasticsearch-scripts/logstash_index_optimize.py -d {{ LOGSTASH_DAYS_TO_KEEP }} > {{ logstash_log_dir }}/optimize_cron"
cron:
user: root
name: "Elasticsearch logstash index optimization"
hour: "{{ logstash_optimize_cron.hour }}"
minute: "{{ logstash_optimize_cron.minute }}"
job: "/usr/bin/python {{ logstash_app_dir }}/share/logstash-elasticsearch-scripts/logstash_index_optimize.py -d {{ LOGSTASH_DAYS_TO_KEEP }} > {{ logstash_log_dir }}/optimize_cron"
when: LOGSTASH_ROTATE|bool
- name: copy galera cluster config
template: >
src="etc/mysql/conf.d/galera.cnf.j2"
dest="/etc/mysql/conf.d/galera.cnf"
owner="root"
group="root"
mode=0600
template:
src: "etc/mysql/conf.d/galera.cnf.j2"
dest: "/etc/mysql/conf.d/galera.cnf"
owner: "root"
group: "root"
mode: 0600
- name: check if we have already bootstrapped the cluster
stat: path=/etc/mysql/ansible_cluster_started
......@@ -15,18 +15,18 @@
when: not mariadb_bootstrap.stat.exists
- name: setup bootstrap on primary
lineinfile: >
dest="/etc/mysql/conf.d/galera.cnf"
regexp="^wsrep_cluster_address=gcomm://{{ hostvars.keys()|sort|join(',') }}$"
line="wsrep_cluster_address=gcomm://"
lineinfile:
dest: "/etc/mysql/conf.d/galera.cnf"
regexp: "^wsrep_cluster_address=gcomm://{{ hostvars.keys()|sort|join(',') }}$"
line: "wsrep_cluster_address=gcomm://"
when: ansible_hostname == hostvars[hostvars.keys()[0]].ansible_hostname and not mariadb_bootstrap.stat.exists
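# After this edit the primary's galera.cnf carries an empty cluster address,
# which tells the first node to bootstrap a fresh cluster instead of joining
# one. Illustratively, the primary ends up with:
#   wsrep_cluster_address=gcomm://
# while the other nodes keep the full peer list, e.g.:
#   wsrep_cluster_address=gcomm://node1,node2,node3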
- name: fetch debian.cnf file so start-stop will work properly
fetch: >
src=/etc/mysql/debian.cnf
dest=/tmp/debian.cnf
fail_on_missing=yes
flat=yes
fetch:
src: /etc/mysql/debian.cnf
dest: /tmp/debian.cnf
fail_on_missing: yes
flat: yes
when: ansible_hostname == hostvars[hostvars.keys()[0]].ansible_hostname and not mariadb_bootstrap.stat.exists
register: mariadb_new_debian_cnf
......@@ -39,12 +39,12 @@
when: not mariadb_bootstrap.stat.exists
- name: reset galera cluster config since we are bootstrapped
template: >
src="etc/mysql/conf.d/galera.cnf.j2"
dest="/etc/mysql/conf.d/galera.cnf"
owner="root"
group="root"
mode=0600
template:
src: "etc/mysql/conf.d/galera.cnf.j2"
dest: "/etc/mysql/conf.d/galera.cnf"
owner: "root"
group: "root"
mode: 0600
when: not mariadb_bootstrap.stat.exists
- name: touch bootstrap file to confirm we are fully up
......@@ -53,6 +53,5 @@
# This is needed for mysql-check in haproxy or other mysql monitor
# scripts to prevent haproxy checks exceeding `max_connect_errors`.
- name: create haproxy monitor user
command: >
mysql -e "INSERT INTO mysql.user (Host,User) values ('{{ item }}','{{ MARIADB_HAPROXY_USER }}'); FLUSH PRIVILEGES;"
with_items: MARIADB_HAPROXY_HOSTS
command: "mysql -e \"INSERT INTO mysql.user (Host,User) values ('{{ item }}','{{ MARIADB_HAPROXY_USER }}'); FLUSH PRIVILEGES;\""
with_items: "{{ MARIADB_HAPROXY_HOSTS }}"
......@@ -23,31 +23,32 @@
- name: Install pre-req debian packages
apt: name={{ item }} state=present
with_items: mariadb_debian_pkgs
with_items: "{{ mariadb_debian_pkgs }}"
- name: Add mariadb apt key
apt_key: url="{{ COMMON_UBUNTU_APT_KEYSERVER }}{{ MARIADB_APT_KEY_ID }}"
apt_key:
url: "{{ COMMON_UBUNTU_APT_KEYSERVER }}{{ MARIADB_APT_KEY_ID }}"
- name: add the mariadb repo to the sources list
apt_repository: >
repo='{{ MARIADB_REPO }}'
state=present
apt_repository:
repo: "{{ MARIADB_REPO }}"
state: present
- name: install mariadb solo packages
apt: name={{ item }} update_cache=yes
with_items: mariadb_solo_packages
with_items: "{{ mariadb_solo_packages }}"
when: not MARIADB_CLUSTERED|bool
- name: install mariadb cluster packages
apt: name={{ item }} update_cache=yes
with_items: mariadb_cluster_packages
with_items: "{{ mariadb_cluster_packages }}"
when: MARIADB_CLUSTERED|bool
- name: remove bind-address
lineinfile: >
dest=/etc/mysql/my.cnf
regexp="^bind-address\s+=\s+127\.0\.0\.1$"
state=absent
lineinfile:
dest: /etc/mysql/my.cnf
regexp: '^bind-address\s+=\s+127\.0\.0\.1$'
state: absent
when: MARIADB_LISTEN_ALL|bool or MARIADB_CLUSTERED|bool
- include: cluster.yml
......@@ -57,37 +58,37 @@
service: name=mysql state=started
- name: create all databases
mysql_db: >
db={{ item }}
state=present
encoding=utf8
with_items: MARIADB_DATABASES
mysql_db:
db: "{{ item }}"
state: present
encoding: utf8
with_items: "{{ MARIADB_DATABASES }}"
when: MARIADB_CREATE_DBS|bool
- name: create all analytics dbs
mysql_db: >
db={{ item }}
state=present
encoding=utf8
with_items: MARIADB_ANALYTICS_DATABASES
mysql_db:
db: "{{ item }}"
state: present
encoding: utf8
with_items: "{{ MARIADB_ANALYTICS_DATABASES }}"
when: MARIADB_CREATE_DBS|bool and ANALYTICS_API_CONFIG is defined
- name: create all users/privs
mysql_user: >
name="{{ item.name }}"
password="{{ item.pass }}"
priv="{{ item.priv }}"
host="{{ item.host }}"
append_privs=yes
with_items: MARIADB_USERS
mysql_user:
name: "{{ item.name }}"
password: "{{ item.pass }}"
priv: "{{ item.priv }}"
host: "{{ item.host }}"
append_privs: yes
with_items: "{{ MARIADB_USERS }}"
when: MARIADB_CREATE_DBS|bool
- name: create all analytics users/privs
mysql_user: >
name="{{ item.name }}"
password="{{ item.pass }}"
priv="{{ item.priv }}"
host="{{ item.host }}"
append_privs=yes
with_items: MARIADB_ANALYTICS_USERS
mysql_user:
name: "{{ item.name }}"
password: "{{ item.pass }}"
priv: "{{ item.priv }}"
host: "{{ item.host }}"
append_privs: yes
with_items: "{{ MARIADB_ANALYTICS_USERS }}"
when: MARIADB_CREATE_DBS|bool and ANALYTICS_API_CONFIG is defined
......@@ -55,7 +55,7 @@
install_recommends: yes
force: yes
update_cache: yes
with_items: mongodb_debian_pkgs
with_items: "{{ mongodb_debian_pkgs }}"
tags:
- install
- install:app-requirements
......@@ -292,8 +292,6 @@
register: replset_status
when: MONGO_CLUSTERED
tags:
- configure_replica_set
tags:
- "manage"
- "manage:db"
- "configure_replica_set"
......@@ -314,8 +312,6 @@
run_once: true
when: MONGO_CLUSTERED
tags:
- configure_replica_set
tags:
- "manage"
- "manage:db"
......@@ -330,7 +326,7 @@
roles: "{{ item.roles }}"
state: present
replica_set: "{{ MONGO_REPL_SET }}"
with_items: MONGO_USERS
with_items: "{{ MONGO_USERS }}"
run_once: true
when: MONGO_CLUSTERED
tags:
......@@ -346,7 +342,7 @@
password: "{{ item.password }}"
roles: "{{ item.roles }}"
state: present
with_items: MONGO_USERS
with_items: "{{ MONGO_USERS }}"
when: not MONGO_CLUSTERED
tags:
- "manage"
......
......@@ -11,31 +11,30 @@
when: MMSAPIKEY is not defined
- name: download mongo mms agent
get_url: >
url="{{ base_url }}/{{ item.dir }}/{{ item.agent }}_{{ item.version }}_{{ pkg_arch }}.{{ pkg_format }}"
dest="/tmp/{{ item.agent }}-{{ item.version }}.{{ pkg_format }}"
get_url:
url: "{{ base_url }}/{{ item.dir }}/{{ item.agent }}_{{ item.version }}_{{ pkg_arch }}.{{ pkg_format }}"
dest: "/tmp/{{ item.agent }}-{{ item.version }}.{{ pkg_format }}"
register: download_mms_deb
with_items:
agents
with_items: "{{ agents }}"
- name: install mongo mms agent
apt: >
deb="/tmp/{{ item.agent }}-{{ item.version }}.deb"
apt:
deb: "/tmp/{{ item.agent }}-{{ item.version }}.deb"
when: download_mms_deb.changed
notify: restart mms
with_items: "{{ agents }}"
- name: add key to monitoring-agent.config
lineinfile: >
dest="{{ item.config }}"
regexp="^mmsApiKey="
line="mmsApiKey={{ MMSAPIKEY }}"
lineinfile:
dest: "{{ item.config }}"
regexp: "^mmsApiKey="
line: "mmsApiKey={{ MMSAPIKEY }}"
notify: restart mms
with_items:
agents
with_items: "{{ agents }}"
- name: start mms service
service: name="{{ item.agent }}" state=started
with_items:
agents
service:
name: "{{ item.agent }}"
state: started
with_items: "{{ agents }}"
......@@ -24,7 +24,7 @@
fstype: "{{ (ansible_mounts | selectattr('device', 'equalto', item.device) | first | default({'fstype': None})).fstype }}"
state: unmounted
when: "{{ UNMOUNT_DISKS and (ansible_mounts | selectattr('device', 'equalto', item.device) | first | default({'fstype': None})).fstype != item.fstype }}"
with_items: volumes
with_items: "{{ volumes }}"
# Noop & reports "ok" if fstype is correct
# Errors if fstype is wrong and disk is mounted (hence above task)
......@@ -34,7 +34,7 @@
fstype: "{{ item.fstype }}"
# Necessary because AWS gives some ephemeral disks the wrong fstype by default
force: true
with_items: volumes
with_items: "{{ volumes }}"
# This can fail if one volume is mounted on a child directory of another volume
# and it attempts to unmount the parent first. This is generally fixable by rerunning.
......@@ -49,21 +49,21 @@
src: "{{ item.device }}"
fstype: "{{ item.fstype }}"
state: unmounted
when: >
when:
UNMOUNT_DISKS and
volumes | selectattr('device', 'equalto', item.device) | list | length != 0 and
(volumes | selectattr('device', 'equalto', item.device) | first).mount != item.mount
with_items: ansible_mounts
with_items: "{{ ansible_mounts }}"
# If there are disks we want to unmount but can't because UNMOUNT_DISKS is false,
# that is an error condition, since proceeding could easily double-mount a disk.
- name: Check that we don't want to unmount disks when UNMOUNT_DISKS is false
fail: msg="Found disks mounted in the wrong place, but can't unmount them. This role will need to be re-run with -e 'UNMOUNT_DISKS=True' if you believe that is safe."
when: >
when:
not UNMOUNT_DISKS and
volumes | selectattr('device', 'equalto', item.device) | list | length != 0 and
(volumes | selectattr('device', 'equalto', item.device) | first).mount != item.mount
with_items: ansible_mounts
with_items: "{{ ansible_mounts }}"
- name: Mount disks
mount:
......@@ -72,4 +72,4 @@
state: mounted
fstype: "{{ item.fstype }}"
opts: "{{ item.options }}"
with_items: volumes
with_items: "{{ volumes }}"
......@@ -61,7 +61,7 @@
name: "{{ item }}"
install_recommends: yes
state: present
with_items: mysql_debian_pkgs
with_items: "{{ mysql_debian_pkgs }}"
- name: Start mysql
service:
......
......@@ -22,41 +22,37 @@
#
- name: Download newrelic NPI
get_url: >
dest="/tmp/{{ newrelic_npi_installer }}"
url="{{ NEWRELIC_NPI_URL }}"
get_url:
dest: "/tmp/{{ newrelic_npi_installer }}"
url: "{{ NEWRELIC_NPI_URL }}"
register: download_npi_installer
- name: create npi install directory {{ NEWRELIC_NPI_PREFIX }}
file: >
path="{{ NEWRELIC_NPI_PREFIX }}"
state=directory
mode=0755
owner="{{ NEWRELIC_USER }}"
file:
path: "{{ NEWRELIC_NPI_PREFIX }}"
state: directory
mode: 0755
owner: "{{ NEWRELIC_USER }}"
- name: install newrelic npi
shell: >
tar -xzf /tmp/{{ newrelic_npi_installer }} --strip-components=1 -C "{{NEWRELIC_NPI_PREFIX}}"
shell: "tar -xzf /tmp/{{ newrelic_npi_installer }} --strip-components=1 -C \"{{NEWRELIC_NPI_PREFIX}}\""
when: download_npi_installer.changed
become_user: "{{ NEWRELIC_USER }}"
- name: configure npi with the default user
shell: >
{{ NEWRELIC_NPI_PREFIX }}/bin/node {{ NEWRELIC_NPI_PREFIX }}/npi.js "set user {{ NEWRELIC_USER }}"
shell: "{{ NEWRELIC_NPI_PREFIX }}/bin/node {{ NEWRELIC_NPI_PREFIX }}/npi.js \"set user {{ NEWRELIC_USER }}\""
args:
chdir: "{{ NEWRELIC_NPI_PREFIX }}"
become_user: "{{ NEWRELIC_USER }}"
- name: configure npi with the license key
shell: >
./npi set license_key {{ NEWRELIC_LICENSE_KEY }}
shell: "./npi set license_key {{ NEWRELIC_LICENSE_KEY }}"
args:
chdir: "{{ NEWRELIC_NPI_PREFIX }}"
become_user: "{{ NEWRELIC_USER }}"
- name: configure npi with the distro
shell: >
./npi set distro {{ NEWRELIC_NPI_DISTRO }}
shell: "./npi set distro {{ NEWRELIC_NPI_DISTRO }}"
args:
chdir: "{{ NEWRELIC_NPI_PREFIX }}"
become_user: "{{ NEWRELIC_USER }}"
......
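The recurring change in these tasks is moving free-form arguments such as chdir out of the command string and into the args: dictionary, the Ansible 2.x convention for the shell and command modules. A minimal sketch with placeholder values:
- name: run a command from a specific directory (illustrative)
  shell: "ls -la"
  args:
    chdir: "/tmp"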
......@@ -5,23 +5,24 @@
- name: create the nltk data directory and subdirectories
file: path={{ NLTK_DATA_DIR }}/{{ item.path|dirname }} state=directory
with_items: NLTK_DATA
with_items: "{{ NLTK_DATA }}"
tags:
- deploy
- name: download nltk data
get_url: >
dest={{ NLTK_DATA_DIR }}/{{ item.url|basename }}
url={{ item.url }}
with_items: NLTK_DATA
get_url:
dest: "{{ NLTK_DATA_DIR }}/{{ item.url|basename }}"
url: "{{ item.url }}"
with_items: "{{ NLTK_DATA }}"
register: nltk_download
tags:
- deploy
- name: unarchive nltk data
shell: >
unzip {{ NLTK_DATA_DIR }}/{{ item.url|basename }} chdir="{{ NLTK_DATA_DIR }}/{{ item.path|dirname }}"
with_items: NLTK_DATA
shell: "unzip {{ NLTK_DATA_DIR }}/{{ item.url|basename }}"
args:
chdir: "{{ NLTK_DATA_DIR }}/{{ item.path|dirname }}"
with_items: "{{ NLTK_DATA }}"
when: nltk_download|changed
tags:
- deploy
......@@ -19,38 +19,38 @@ oauth_client_setup_role_name: oauth_client_setup
oauth_client_setup_oauth2_clients:
- {
name: "{{ ecommerce_service_name | default('None') }}",
url_root: "{{ ECOMMERCE_ECOMMERCE_URL_ROOT }}",
id: "{{ ECOMMERCE_SOCIAL_AUTH_EDX_OIDC_KEY }}",
secret: "{{ ECOMMERCE_SOCIAL_AUTH_EDX_OIDC_SECRET }}",
logout_uri: "{{ ECOMMERCE_LOGOUT_URL }}"
url_root: "{{ ECOMMERCE_ECOMMERCE_URL_ROOT | default('None') }}",
id: "{{ ECOMMERCE_SOCIAL_AUTH_EDX_OIDC_KEY | default('None') }}",
secret: "{{ ECOMMERCE_SOCIAL_AUTH_EDX_OIDC_SECRET | default('None') }}",
logout_uri: "{{ ECOMMERCE_LOGOUT_URL | default('None') }}"
}
- {
name: "{{ INSIGHTS_OAUTH2_APP_CLIENT_NAME | default('None') }}",
url_root: "{{ INSIGHTS_BASE_URL }}",
id: "{{ INSIGHTS_OAUTH2_KEY }}",
secret: "{{ INSIGHTS_OAUTH2_SECRET }}",
logout_uri: "{{ INSIGHTS_LOGOUT_URL }}"
url_root: "{{ INSIGHTS_BASE_URL | default('None') }}",
id: "{{ INSIGHTS_OAUTH2_KEY | default('None') }}",
secret: "{{ INSIGHTS_OAUTH2_SECRET | default('None') }}",
logout_uri: "{{ INSIGHTS_LOGOUT_URL | default('None') }}"
}
- {
name: "{{ programs_service_name | default('None') }}",
url_root: "{{ PROGRAMS_URL_ROOT }}",
id: "{{ PROGRAMS_SOCIAL_AUTH_EDX_OIDC_KEY }}",
secret: "{{ PROGRAMS_SOCIAL_AUTH_EDX_OIDC_SECRET }}",
logout_uri: "{{ PROGRAMS_LOGOUT_URL }}"
url_root: "{{ PROGRAMS_URL_ROOT | default('None') }}",
id: "{{ PROGRAMS_SOCIAL_AUTH_EDX_OIDC_KEY | default('None') }}",
secret: "{{ PROGRAMS_SOCIAL_AUTH_EDX_OIDC_SECRET | default('None') }}",
logout_uri: "{{ PROGRAMS_LOGOUT_URL | default('None') }}"
}
- {
name: "{{ credentials_service_name | default('None') }}",
url_root: "{{ CREDENTIALS_URL_ROOT }}",
id: "{{ CREDENTIALS_SOCIAL_AUTH_EDX_OIDC_KEY }}",
secret: "{{ CREDENTIALS_SOCIAL_AUTH_EDX_OIDC_SECRET }}",
logout_uri: "{{ CREDENTIALS_LOGOUT_URL }}"
url_root: "{{ CREDENTIALS_URL_ROOT | default('None') }}",
id: "{{ CREDENTIALS_SOCIAL_AUTH_EDX_OIDC_KEY | default('None') }}",
secret: "{{ CREDENTIALS_SOCIAL_AUTH_EDX_OIDC_SECRET | default('None') }}",
logout_uri: "{{ CREDENTIALS_LOGOUT_URL | default('None') }}"
}
- {
name: "{{ discovery_service_name | default('None') }}",
url_root: "{{ DISCOVERY_URL_ROOT }}",
id: "{{ DISCOVERY_SOCIAL_AUTH_EDX_OIDC_KEY }}",
secret: "{{ DISCOVERY_SOCIAL_AUTH_EDX_OIDC_SECRET }}",
logout_uri: "{{ DISCOVERY_LOGOUT_URL }}"
url_root: "{{ DISCOVERY_URL_ROOT | default('None') }}",
id: "{{ DISCOVERY_SOCIAL_AUTH_EDX_OIDC_KEY | default('None') }}",
secret: "{{ DISCOVERY_SOCIAL_AUTH_EDX_OIDC_SECRET | default('None') }}",
logout_uri: "{{ DISCOVERY_LOGOUT_URL | default('None') }}"
}
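With every field defaulted, a deployment that omits a service's variables renders 'None' instead of failing on an undefined variable, and the when: item.name != 'None' guard later in the role skips that client. An illustrative entry with hypothetical variable names:
- {
    name: "{{ example_service_name | default('None') }}",
    url_root: "{{ EXAMPLE_URL_ROOT | default('None') }}",
    id: "{{ EXAMPLE_OIDC_KEY | default('None') }}",
    secret: "{{ EXAMPLE_OIDC_SECRET | default('None') }}",
    logout_uri: "{{ EXAMPLE_LOGOUT_URL | default('None') }}"
  }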
#
......
......@@ -35,5 +35,5 @@
--logout_uri {{ item.logout_uri | default("") }}
become_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}"
with_items: oauth_client_setup_oauth2_clients
with_items: "{{ oauth_client_setup_oauth2_clients }}"
when: item.name != 'None'
......@@ -55,9 +55,9 @@
# Need to use command rather than pip so that we can maintain the context of our current working directory;
# some requirements use paths relative to the edx-platform repo.
# Using the pip from inside the virtual environment implicitly installs everything into that virtual environment.
command: >
{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ openstack_requirements_file }}
chdir={{ edxapp_code_dir }}
command: "{{ edxapp_venv_dir }}/bin/pip install {{ COMMON_PIP_VERBOSITY }} -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w -r {{ openstack_requirements_file }}"
args:
chdir: "{{ edxapp_code_dir }}"
sudo_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}"
when: edxapp_code_dir is defined
......
......@@ -21,15 +21,20 @@ PROGRAMS_SSL_NGINX_PORT: 48140
PROGRAMS_DEFAULT_DB_NAME: 'programs'
PROGRAMS_DATABASE_USER: 'programs001'
PROGRAMS_DATABASE_PASSWORD: 'password'
PROGRAMS_DATABASE_HOST: 'localhost'
PROGRAMS_DATABASE_PORT: 3306
PROGRAMS_DATABASES:
# rw user
default:
ENGINE: 'django.db.backends.mysql'
NAME: '{{ PROGRAMS_DEFAULT_DB_NAME }}'
USER: 'programs001'
PASSWORD: 'password'
HOST: 'localhost'
PORT: '3306'
USER: '{{ PROGRAMS_DATABASE_USER }}'
PASSWORD: '{{ PROGRAMS_DATABASE_PASSWORD }}'
HOST: '{{ PROGRAMS_DATABASE_HOST }}'
PORT: '{{ PROGRAMS_DATABASE_PORT }}'
ATOMIC_REQUESTS: true
CONN_MAX_AGE: 60
......
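Factoring the connection settings into PROGRAMS_DATABASE_* variables lets a deployment override just those values instead of redefining the whole PROGRAMS_DATABASES dict; an illustrative override, with all values hypothetical:
# e.g. in an environment's extra vars (values illustrative)
PROGRAMS_DATABASE_USER: 'programs_prod'
PROGRAMS_DATABASE_PASSWORD: '{{ vault_programs_db_password }}'  # hypothetical vaulted variable
PROGRAMS_DATABASE_HOST: 'programs-db.example.com'
PROGRAMS_DATABASE_PORT: 3306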
......@@ -56,9 +56,9 @@
- migrate:db
- name: run collectstatic
shell: >
chdir={{ programs_code_dir }}
{{ programs_venv_dir }}/bin/python manage.py collectstatic --noinput
shell: "{{ programs_venv_dir }}/bin/python manage.py collectstatic --noinput"
args:
chdir: "{{ programs_code_dir }}"
become_user: "{{ programs_user }}"
environment: "{{ programs_environment }}"
when: not devstack
......@@ -68,9 +68,12 @@
# NOTE: this isn't used or needed when S3 is used for PROGRAMS_MEDIA_STORAGE_BACKEND
- name: create programs media dir
file: >
path="{{ item }}" state=directory mode=0775
owner="{{ programs_user }}" group="{{ common_web_group }}"
file:
path: "{{ item }}"
state: directory
mode: 0775
owner: "{{ programs_user }}"
group: "{{ common_web_group }}"
with_items:
- "{{ PROGRAMS_MEDIA_ROOT }}"
tags:
......
......@@ -171,8 +171,7 @@
- maintenance
- name: Make queues mirrored
shell: >
/usr/sbin/rabbitmqctl -p {{ item }} set_policy HA "" '{"ha-mode":"all","ha-sync-mode":"automatic"}'
shell: "/usr/sbin/rabbitmqctl -p {{ item }} set_policy HA \"\" '{\"ha-mode\":\"all\",\"ha-sync-mode\":\"automatic\"}'"
when: RABBITMQ_CLUSTERED_HOSTS|length > 1
with_items: "{{ RABBITMQ_VHOSTS }}"
tags:
......
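A hedged companion task for confirming that the mirroring policy took effect on each vhost (not part of this PR):
- name: list HA policies per vhost (illustrative)
  shell: "/usr/sbin/rabbitmqctl -p {{ item }} list_policies"
  register: rabbitmq_policies
  changed_when: no
  with_items: "{{ RABBITMQ_VHOSTS }}"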
......@@ -38,42 +38,47 @@
when: rbenv_ruby_version is not defined
- name: create rbenv user {{ rbenv_user }}
user: >
name={{ rbenv_user }} home={{ rbenv_dir }}
shell=/bin/false createhome=no
user:
name: "{{ rbenv_user }}"
home: "{{ rbenv_dir }}"
shell: /bin/false
createhome: no
when: rbenv_user != common_web_user
tags:
- install
- install:base
- name: create rbenv dir if it does not exist
file: >
path="{{ rbenv_dir }}" owner="{{ rbenv_user }}"
state=directory
file:
path: "{{ rbenv_dir }}"
owner: "{{ rbenv_user }}"
state: directory
tags:
- install
- install:base
- name: install build depends
apt: pkg={{ ",".join(rbenv_debian_pkgs) }} update_cache=yes state=present install_recommends=no
with_items: rbenv_debian_pkgs
with_items: "{{ rbenv_debian_pkgs }}"
tags:
- install
- install:base
- name: update rbenv repo
git_2_0_1: >
repo=https://github.com/sstephenson/rbenv.git
dest={{ rbenv_dir }}/.rbenv version={{ rbenv_version }}
accept_hostkey=yes
git_2_0_1:
repo: https://github.com/sstephenson/rbenv.git
dest: "{{ rbenv_dir }}/.rbenv"
version: "{{ rbenv_version }}"
accept_hostkey: yes
become_user: "{{ rbenv_user }}"
tags:
- install
- install:base
- name: ensure ruby_env exists
template: >
src=ruby_env.j2 dest={{ rbenv_dir }}/ruby_env
template:
src: ruby_env.j2
dest: "{{ rbenv_dir }}/ruby_env"
become_user: "{{ rbenv_user }}"
tags:
- install
......@@ -107,9 +112,10 @@
- install:base
- name: clone ruby-build repo
git: >
repo=https://github.com/sstephenson/ruby-build.git dest={{ tempdir.stdout }}/ruby-build
accept_hostkey=yes
git:
repo: https://github.com/sstephenson/ruby-build.git
dest: "{{ tempdir.stdout }}/ruby-build"
accept_hostkey: yes
when: tempdir.stdout is defined and (rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers))
become_user: "{{ rbenv_user }}"
tags:
......
......@@ -29,13 +29,13 @@
# file:
# path={{ item.mount_point }} owner={{ item.owner }}
# group={{ item.group }} mode={{ item.mode }} state="directory"
# with_items: my_role_s3fs_mounts
# with_items: "{{ my_role_s3fs_mounts }}"
#
# - name: mount s3 buckets
# mount:
# name={{ item.mount_point }} src={{ item.bucket }} fstype=fuse.s3fs
# opts=use_cache=/tmp,iam_role={{ task_iam_role }},allow_other state=mounted
# with_items: myrole_s3fs_mounts
# with_items: "{{ myrole_s3fs_mounts }}"
#
# Example play:
#
......
......@@ -15,12 +15,12 @@
file: path=/etc/shibboleth/metadata state=directory mode=2774 group=_shibd owner=_shibd
- name: Downloads metadata into metadata directory as backup
get_url: >
url={{ shib_metadata_backup_url }}
dest=/etc/shibboleth/metadata/idp-metadata.xml
mode=0640
group=_shibd
owner=_shibd
get_url:
url: "{{ shib_metadata_backup_url }}"
dest: "/etc/shibboleth/metadata/idp-metadata.xml"
mode: 0640
group: _shibd
owner: _shibd
when: shib_download_metadata
- name: writes out key and pem file
......
......@@ -9,39 +9,51 @@
- oinkmaster
- name: configure snort
template: >
src=etc/snort/snort.conf.j2 dest=/etc/snort/snort.conf
owner=root group=root mode=0644
template:
src: etc/snort/snort.conf.j2
dest: /etc/snort/snort.conf
owner: root
group: root
mode: 0644
- name: configure snort (debian)
template: >
src=etc/snort/snort.debian.conf.j2 dest=/etc/snort/snort.debian.conf
owner=root group=root mode=0644
template:
src: etc/snort/snort.debian.conf.j2
dest: /etc/snort/snort.debian.conf
owner: root
group: root
mode: 0644
- name: configure oinkmaster
template: >
src=etc/oinkmaster.conf.j2 dest=/etc/oinkmaster.conf
owner=root group=root mode=0644
template:
src: etc/oinkmaster.conf.j2
dest: /etc/oinkmaster.conf
owner: root
group: root
mode: 0644
- name: update snort
shell: oinkmaster -C /etc/oinkmaster.conf -o /etc/snort/rules/
become: yes
- name: snort service
service: >
name="snort"
state="started"
service:
name: "snort"
state: "started"
- name: open read permissions on snort logs
file: >
name="/var/log/snort"
state="directory"
mode="755"
file:
name: "/var/log/snort"
state: "directory"
mode: "755"
- name: install oinkmaster cronjob
template: >
src=etc/cron.daily/oinkmaster.j2 dest=/etc/cron.daily/oinkmaster
owner=root group=root mode=0755
template:
src: etc/cron.daily/oinkmaster.j2
dest: /etc/cron.daily/oinkmaster
owner: root
group: root
mode: 0755
......@@ -25,7 +25,7 @@
fail:
msg: Please define either "source" or "sourcetype", not both or neither
when: ('source' in item and 'sourcetype' in item) or ('source' not in item and 'sourcetype' not in item)
with_items: SPLUNK_FIELD_EXTRACTIONS
with_items: "{{ SPLUNK_FIELD_EXTRACTIONS }}"
- name: Make sure necessary dirs exist
file:
......@@ -144,7 +144,7 @@
owner: "{{ splunk_user }}"
group: "{{ splunk_user }}"
mode: 0700
with_items: SPLUNK_DASHBOARDS
with_items: "{{ SPLUNK_DASHBOARDS }}"
tags:
- install
- install:configuration
......
......@@ -116,7 +116,7 @@
group: splunk
mode: "0400"
when: "{{ item.ssl_cert is defined }}"
with_items: SPLUNKFORWARDER_SERVERS
with_items: "{{ SPLUNKFORWARDER_SERVERS }}"
- name: Write root CA to disk
copy:
......@@ -126,7 +126,7 @@
group: splunk
mode: "0400"
when: "{{ item.ssl_cert is defined }}"
with_items: SPLUNKFORWARDER_SERVERS
with_items: "{{ SPLUNKFORWARDER_SERVERS }}"
- name: Create inputs and outputs configuration
template:
......
......@@ -3,7 +3,7 @@ description "Tasks before supervisord"
start on runlevel [2345]
task
setuid {{ supervisor_user }}
setuid {{ common_web_user }}
{% if programs_code_dir is defined %}
{% set programs_command = "--programs-env " + programs_home + "/programs_env --programs-code-dir " + programs_code_dir + " --programs-python " + COMMON_BIN_DIR + "/python.programs" %}
......
---
tanaguru_debian_pkgs:
- openjdk-7-jre
- unzip
- libmysql-java
- python-mysqldb
- tomcat7
- libspring-instrument-java
- xvfb
- mailutils
- postfix
tanaguru_download_link: "http://download.tanaguru.org/Tanaguru/tanaguru-3.1.0.i386.tar.gz"
# Go to this link to find your desired ESR Firefox
# http://download-origin.cdn.mozilla.net/pub/firefox/releases/24.0esr/linux-x86_64/
# Default is en-US in our example
firefox_esr_link: "http://download-origin.cdn.mozilla.net/pub/firefox/releases/24.0esr/linux-x86_64/en-US/firefox-24.0esr.tar.bz2"
TANAGURU_DATABASE_NAME: 'tgdatabase'
TANAGURU_DATABASE_USER: 'tguser'
TANAGURU_DATABASE_PASSWORD: 'tgPassword'
TANAGURU_URL: 'http://localhost:8080/tanaguru/'
TANAGURU_ADMIN_EMAIL: 'admin@example.com'
TANAGURU_ADMIN_PASSWORD: 'tanaguru15'
tanaguru_parameters:
db_name: "{{ TANAGURU_DATABASE_NAME }}"
db_user: "{{ TANAGURU_DATABASE_USER }}"
db_password: "{{ TANAGURU_DATABASE_PASSWORD }}"
url: "{{ TANAGURU_URL }}"
admin_email: "{{ TANAGURU_ADMIN_EMAIL }}"
admin_passwd: "{{ TANAGURU_ADMIN_PASSWORD }}"
\ No newline at end of file
---
- name: Add the Partner repository
apt_repository:
repo: "{{ item }}"
state: present
with_items:
- "deb http://archive.canonical.com/ubuntu {{ ansible_distribution_release }} partner"
- "deb-src http://archive.canonical.com/ubuntu {{ ansible_distribution_release }} partner"
tags:
- install
- install:base
- name: Set Postfix options
debconf:
name: postfix
question: "{{ item.question }}"
value: "{{ item.value }} "
vtype: "string"
with_items:
- { question: "postfix/mailname", value: " " }
- { question: "postfix/main_mailer_type", value: "Satellite system" }
tags:
- install
- install:configuration
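A hedged follow-up task for inspecting what was preseeded (hypothetical, not part of this role):
- name: show the postfix debconf selections (illustrative)
  command: debconf-show postfix
  register: postfix_selections
  changed_when: no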
- name: Install the TanaGuru Prerequisites
apt:
name: "{{ item }}"
update_cache: yes
state: installed
with_items: "{{ tanaguru_debian_pkgs }}"
tags:
- install
- install:base
- name: Modify the my.cnf file for max_allowed_packet option
lineinfile:
dest: /etc/mysql/my.cnf
regexp: '^max_allowed_packet'
line: 'max_allowed_packet = 64M'
state: present
register: my_cnf
tags:
- install
- install:configuration
- name: Restart MySQL
service:
name: mysql
state: restarted
when: my_cnf.changed
- name: Create a soft link for tomcat jar and mysql connector
file:
dest: "{{ item.dest }}"
src: "{{ item.src }}"
state: link
with_items:
- { src: '/usr/share/java/spring3-instrument-tomcat.jar', dest: '/usr/share/tomcat7/lib/spring3-instrument-tomcat.jar' }
- { src: '/usr/share/java/mysql-connector-java.jar', dest: '/usr/share/tomcat7/lib/mysql-connector-java.jar'}
tags:
- install
- install:configuration
- name: Copy the xvfb template to /etc/init.d
template:
dest: /etc/init.d/xvfb
src: xvfb.j2
owner: root
group: root
mode: 0755
register: xvfb
tags:
- install
- install:configuration
- name: Restart xvfb
service:
name: xvfb
pattern: /etc/init.d/xvfb
state: restarted
when: xvfb.changed
- name: Configure xvfb to run at startup
command: update-rc.d xvfb defaults
ignore_errors: yes
when: xvfb.changed
- name: Download the latest ESR Firefox
get_url:
url: "{{ fixfox_esr_link }}"
dest: "/tmp/{{ fixfox_esr_link | basename }}"
tags:
- install
- install:base
- name: Unarchive the downloaded Firefox tarball
unarchive:
src: "/tmp/{{ fixfox_esr_link | basename }}"
dest: /opt
copy: no
tags:
- install
- install:base
- name: Download the latest TanaGuru tarball
get_url:
url: "{{ tanaguru_download_link }}"
dest: "/tmp/{{ tanaguru_download_link | basename }}"
tags:
- install
- install:base
- name: Unarchive the downloaded TanaGuru tarball
unarchive:
src: "/tmp/{{ tanaguru_download_link | basename }}"
dest: "/tmp/"
copy: no
tags:
- install
- install:base
- name: Create MySQL database for TanaGuru
mysql_db:
name: "{{ tanaguru_parameters.db_name }}"
state: present
encoding: utf8
collation: utf8_general_ci
tags:
- install
- install:base
- name: Create MySQL user for TanaGuru
mysql_user:
name: "{{ tanaguru_parameters.db_user }}"
password: "{{ tanaguru_parameters.db_password }}"
host: localhost
priv: "{{ tanaguru_parameters.db_name }}.*:ALL"
state: present
tags:
- install
- install:base
- name: Check that tanaguru app is running
shell: >
/bin/ps aux | grep -i tanaguru
register: tanaguru_app
changed_when: no
tags:
- install
- name: Install TanaGuru
shell: >
/bin/echo "yes" | ./install.sh --mysql-tg-user "{{ tanaguru_parameters.db_user }}" \
--mysql-tg-passwd "{{ tanaguru_parameters.db_password }}" \
--mysql-tg-db "{{ tanaguru_parameters.db_name }}" \
--tanaguru-url "{{ tanaguru_parameters.url }}" \
--tomcat-webapps /var/lib/tomcat7/webapps \
--tomcat-user tomcat7 \
--tg-admin-email "{{ tanaguru_parameters.admin_email }}" \
--tg-admin-passwd "{{ tanaguru_parameters.admin_passwd }}" \
--firefox-esr-path /opt/firefox/firefox \
--display-port ":99.1"
args:
chdir: "/tmp/{{ tanaguru_download_link | basename | regex_replace('.tar.gz$', '') }}"
when: "tanaguru_app.stdout.find('/etc/tanaguru/') == -1"
register: tanaguru_install
tags:
- install
- install:base
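The ps-grep check above provides idempotence; an alternative sketch using the shell module's creates argument, assuming the installer writes /etc/tanaguru:
- name: Install TanaGuru (illustrative creates-based variant)
  # Same install.sh invocation and flags as the task above
  shell: '/bin/echo "yes" | ./install.sh'
  args:
    chdir: "/tmp/{{ tanaguru_download_link | basename | regex_replace('.tar.gz$', '') }}"
    creates: "/etc/tanaguru"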
- name: Restart tomcat7
service:
name: tomcat7
state: restarted
when: tanaguru_install.changed
\ No newline at end of file
#!/bin/sh
set -e
RUN_AS_USER=tomcat7
OPTS=":99 -screen 1 1024x768x24 -nolisten tcp"
XVFB_DIR=/usr/bin
PIDFILE=/var/run/xvfb
case $1 in
start)
start-stop-daemon --chuid $RUN_AS_USER -b --start --exec $XVFB_DIR/Xvfb --make-pidfile --pidfile $PIDFILE -- $OPTS &
;;
stop)
start-stop-daemon --stop --user $RUN_AS_USER --pidfile $PIDFILE
rm -f $PIDFILE
;;
restart)
if start-stop-daemon --test --stop --user $RUN_AS_USER --pidfile $PIDFILE >/dev/null; then
$0 stop
fi;
$0 start
;;
*)
echo "Usage: $0 (start|restart|stop)"
exit 1
;;
esac
exit 0
\ No newline at end of file
......@@ -21,20 +21,20 @@
#
- name: Create clone of edx-platform
git_2_0_1: >
repo=https://github.com/edx/edx-platform.git
dest={{ test_build_server_repo_path }}/edx-platform-clone
version={{ test_edx_platform_version }}
git_2_0_1:
repo: "https://github.com/edx/edx-platform.git"
dest: "{{ test_build_server_repo_path }}/edx-platform-clone"
version: "{{ test_edx_platform_version }}"
become_user: "{{ test_build_server_user }}"
- name: get xargs limit
shell: "xargs --show-limits"
- name: Copy test-development-environment.sh to somewhere the jenkins user can access it
copy: >
src=test-development-environment.sh
dest="{{ test_build_server_repo_path }}"
mode=0755
copy:
src: test-development-environment.sh
dest: "{{ test_build_server_repo_path }}"
mode: 0755
- name: Validate build environment
shell: "bash test-development-environment.sh {{ item }}"
......
---
- name: import the test courses from github
shell: >
{{ demo_edxapp_venv_bin }}/python /edx/bin/manage.edxapp lms git_add_course --settings=aws "{{ item.github_url }}"
shell: "{{ demo_edxapp_venv_bin }}/python /edx/bin/manage.edxapp lms git_add_course --settings=aws \"{{ item.github_url }}\""
become_user: "{{ common_web_user }}"
when: item.install == True
with_items: TESTCOURSES_EXPORTS
with_items: "{{ TESTCOURSES_EXPORTS }}"
- name: enroll test users in the testcourses
shell: >
{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms enroll_user_in_course -e {{ item[0].email }} -c {{ item[1].course_id }}
chdir={{ demo_edxapp_code_dir }}
shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms enroll_user_in_course -e {{ item[0].email }} -c {{ item[1].course_id }}"
args:
chdir: "{{ demo_edxapp_code_dir }}"
become_user: "{{ common_web_user }}"
when: item[1].install == True
with_nested:
- demo_test_users
- TESTCOURSES_EXPORTS
- "{{ demo_test_users }}"
- "{{ TESTCOURSES_EXPORTS }}"
......@@ -10,7 +10,7 @@
owner: "{{ jenkins_user }}"
group: "{{ jenkins_group }}"
mode: 0644
with_items: hpi_files.stdout_lines
with_items: "{{ hpi_files.stdout_lines }}"
when: hpi_files
notify:
- restart Jenkins
......@@ -26,7 +26,7 @@
owner: "{{ jenkins_user }}"
group: "{{ jenkins_group }}"
mode: 0644
with_items: jpi_files.stdout_lines
with_items: "{{ jpi_files.stdout_lines }}"
when: jpi_files
notify:
- restart Jenkins
......
......@@ -46,11 +46,8 @@
- name: Set sandbox limits
template:
src: "{{ item }}"
src: "sandbox.conf.j2"
dest: "/etc/security/limits.d/sandbox.conf"
first_available_file:
- "{{ secure_dir }}/sandbox.conf.j2"
- "sandbox.conf.j2"
- name: Install system dependencies of xserver
apt:
......@@ -60,11 +57,8 @@
- name: Load python-sandbox apparmor profile
template:
src: "{{ item }}"
src: "usr.bin.python-sandbox.j2"
dest: "/etc/apparmor.d/edx_apparmor_sandbox"
first_available_file:
- "{{ secure_dir }}/files/edx_apparmor_sandbox.j2"
- "usr.bin.python-sandbox.j2"
- include: deploy.yml
tags:
......
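first_available_file is deprecated in Ansible 2.x; this change simply pins the packaged template, but where the secure-dir fallback is still wanted, with_first_found is the documented replacement. A hedged sketch:
- name: Set sandbox limits (illustrative with_first_found variant)
  template:
    src: "{{ item }}"
    dest: "/etc/security/limits.d/sandbox.conf"
  with_first_found:
    - "{{ secure_dir }}/sandbox.conf.j2"
    - "sandbox.conf.j2"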
-r github.txt
PyYAML==3.11
ansible==2.2.0.0
PyYAML==3.12
Jinja2==2.8
MarkupSafe==0.23
boto==2.33.0
ecdsa==0.11
paramiko==1.15.1
paramiko==2.0.2
pycrypto==2.6.1
wsgiref==0.1.2
docopt==0.6.1
......
yml_files:=$(shell find . -name "*.yml")
json_files:=$(shell find . -name "*.json")
jinja_files:=$(shell find . -name "*.j2")
# $(images) is calculated in the docker.mk file
test: test.syntax test.edx_east_roles
test.syntax: test.syntax.yml test.syntax.json test.syntax.jinja test.syntax.dockerfiles
test.syntax: test.syntax.yml test.syntax.json test.syntax.dockerfiles
test.syntax.yml: $(patsubst %,test.syntax.yml/%,$(yml_files))
......@@ -18,13 +17,13 @@ test.syntax.json: $(patsubst %,test.syntax.json/%,$(json_files))
test.syntax.json/%:
jsonlint -v $*
test.syntax.jinja: $(patsubst %,test.syntax.jinja/%,$(jinja_files))
test.syntax.jinja/%:
cd playbooks && python ../tests/jinja_check.py ../$*
test.syntax.dockerfiles:
python util/check_dockerfile_coverage.py "$(images)"
test.edx_east_roles:
tests/test_edx_east_roles.sh
clean: test.clean
test.clean:
rm -rf playbooks/edx-east/test_output
#!/usr/bin/env python
import os
import sys
from jinja2 import FileSystemLoader
from jinja2 import Environment as j
from jinja2.exceptions import UndefinedError
from ansible.utils.template import _get_filters, _get_extensions
from yaml.representer import RepresenterError
input_file = sys.argv[1]
if not os.path.exists(input_file):
print('{0}: deleted in diff'.format(input_file))
sys.exit(0)
# Setup jinja to include ansible filters
j_e = j(trim_blocks=True, extensions=_get_extensions())
j_e.loader = FileSystemLoader(['.', os.path.dirname(input_file)])
j_e.filters.update(_get_filters())
# Go ahead and catch errors for undefined variables and bad yaml
# from `to_nice_yaml` ansible filter
try:
j_e.from_string(file(input_file).read()).render(func=lambda: None)
except (UndefinedError, RepresenterError), ex:
pass
except TypeError, ex:
if ex.message != 'Undefined is not JSON serializable':
raise Exception(ex.message)
pass
print('{}: ok'.format(input_file))
......@@ -7,7 +7,7 @@ import logging
import sys
import docker_images
TRAVIS_BUILD_DIR = os.environ.get("TRAVIS_BUILD_DIR")
TRAVIS_BUILD_DIR = os.environ.get("TRAVIS_BUILD_DIR", ".")
CONFIG_FILE_PATH = pathlib2.Path(TRAVIS_BUILD_DIR, "util", "parsefiles_config.yml")
LOGGER = logging.getLogger(__name__)
......
......@@ -45,7 +45,7 @@ ANSIBLE_DIR="/tmp/ansible"
CONFIGURATION_DIR="/tmp/configuration"
EDX_PPA="deb http://ppa.edx.org precise main"
EDX_PPA_KEY_SERVER="hkp://pgp.mit.edu:80"
EDX_PPA_KEY_ID="69464050"
EDX_PPA_KEY_ID="B41E5E3969464050"
cat << EOF
******************************************************************************
......@@ -116,7 +116,8 @@ fi
# which may differ from what is pinned in virtualenvironments
apt-get update -y
apt-get install -y python2.7 python2.7-dev python-pip python-apt python-yaml python-jinja2 build-essential sudo git-core libmysqlclient-dev
apt-get install -y python2.7 python2.7-dev python-pip python-apt python-yaml python-jinja2 build-essential sudo git-core libmysqlclient-dev libffi-dev libssl-dev
# Workaround for a 16.04 bug, need to upgrade to latest and then
# potentially downgrade to the preferred version.
......
......@@ -302,14 +302,129 @@ export ANSIBLE_ENABLE_SQS SQS_NAME SQS_REGION SQS_MSG_PREFIX PYTHONUNBUFFERED
export HIPCHAT_TOKEN HIPCHAT_ROOM HIPCHAT_MSG_PREFIX HIPCHAT_FROM
export HIPCHAT_MSG_COLOR DATADOG_API_KEY
if [[ ! -x /usr/bin/git || ! -x /usr/bin/pip ]]; then
echo "Installing pkg dependencies"
/usr/bin/apt-get update
/usr/bin/apt-get install -y git python-pip python-apt \\
git-core build-essential python-dev libxml2-dev \\
libxslt-dev curl libmysqlclient-dev --force-yes
#################################### Lifted from ansible-bootstrap.sh
if [[ -z "$ANSIBLE_REPO" ]]; then
ANSIBLE_REPO="https://github.com/edx/ansible.git"
fi
if [[ -z "$ANSIBLE_VERSION" ]]; then
ANSIBLE_VERSION="master"
fi
if [[ -z "$CONFIGURATION_REPO" ]]; then
CONFIGURATION_REPO="https://github.com/edx/configuration.git"
fi
if [[ -z "$CONFIGURATION_VERSION" ]]; then
CONFIGURATION_VERSION="master"
fi
if [[ -z "$UPGRADE_OS" ]]; then
UPGRADE_OS=false
fi
#
# Bootstrapping constants
#
VIRTUAL_ENV_VERSION="15.0.2"
PIP_VERSION="8.1.2"
SETUPTOOLS_VERSION="24.0.3"
EDX_PPA="deb http://ppa.edx.org precise main"
EDX_PPA_KEY_SERVER="hkp://pgp.mit.edu:80"
EDX_PPA_KEY_ID="B41E5E3969464050"
cat << EOF
******************************************************************************
Running the abbey with the following arguments:
ANSIBLE_REPO="$ANSIBLE_REPO"
ANSIBLE_VERSION="$ANSIBLE_VERSION"
CONFIGURATION_REPO="$CONFIGURATION_REPO"
CONFIGURATION_VERSION="$CONFIGURATION_VERSION"
******************************************************************************
EOF
if [[ $(id -u) -ne 0 ]]; then
echo "Please run as root";
exit 1;
fi
if grep -q 'Precise Pangolin' /etc/os-release
then
SHORT_DIST="precise"
elif grep -q 'Trusty Tahr' /etc/os-release
then
SHORT_DIST="trusty"
elif grep -q 'Xenial Xerus' /etc/os-release
then
SHORT_DIST="xenial"
else
cat << EOF
This script is only known to work on Ubuntu Precise, Trusty and Xenial,
exiting. If you are interested in helping make installation possible
on other platforms, let us know.
EOF
exit 1;
fi
EDX_PPA="deb http://ppa.edx.org $SHORT_DIST main"
# Upgrade the OS
apt-get update -y
apt-key update -y
if [ "$UPGRADE_OS" = true ]; then
echo "Upgrading the OS..."
apt-get upgrade -y
fi
# Required for add-apt-repository
apt-get install -y software-properties-common python-software-properties
# Add git PPA
add-apt-repository -y ppa:git-core/ppa
# For older distributions we need to install a PPA for Python 2.7.10
if [[ "precise" = "$SHORT_DIST" || "trusty" = "$SHORT_DIST" ]]; then
# Add python PPA
apt-key adv --keyserver "$EDX_PPA_KEY_SERVER" --recv-keys "$EDX_PPA_KEY_ID"
add-apt-repository -y "$EDX_PPA"
fi
# Install python 2.7 latest, git and other common requirements
# NOTE: This will install the latest version of python 2.7 and
# which may differ from what is pinned in virtualenvironments
apt-get update -y
apt-get install -y python2.7 python2.7-dev python-pip python-apt python-yaml python-jinja2 build-essential sudo git-core libmysqlclient-dev libffi-dev libssl-dev
# Workaround for a 16.04 bug, need to upgrade to latest and then
# potentially downgrade to the preferred version.
# https://github.com/pypa/pip/issues/3862
if [[ "xenial" = "$SHORT_DIST" ]]; then
pip install --upgrade pip
pip install --upgrade pip=="$PIP_VERSION"
else
pip install --upgrade pip=="$PIP_VERSION"
fi
# pip moves to /usr/local/bin when upgraded
hash -r #pip may have moved from /usr/bin/ to /usr/local/bin/. This clears bash's path cache.
PATH=/usr/local/bin:$PATH
pip install setuptools=="$SETUPTOOLS_VERSION"
pip install virtualenv=="$VIRTUAL_ENV_VERSION"
##################### END Lifted from ansible-bootstrap.sh
# python3 is required for certain other things
# (currently xqwatcher so it can run python2 and 3 grader code,
# but potentially more in the future). It's not available on Ubuntu 12.04,
......@@ -324,15 +439,6 @@ fi
# only runs on a build from scratch
/usr/bin/apt-get install -y python-httplib2 --force-yes
# Must upgrade to latest before pinning to work around bug
# https://github.com/pypa/pip/issues/3862
pip install --upgrade pip
hash -r #pip may have moved from /usr/bin/ to /usr/local/bin/. This clears bash's path cache.
pip install --upgrade pip==8.1.2
# upgrade setuptools early to avoid no distribution errors
pip install --upgrade setuptools==24.0.3
rm -rf $base_dir
mkdir -p $base_dir
cd $base_dir
......