Commit d49a092a by Arbab Nazar Committed by GitHub

Merge branch 'master' into arbab/edx_service-rewrite

parents bde787cb 36a140b6
- Role: hadoop_common
- Enable log retention by default to assist with debugging. Now YARN will retain stdout and stderr logs produced by map reduce tasks for 24 hours. They can be retrieved by running "yarn logs -applicationId YOUR_APPLICATION_ID".
- Role: rabbitmq - Role: rabbitmq
- Removed the RABBITMQ_CLUSTERED var and related tooling. The goal of the var was to be able to setup a cluster in the aws environment without having to know all the IPs of the cluster before hand. It relied on the `hostvars` ansible varible to work correctly which it no longer does in 1.9. This may get fixed in the future but for now, the "magic" setup doesn't work. - Removed the RABBITMQ_CLUSTERED var and related tooling. The goal of the var was to be able to setup a cluster in the aws environment without having to know all the IPs of the cluster before hand. It relied on the `hostvars` ansible varible to work correctly which it no longer does in 1.9. This may get fixed in the future but for now, the "magic" setup doesn't work.
- Changed `rabbitmq_clustered_hosts` to RABBITMQ_CLUSTERED_HOSTS. - Changed `rabbitmq_clustered_hosts` to RABBITMQ_CLUSTERED_HOSTS.
......
...@@ -2,6 +2,6 @@ ...@@ -2,6 +2,6 @@
- name: Configure instance(s) - name: Configure instance(s)
hosts: all hosts: all
sudo: True become: True
roles: roles:
- jenkins_analytics - jenkins_analytics
...@@ -6,8 +6,8 @@ ...@@ -6,8 +6,8 @@
# - instance_id - the ec2 instance ID use to create the AMI # - instance_id - the ec2 instance ID use to create the AMI
# - edx_environment - value to use for the environment tag # - edx_environment - value to use for the environment tag
# - deployment - value to use for the deploy tag # - deployment - value to use for the deploy tag
# - cluster_repo - the url of the github repo for this cluster # - app_repo - the url of the github repo for this app
# - cluster_version - git hash of the cluster (play, service, IDA) being deployed # - app_version - git hash of the app (play, service, IDA) being deployed
# - play - the play that was run # - play - the play that was run
# - configuration_repo - The github url for the configuration repo # - configuration_repo - The github url for the configuration repo
# - configuration_version - The version (git hash) of configuration # - configuration_version - The version (git hash) of configuration
...@@ -34,7 +34,7 @@ ...@@ -34,7 +34,7 @@
# -e play=pipline-test \ # -e play=pipline-test \
# -e deployment=edx \ # -e deployment=edx \
# -e edx_environment=sandbox \ # -e edx_environment=sandbox \
# -e cluster_version=12345 \ # -e app_version=12345 \
# -e configuration_version=12345 # -e configuration_version=12345
# -e configuration_secure_version=12345 # -e configuration_secure_version=12345
# -e cache_id=12345 # -e cache_id=12345
...@@ -55,7 +55,7 @@ ...@@ -55,7 +55,7 @@
- name: Create AMI - name: Create AMI
ec2_ami_2_0_0_1: ec2_ami_2_0_0_1:
instance_id: "{{ instance_id }}" instance_id: "{{ instance_id }}"
name: "{{ play }} -- {{ cluster_version }}" name: "{{ edx_environment }} -- {{ deployment }} -- {{ play }} -- {{ app_version }}"
region: "{{ ec2_region }}" region: "{{ ec2_region }}"
wait: "{{ ami_wait }}" wait: "{{ ami_wait }}"
wait_timeout: "{{ ami_creation_timeout }}" wait_timeout: "{{ ami_creation_timeout }}"
...@@ -63,7 +63,7 @@ ...@@ -63,7 +63,7 @@
description: "AMI built via edX continuous delivery pipeline - Ansible version: {{ ansible_version }}" description: "AMI built via edX continuous delivery pipeline - Ansible version: {{ ansible_version }}"
# used a JSON object here as there is a string interpolation in the keys. # used a JSON object here as there is a string interpolation in the keys.
tags: "{ tags: "{
'version:{{ play }}':'{{ cluster_repo }} {{ cluster_version }}', 'version:{{ play }}':'{{ app_repo }} {{ app_version }}',
'version:configuration':'{{ configuration_repo }} {{ configuration_version }}', 'version:configuration':'{{ configuration_repo }} {{ configuration_version }}',
'version:configuration_secure':'{{ configuration_secure_repo }} {{ configuration_secure_version }}', 'version:configuration_secure':'{{ configuration_secure_repo }} {{ configuration_secure_version }}',
'play':'{{ play }}', 'play':'{{ play }}',
......
...@@ -2,10 +2,10 @@ ...@@ -2,10 +2,10 @@
# This instance will have an autogenerated key. # This instance will have an autogenerated key.
# #
# required variables for this playbook: # required variables for this playbook:
# - ec2_subnet_id - Subnet to bring up the ec2 instance # - base_ami_id - The base base AMI-ID
# - base_ami_id - The base base AMI-ID # - ec2_vpc_subnet_id - The Subnet ID to bring up the instance
# - ec2_vpc_subnet_id - The Subnet ID to bring up the instance # - ec2_security_group_id - The security group ID to use
# - ec2_security_group_id - The security group ID to use # - ec2_instance_profile_name - The instance profile that should be used to launch this AMI
# #
# Other Variables: # Other Variables:
# - ec2_region - The region the server should be brought up in # - ec2_region - The region the server should be brought up in
...@@ -63,11 +63,12 @@ ...@@ -63,11 +63,12 @@
vpc_subnet_id: "{{ ec2_vpc_subnet_id }}" vpc_subnet_id: "{{ ec2_vpc_subnet_id }}"
assign_public_ip: "{{ ec2_assign_public_ip }}" assign_public_ip: "{{ ec2_assign_public_ip }}"
volumes: volumes:
- device_name: /dev/xvda - device_name: /dev/sdf
volume_type: standard volume_type: 'gp2'
volume_size: "{{ ebs_volume_size }}" volume_size: "{{ ebs_volume_size }}"
wait: yes wait: yes
wait_timeout: "{{ ec2_timeout }}" wait_timeout: "{{ ec2_timeout }}"
instance_profile_name: "{{ ec2_instance_profile_name }}"
register: ec2_instance_register register: ec2_instance_register
- name: Wait for SSH to come up - name: Wait for SSH to come up
......
# This playbook will check for migrations that need to be run for Django applications within a larger
# Django application. If migrations exist, it will run the migrations while saving the output as an artifact.
#
# The playbook uses the Django management commands found in this Django app repo:
# https://github.com/edx/edx-django-release-util
# So the Django app above needs to be installed in the Django app being checked for migrations.
#
# Required variables for this playbook:
#
# - APPLICATION_PATH - the top-level path of the Django application; the application lives underneath
# this directory in a directory with the same name as APPLICATION_NAME.
# NOTE: It is assumed that edx-django-release-util is one of its INSTALLED_APPS.
# - APPLICATION_NAME - The name of the application that we are migrating.
# - APPLICATION_USER - user which is meant to run the application
# - ARTIFACT_PATH - the path where the migration artifacts should be copied after completion
# - HIPCHAT_TOKEN - API token to send messages to hipchat
# - HIPCHAT_ROOM - ID or name of the room to send the notification
# - HIPCHAT_URL - URL of the hipchat API (defaults to v2 of the api)
#
# Other variables:
# - unapplied_migrations_output - the filename where the unapplied migration YAML output is stored
# - migration_output - the filename where the migration output is saved
#
# Example command line to run this playbook:
# ansible-playbook -vvvv -i "localhost," -c local \
# -e @overrides.yml \
# run_migrations.yml
#
- hosts: all
  vars:
    # Filenames (within the temp dir) for the two YAML artifacts produced below.
    unapplied_migrations_output: unapplied_migrations.yml
    migration_output: migration_output.yml
    # v2 of the Hipchat API; override via -e to use a different endpoint.
    HIPCHAT_URL: "https://api.hipchat.com/v2/"
    # Sources the app's environment file, then invokes the app's manage.py with
    # the migration DB credentials exported for the duration of the command.
    COMMAND_PREFIX: ". {{ APPLICATION_PATH }}/{{ APPLICATION_NAME }}_env; DB_MIGRATION_USER={{ DB_MIGRATION_USER }} DB_MIGRATION_PASS={{ DB_MIGRATION_PASS }} /edx/bin/python.{{ APPLICATION_NAME }} /edx/bin/manage.{{ APPLICATION_NAME }} "
  gather_facts: false
  tasks:
    - name: Create a temporary directory for the migration output.
      command: mktemp -d
      become_user: "{{ APPLICATION_USER }}"
      register: temp_output_dir

    # Uses edx-django-release-util's management command; see playbook header.
    - name: Generate list of unapplied migrations
      shell: '{{ COMMAND_PREFIX }} show_unapplied_migrations --output_file "{{ temp_output_dir.stdout }}/{{ unapplied_migrations_output }}"'
      become_user: "{{ APPLICATION_USER }}"

    - name: Migrate to apply any unapplied migrations
      shell: '{{ COMMAND_PREFIX }} run_migrations "{{ temp_output_dir.stdout }}/{{ unapplied_migrations_output }}" --output_file "{{ temp_output_dir.stdout }}/{{ migration_output }}"'
      become_user: "{{ APPLICATION_USER }}"

    - name: Transfer artifacts to the proper place.
      fetch:
        src: "{{ temp_output_dir.stdout }}/{{ item }}"
        dest: "{{ ARTIFACT_PATH }}"
        flat: true
        fail_on_missing: true
        # Quoted: an unquoted 0700 is parsed as an octal integer by YAML,
        # which Ansible then misinterprets as a decimal mode.
        mode: "0700"
      with_items:
        - "{{ unapplied_migrations_output }}"
        - "{{ migration_output }}"

    # Best-effort notification: skipped when no token is set, and failures
    # to reach Hipchat never fail the migration run.
    - name: Send Hipchat notification that migrations have finished
      hipchat_2_0_0_1:
        api: "{{ HIPCHAT_URL }}"
        token: "{{ HIPCHAT_TOKEN }}"
        room: "{{ HIPCHAT_ROOM }}"
        msg: "Migrations have completed."
      ignore_errors: true
      when: HIPCHAT_TOKEN is defined
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
- name: Configure instance(s) - name: Configure instance(s)
hosts: all hosts: all
sudo: True become: True
gather_facts: True gather_facts: True
vars: vars:
migrate_db: "yes" migrate_db: "yes"
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
- name: Install go-agent-docker-server - name: Install go-agent-docker-server
hosts: all hosts: all
sudo: True become: True
gather_facts: True gather_facts: True
roles: roles:
- aws - aws
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
- name: Install go-agent - name: Install go-agent
hosts: all hosts: all
sudo: True become: True
gather_facts: True gather_facts: True
roles: roles:
- aws - aws
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
- name: Install go-server - name: Install go-server
hosts: all hosts: all
sudo: True become: True
gather_facts: True gather_facts: True
roles: roles:
- aws - aws
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
# analyzing logs. # analyzing logs.
- name: Configure syslog server - name: Configure syslog server
hosts: all hosts: all
sudo: yes become: True
roles: roles:
- common - common
- oraclejdk - oraclejdk
......
--- ---
AIDE_REPORT_EMAIL: 'root' AIDE_REPORT_EMAIL: 'root'
--- ---
# install and configure aide IDS # install and configure aide IDS
# #
- name: Install aide
apt:
name: aide
state: present
- name: install aide - name: Configure aide defaults
apt: pkg="aide" state="present" template:
src: etc/default/aide.j2
dest: /etc/default/aide
owner: root
group: root
mode: "0644"
- name: configure aide defaults - name: Open read permissions on aide logs
template: > file:
src=etc/default/aide.j2 dest=/etc/default/aide name: /var/log/aide
owner=root group=root mode=0644 recurse: yes
state: directory
mode: "0755"
- name: open read permissions on aide logs - name: Aide initial scan (this can take a long time)
file: > command: "aideinit -y -f"
name="/var/log/aide" args:
recurse="yes" creates: "/var/lib/aide/aide.db"
state="directory" become: yes
mode="755" \ No newline at end of file
- name: aide initial scan (this can take a long time)
command: >
aideinit -y -f
creates=/var/lib/aide/aide.db
become: yes
...@@ -153,13 +153,11 @@ ...@@ -153,13 +153,11 @@
name: ssh name: ssh
state: restarted state: restarted
become: True become: True
when: sshd_config.changed when: sshd_config.changed and ansible_distribution in common_debian_variants
when: ansible_distribution in common_debian_variants
- name: Restart ssh - name: Restart ssh
service: service:
name: sshd name: sshd
state: restarted state: restarted
become: True become: True
when: sshd_config.changed when: sshd_config.changed and ansible_distribution in common_redhat_variants
when: ansible_distribution in common_redhat_variants
...@@ -102,8 +102,10 @@ common_redhat_pkgs: ...@@ -102,8 +102,10 @@ common_redhat_pkgs:
- rsyslog - rsyslog
- git - git
- unzip - unzip
- acl
common_debian_pkgs: common_debian_pkgs:
- ntp - ntp
- acl
- lynx-cur - lynx-cur
- logrotate - logrotate
- rsyslog - rsyslog
......
...@@ -48,7 +48,7 @@ CREDENTIALS_CACHES: ...@@ -48,7 +48,7 @@ CREDENTIALS_CACHES:
CREDENTIALS_DJANGO_SETTINGS_MODULE: "credentials.settings.production" CREDENTIALS_DJANGO_SETTINGS_MODULE: "credentials.settings.production"
CREDENTIALS_DOMAIN: 'credentials' CREDENTIALS_DOMAIN: 'credentials'
CREDENTIALS_URL_ROOT: 'http://{{ CREDENTIALS_DOMAIN }}:18150' CREDENTIALS_URL_ROOT: 'http://{{ CREDENTIALS_DOMAIN }}:18150'
CREDENTIALS_LOGOUT_URL: '{{ CREDENTIALS_URL_ROOT }}/accounts/logout/' CREDENTIALS_LOGOUT_URL: '{{ CREDENTIALS_URL_ROOT }}/logout/'
CREDENTIALS_OAUTH_URL_ROOT: '{{ EDXAPP_LMS_ROOT_URL | default("http://127.0.0.1:8000") }}/oauth2' CREDENTIALS_OAUTH_URL_ROOT: '{{ EDXAPP_LMS_ROOT_URL | default("http://127.0.0.1:8000") }}/oauth2'
CREDENTIALS_OIDC_LOGOUT_URL: '{{ EDXAPP_LMS_ROOT_URL | default("http://127.0.0.1:8000") }}/logout' CREDENTIALS_OIDC_LOGOUT_URL: '{{ EDXAPP_LMS_ROOT_URL | default("http://127.0.0.1:8000") }}/logout'
......
...@@ -151,8 +151,8 @@ EDXAPP_ENABLE_MOBILE_REST_API: false ...@@ -151,8 +151,8 @@ EDXAPP_ENABLE_MOBILE_REST_API: false
# Settings for API access management # Settings for API access management
EDXAPP_API_ACCESS_MANAGER_EMAIL: "api-access@example.com" EDXAPP_API_ACCESS_MANAGER_EMAIL: "api-access@example.com"
EDXAPP_API_ACCESS_FROM_EMAIL: "api-requests@example.com" EDXAPP_API_ACCESS_FROM_EMAIL: "api-requests@example.com"
EDXAPP_API_DOCUMENTATION_URL: "http://edx.readthedocs.org/projects/edx-platform-api/en/latest/overview.html" EDXAPP_API_DOCUMENTATION_URL: "http://course-catalog-api-guide.readthedocs.io/en/latest/"
EDXAPP_AUTH_DOCUMENTATION_URL: "http://edx.readthedocs.org/projects/edx-platform-api/en/latest/authentication.html" EDXAPP_AUTH_DOCUMENTATION_URL: "http://course-catalog-api-guide.readthedocs.io/en/latest/authentication/index.html"
# Settings for affiliate cookie tracking # Settings for affiliate cookie tracking
EDXAPP_AFFILIATE_COOKIE_NAME: 'dev_affiliate_id' EDXAPP_AFFILIATE_COOKIE_NAME: 'dev_affiliate_id'
......
{% for w in edxapp_workers %} {% for w in edxapp_workers %}
[program:{{ w.service_variant }}_{{ w.queue }}_{{ w.concurrency }}] [program:{{ w.service_variant }}_{{ w.queue }}_{{ w.concurrency }}]
environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_WORKERS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}CONCURRENCY={{ w.concurrency }},LOGLEVEL=info,DJANGO_SETTINGS_MODULE=aws,PYTHONPATH={{ edxapp_code_dir }},SERVICE_VARIANT={{ w.service_variant }} environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_WORKERS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}CONCURRENCY={{ w.concurrency }},LOGLEVEL=info,DJANGO_SETTINGS_MODULE={{ worker_django_settings_module }},PYTHONPATH={{ edxapp_code_dir }},SERVICE_VARIANT={{ w.service_variant }}
user={{ common_web_user }} user={{ common_web_user }}
directory={{ edxapp_code_dir }} directory={{ edxapp_code_dir }}
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
command={{ edxapp_venv_dir + '/bin/newrelic-admin run-program ' if w.monitor and COMMON_ENABLE_NEWRELIC_APP else ''}}{{ edxapp_venv_bin }}/python {{ edxapp_code_dir }}/manage.py {{ w.service_variant }} --settings=aws celery worker --loglevel=info --queues=edx.{{ w.service_variant }}.core.{{ w.queue }} --hostname=edx.{{ w.service_variant }}.core.{{ w.queue }}.%%h --concurrency={{ w.concurrency }} {{ '--maxtasksperchild ' + w.max_tasks_per_child|string if w.max_tasks_per_child is defined else '' }} command={{ edxapp_venv_dir + '/bin/newrelic-admin run-program ' if w.monitor and COMMON_ENABLE_NEWRELIC_APP else ''}}{{ edxapp_venv_bin }}/python {{ edxapp_code_dir }}/manage.py {{ w.service_variant }} --settings={{ worker_django_settings_module }} celery worker --loglevel=info --queues=edx.{{ w.service_variant }}.core.{{ w.queue }} --hostname=edx.{{ w.service_variant }}.core.{{ w.queue }}.%%h --concurrency={{ w.concurrency }} {{ '--maxtasksperchild ' + w.max_tasks_per_child|string if w.max_tasks_per_child is defined else '' }}
killasgroup=true killasgroup=true
stopasgroup=true stopasgroup=true
stopwaitsecs={{ w.stopwaitsecs | default(EDXAPP_WORKER_DEFAULT_STOPWAITSECS) }} stopwaitsecs={{ w.stopwaitsecs | default(EDXAPP_WORKER_DEFAULT_STOPWAITSECS) }}
......
...@@ -6,6 +6,6 @@ ...@@ -6,6 +6,6 @@
ec2_tag: ec2_tag:
resource: "{{ ansible_ec2_instance_id }}" resource: "{{ ansible_ec2_instance_id }}"
region: "{{ ansible_ec2_placement_region }}" region: "{{ ansible_ec2_placement_region }}"
tags: tags:
"version:forum" : "{{ forum_source_repo }} {{ forum_checkout.after|truncate(7,True,'') }}" "version:forum" : "{{ forum_source_repo }} {{ forum_checkout.after|truncate(7,True,'') }}"
when: forum_checkout.after is defined when: forum_checkout.after is defined
...@@ -17,7 +17,10 @@ ...@@ -17,7 +17,10 @@
# require in our default configuration. # require in our default configuration.
# #
# #
- name: set git fetch.prune to ignore deleted remote refs # Rewrite this task using the ansible git-config module once we'll migrate to Ansible 2.x
# https://docs.ansible.com/ansible/git_config_module.html#git-config
#
- name: Set git fetch.prune to ignore deleted remote refs
shell: git config --global fetch.prune true shell: git config --global fetch.prune true
become_user: "{{ repo_owner }}" become_user: "{{ repo_owner }}"
when: GIT_REPOS is defined when: GIT_REPOS is defined
...@@ -25,28 +28,30 @@ ...@@ -25,28 +28,30 @@
- install - install
- install:code - install:code
- name: validate git protocol - name: Validate git protocol
fail: msg='GIT_REPOS.PROTOCOL must be "https" or "ssh"' fail:
msg: '{{ GIT_REPOS.PROTOCOL }} must be "https" or "ssh"'
when: (item.PROTOCOL != "https") and (item.PROTOCOL != "ssh") and GIT_REPOS is defined when: (item.PROTOCOL != "https") and (item.PROTOCOL != "ssh") and GIT_REPOS is defined
with_items: GIT_REPOS with_items: GIT_REPOS
tags: tags:
- install - install
- install:code - install:code
- name: install read-only ssh key
- name: Install read-only ssh key
copy: copy:
dest: "{{ git_home }}/.ssh/{{ item.REPO }}" dest: "{{ git_home }}/.ssh/{{ item.REPO }}"
content: "{{ item.SSH_KEY }}" content: "{{ item.SSH_KEY }}"
owner: "{{ repo_owner }}" owner: "{{ repo_owner }}"
group: "{{ repo_group }}" group: "{{ repo_group }}"
mode: 0600 mode: "0600"
when: item.PROTOCOL == "ssh" and GIT_REPOS is defined when: item.PROTOCOL == "ssh" and GIT_REPOS is defined
with_items: GIT_REPOS with_items: GIT_REPOS
tags: tags:
- install - install
- install:code - install:code
- name: checkout code over ssh - name: Checkout code over ssh
git_2_0_1: git_2_0_1:
repo: "git@{{ item.DOMAIN }}:{{ item.PATH }}/{{ item.REPO }}" repo: "git@{{ item.DOMAIN }}:{{ item.PATH }}/{{ item.REPO }}"
dest: "{{ item.DESTINATION }}" dest: "{{ item.DESTINATION }}"
...@@ -61,7 +66,7 @@ ...@@ -61,7 +66,7 @@
- install - install
- install:code - install:code
- name: checkout code over https - name: Checkout code over https
git_2_0_1: git_2_0_1:
repo: "https://{{ item.DOMAIN }}/{{ item.PATH }}/{{ item.REPO }}" repo: "https://{{ item.DOMAIN }}/{{ item.PATH }}/{{ item.REPO }}"
dest: "{{ item.DESTINATION }}" dest: "{{ item.DESTINATION }}"
...@@ -74,7 +79,7 @@ ...@@ -74,7 +79,7 @@
- install - install
- install:code - install:code
- name: remove read-only ssh key - name: Remove read-only ssh key
file: file:
dest: "{{ git_home }}/.ssh/{{ item.REPO }}" dest: "{{ git_home }}/.ssh/{{ item.REPO }}"
state: absent state: absent
......
...@@ -79,4 +79,8 @@ hadoop_common_redhat_pkgs: [] ...@@ -79,4 +79,8 @@ hadoop_common_redhat_pkgs: []
# yarn.nodemanager.vmem-pmem-ratio: 2.1 # yarn.nodemanager.vmem-pmem-ratio: 2.1
mapred_site_config: {} mapred_site_config: {}
yarn_site_config: {} yarn_site_config:
yarn.log-aggregation-enable: true
# 24 hour log retention
yarn.log-aggregation.retain-seconds: 86400
--- ---
- name: restart mongo - name: restart mongo
service: name=mongod state=restarted service:
name: mongod
state: restarted
--- ---
- name: check to see that MongoDB 2.4 is not installed - name: Check to see that MongoDB 2.4 is not installed
stat: path=/etc/init.d/mongodb stat:
path: /etc/init.d/mongodb
register: mongodb_needs_upgrade register: mongodb_needs_upgrade
tags: tags:
- install - install
- install:base - install:base
- name: verify 2.4 not installed - name: Verify 2.4 not installed
fail: msg="MongoDB 2.4 is currently installed and cannot be safely upgraded in a clustered configuration. Please read http://docs.mongodb.org/manual/release-notes/2.6-upgrade/#upgrade-considerations and upgrade to 2.6." fail:
msg: "MongoDB 2.4 is currently installed and cannot be safely upgraded in a clustered configuration. Please read http://docs.mongodb.org/manual/release-notes/2.6-upgrade/#upgrade-considerations and upgrade to 2.6."
when: mongodb_needs_upgrade.stat.exists and MONGO_CLUSTERED when: mongodb_needs_upgrade.stat.exists and MONGO_CLUSTERED
tags: tags:
- install - install
- install:base - install:base
- name: remove mongo 2.4 if present - name: Remove mongo 2.4 if present
apt: > apt:
pkg=mongodb-10gen pkg: mongodb-10gen
state=absent purge=yes state: absent
force=yes purge: yes
force: yes
when: mongodb_needs_upgrade.stat.exists and not MONGO_CLUSTERED when: mongodb_needs_upgrade.stat.exists and not MONGO_CLUSTERED
tags: tags:
- install - install
- install:base - install:base
- name: install python pymongo for mongo_user ansible module - name: Install python pymongo for mongo_user ansible module
pip: > pip:
name=pymongo state=present name: pymongo
version={{ pymongo_version }} extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}" state: present
version: "{{ pymongo_version }}"
extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}"
tags: tags:
- install - install
- install:base - install:base
- name: add the mongodb signing key - name: Add the mongodb signing key
apt_key: > apt_key:
id={{ MONGODB_APT_KEY }} id: "{{ MONGODB_APT_KEY }}"
keyserver={{ MONGODB_APT_KEYSERVER }} keyserver: "{{ MONGODB_APT_KEYSERVER }}"
state=present state: present
tags: tags:
- install - install
- install:base - install:base
- name: add the mongodb repo to the sources list - name: Add the mongodb repo to the sources list
apt_repository: > apt_repository:
repo='{{ MONGODB_REPO }}' repo: "{{ MONGODB_REPO }}"
state=present state: present
tags: tags:
- install - install
- install:base - install:base
- name: install mongo server and recommends - name: Install mongo server and recommends
apt: > apt:
pkg=mongodb-org={{ mongo_version }} name: "mongodb-org={{ mongo_version }}"
state=present install_recommends=yes state: present
force=yes update_cache=yes install_recommends: yes
force: yes
update_cache: yes
tags: tags:
- install - install
- install:base - install:base
- name: create mongo dirs - name: Create mongo dirs
file: > file:
path="{{ item }}" state=directory path: "{{ item }}"
owner="{{ mongo_user }}" state: directory
group="{{ mongo_user }}" owner: "{{ mongo_user }}"
group: "{{ mongo_user }}"
with_items: with_items:
- "{{ mongo_data_dir }}" - "{{ mongo_data_dir }}"
- "{{ mongo_dbpath }}" - "{{ mongo_dbpath }}"
...@@ -71,83 +79,97 @@ ...@@ -71,83 +79,97 @@
- install - install
- install:base - install:base
- name: stop mongod service - name: Stop mongod service
service: name=mongod state=stopped service:
name: mongod
state: stopped
tags: tags:
- manage - manage
- manage:stop - manage:stop
- name: move mongodb to {{ mongo_data_dir }} - name: Move mongodb to {{ mongo_data_dir }}
command: > command: "mv /var/lib/mongodb {{ mongo_data_dir}}/."
mv /var/lib/mongodb {{ mongo_data_dir}}/. args:
creates={{ mongo_data_dir }}/mongodb creates: "{{ mongo_data_dir }}/mongodb"
tags: tags:
- install - install
- install:base - install:base
- name: copy mongodb key file - name: Copy mongodb key file
copy: > copy:
content="{{ MONGO_CLUSTER_KEY }}" content: "{{ MONGO_CLUSTER_KEY }}"
dest={{ mongo_key_file }} dest: "{{ mongo_key_file }}"
mode=0600 mode: "0600"
owner=mongodb owner: mongodb
group=mongodb group: mongodb
when: MONGO_CLUSTERED when: MONGO_CLUSTERED
tags: tags:
- install - install
- install:configuration - install:configuration
- name: copy configuration template - name: Copy configuration template
template: src=mongodb.conf.j2 dest=/etc/mongod.conf backup=yes template:
notify: restart mongo src: "mongodb.conf.j2"
dest: "/etc/mongod.conf"
backup: yes
notify:
- restart mongo
tags: tags:
- install - install
- install:configuration - install:configuration
- name: start mongo service - name: Start mongo service
service: name=mongod state=started service:
name: mongod
state: started
tags: tags:
- manage - manage
- manage:start - manage:start
- name: wait for mongo server to start - name: Wait for mongo server to start
wait_for: port=27017 delay=2 wait_for:
port: 27017
delay: 2
tags: tags:
- manage - manage
- manage:start - manage:start
- name: drop super user script - name: Drop super user script
template: src="create_root.js.j2" dest="/tmp/create_root.js" template:
src: "create_root.js.j2"
dest: "/tmp/create_root.js"
when: not MONGO_CLUSTERED when: not MONGO_CLUSTERED
tags: tags:
- install - install
- install:configuration - install:configuration
- name: create super user with js - name: Create super user with js
shell: > shell: "/usr/bin/mongo admin /tmp/create_root.js"
/usr/bin/mongo admin /tmp/create_root.js
when: not MONGO_CLUSTERED when: not MONGO_CLUSTERED
tags: tags:
- install - install
- install:configuration - install:configuration
- name: delete super user script - name: Delete super user script
file: path=/tmp/create_root.js state=absent file:
path: /tmp/create_root.js
state: absent
when: not MONGO_CLUSTERED when: not MONGO_CLUSTERED
tags: tags:
- install - install
- install:configuration - install:configuration
- name: Create the file to initialize the mongod replica set - name: Create the file to initialize the mongod replica set
template: src=repset_init.js.j2 dest=/tmp/repset_init.js template:
src: "repset_init.js.j2"
dest: "/tmp/repset_init.js"
when: MONGO_CLUSTERED when: MONGO_CLUSTERED
tags: tags:
- install - install
- install:configuration - install:configuration
- name: Initialize the replication set - name: Initialize the replication set
shell: > shell: "/usr/bin/mongo /tmp/repset_init.js"
/usr/bin/mongo /tmp/repset_init.js
when: MONGO_CLUSTERED when: MONGO_CLUSTERED
tags: tags:
- install - install
...@@ -157,81 +179,72 @@ ...@@ -157,81 +179,72 @@
# file: path=/tmp/repset_init.js state=absent # file: path=/tmp/repset_init.js state=absent
# when: MONGO_CLUSTERED # when: MONGO_CLUSTERED
- name: create a mongodb user - name: Create a mongodb user
mongodb_user: > mongodb_user:
database={{ item.database }} database: "{{ item.database }}"
login_user={{ MONGO_ADMIN_USER }} login_user: "{{ MONGO_ADMIN_USER }}"
login_password={{ MONGO_ADMIN_PASSWORD }} login_password: "{{ MONGO_ADMIN_PASSWORD }}"
name={{ item.user }} name: "{{ item.user }}"
password="{{ item.password }}" password: "{{ item.password }}"
roles={{ item.roles }} roles: "{{ item.roles }}"
state=present state: present
with_items: MONGO_USERS with_items: "{{ MONGO_USERS }}"
when: not MONGO_CLUSTERED when: not MONGO_CLUSTERED
tags: tags:
- manage - manage
- manage:app-users - manage:app-users
- name: create a mongodb user - name: Create a mongodb user
mongodb_user: > mongodb_user:
database={{ item.database }} database: "{{ item.database }}"
login_user={{ MONGO_ADMIN_USER }} login_user: "{{ MONGO_ADMIN_USER }}"
login_password={{ MONGO_ADMIN_PASSWORD }} login_password: "{{ MONGO_ADMIN_PASSWORD }}"
name={{ item.user }} name: "{{ item.user }}"
password="{{ item.password }}" password: "{{ item.password }}"
roles={{ item.roles }} roles: "{{ item.roles }}"
state=present state: present
replica_set={{ mongo_repl_set }} replica_set: "{{ mongo_repl_set }}"
with_items: MONGO_USERS with_items: "{{ MONGO_USERS }}"
when: MONGO_CLUSTERED when: MONGO_CLUSTERED
tags: tags:
- manage - manage
- manage:app-users - manage:app-users
- name: install s3cmd - name: Install s3cmd
pip: > pip:
name="s3cmd" name: "s3cmd"
state=present state: present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}" extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}"
when: MONGO_S3_BACKUP when: MONGO_S3_BACKUP
tags: tags:
- install - install
- install:app-requirements - install:app-requirements
- name: configure s3cmd - name: Configure s3cmd and install backup-mongo-to-s3 script
template: > template:
dest="{{ MONGO_S3_S3CMD_CONFIG }}" dest: "{{ item.dest }}"
src=mongo-s3-backup-s3cfg.j2 src: "{{ item.src }}"
owner=root owner: root
group=root group: root
mode=0600 mode: "{{ item.mode }}"
when: MONGO_S3_BACKUP
tags:
- install
- install:configuration
- name: install backup-mongo-to-s3 script
template: >
src=backup-mongo-to-s3.j2
dest=/edx/bin/backup-mongo-to-s3.sh
owner=root
group=root
mode=0700
when: MONGO_S3_BACKUP when: MONGO_S3_BACKUP
with_items:
- { src: 'mongo-s3-backup-s3cfg.j2', dest: '{{ MONGO_S3_S3CMD_CONFIG }}', mode: '0600' }
- { src: 'backup-mongo-to-s3.j2', dest: '/edx/bin/backup-mongo-to-s3.sh', mode: '0700' }
tags: tags:
- install - install
- install:configuration - install:configuration
- name: schedule backup-mongo-to-3s crontab - name: Schedule backup-mongo-to-3s crontab
cron: cron:
name="backup-mongo-to-s3" name: "backup-mongo-to-s3"
job="/edx/bin/backup-mongo-to-s3.sh" job: "/edx/bin/backup-mongo-to-s3.sh"
backup=yes backup: yes
cron_file=backup-mongo-to-s3 cron_file: backup-mongo-to-s3
user=root user: root
hour="{{ MONGO_S3_BACKUP_HOUR }}" hour: "{{ MONGO_S3_BACKUP_HOUR }}"
minute="0" minute: "0"
day="{{ MONGO_S3_BACKUP_DAY }}" day: "{{ MONGO_S3_BACKUP_DAY }}"
when: MONGO_S3_BACKUP when: MONGO_S3_BACKUP
tags: tags:
- install - install
......
--- ---
- name: restart notifier-scheduler - name: restart notifier-scheduler
supervisorctl: > supervisorctl:
name=notifier-scheduler name: "notifier-scheduler"
state=restarted state: restarted
config={{ supervisor_cfg }} config: "{{ supervisor_cfg }}"
supervisorctl_path={{ supervisor_ctl }} supervisorctl_path: "{{ supervisor_ctl }}"
when: not disable_edx_services when: not disable_edx_services
- name: restart notifier-celery-workers - name: restart notifier-celery-workers
supervisorctl: > supervisorctl:
name=notifier-celery-workers name: "notifier-celery-workers"
state=restarted state: restarted
config={{ supervisor_cfg }} config: "{{ supervisor_cfg }}"
supervisorctl_path={{ supervisor_ctl }} supervisorctl_path: "{{ supervisor_ctl }}"
when: not disable_edx_services when: not disable_edx_services
--- ---
- name: Checkout code
- name: checkout code
git_2_0_1: git_2_0_1:
dest={{ NOTIFIER_CODE_DIR }} repo={{ NOTIFIER_SOURCE_REPO }} dest: "{{ NOTIFIER_CODE_DIR }}"
version={{ NOTIFIER_VERSION }} repo: "{{ NOTIFIER_SOURCE_REPO }}"
accept_hostkey=yes version: "{{ NOTIFIER_VERSION }}"
accept_hostkey: yes
become: true become: true
become_user: "{{ notifier_user }}" become_user: "{{ notifier_user }}"
notify: notify:
...@@ -12,48 +12,56 @@ ...@@ -12,48 +12,56 @@
- restart notifier-celery-workers - restart notifier-celery-workers
# Optional auth for git # Optional auth for git
- name: create ssh script for git (not authenticated) - name: Create ssh script for git (not authenticated)
template: > template:
src=git_ssh_noauth.sh.j2 dest={{ notifier_git_ssh }} src: "git_ssh_noauth.sh.j2"
owner={{ notifier_user }} mode=750 dest: "{{ notifier_git_ssh }}"
owner: "{{ notifier_user }}"
mode: "0750"
when: NOTIFIER_GIT_IDENTITY == "" when: NOTIFIER_GIT_IDENTITY == ""
- name: create ssh script for git (authenticated) - name: Create ssh script for git (authenticated)
template: > template:
src=git_ssh_auth.sh.j2 dest={{ notifier_git_ssh }} src: "git_ssh_auth.sh.j2"
owner={{ notifier_user }} mode=750 dest: "{{ notifier_git_ssh }}"
owner: "{{ notifier_user }}"
mode: "0750"
when: NOTIFIER_GIT_IDENTITY != "" when: NOTIFIER_GIT_IDENTITY != ""
- name: install read-only ssh key - name: Install read-only ssh key
copy: > copy:
content="{{ NOTIFIER_GIT_IDENTITY }}" dest={{ notifier_git_identity }} content: "{{ NOTIFIER_GIT_IDENTITY }}"
force=yes owner={{ notifier_user }} mode=0600 dest: "{{ notifier_git_identity }}"
force: yes
owner: "{{ notifier_user }}"
mode: "0600"
when: NOTIFIER_GIT_IDENTITY != "" when: NOTIFIER_GIT_IDENTITY != ""
- name: checkout theme - name: Checkout theme
git_2_0_1: > git_2_0_1:
dest={{ NOTIFIER_CODE_DIR }}/{{ NOTIFIER_THEME_NAME }} dest: "{{ NOTIFIER_CODE_DIR }}/{{ NOTIFIER_THEME_NAME }}"
repo={{ NOTIFIER_THEME_REPO }} repo: "{{ NOTIFIER_THEME_REPO }}"
version={{ NOTIFIER_THEME_VERSION }} version: "{{ NOTIFIER_THEME_VERSION }}"
accept_hostkey=yes accept_hostkey: yes
when: NOTIFIER_THEME_NAME != '' when: NOTIFIER_THEME_NAME != ''
become_user: "{{ notifier_user }}" become_user: "{{ notifier_user }}"
environment: environment:
GIT_SSH: "{{ notifier_git_ssh }}" GIT_SSH: "{{ notifier_git_ssh }}"
- name: write notifier local settings - name: Write notifier local settings
template: > template:
src=settings_local.py.j2 src: "settings_local.py.j2"
dest={{ NOTIFIER_CODE_DIR }}/notifier/settings_local.py dest: "{{ NOTIFIER_CODE_DIR }}/notifier/settings_local.py"
mode=0555 mode: "0555"
when: NOTIFIER_THEME_NAME != '' when: NOTIFIER_THEME_NAME != ''
notify: notify:
- restart notifier-celery-workers - restart notifier-celery-workers
- name: install application requirements - name: Install application requirements
pip: pip:
requirements="{{ NOTIFIER_REQUIREMENTS_FILE }}" requirements: "{{ NOTIFIER_REQUIREMENTS_FILE }}"
virtualenv="{{ NOTIFIER_VENV_DIR }}" state=present virtualenv: "{{ NOTIFIER_VENV_DIR }}"
state: present
become: true become: true
become_user: "{{ notifier_user }}" become_user: "{{ notifier_user }}"
notify: notify:
...@@ -63,10 +71,13 @@ ...@@ -63,10 +71,13 @@
# Syncdb for whatever reason always creates the file owned by www-data:www-data, and then # Syncdb for whatever reason always creates the file owned by www-data:www-data, and then
# complains it can't write because it's running as notifier. So this is to touch the file into # complains it can't write because it's running as notifier. So this is to touch the file into
# place with proper perms first. # place with proper perms first.
- name: fix permissions on notifer db file - name: Fix permissions on notifer db file
file: > file:
path={{ NOTIFIER_DB_DIR }}/notifier.db state=touch owner={{ notifier_user }} group={{ NOTIFIER_WEB_USER }} path: "{{ NOTIFIER_DB_DIR }}/notifier.db"
mode=0664 state: touch
owner: "{{ notifier_user }}"
group: "{{ NOTIFIER_WEB_USER }}"
mode: "0664"
become: true become: true
notify: notify:
- restart notifier-scheduler - restart notifier-scheduler
...@@ -74,9 +85,10 @@ ...@@ -74,9 +85,10 @@
tags: tags:
- deploy - deploy
- name: syncdb - name: Syncdb
shell: > shell: "{{ NOTIFIER_VENV_DIR }}/bin/python manage.py syncdb"
cd {{ NOTIFIER_CODE_DIR }} && {{ NOTIFIER_VENV_DIR }}/bin/python manage.py syncdb args:
chdir: "{{ NOTIFIER_CODE_DIR }}"
become: true become: true
become_user: "{{ notifier_user }}" become_user: "{{ notifier_user }}"
environment: notifier_env_vars environment: notifier_env_vars
......
--- ---
# #
# notifier # notifier
# #
...@@ -17,138 +16,145 @@ ...@@ -17,138 +16,145 @@
# - common # - common
# - notifier # - notifier
# #
- name: install notifier specific system packages - name: Install notifier specific system packages
apt: pkg={{','.join(notifier_debian_pkgs)}} state=present apt:
name: "{{ item }}"
- name: check if incommon ca is installed state: present
command: test -e /usr/share/ca-certificates/incommon/InCommonServerCA.crt with_items: "{{ notifier_debian_pkgs }}"
- name: Check if incommon ca is installed
command: "test -e /usr/share/ca-certificates/incommon/InCommonServerCA.crt"
register: incommon_present register: incommon_present
ignore_errors: yes ignore_errors: yes
- name: create incommon ca directory - name: Create incommon ca directory
file: file:
path="/usr/share/ca-certificates/incommon" mode=2775 state=directory path: "/usr/share/ca-certificates/incommon"
state: directory
mode: "2775"
when: incommon_present|failed when: incommon_present|failed
- name: retrieve incommon server CA - name: Retrieve incommon server CA
shell: curl https://www.incommon.org/cert/repository/InCommonServerCA.txt -o /usr/share/ca-certificates/incommon/InCommonServerCA.crt get_url:
url: "https://www.incommon.org/cert/repository/InCommonServerCA.txt"
dest: "/usr/share/ca-certificates/incommon/InCommonServerCA.crt"
when: incommon_present|failed when: incommon_present|failed
- name: add InCommon ca cert - name: Add InCommon ca cert
lineinfile: lineinfile:
dest=/etc/ca-certificates.conf dest: /etc/ca-certificates.conf
regexp='incommon/InCommonServerCA.crt' regexp: 'incommon/InCommonServerCA.crt'
line='incommon/InCommonServerCA.crt' line: 'incommon/InCommonServerCA.crt'
- name: update ca certs globally - name: Update ca certs globally
shell: update-ca-certificates shell: "update-ca-certificates"
- name: create notifier user {{ notifier_user }} - name: Create notifier user {{ notifier_user }}
user: > user:
name="{{ notifier_user }}" state=present shell=/bin/false name: "{{ notifier_user }}"
home="{{ notifier_app_dir }}" createhome=no state: present
shell: /bin/false
- name: create notifier app dir home: "{{ notifier_app_dir }}"
file: > createhome: no
path="{{ notifier_app_dir }}" state=directory
owner="{{ notifier_user }}" group="{{ common_web_group }}" - name: Create notifier app dir
notify: [restart notifier-scheduler, restart notifier-celery-workers] file:
path: "{{ notifier_app_dir }}"
- name: setup the notifier env state: directory
owner: "{{ notifier_user }}"
group: "{{ common_web_group }}"
notify:
- restart notifier-scheduler
- restart notifier-celery-workers
- name: Setup the notifier env
template: template:
src=notifier_env.j2 dest={{ notifier_app_dir }}/notifier_env src: "notifier_env.j2"
owner="{{ notifier_user }}" group="{{ notifier_user }}" dest: "{{ notifier_app_dir }}/notifier_env"
mode=655 owner: "{{ notifier_user }}"
group: "{{ notifier_user }}"
- name: drop a bash_profile mode: "0655"
copy: >
src=../../common/files/bash_profile - name: Drop a bash_profile
dest={{ notifier_app_dir }}/.bash_profile copy:
owner={{ notifier_user }} src: "../../common/files/bash_profile"
group={{ notifier_user }} dest: "{{ notifier_app_dir }}/.bash_profile"
owner: "{{ notifier_user }}"
- name: ensure .bashrc exists group: "{{ notifier_user }}"
shell: touch {{ notifier_app_dir }}/.bashrc
- name: Ensure .bashrc exists
file:
path: "{{ notifier_app_dir }}/.bashrc"
state: touch
become: true become: true
become_user: "{{ notifier_user }}" become_user: "{{ notifier_user }}"
- name: add source of notifier_env to .bashrc - name: Add source of notifier_env to .bashrc
lineinfile: lineinfile:
dest={{ notifier_app_dir }}/.bashrc dest: "{{ notifier_app_dir }}/.bashrc"
regexp='. {{ notifier_app_dir }}/notifier_env' regexp: '. {{ notifier_app_dir }}/notifier_env'
line='. {{ notifier_app_dir }}/notifier_env' line: '. {{ notifier_app_dir }}/notifier_env'
- name: add source venv to .bashrc - name: Add source venv to .bashrc
lineinfile: lineinfile:
dest={{ notifier_app_dir }}/.bashrc dest: "{{ notifier_app_dir }}/.bashrc"
regexp='. {{ NOTIFIER_VENV_DIR }}/bin/activate' regexp: '. {{ NOTIFIER_VENV_DIR }}/bin/activate'
line='. {{ NOTIFIER_VENV_DIR }}/bin/activate' line: '. {{ NOTIFIER_VENV_DIR }}/bin/activate'
- name: create notifier DB directory
file:
path="{{ NOTIFIER_DB_DIR }}" mode=2775 state=directory owner={{ notifier_user }} group={{ NOTIFIER_WEB_USER }}
- name: create notifier/bin directory
file:
path="{{ notifier_app_dir }}/bin" mode=2775 state=directory owner={{ notifier_user }} group={{ notifier_user }}
- name: create notifier/.ssh directory - name: Create desired directories
file: file:
path="{{ notifier_app_dir }}/.ssh" mode=2700 state=directory owner={{ notifier_user }} group={{ notifier_user }} path: "{{ item.path }}"
state: directory
- name: create service log dir owner: "{{ item.owner }}"
file: > group: "{{ item.group }}"
path="{{ item }}" mode: "{{ item.mode }}"
state=directory
owner="syslog"
group="syslog"
with_items: with_items:
- "{{ COMMON_LOG_DIR }}/notifier" - { path: '{{ NOTIFIER_DB_DIR }}', owner: '{{ notifier_user }}', group: '{{ NOTIFIER_WEB_USER }}', mode: '2775' }
- { path: '{{ notifier_app_dir }}/bin', owner: '{{ notifier_user }}', group: '{{ notifier_user }}', mode: '2775' }
- name: write supervisord wrapper for celery workers - { path: '{{ notifier_app_dir }}/.ssh', owner: '{{ notifier_user }}', group: '{{ notifier_user }}', mode: '2700' }
template: > - { path: '{{ COMMON_LOG_DIR }}/notifier', owner: 'syslog', group: 'syslog', mode: '0664' }
src=notifier-celery-workers-supervisor.sh.j2
dest="{{ notifier_app_dir }}/notifier-celery-workers-supervisor.sh"
mode=0775
become_user: "{{ notifier_user }}"
- name: write supervisord wrapper for scheduler - name: Write supervisord wrapper for celery workers and scheduler
template: > template:
src=notifier-scheduler-supervisor.sh.j2 src: "{{ item.src }}"
dest="{{ notifier_app_dir }}/notifier-scheduler-supervisor.sh" dest: "{{ item.dest }}"
mode=0775 mode: "0775"
become_user: "{{ notifier_user }}" become_user: "{{ notifier_user }}"
with_items:
- { src: 'notifier-celery-workers-supervisor.sh.j2', dest: '{{ notifier_app_dir }}/notifier-celery-workers-supervisor.sh' }
- { src: 'notifier-scheduler-supervisor.sh.j2', dest: '{{ notifier_app_dir }}/notifier-scheduler-supervisor.sh' }
- name: write supervisord config for celery workers - name: Write supervisord config for celery workers and scheduler
template: > template:
src=edx/app/supervisor/conf.d/notifier-celery-workers.conf.j2 src: "{{ item.src }}"
dest="{{ supervisor_available_dir }}/notifier-celery-workers.conf" dest: "{{ item.dest }}"
become_user: "{{ supervisor_user }}"
- name: write supervisord config for scheduler
template: >
src=edx/app/supervisor/conf.d/notifier-scheduler.conf.j2
dest="{{ supervisor_available_dir }}/notifier-scheduler.conf"
become_user: "{{ supervisor_user }}" become_user: "{{ supervisor_user }}"
with_items:
- { src: 'edx/app/supervisor/conf.d/notifier-celery-workers.conf.j2', dest: '{{ supervisor_available_dir }}/notifier-celery-workers.conf' }
- { src: 'edx/app/supervisor/conf.d/notifier-scheduler.conf.j2', dest: '{{ supervisor_available_dir }}/notifier-scheduler.conf' }
- name: enable supervisord config for celery workers - name: Enable supervisord config for celery workers
file: > file:
src="{{ supervisor_available_dir }}/notifier-celery-workers.conf" src: "{{ supervisor_available_dir }}/notifier-celery-workers.conf"
dest="{{ supervisor_cfg_dir }}/notifier-celery-workers.conf" dest: "{{ supervisor_cfg_dir }}/notifier-celery-workers.conf"
state=link state: link
force=yes force: yes
become_user: "{{ supervisor_user }}" become_user: "{{ supervisor_user }}"
notify: restart notifier-celery-workers notify:
- restart notifier-celery-workers
when: not disable_edx_services when: not disable_edx_services
- name: enable supervisord config for scheduler - name: Enable supervisord config for scheduler
file: > file:
src="{{ supervisor_available_dir }}/notifier-scheduler.conf" src: "{{ supervisor_available_dir }}/notifier-scheduler.conf"
dest="{{ supervisor_cfg_dir }}/notifier-scheduler.conf" dest: "{{ supervisor_cfg_dir }}/notifier-scheduler.conf"
state=link state: link
force=yes force: yes
become_user: "{{ supervisor_user }}" become_user: "{{ supervisor_user }}"
notify: restart notifier-scheduler notify:
- restart notifier-scheduler
when: not disable_edx_services when: not disable_edx_services
- include: deploy.yml tags=deploy - include: deploy.yml
tags:
- deploy
--- ---
# oraclejdk # oraclejdk
# #
# Dependencies: # Dependencies:
...@@ -12,42 +11,52 @@ ...@@ -12,42 +11,52 @@
# - common # - common
# - oraclejdk # - oraclejdk
- name: install debian needed pkgs - name: Install debian needed pkgs
apt: pkg={{ item }} apt:
with_items: oraclejdk_debian_pkgs name: "{{ item }}"
with_items: "{{ oraclejdk_debian_pkgs }}"
- name: download Oracle Java
shell: > - name: Download Oracle Java
curl -b gpw_e24=http%3A%2F%2Fwww.oracle.com -b oraclelicense=accept-securebackup-cookie -O -L {{ oraclejdk_url }} shell: "curl -b gpw_e24=http%3A%2F%2Fwww.oracle.com -b oraclelicense=accept-securebackup-cookie -O -L {{ oraclejdk_url }}"
executable=/bin/bash args:
chdir=/var/tmp executable: /bin/bash
creates=/var/tmp/{{ oraclejdk_file }} chdir: /var/tmp
creates: "/var/tmp/{{ oraclejdk_file }}"
- name: create jvm dir
file: > - name: Create jvm dir
path=/usr/lib/jvm file:
state=directory path: /usr/lib/jvm
owner=root state: directory
group=root owner: root
group: root
- name: untar Oracle Java
shell: > - name: Untar Oracle Java
tar -C /usr/lib/jvm -zxvf /var/tmp/{{ oraclejdk_file }} shell: "tar -C /usr/lib/jvm -zxvf /var/tmp/{{ oraclejdk_file }}"
executable=/bin/bash args:
creates=/usr/lib/jvm/{{ oraclejdk_base }} executable: /bin/bash
creates: "/usr/lib/jvm/{{ oraclejdk_base }}"
- name: create symlink expected by elasticsearch
file: src=/usr/lib/jvm/{{ oraclejdk_base }} dest={{ oraclejdk_link }} state=link force=yes - name: Create symlink expected by elasticsearch
file:
- name: update alternatives java src: "/usr/lib/jvm/{{ oraclejdk_base }}"
alternatives: > dest: "{{ oraclejdk_link }}"
name={{ item }} state: link
link="/usr/bin/{{ item }}" force: yes
path="/usr/lib/jvm/{{ oraclejdk_base }}/bin/{{ item }}"
- name: Update alternatives java
alternatives:
name: "{{ item }}"
link: "/usr/bin/{{ item }}"
path: "/usr/lib/jvm/{{ oraclejdk_base }}/bin/{{ item }}"
with_items: with_items:
- java - java
- javac - javac
- javaws - javaws
- name: add JAVA_HOME for Oracle Java - name: Add JAVA_HOME for Oracle Java
template: src=java.sh.j2 dest=/etc/profile.d/java.sh owner=root group=root mode=0755 template:
src: "java.sh.j2"
dest: "/etc/profile.d/java.sh"
owner: root
group: root
mode: "0755"
...@@ -22,5 +22,4 @@ ...@@ -22,5 +22,4 @@
# #
- include: security-ubuntu.yml - include: security-ubuntu.yml
when: when: ansible_distribution == 'Ubuntu'
- ansible_distribution == 'Ubuntu'
---
#### Enable periodic security updates #### Enable periodic security updates
- name: Install security packages
apt:
name: "{{ item }}"
state: latest
update_cache: yes
with_items: "{{ security_debian_pkgs }}"
- name: install security packages
apt: name={{ item }} state=latest update_cache=yes
with_items: security_debian_pkgs
- name: Update all system packages
- name: update all system packages apt:
apt: upgrade=safe upgrade: safe
when: SECURITY_UPGRADE_ON_ANSIBLE when: SECURITY_UPGRADE_ON_ANSIBLE
- name: configure periodic unattended-upgrades - name: Configure periodic unattended-upgrades
template: > template:
src=etc/apt/apt.conf.d/10periodic src: "etc/apt/apt.conf.d/10periodic"
dest=/etc/apt/apt.conf.d/10periodic dest: "/etc/apt/apt.conf.d/10periodic"
owner=root group=root mode=0644 owner: root
group: root
mode: "0644"
when: SECURITY_UNATTENDED_UPGRADES when: SECURITY_UNATTENDED_UPGRADES
- name: disable unattended-upgrades - name: Disable unattended-upgrades
file: path=/etc/apt/apt.conf.d/10periodic state=absent file:
path: "/etc/apt/apt.conf.d/10periodic"
state: absent
when: not SECURITY_UNATTENDED_UPGRADES when: not SECURITY_UNATTENDED_UPGRADES
- name: only unattended-upgrade from security repo - name: Only unattended-upgrade from security repo
template: > template:
src=etc/apt/apt.conf.d/20unattended-upgrade src: "etc/apt/apt.conf.d/20unattended-upgrade"
dest=/etc/apt/apt.conf.d/20unattended-upgrade dest: "/etc/apt/apt.conf.d/20unattended-upgrade"
owner=root group=root mode=0644 owner: root
group: root
mode: "0644"
when: SECURITY_UNATTENDED_UPGRADES and not SECURITY_UPDATE_ALL_PACKAGES when: SECURITY_UNATTENDED_UPGRADES and not SECURITY_UPDATE_ALL_PACKAGES
- name: disable security only updates on unattended-upgrades - name: Disable security only updates on unattended-upgrades
file: path=/etc/apt/apt.conf.d/20unattended-upgrade state=absent file:
path: "/etc/apt/apt.conf.d/20unattended-upgrade"
state: absent
when: SECURITY_UPDATE_ALL_PACKAGES or not SECURITY_UNATTENDED_UPGRADES when: SECURITY_UPDATE_ALL_PACKAGES or not SECURITY_UNATTENDED_UPGRADES
#### Bash security vulnerability #### Bash security vulnerability
- name: Check if we are vulnerable - name: Check if we are vulnerable
shell: executable=/bin/bash chdir=/tmp foo='() { echo vulnerable; }' bash -c foo shell: "executable=/bin/bash chdir=/tmp foo='() { echo vulnerable; }' bash -c foo"
register: test_vuln register: test_vuln
ignore_errors: yes ignore_errors: yes
- name: Apply bash security update if we are vulnerable - name: Apply bash security update if we are vulnerable
apt: name=bash state=latest update_cache=true apt:
name: bash
state: latest
update_cache: yes
when: "'vulnerable' in test_vuln.stdout" when: "'vulnerable' in test_vuln.stdout"
- name: Check again and fail if we are still vulnerable - name: Check again and fail if we are still vulnerable
shell: executable=/bin/bash foo='() { echo vulnerable; }' bash -c foo shell: "executable=/bin/bash foo='() { echo vulnerable; }' bash -c foo"
when: "'vulnerable' in test_vuln.stdout" when: "'vulnerable' in test_vuln.stdout"
register: test_vuln register: test_vuln
failed_when: "'vulnerable' in test_vuln.stdout" failed_when: "'vulnerable' in test_vuln.stdout"
...@@ -52,20 +66,23 @@ ...@@ -52,20 +66,23 @@
#### GHOST security vulnerability #### GHOST security vulnerability
- name: GHOST.c - name: GHOST.c
copy: > copy:
src=tmp/GHOST.c src: "tmp/GHOST.c"
dest=/tmp/GHOST.c dest: "/tmp/GHOST.c"
owner=root group=root owner: root
group: root
- name: compile GHOST - name: Compile GHOST
shell: gcc -o /tmp/GHOST /tmp/GHOST.c shell: "gcc -o /tmp/GHOST /tmp/GHOST.c"
- name: Check if we are vulnerable - name: Check if we are vulnerable
shell: /tmp/GHOST shell: "/tmp/GHOST"
register: test_ghost_vuln register: test_ghost_vuln
ignore_errors: yes ignore_errors: yes
- name: Apply glibc security update if we are vulnerable - name: Apply glibc security update if we are vulnerable
apt: name=libc6 state=latest update_cache=true apt:
when: "'vulnerable' in test_ghost_vuln.stdout" name: libc6
state: latest
update_cache: yes
when: "'vulnerable' in test_ghost_vuln.stdout"
\ No newline at end of file
...@@ -35,3 +35,6 @@ user_rbash_links: ...@@ -35,3 +35,6 @@ user_rbash_links:
# will take precedence over the paramter # will take precedence over the paramter
user_info: [] user_info: []
user_debian_pkgs:
# This is needed for the uri module to work correctly.
- python-httplib2
...@@ -70,6 +70,14 @@ ...@@ -70,6 +70,14 @@
# want to provide more binaries add them to user_rbash_links # want to provide more binaries add them to user_rbash_links
# which can be passed in as a parameter to the role. # which can be passed in as a parameter to the role.
# #
- name: Install debian packages user role needs
apt:
name: "{{ item }}"
install_recommends: yes
state: present
update_cache: yes
with_items: "{{ user_debian_pkgs }}"
when: ansible_distribution in common_debian_variants
- debug: - debug:
var: user_info var: user_info
...@@ -116,13 +124,16 @@ ...@@ -116,13 +124,16 @@
- name: Check the ssh key(s) for user(s) over github - name: Check the ssh key(s) for user(s) over github
uri: uri:
url: "https://github.com/{{ item.name }}.keys" url: "https://github.com/{{ item.name }}.keys"
# We don't care if absent users lack ssh keys
when: item.get('state', 'present') == 'present'
with_items: "{{ user_info }}" with_items: "{{ user_info }}"
register: github_users_return register: github_users_return
- fail: - debug:
msg: "User {{ item.item.name }} doesn't have an SSH key associated with their account" msg: "User {{ item.item.name }} doesn't have an SSH key associated with their account"
with_items: "{{ github_users_return.results | default([]) }}" with_items: "{{ github_users_return.results | default([]) }}"
when: item.content_length == "0" # We skip users in the previous task, and they end up with no content_length
when: item.get('content_length') and item.content_length == "0"
- name: Get github key(s) and update the authorized_keys file - name: Get github key(s) and update the authorized_keys file
authorized_key: authorized_key:
......
...@@ -3,91 +3,98 @@ ...@@ -3,91 +3,98 @@
# Tasks related to deploying the code jail for the XQWatcher # Tasks related to deploying the code jail for the XQWatcher
# #
- name: Create sandboxed user - name: Create sandboxed user
user: > user:
name="{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}" name: "{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}"
shell=/bin/false shell: /bin/false
home="/dev/null" home: "/dev/null"
with_items: XQWATCHER_COURSES with_items: "{{ XQWATCHER_COURSES }}"
tags: tags:
- install - install
- install:base - install:base
# #
# Need to disable aa to update the virutalenv # Need to disable apparmor to update the virutalenv
- name: write out apparmor config - name: Write out apparmor config
template: > template:
src=etc/apparmor.d/code.jail.j2 src: "etc/apparmor.d/code.jail.j2"
dest="/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}" dest: "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
mode=0644 owner=root group=root owner: root
with_items: XQWATCHER_COURSES group: root
mode: "0644"
with_items: "{{ XQWATCHER_COURSES }}"
tags: tags:
- install - install
- install:configuration - install:configuration
- name: write out sudoers for watcher - name: Write out sudoers for watcher
template: > template:
src=etc/sudoers.d/95-xqwatcher.j2 src: "etc/sudoers.d/95-xqwatcher.j2"
dest=/etc/sudoers.d/95-xqwatcher-{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user|replace('.', '') }} dest: "/etc/sudoers.d/95-xqwatcher-{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user|replace('.', '') }}"
mode=0440 owner=root group=root validate='visudo -c -f %s' owner: root
with_items: XQWATCHER_COURSES group: root
mode: "0440"
validate: 'visudo -c -f %s'
with_items: "{{ XQWATCHER_COURSES }}"
tags: tags:
- install - install
- install:configuration - install:configuration
# see comment below as to why this is skipped. # see comment below as to why this is skipped.
- name: put code jail into aa-complain - name: Put code jail into aa-complain
command: /usr/sbin/aa-complain "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}" command: /usr/sbin/aa-complain "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
with_items: XQWATCHER_COURSES with_items: "{{ XQWATCHER_COURSES }}"
tags: tags:
- manage - manage
- manage:sandbox - manage:sandbox
- name: create jail virtualenv - name: Create jail virtualenv
shell: > shell: "/usr/local/bin/virtualenv --no-site-packages {{ xqwatcher_app_dir }}/venvs/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
/usr/local/bin/virtualenv --no-site-packages {{ xqwatcher_app_dir }}/venvs/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }} with_items: "{{ XQWATCHER_COURSES }}"
with_items: XQWATCHER_COURSES
tags: tags:
- install - install
- install:code - install:code
- name: write out requirements.txt - name: Write out requirements.txt
template: > template:
src=edx/app/xqwatcher/data/requirements.txt.j2 src: "edx/app/xqwatcher/data/requirements.txt.j2"
dest={{ xqwatcher_app_dir }}/data/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}-requirements.txt dest: "{{ xqwatcher_app_dir }}/data/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}-requirements.txt"
mode=0440 owner=root group=root owner: root
with_items: XQWATCHER_COURSES group: root
mode: "0440"
with_items: "{{ XQWATCHER_COURSES }}"
tags: tags:
- install - install
- install:code - install:code
- name: install course specific python requirements - name: Install course specific python requirements
pip: > pip:
requirements="{{ xqwatcher_app_data }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}-requirements.txt" requirements: "{{ xqwatcher_app_data }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}-requirements.txt"
virtualenv="{{ xqwatcher_app_dir }}/venvs/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}" virtualenv: "{{ xqwatcher_app_dir }}/venvs/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
state=present state: present
extra_args="{{ XQWATCHER_PIP_EXTRA_ARGS }}" extra_args: "{{ XQWATCHER_PIP_EXTRA_ARGS }}"
with_items: XQWATCHER_COURSES with_items: "{{ XQWATCHER_COURSES }}"
tags: tags:
- install - install
- install:code - install:code
- name: give other read permissions to the virtualenv - name: Give other read permissions to the virtualenv
shell: > shell: "chown -R {{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }} {{ xqwatcher_app_dir }}/venvs/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
chown -R {{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }} {{ xqwatcher_app_dir }}/venvs/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }} with_items: "{{ XQWATCHER_COURSES }}"
with_items: XQWATCHER_COURSES
tags: tags:
- install - install
- install:code - install:code
- name: start apparmor service - name: Start apparmor service
service: name=apparmor state=started service:
name: apparmor
state: started
tags: tags:
- manage - manage
- manage:sandbox - manage:sandbox
- name: load code sandbox profile - name: Load code sandbox profile
command: apparmor_parser -r "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}" command: apparmor_parser -r "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
with_items: XQWATCHER_COURSES with_items: "{{ XQWATCHER_COURSES }}"
tags: tags:
- manage - manage
- manage:sandbox - manage:sandbox
...@@ -96,20 +103,20 @@ ...@@ -96,20 +103,20 @@
# Leaves aa in either complain or enforce depending upon the value of the # Leaves aa in either complain or enforce depending upon the value of the
# CODE_JAIL_COMPLAIN var. Complain mode should never be run in an # CODE_JAIL_COMPLAIN var. Complain mode should never be run in an
# environment where untrusted users can submit code # environment where untrusted users can submit code
- name: put code jail into aa-complain - name: Put code jail into aa-complain
command: /usr/sbin/aa-complain "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}" command: /usr/sbin/aa-complain "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
when: CODE_JAIL_COMPLAIN|bool when: CODE_JAIL_COMPLAIN|bool
with_items: XQWATCHER_COURSES with_items: "{{ XQWATCHER_COURSES }}"
# AA having issues on 14.04 # AA having issues on 14.04
# https://github.com/edx/codejail/issues/38 # https://github.com/edx/codejail/issues/38
tags: tags:
- manage - manage
- manage:sandbox - manage:sandbox
- name: put code sandbox into aa-enforce - name: Put code sandbox into aa-enforce
command: /usr/sbin/aa-enforce "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}" command: /usr/sbin/aa-enforce "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
when: not CODE_JAIL_COMPLAIN|bool when: not CODE_JAIL_COMPLAIN|bool
with_items: XQWATCHER_COURSES with_items: "{{ XQWATCHER_COURSES }}"
tags: tags:
- manage - manage
- manage:sandbox - manage:sandbox
- name: install courses ssh key - name: Install courses ssh key
copy: > copy:
content="{{ XQWATCHER_GIT_IDENTITY }}" content: "{{ XQWATCHER_GIT_IDENTITY }}"
dest={{ xqwatcher_app_dir }}/.ssh/{{ xqwatcher_service_name }}-courses dest: "{{ xqwatcher_app_dir }}/.ssh/{{ xqwatcher_service_name }}-courses"
owner={{ xqwatcher_user }} group={{ xqwatcher_user }} mode=0600 owner: "{{ xqwatcher_user }}"
group: "{{ xqwatcher_user }}"
mode: "0600"
tags: tags:
- install - install
- install:code - install:code
#TODO: remove once xqwatcher.json can be pulled out into /edx/etc/ #TODO: remove once xqwatcher.json can be pulled out into /edx/etc/
- name: write out watcher config file - name: Write out watcher config file
template: > template:
src=edx/app/xqwatcher/xqwatcher.json.j2 src: "edx/app/xqwatcher/xqwatcher.json.j2"
dest={{ xqwatcher_conf_dir }}/xqwatcher.json dest: "{{ xqwatcher_conf_dir }}/xqwatcher.json"
mode=0644 owner={{ xqwatcher_user }} group={{ xqwatcher_user }} owner: "{{ xqwatcher_user }}"
group: "{{ xqwatcher_user }}"
mode: "0644"
tags: tags:
- install - install
- install:configuration - install:configuration
......
...@@ -2,12 +2,13 @@ ...@@ -2,12 +2,13 @@
# checking out the grader code from the repository specified on # checking out the grader code from the repository specified on
# a per queue basis. # a per queue basis.
- name: checkout grader code - name: Checkout grader code
git_2_0_1: > git_2_0_1:
dest={{ xqwatcher_app_dir }}/data/{{ item.COURSE }} repo={{ item.GIT_REPO }} repo: "{{ item.GIT_REPO }}"
version={{ item.GIT_REF }} dest: "{{ xqwatcher_app_dir }}/data/{{ item.COURSE }}"
ssh_opts="{{ xqwatcher_course_git_ssh_opts }}" version: "{{ item.GIT_REF }}"
with_items: XQWATCHER_COURSES ssh_opts: "{{ xqwatcher_course_git_ssh_opts }}"
with_items: "{{ XQWATCHER_COURSES }}"
tags: tags:
- install - install
- install:code - install:code
...@@ -2,59 +2,63 @@ ...@@ -2,59 +2,63 @@
# The watcher can watch one or many queues and dispatch submissions # The watcher can watch one or many queues and dispatch submissions
# to the appropriate grader which lives in a separate SCM repository. # to the appropriate grader which lives in a separate SCM repository.
- name: install application requirements - name: Install application requirements
pip: > pip:
requirements="{{ xqwatcher_requirements_file }}" requirements: "{{ xqwatcher_requirements_file }}"
virtualenv="{{ xqwatcher_app_dir }}/venvs/{{ xqwatcher_service_name }}" state=present virtualenv: "{{ xqwatcher_app_dir }}/venvs/{{ xqwatcher_service_name }}"
state: present
become: true become: true
become_user: "{{ xqwatcher_user }}" become_user: "{{ xqwatcher_user }}"
tags: tags:
- install - install
- install:app-requirements - install:app-requirements
- name: write out course config files - name: Write out course config files
template: > template:
src=edx/app/xqwatcher/conf.d/course.json.j2 src: "edx/app/xqwatcher/conf.d/course.json.j2"
dest={{ xqwatcher_conf_dir }}/conf.d/{{ item.COURSE }}.json dest: "{{ xqwatcher_conf_dir }}/conf.d/{{ item.COURSE }}.json"
mode=0644 owner={{ xqwatcher_user }} group={{ xqwatcher_user }} owner: "{{ xqwatcher_user }}"
with_items: XQWATCHER_COURSES group: "{{ xqwatcher_user }}"
mode: "0644"
with_items: "{{ XQWATCHER_COURSES }}"
tags: tags:
- install - install
- install:configuration - install:configuration
- name: write supervisord config - name: Write supervisord config
template: > template:
src=edx/app/supervisor/conf.d/xqwatcher.conf.j2 src: "edx/app/supervisor/conf.d/xqwatcher.conf.j2"
dest="{{ xqwatcher_supervisor_available_dir }}/xqwatcher.conf" dest: "{{ xqwatcher_supervisor_available_dir }}/xqwatcher.conf"
group={{ xqwatcher_user }} mode=0650 group: "{{ xqwatcher_user }}"
mode: "0650"
tags: tags:
- install - install
- install:configuration - install:configuration
- name: enable supervisor script - name: Enable supervisor script
file: > file:
src={{ xqwatcher_supervisor_available_dir }}/xqwatcher.conf src: "{{ xqwatcher_supervisor_available_dir }}/xqwatcher.conf"
dest={{ xqwatcher_supervisor_cfg_dir }}/xqwatcher.conf dest: "{{ xqwatcher_supervisor_cfg_dir }}/xqwatcher.conf"
state=link state: link
force=yes force: yes
when: not disable_edx_services when: not disable_edx_services
tags: tags:
- install - install
- install:configuration - install:configuration
- name: update supervisor configuration - name: Update supervisor configuration
shell: "{{ xqwatcher_supervisor_ctl }} -c {{ xqwatcher_supervisor_app_dir }}/supervisord.conf update" shell: "{{ xqwatcher_supervisor_ctl }} -c {{ xqwatcher_supervisor_app_dir }}/supervisord.conf update"
when: not disable_edx_services when: not disable_edx_services
tags: tags:
- manage - manage
- manage:update - manage:update
- name: restart xqwatcher - name: Restart xqwatcher
supervisorctl: > supervisorctl:
state=restarted name: "{{ xqwatcher_service_name }}"
supervisorctl_path={{ xqwatcher_supervisor_ctl }} supervisorctl_path: "{{ xqwatcher_supervisor_ctl }}"
config={{ xqwatcher_supervisor_app_dir }}/supervisord.conf config: "{{ xqwatcher_supervisor_app_dir }}/supervisord.conf"
name={{ xqwatcher_service_name }} state: restarted
when: not disable_edx_services when: not disable_edx_services
become_user: "{{ xqwatcher_user }}" become_user: "{{ xqwatcher_user }}"
tags: tags:
......
...@@ -86,26 +86,28 @@ ...@@ -86,26 +86,28 @@
# -----END RSA PRIVATE KEY----- # -----END RSA PRIVATE KEY-----
# #
- name: create conf dir - name: Create conf dir
file: > file:
path="{{ xqwatcher_conf_dir }}" path: "{{ xqwatcher_conf_dir }}"
state=directory state: directory
owner="{{ xqwatcher_user }}" owner: "{{ xqwatcher_user }}"
group="{{ xqwatcher_user }}" group: "{{ xqwatcher_user }}"
tags: tags:
- install - install
- install:base - install:base
- name: create conf.d dir - name: Create conf.d dir
file: > file:
path="{{ xqwatcher_conf_dir }}/conf.d" path: "{{ xqwatcher_conf_dir }}/conf.d"
state=directory state: directory
owner="{{ xqwatcher_user }}" owner: "{{ xqwatcher_user }}"
group="{{ xqwatcher_user }}" group: "{{ xqwatcher_user }}"
tags: tags:
- install - install
- install:base - install:base
- include: code_jail.yml CODE_JAIL_COMPLAIN=false - include: code_jail.yml CODE_JAIL_COMPLAIN=false
- include: deploy.yml tags=deploy - include: deploy.yml
tags:
- deploy
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
# ansible-playbook ./run_role.yml -i "hostname," -e role=my_awesome_role # ansible-playbook ./run_role.yml -i "hostname," -e role=my_awesome_role
# #
- hosts: all - hosts: all
sudo: True become: True
gather_facts: True gather_facts: True
roles: roles:
- "{{role}}" - "{{role}}"
- name: Apply security role - name: Apply security role
hosts: all hosts: all
sudo: yes become: True
roles: roles:
- security - security
- name: Configure instance(s) - name: Configure instance(s)
hosts: all hosts: all
sudo: True become: True
gather_facts: True gather_facts: True
vars: vars:
migrate_db: 'yes' migrate_db: 'yes'
......
- name: Configure group cluster - name: Configure group cluster
hosts: all hosts: all
sudo: True become: True
gather_facts: True gather_facts: True
vars: vars:
vagrant_cluster: yes vagrant_cluster: yes
......
- name: Configure instance(s) - name: Configure instance(s)
hosts: all hosts: all
sudo: True become: True
gather_facts: True gather_facts: True
vars: vars:
migrate_db: 'yes' migrate_db: 'yes'
......
- name: Configure instance(s) - name: Configure instance(s)
hosts: all hosts: all
sudo: True become: True
gather_facts: True gather_facts: True
vars: vars:
migrate_db: 'yes' migrate_db: 'yes'
......
...@@ -318,7 +318,6 @@ instance_tags: ...@@ -318,7 +318,6 @@ instance_tags:
root_ebs_size: $root_ebs_size root_ebs_size: $root_ebs_size
name_tag: $name_tag name_tag: $name_tag
dns_zone: $dns_zone dns_zone: $dns_zone
rabbitmq_refresh: True
elb: $elb elb: $elb
EOF EOF
......
...@@ -299,6 +299,11 @@ if [[ ! -x /usr/bin/git || ! -x /usr/bin/pip ]]; then ...@@ -299,6 +299,11 @@ if [[ ! -x /usr/bin/git || ! -x /usr/bin/pip ]]; then
libxslt-dev curl libmysqlclient-dev --force-yes libxslt-dev curl libmysqlclient-dev --force-yes
fi fi
# this is missing on 14.04 (base package on 12.04)
# we need to do this on any build, since the above apt-get
# only runs on a build from scratch
/usr/bin/apt-get install -y python-httplib2 --force-yes
# upgrade setuptools early to avoid no distributin errors # upgrade setuptools early to avoid no distributin errors
pip install --upgrade setuptools==18.3.2 pip install --upgrade setuptools==18.3.2
...@@ -650,7 +655,7 @@ def launch_and_configure(ec2_args): ...@@ -650,7 +655,7 @@ def launch_and_configure(ec2_args):
system_start = time.time() system_start = time.time()
for _ in xrange(EC2_STATUS_TIMEOUT): for _ in xrange(EC2_STATUS_TIMEOUT):
status = ec2.get_all_instance_status(inst.id) status = ec2.get_all_instance_status(inst.id)
if status[0].system_status.status == u'ok': if status and status[0].system_status.status == u'ok':
system_delta = time.time() - system_start system_delta = time.time() - system_start
run_summary.append(('EC2 Status Checks', system_delta)) run_summary.append(('EC2 Status Checks', system_delta))
print "[ OK ] {:0>2.0f}:{:0>2.0f}".format( print "[ OK ] {:0>2.0f}:{:0>2.0f}".format(
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment