Commit d49a092a by Arbab Nazar, committed by GitHub

Merge branch 'master' into arbab/edx_service-rewrite

parents bde787cb 36a140b6
- Role: hadoop_common
- Enable log retention by default to assist with debugging. YARN will now retain the stdout and stderr logs produced by MapReduce tasks for 24 hours. They can be retrieved by running "yarn logs -applicationId YOUR_APPLICATION_ID".
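  A deployment that wants a different retention window can override the role defaults in its extra vars; a minimal sketch (the 7-day value is illustrative, not part of this change):

      yarn_site_config:
        yarn.log-aggregation-enable: true
        # keep aggregated logs for 7 days instead of the default 24 hours
        yarn.log-aggregation.retain-seconds: 604800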
- Role: rabbitmq
- Removed the RABBITMQ_CLUSTERED var and related tooling. The goal of the var was to make it possible to set up a cluster in the AWS environment without having to know all of the cluster's IPs beforehand. It relied on the `hostvars` Ansible variable, which no longer works the way we need in 1.9. This may get fixed in the future, but for now the "magic" setup doesn't work.
- Changed `rabbitmq_clustered_hosts` to RABBITMQ_CLUSTERED_HOSTS. Cluster members must now be listed explicitly, as sketched below.
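  A minimal sketch of the new variable (host names are placeholders; the rabbit@ prefix follows RabbitMQ's node-naming convention):

      RABBITMQ_CLUSTERED_HOSTS:
        - "rabbit@ip-10-0-0-10"
        - "rabbit@ip-10-0-0-11"
        - "rabbit@ip-10-0-0-12"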
......
......@@ -2,6 +2,6 @@
- name: Configure instance(s)
hosts: all
sudo: True
become: True
roles:
- jenkins_analytics
......@@ -6,8 +6,8 @@
# - instance_id - the ec2 instance ID used to create the AMI
# - edx_environment - value to use for the environment tag
# - deployment - value to use for the deploy tag
# - cluster_repo - the url of the github repo for this cluster
# - cluster_version - git hash of the cluster (play, service, IDA) being deployed
# - app_repo - the url of the github repo for this app
# - app_version - git hash of the app (play, service, IDA) being deployed
# - play - the play that was run
# - configuration_repo - The github url for the configuration repo
# - configuration_version - The version (git hash) of configuration
......@@ -34,7 +34,7 @@
# -e play=pipline-test \
# -e deployment=edx \
# -e edx_environment=sandbox \
# -e cluster_version=12345 \
# -e app_version=12345 \
# -e configuration_version=12345
# -e configuration_secure_version=12345
# -e cache_id=12345
......@@ -55,7 +55,7 @@
- name: Create AMI
ec2_ami_2_0_0_1:
instance_id: "{{ instance_id }}"
name: "{{ play }} -- {{ cluster_version }}"
name: "{{ edx_environment }} -- {{ deployment }} -- {{ play }} -- {{ app_version }}"
region: "{{ ec2_region }}"
wait: "{{ ami_wait }}"
wait_timeout: "{{ ami_creation_timeout }}"
......@@ -63,7 +63,7 @@
description: "AMI built via edX continuous delivery pipeline - Ansible version: {{ ansible_version }}"
# we use a JSON object here, as there is string interpolation in the keys.
tags: "{
'version:{{ play }}':'{{ cluster_repo }} {{ cluster_version }}',
'version:{{ play }}':'{{ app_repo }} {{ app_version }}',
'version:configuration':'{{ configuration_repo }} {{ configuration_version }}',
'version:configuration_secure':'{{ configuration_secure_repo }} {{ configuration_secure_version }}',
'play':'{{ play }}',
......
......@@ -2,10 +2,10 @@
# This instance will have an autogenerated key.
#
# required variables for this playbook:
# - ec2_subnet_id - Subnet to bring up the ec2 instance
# - base_ami_id - The base AMI ID
# - ec2_vpc_subnet_id - The Subnet ID to bring up the instance
# - ec2_security_group_id - The security group ID to use
# - base_ami_id - The base AMI ID
# - ec2_vpc_subnet_id - The Subnet ID to bring up the instance
# - ec2_security_group_id - The security group ID to use
# - ec2_instance_profile_name - The instance profile that should be used to launch this AMI
#
# Other Variables:
# - ec2_region - The region the server should be brought up in
......@@ -63,11 +63,12 @@
vpc_subnet_id: "{{ ec2_vpc_subnet_id }}"
assign_public_ip: "{{ ec2_assign_public_ip }}"
volumes:
- device_name: /dev/xvda
volume_type: standard
- device_name: /dev/sdf
volume_type: 'gp2'
volume_size: "{{ ebs_volume_size }}"
wait: yes
wait_timeout: "{{ ec2_timeout }}"
instance_profile_name: "{{ ec2_instance_profile_name }}"
register: ec2_instance_register
- name: Wait for SSH to come up
......
# This playbook will check for migrations that need to be run for Django applications within a larger
# Django application. If migrations exist, it will run the migrations while saving the output as an artifact.
#
# The playbook uses the Django management commands found in this Django app repo:
# https://github.com/edx/edx-django-release-util
# The app above therefore needs to be installed in the Django application being checked for migrations.
#
# Required variables for this playbook:
#
# - APPLICATION_PATH - the top-level path of the Django application; the application lives underneath
# this directory in a directory with the same name as APPLICATION_NAME.
# NOTE: It is assumed that edx-django-release-util is one of its INSTALLED_APPS.
# - APPLICATION_NAME - The name of the application that we are migrating.
# - APPLICATION_USER - user which is meant to run the application
# - ARTIFACT_PATH - the path where the migration artifacts should be copied after completion
# - HIPCHAT_TOKEN - API token to send messages to hipchat
# - HIPCHAT_ROOM - ID or name of the room to send the notification
# - HIPCHAT_URL - URL of the hipchat API (defaults to v1 of the api)
#
# Other variables:
# - unapplied_migrations_output - the filename where the unapplied migration YAML output is stored
# - migration_output - the filename where the migration output is saved
#
# Example command line to run this playbook:
# ansible-playbook -vvvv -i "localhost," -c local \
# -e @overrides.yml \
# run_migrations.yml
#
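# For reference, a hypothetical overrides.yml supplying the required variables
# above (every value here is a placeholder, not a recommendation):
#
#   APPLICATION_PATH: /edx/app/credentials
#   APPLICATION_NAME: credentials
#   APPLICATION_USER: credentials
#   ARTIFACT_PATH: /tmp/migration-artifacts
#   HIPCHAT_TOKEN: 0123456789abcdef
#   HIPCHAT_ROOM: deployments
#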
- hosts: all
vars:
unapplied_migrations_output: unapplied_migrations.yml
migration_output: migration_output.yml
HIPCHAT_URL: https://api.hipchat.com/v2/
COMMAND_PREFIX: ". {{ APPLICATION_PATH }}/{{ APPLICATION_NAME }}_env; DB_MIGRATION_USER={{ DB_MIGRATION_USER }} DB_MIGRATION_PASS={{ DB_MIGRATION_PASS }} /edx/bin/python.{{ APPLICATION_NAME }} /edx/bin/manage.{{ APPLICATION_NAME }} "
gather_facts: False
tasks:
- name: Create a temporary directory for the migration output.
command: mktemp -d
become_user: "{{ APPLICATION_USER }}"
register: temp_output_dir
- name: generate list of unapplied migrations
shell: '{{ COMMAND_PREFIX }} show_unapplied_migrations --output_file "{{ temp_output_dir.stdout }}/{{ unapplied_migrations_output }}"'
become_user: "{{ APPLICATION_USER }}"
- name: migrate to apply any unapplied migrations
shell: '{{ COMMAND_PREFIX }} run_migrations "{{ temp_output_dir.stdout }}/{{ unapplied_migrations_output }}" --output_file "{{ temp_output_dir.stdout }}/{{ migration_output }}"'
become_user: "{{ APPLICATION_USER }}"
- name: Transfer artifacts to the proper place.
fetch:
src: "{{ temp_output_dir.stdout }}/{{ item }}"
dest: "{{ ARTIFACT_PATH }}"
flat: True
fail_on_missing: True
      mode: "0700"
with_items:
- "{{ unapplied_migrations_output }}"
- "{{ migration_output }}"
  - name: Send Hipchat notification that migrations have finished
hipchat_2_0_0_1:
api: "{{ HIPCHAT_URL }}"
token: "{{ HIPCHAT_TOKEN }}"
room: "{{ HIPCHAT_ROOM }}"
msg: "Migrations have completed."
ignore_errors: yes
when: HIPCHAT_TOKEN is defined
......@@ -6,7 +6,7 @@
- name: Configure instance(s)
hosts: all
sudo: True
become: True
gather_facts: True
vars:
migrate_db: "yes"
......
......@@ -2,7 +2,7 @@
- name: Install go-agent-docker-server
hosts: all
sudo: True
become: True
gather_facts: True
roles:
- aws
......
......@@ -2,7 +2,7 @@
- name: Install go-agent
hosts: all
sudo: True
become: True
gather_facts: True
roles:
- aws
......
......@@ -4,7 +4,7 @@
- name: Install go-server
hosts: all
sudo: True
become: True
gather_facts: True
roles:
- aws
......
......@@ -3,7 +3,7 @@
# analyzing logs.
- name: Configure syslog server
hosts: all
sudo: yes
become: True
roles:
- common
- oraclejdk
......
---
# install and configure aide IDS
#
- name: Install aide
apt:
name: aide
state: present
- name: install aide
apt: pkg="aide" state="present"
- name: Configure aide defaults
template:
src: etc/default/aide.j2
dest: /etc/default/aide
owner: root
group: root
mode: "0644"
- name: configure aide defaults
template: >
src=etc/default/aide.j2 dest=/etc/default/aide
owner=root group=root mode=0644
- name: Open read permissions on aide logs
file:
name: /var/log/aide
recurse: yes
state: directory
mode: "0755"
- name: open read permissions on aide logs
file: >
name="/var/log/aide"
recurse="yes"
state="directory"
mode="755"
- name: aide initial scan (this can take a long time)
command: >
aideinit -y -f
creates=/var/lib/aide/aide.db
become: yes
- name: Aide initial scan (this can take a long time)
command: "aideinit -y -f"
args:
creates: "/var/lib/aide/aide.db"
become: yes
\ No newline at end of file
......@@ -153,13 +153,11 @@
name: ssh
state: restarted
become: True
when: sshd_config.changed
when: ansible_distribution in common_debian_variants
when: sshd_config.changed and ansible_distribution in common_debian_variants
- name: Restart ssh
service:
name: sshd
state: restarted
become: True
when: sshd_config.changed
when: ansible_distribution in common_redhat_variants
when: sshd_config.changed and ansible_distribution in common_redhat_variants
......@@ -102,8 +102,10 @@ common_redhat_pkgs:
- rsyslog
- git
- unzip
- acl
common_debian_pkgs:
- ntp
- acl
- lynx-cur
- logrotate
- rsyslog
......
......@@ -48,7 +48,7 @@ CREDENTIALS_CACHES:
CREDENTIALS_DJANGO_SETTINGS_MODULE: "credentials.settings.production"
CREDENTIALS_DOMAIN: 'credentials'
CREDENTIALS_URL_ROOT: 'http://{{ CREDENTIALS_DOMAIN }}:18150'
CREDENTIALS_LOGOUT_URL: '{{ CREDENTIALS_URL_ROOT }}/accounts/logout/'
CREDENTIALS_LOGOUT_URL: '{{ CREDENTIALS_URL_ROOT }}/logout/'
CREDENTIALS_OAUTH_URL_ROOT: '{{ EDXAPP_LMS_ROOT_URL | default("http://127.0.0.1:8000") }}/oauth2'
CREDENTIALS_OIDC_LOGOUT_URL: '{{ EDXAPP_LMS_ROOT_URL | default("http://127.0.0.1:8000") }}/logout'
......
......@@ -151,8 +151,8 @@ EDXAPP_ENABLE_MOBILE_REST_API: false
# Settings for API access management
EDXAPP_API_ACCESS_MANAGER_EMAIL: "api-access@example.com"
EDXAPP_API_ACCESS_FROM_EMAIL: "api-requests@example.com"
EDXAPP_API_DOCUMENTATION_URL: "http://edx.readthedocs.org/projects/edx-platform-api/en/latest/overview.html"
EDXAPP_AUTH_DOCUMENTATION_URL: "http://edx.readthedocs.org/projects/edx-platform-api/en/latest/authentication.html"
EDXAPP_API_DOCUMENTATION_URL: "http://course-catalog-api-guide.readthedocs.io/en/latest/"
EDXAPP_AUTH_DOCUMENTATION_URL: "http://course-catalog-api-guide.readthedocs.io/en/latest/authentication/index.html"
# Settings for affiliate cookie tracking
EDXAPP_AFFILIATE_COOKIE_NAME: 'dev_affiliate_id'
......
{% for w in edxapp_workers %}
[program:{{ w.service_variant }}_{{ w.queue }}_{{ w.concurrency }}]
environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_WORKERS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}CONCURRENCY={{ w.concurrency }},LOGLEVEL=info,DJANGO_SETTINGS_MODULE=aws,PYTHONPATH={{ edxapp_code_dir }},SERVICE_VARIANT={{ w.service_variant }}
environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_WORKERS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}CONCURRENCY={{ w.concurrency }},LOGLEVEL=info,DJANGO_SETTINGS_MODULE={{ worker_django_settings_module }},PYTHONPATH={{ edxapp_code_dir }},SERVICE_VARIANT={{ w.service_variant }}
user={{ common_web_user }}
directory={{ edxapp_code_dir }}
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
command={{ edxapp_venv_dir + '/bin/newrelic-admin run-program ' if w.monitor and COMMON_ENABLE_NEWRELIC_APP else ''}}{{ edxapp_venv_bin }}/python {{ edxapp_code_dir }}/manage.py {{ w.service_variant }} --settings=aws celery worker --loglevel=info --queues=edx.{{ w.service_variant }}.core.{{ w.queue }} --hostname=edx.{{ w.service_variant }}.core.{{ w.queue }}.%%h --concurrency={{ w.concurrency }} {{ '--maxtasksperchild ' + w.max_tasks_per_child|string if w.max_tasks_per_child is defined else '' }}
command={{ edxapp_venv_dir + '/bin/newrelic-admin run-program ' if w.monitor and COMMON_ENABLE_NEWRELIC_APP else ''}}{{ edxapp_venv_bin }}/python {{ edxapp_code_dir }}/manage.py {{ w.service_variant }} --settings={{ worker_django_settings_module }} celery worker --loglevel=info --queues=edx.{{ w.service_variant }}.core.{{ w.queue }} --hostname=edx.{{ w.service_variant }}.core.{{ w.queue }}.%%h --concurrency={{ w.concurrency }} {{ '--maxtasksperchild ' + w.max_tasks_per_child|string if w.max_tasks_per_child is defined else '' }}
killasgroup=true
stopasgroup=true
stopwaitsecs={{ w.stopwaitsecs | default(EDXAPP_WORKER_DEFAULT_STOPWAITSECS) }}
......
......@@ -6,6 +6,6 @@
ec2_tag:
resource: "{{ ansible_ec2_instance_id }}"
region: "{{ ansible_ec2_placement_region }}"
tags:
tags:
"version:forum" : "{{ forum_source_repo }} {{ forum_checkout.after|truncate(7,True,'') }}"
when: forum_checkout.after is defined
......@@ -17,7 +17,10 @@
# require in our default configuration.
#
#
- name: set git fetch.prune to ignore deleted remote refs
# Rewrite this task using the Ansible git_config module once we migrate to Ansible 2.x
# https://docs.ansible.com/ansible/git_config_module.html#git-config
#
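# A sketch of what that rewrite might look like (parameter names per the
# git_config module docs linked above; untested here):
#
#   - name: Set git fetch.prune to ignore deleted remote refs
#     git_config:
#       name: fetch.prune
#       scope: global
#       value: "true"
#     become_user: "{{ repo_owner }}"
#     when: GIT_REPOS is defined
#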
- name: Set git fetch.prune to ignore deleted remote refs
shell: git config --global fetch.prune true
become_user: "{{ repo_owner }}"
when: GIT_REPOS is defined
......@@ -25,28 +28,30 @@
- install
- install:code
- name: validate git protocol
fail: msg='GIT_REPOS.PROTOCOL must be "https" or "ssh"'
- name: Validate git protocol
fail:
    msg: '{{ item.PROTOCOL }} must be "https" or "ssh"'
when: (item.PROTOCOL != "https") and (item.PROTOCOL != "ssh") and GIT_REPOS is defined
with_items: GIT_REPOS
tags:
- install
- install:code
- name: install read-only ssh key
- name: Install read-only ssh key
copy:
dest: "{{ git_home }}/.ssh/{{ item.REPO }}"
content: "{{ item.SSH_KEY }}"
owner: "{{ repo_owner }}"
group: "{{ repo_group }}"
mode: 0600
mode: "0600"
when: item.PROTOCOL == "ssh" and GIT_REPOS is defined
with_items: GIT_REPOS
tags:
- install
- install:code
- name: checkout code over ssh
- name: Checkout code over ssh
git_2_0_1:
repo: "git@{{ item.DOMAIN }}:{{ item.PATH }}/{{ item.REPO }}"
dest: "{{ item.DESTINATION }}"
......@@ -61,7 +66,7 @@
- install
- install:code
- name: checkout code over https
- name: Checkout code over https
git_2_0_1:
repo: "https://{{ item.DOMAIN }}/{{ item.PATH }}/{{ item.REPO }}"
dest: "{{ item.DESTINATION }}"
......@@ -74,7 +79,7 @@
- install
- install:code
- name: remove read-only ssh key
- name: Remove read-only ssh key
file:
dest: "{{ git_home }}/.ssh/{{ item.REPO }}"
state: absent
......
......@@ -79,4 +79,8 @@ hadoop_common_redhat_pkgs: []
# yarn.nodemanager.vmem-pmem-ratio: 2.1
mapred_site_config: {}
yarn_site_config: {}
yarn_site_config:
yarn.log-aggregation-enable: true
# 24 hour log retention
yarn.log-aggregation.retain-seconds: 86400
---
- name: restart mongo
service: name=mongod state=restarted
service:
name: mongod
state: restarted
---
- name: check to see that MongoDB 2.4 is not installed
stat: path=/etc/init.d/mongodb
- name: Check to see that MongoDB 2.4 is not installed
stat:
path: /etc/init.d/mongodb
register: mongodb_needs_upgrade
tags:
- install
- install:base
- name: verify 2.4 not installed
fail: msg="MongoDB 2.4 is currently installed and cannot be safely upgraded in a clustered configuration. Please read http://docs.mongodb.org/manual/release-notes/2.6-upgrade/#upgrade-considerations and upgrade to 2.6."
- name: Verify 2.4 not installed
fail:
msg: "MongoDB 2.4 is currently installed and cannot be safely upgraded in a clustered configuration. Please read http://docs.mongodb.org/manual/release-notes/2.6-upgrade/#upgrade-considerations and upgrade to 2.6."
when: mongodb_needs_upgrade.stat.exists and MONGO_CLUSTERED
tags:
- install
- install:base
- name: remove mongo 2.4 if present
apt: >
pkg=mongodb-10gen
state=absent purge=yes
force=yes
- name: Remove mongo 2.4 if present
apt:
pkg: mongodb-10gen
state: absent
purge: yes
force: yes
when: mongodb_needs_upgrade.stat.exists and not MONGO_CLUSTERED
tags:
- install
- install:base
- name: install python pymongo for mongo_user ansible module
pip: >
name=pymongo state=present
version={{ pymongo_version }} extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
- name: Install python pymongo for mongo_user ansible module
pip:
name: pymongo
state: present
version: "{{ pymongo_version }}"
extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}"
tags:
- install
- install:base
- name: add the mongodb signing key
apt_key: >
id={{ MONGODB_APT_KEY }}
keyserver={{ MONGODB_APT_KEYSERVER }}
state=present
- name: Add the mongodb signing key
apt_key:
id: "{{ MONGODB_APT_KEY }}"
keyserver: "{{ MONGODB_APT_KEYSERVER }}"
state: present
tags:
- install
- install:base
- name: add the mongodb repo to the sources list
apt_repository: >
repo='{{ MONGODB_REPO }}'
state=present
- name: Add the mongodb repo to the sources list
apt_repository:
repo: "{{ MONGODB_REPO }}"
state: present
tags:
- install
- install:base
- name: install mongo server and recommends
apt: >
pkg=mongodb-org={{ mongo_version }}
state=present install_recommends=yes
force=yes update_cache=yes
- name: Install mongo server and recommends
apt:
name: "mongodb-org={{ mongo_version }}"
state: present
install_recommends: yes
force: yes
update_cache: yes
tags:
- install
- install:base
- name: create mongo dirs
file: >
path="{{ item }}" state=directory
owner="{{ mongo_user }}"
group="{{ mongo_user }}"
- name: Create mongo dirs
file:
path: "{{ item }}"
state: directory
owner: "{{ mongo_user }}"
group: "{{ mongo_user }}"
with_items:
- "{{ mongo_data_dir }}"
- "{{ mongo_dbpath }}"
......@@ -71,83 +79,97 @@
- install
- install:base
- name: stop mongod service
service: name=mongod state=stopped
- name: Stop mongod service
service:
name: mongod
state: stopped
tags:
- manage
- manage:stop
- name: move mongodb to {{ mongo_data_dir }}
command: >
mv /var/lib/mongodb {{ mongo_data_dir}}/.
creates={{ mongo_data_dir }}/mongodb
- name: Move mongodb to {{ mongo_data_dir }}
command: "mv /var/lib/mongodb {{ mongo_data_dir}}/."
args:
creates: "{{ mongo_data_dir }}/mongodb"
tags:
- install
- install:base
- name: copy mongodb key file
copy: >
content="{{ MONGO_CLUSTER_KEY }}"
dest={{ mongo_key_file }}
mode=0600
owner=mongodb
group=mongodb
- name: Copy mongodb key file
copy:
content: "{{ MONGO_CLUSTER_KEY }}"
dest: "{{ mongo_key_file }}"
mode: "0600"
owner: mongodb
group: mongodb
when: MONGO_CLUSTERED
tags:
- install
- install:configuration
- name: copy configuration template
template: src=mongodb.conf.j2 dest=/etc/mongod.conf backup=yes
notify: restart mongo
- name: Copy configuration template
template:
src: "mongodb.conf.j2"
dest: "/etc/mongod.conf"
backup: yes
notify:
- restart mongo
tags:
- install
- install:configuration
- name: start mongo service
service: name=mongod state=started
- name: Start mongo service
service:
name: mongod
state: started
tags:
- manage
- manage:start
- name: wait for mongo server to start
wait_for: port=27017 delay=2
- name: Wait for mongo server to start
wait_for:
port: 27017
delay: 2
tags:
- manage
- manage:start
- name: drop super user script
template: src="create_root.js.j2" dest="/tmp/create_root.js"
- name: Drop super user script
template:
src: "create_root.js.j2"
dest: "/tmp/create_root.js"
when: not MONGO_CLUSTERED
tags:
- install
- install:configuration
- name: create super user with js
shell: >
/usr/bin/mongo admin /tmp/create_root.js
- name: Create super user with js
shell: "/usr/bin/mongo admin /tmp/create_root.js"
when: not MONGO_CLUSTERED
tags:
- install
- install:configuration
- name: delete super user script
file: path=/tmp/create_root.js state=absent
- name: Delete super user script
file:
path: /tmp/create_root.js
state: absent
when: not MONGO_CLUSTERED
tags:
- install
- install:configuration
- name: Create the file to initialize the mongod replica set
template: src=repset_init.js.j2 dest=/tmp/repset_init.js
template:
src: "repset_init.js.j2"
dest: "/tmp/repset_init.js"
when: MONGO_CLUSTERED
tags:
- install
- install:configuration
- name: Initialize the replication set
shell: >
/usr/bin/mongo /tmp/repset_init.js
shell: "/usr/bin/mongo /tmp/repset_init.js"
when: MONGO_CLUSTERED
tags:
- install
......@@ -157,81 +179,72 @@
# file: path=/tmp/repset_init.js state=absent
# when: MONGO_CLUSTERED
- name: create a mongodb user
mongodb_user: >
database={{ item.database }}
login_user={{ MONGO_ADMIN_USER }}
login_password={{ MONGO_ADMIN_PASSWORD }}
name={{ item.user }}
password="{{ item.password }}"
roles={{ item.roles }}
state=present
with_items: MONGO_USERS
- name: Create a mongodb user
mongodb_user:
database: "{{ item.database }}"
login_user: "{{ MONGO_ADMIN_USER }}"
login_password: "{{ MONGO_ADMIN_PASSWORD }}"
name: "{{ item.user }}"
password: "{{ item.password }}"
roles: "{{ item.roles }}"
state: present
with_items: "{{ MONGO_USERS }}"
when: not MONGO_CLUSTERED
tags:
- manage
- manage:app-users
- name: create a mongodb user
mongodb_user: >
database={{ item.database }}
login_user={{ MONGO_ADMIN_USER }}
login_password={{ MONGO_ADMIN_PASSWORD }}
name={{ item.user }}
password="{{ item.password }}"
roles={{ item.roles }}
state=present
replica_set={{ mongo_repl_set }}
with_items: MONGO_USERS
- name: Create a mongodb user
mongodb_user:
database: "{{ item.database }}"
login_user: "{{ MONGO_ADMIN_USER }}"
login_password: "{{ MONGO_ADMIN_PASSWORD }}"
name: "{{ item.user }}"
password: "{{ item.password }}"
roles: "{{ item.roles }}"
state: present
replica_set: "{{ mongo_repl_set }}"
with_items: "{{ MONGO_USERS }}"
when: MONGO_CLUSTERED
tags:
- manage
- manage:app-users
- name: install s3cmd
pip: >
name="s3cmd"
state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
- name: Install s3cmd
pip:
name: "s3cmd"
state: present
extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}"
when: MONGO_S3_BACKUP
tags:
- install
- install:app-requirements
- name: configure s3cmd
template: >
dest="{{ MONGO_S3_S3CMD_CONFIG }}"
src=mongo-s3-backup-s3cfg.j2
owner=root
group=root
mode=0600
when: MONGO_S3_BACKUP
tags:
- install
- install:configuration
- name: install backup-mongo-to-s3 script
template: >
src=backup-mongo-to-s3.j2
dest=/edx/bin/backup-mongo-to-s3.sh
owner=root
group=root
mode=0700
- name: Configure s3cmd and install backup-mongo-to-s3 script
template:
dest: "{{ item.dest }}"
src: "{{ item.src }}"
owner: root
group: root
mode: "{{ item.mode }}"
when: MONGO_S3_BACKUP
with_items:
- { src: 'mongo-s3-backup-s3cfg.j2', dest: '{{ MONGO_S3_S3CMD_CONFIG }}', mode: '0600' }
- { src: 'backup-mongo-to-s3.j2', dest: '/edx/bin/backup-mongo-to-s3.sh', mode: '0700' }
tags:
- install
- install:configuration
- name: schedule backup-mongo-to-s3 crontab
- name: Schedule backup-mongo-to-s3 crontab
cron:
name="backup-mongo-to-s3"
job="/edx/bin/backup-mongo-to-s3.sh"
backup=yes
cron_file=backup-mongo-to-s3
user=root
hour="{{ MONGO_S3_BACKUP_HOUR }}"
minute="0"
day="{{ MONGO_S3_BACKUP_DAY }}"
name: "backup-mongo-to-s3"
job: "/edx/bin/backup-mongo-to-s3.sh"
backup: yes
cron_file: backup-mongo-to-s3
user: root
hour: "{{ MONGO_S3_BACKUP_HOUR }}"
minute: "0"
day: "{{ MONGO_S3_BACKUP_DAY }}"
when: MONGO_S3_BACKUP
tags:
- install
......
---
- name: restart notifier-scheduler
supervisorctl: >
name=notifier-scheduler
state=restarted
config={{ supervisor_cfg }}
supervisorctl_path={{ supervisor_ctl }}
supervisorctl:
name: "notifier-scheduler"
state: restarted
config: "{{ supervisor_cfg }}"
supervisorctl_path: "{{ supervisor_ctl }}"
when: not disable_edx_services
- name: restart notifier-celery-workers
supervisorctl: >
name=notifier-celery-workers
state=restarted
config={{ supervisor_cfg }}
supervisorctl_path={{ supervisor_ctl }}
supervisorctl:
name: "notifier-celery-workers"
state: restarted
config: "{{ supervisor_cfg }}"
supervisorctl_path: "{{ supervisor_ctl }}"
when: not disable_edx_services
---
- name: checkout code
- name: Checkout code
git_2_0_1:
dest={{ NOTIFIER_CODE_DIR }} repo={{ NOTIFIER_SOURCE_REPO }}
version={{ NOTIFIER_VERSION }}
accept_hostkey=yes
dest: "{{ NOTIFIER_CODE_DIR }}"
repo: "{{ NOTIFIER_SOURCE_REPO }}"
version: "{{ NOTIFIER_VERSION }}"
accept_hostkey: yes
become: true
become_user: "{{ notifier_user }}"
notify:
......@@ -12,48 +12,56 @@
- restart notifier-celery-workers
# Optional auth for git
- name: create ssh script for git (not authenticated)
template: >
src=git_ssh_noauth.sh.j2 dest={{ notifier_git_ssh }}
owner={{ notifier_user }} mode=750
- name: Create ssh script for git (not authenticated)
template:
src: "git_ssh_noauth.sh.j2"
dest: "{{ notifier_git_ssh }}"
owner: "{{ notifier_user }}"
mode: "0750"
when: NOTIFIER_GIT_IDENTITY == ""
- name: create ssh script for git (authenticated)
template: >
src=git_ssh_auth.sh.j2 dest={{ notifier_git_ssh }}
owner={{ notifier_user }} mode=750
- name: Create ssh script for git (authenticated)
template:
src: "git_ssh_auth.sh.j2"
dest: "{{ notifier_git_ssh }}"
owner: "{{ notifier_user }}"
mode: "0750"
when: NOTIFIER_GIT_IDENTITY != ""
- name: install read-only ssh key
copy: >
content="{{ NOTIFIER_GIT_IDENTITY }}" dest={{ notifier_git_identity }}
force=yes owner={{ notifier_user }} mode=0600
- name: Install read-only ssh key
copy:
content: "{{ NOTIFIER_GIT_IDENTITY }}"
dest: "{{ notifier_git_identity }}"
force: yes
owner: "{{ notifier_user }}"
mode: "0600"
when: NOTIFIER_GIT_IDENTITY != ""
- name: checkout theme
git_2_0_1: >
dest={{ NOTIFIER_CODE_DIR }}/{{ NOTIFIER_THEME_NAME }}
repo={{ NOTIFIER_THEME_REPO }}
version={{ NOTIFIER_THEME_VERSION }}
accept_hostkey=yes
- name: Checkout theme
git_2_0_1:
dest: "{{ NOTIFIER_CODE_DIR }}/{{ NOTIFIER_THEME_NAME }}"
repo: "{{ NOTIFIER_THEME_REPO }}"
version: "{{ NOTIFIER_THEME_VERSION }}"
accept_hostkey: yes
when: NOTIFIER_THEME_NAME != ''
become_user: "{{ notifier_user }}"
environment:
GIT_SSH: "{{ notifier_git_ssh }}"
- name: write notifier local settings
template: >
src=settings_local.py.j2
dest={{ NOTIFIER_CODE_DIR }}/notifier/settings_local.py
mode=0555
- name: Write notifier local settings
template:
src: "settings_local.py.j2"
dest: "{{ NOTIFIER_CODE_DIR }}/notifier/settings_local.py"
mode: "0555"
when: NOTIFIER_THEME_NAME != ''
notify:
- restart notifier-celery-workers
- name: install application requirements
- name: Install application requirements
pip:
requirements="{{ NOTIFIER_REQUIREMENTS_FILE }}"
virtualenv="{{ NOTIFIER_VENV_DIR }}" state=present
requirements: "{{ NOTIFIER_REQUIREMENTS_FILE }}"
virtualenv: "{{ NOTIFIER_VENV_DIR }}"
state: present
become: true
become_user: "{{ notifier_user }}"
notify:
......@@ -63,10 +71,13 @@
# Syncdb for whatever reason always creates the file owned by www-data:www-data, and then
# complains it can't write because it's running as notifier. So this is to touch the file into
# place with proper perms first.
- name: fix permissions on notifier db file
file: >
path={{ NOTIFIER_DB_DIR }}/notifier.db state=touch owner={{ notifier_user }} group={{ NOTIFIER_WEB_USER }}
mode=0664
- name: Fix permissions on notifier db file
file:
path: "{{ NOTIFIER_DB_DIR }}/notifier.db"
state: touch
owner: "{{ notifier_user }}"
group: "{{ NOTIFIER_WEB_USER }}"
mode: "0664"
become: true
notify:
- restart notifier-scheduler
......@@ -74,9 +85,10 @@
tags:
- deploy
- name: syncdb
shell: >
cd {{ NOTIFIER_CODE_DIR }} && {{ NOTIFIER_VENV_DIR }}/bin/python manage.py syncdb
- name: Syncdb
shell: "{{ NOTIFIER_VENV_DIR }}/bin/python manage.py syncdb"
args:
chdir: "{{ NOTIFIER_CODE_DIR }}"
become: true
become_user: "{{ notifier_user }}"
environment: notifier_env_vars
......
---
#
# notifier
#
......@@ -17,138 +16,145 @@
# - common
# - notifier
#
- name: install notifier specific system packages
apt: pkg={{','.join(notifier_debian_pkgs)}} state=present
- name: check if incommon ca is installed
command: test -e /usr/share/ca-certificates/incommon/InCommonServerCA.crt
- name: Install notifier specific system packages
apt:
name: "{{ item }}"
state: present
with_items: "{{ notifier_debian_pkgs }}"
- name: Check if incommon ca is installed
command: "test -e /usr/share/ca-certificates/incommon/InCommonServerCA.crt"
register: incommon_present
ignore_errors: yes
- name: create incommon ca directory
- name: Create incommon ca directory
file:
path="/usr/share/ca-certificates/incommon" mode=2775 state=directory
path: "/usr/share/ca-certificates/incommon"
state: directory
mode: "2775"
when: incommon_present|failed
- name: retrieve incommon server CA
shell: curl https://www.incommon.org/cert/repository/InCommonServerCA.txt -o /usr/share/ca-certificates/incommon/InCommonServerCA.crt
- name: Retrieve incommon server CA
get_url:
url: "https://www.incommon.org/cert/repository/InCommonServerCA.txt"
dest: "/usr/share/ca-certificates/incommon/InCommonServerCA.crt"
when: incommon_present|failed
- name: add InCommon ca cert
- name: Add InCommon ca cert
lineinfile:
dest=/etc/ca-certificates.conf
regexp='incommon/InCommonServerCA.crt'
line='incommon/InCommonServerCA.crt'
- name: update ca certs globally
shell: update-ca-certificates
- name: create notifier user {{ notifier_user }}
user: >
name="{{ notifier_user }}" state=present shell=/bin/false
home="{{ notifier_app_dir }}" createhome=no
- name: create notifier app dir
file: >
path="{{ notifier_app_dir }}" state=directory
owner="{{ notifier_user }}" group="{{ common_web_group }}"
notify: [restart notifier-scheduler, restart notifier-celery-workers]
- name: setup the notifier env
dest: /etc/ca-certificates.conf
regexp: 'incommon/InCommonServerCA.crt'
line: 'incommon/InCommonServerCA.crt'
- name: Update ca certs globally
shell: "update-ca-certificates"
- name: Create notifier user {{ notifier_user }}
user:
name: "{{ notifier_user }}"
state: present
shell: /bin/false
home: "{{ notifier_app_dir }}"
createhome: no
- name: Create notifier app dir
file:
path: "{{ notifier_app_dir }}"
state: directory
owner: "{{ notifier_user }}"
group: "{{ common_web_group }}"
notify:
- restart notifier-scheduler
- restart notifier-celery-workers
- name: Setup the notifier env
template:
src=notifier_env.j2 dest={{ notifier_app_dir }}/notifier_env
owner="{{ notifier_user }}" group="{{ notifier_user }}"
mode=655
- name: drop a bash_profile
copy: >
src=../../common/files/bash_profile
dest={{ notifier_app_dir }}/.bash_profile
owner={{ notifier_user }}
group={{ notifier_user }}
- name: ensure .bashrc exists
shell: touch {{ notifier_app_dir }}/.bashrc
src: "notifier_env.j2"
dest: "{{ notifier_app_dir }}/notifier_env"
owner: "{{ notifier_user }}"
group: "{{ notifier_user }}"
mode: "0655"
- name: Drop a bash_profile
copy:
src: "../../common/files/bash_profile"
dest: "{{ notifier_app_dir }}/.bash_profile"
owner: "{{ notifier_user }}"
group: "{{ notifier_user }}"
- name: Ensure .bashrc exists
file:
path: "{{ notifier_app_dir }}/.bashrc"
state: touch
become: true
become_user: "{{ notifier_user }}"
- name: add source of notifier_env to .bashrc
- name: Add source of notifier_env to .bashrc
lineinfile:
dest={{ notifier_app_dir }}/.bashrc
regexp='. {{ notifier_app_dir }}/notifier_env'
line='. {{ notifier_app_dir }}/notifier_env'
dest: "{{ notifier_app_dir }}/.bashrc"
regexp: '. {{ notifier_app_dir }}/notifier_env'
line: '. {{ notifier_app_dir }}/notifier_env'
- name: add source venv to .bashrc
- name: Add source venv to .bashrc
lineinfile:
dest={{ notifier_app_dir }}/.bashrc
regexp='. {{ NOTIFIER_VENV_DIR }}/bin/activate'
line='. {{ NOTIFIER_VENV_DIR }}/bin/activate'
- name: create notifier DB directory
file:
path="{{ NOTIFIER_DB_DIR }}" mode=2775 state=directory owner={{ notifier_user }} group={{ NOTIFIER_WEB_USER }}
- name: create notifier/bin directory
file:
path="{{ notifier_app_dir }}/bin" mode=2775 state=directory owner={{ notifier_user }} group={{ notifier_user }}
dest: "{{ notifier_app_dir }}/.bashrc"
regexp: '. {{ NOTIFIER_VENV_DIR }}/bin/activate'
line: '. {{ NOTIFIER_VENV_DIR }}/bin/activate'
- name: create notifier/.ssh directory
- name: Create desired directories
file:
path="{{ notifier_app_dir }}/.ssh" mode=2700 state=directory owner={{ notifier_user }} group={{ notifier_user }}
- name: create service log dir
file: >
path="{{ item }}"
state=directory
owner="syslog"
group="syslog"
path: "{{ item.path }}"
state: directory
owner: "{{ item.owner }}"
group: "{{ item.group }}"
mode: "{{ item.mode }}"
with_items:
- "{{ COMMON_LOG_DIR }}/notifier"
- name: write supervisord wrapper for celery workers
template: >
src=notifier-celery-workers-supervisor.sh.j2
dest="{{ notifier_app_dir }}/notifier-celery-workers-supervisor.sh"
mode=0775
become_user: "{{ notifier_user }}"
- { path: '{{ NOTIFIER_DB_DIR }}', owner: '{{ notifier_user }}', group: '{{ NOTIFIER_WEB_USER }}', mode: '2775' }
- { path: '{{ notifier_app_dir }}/bin', owner: '{{ notifier_user }}', group: '{{ notifier_user }}', mode: '2775' }
- { path: '{{ notifier_app_dir }}/.ssh', owner: '{{ notifier_user }}', group: '{{ notifier_user }}', mode: '2700' }
    - { path: '{{ COMMON_LOG_DIR }}/notifier', owner: 'syslog', group: 'syslog', mode: '0775' }
- name: write supervisord wrapper for scheduler
template: >
src=notifier-scheduler-supervisor.sh.j2
dest="{{ notifier_app_dir }}/notifier-scheduler-supervisor.sh"
mode=0775
- name: Write supervisord wrapper for celery workers and scheduler
template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
mode: "0775"
become_user: "{{ notifier_user }}"
with_items:
- { src: 'notifier-celery-workers-supervisor.sh.j2', dest: '{{ notifier_app_dir }}/notifier-celery-workers-supervisor.sh' }
- { src: 'notifier-scheduler-supervisor.sh.j2', dest: '{{ notifier_app_dir }}/notifier-scheduler-supervisor.sh' }
- name: write supervisord config for celery workers
template: >
src=edx/app/supervisor/conf.d/notifier-celery-workers.conf.j2
dest="{{ supervisor_available_dir }}/notifier-celery-workers.conf"
become_user: "{{ supervisor_user }}"
- name: write supervisord config for scheduler
template: >
src=edx/app/supervisor/conf.d/notifier-scheduler.conf.j2
dest="{{ supervisor_available_dir }}/notifier-scheduler.conf"
- name: Write supervisord config for celery workers and scheduler
template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
become_user: "{{ supervisor_user }}"
with_items:
- { src: 'edx/app/supervisor/conf.d/notifier-celery-workers.conf.j2', dest: '{{ supervisor_available_dir }}/notifier-celery-workers.conf' }
- { src: 'edx/app/supervisor/conf.d/notifier-scheduler.conf.j2', dest: '{{ supervisor_available_dir }}/notifier-scheduler.conf' }
- name: enable supervisord config for celery workers
file: >
src="{{ supervisor_available_dir }}/notifier-celery-workers.conf"
dest="{{ supervisor_cfg_dir }}/notifier-celery-workers.conf"
state=link
force=yes
- name: Enable supervisord config for celery workers
file:
src: "{{ supervisor_available_dir }}/notifier-celery-workers.conf"
dest: "{{ supervisor_cfg_dir }}/notifier-celery-workers.conf"
state: link
force: yes
become_user: "{{ supervisor_user }}"
notify: restart notifier-celery-workers
notify:
- restart notifier-celery-workers
when: not disable_edx_services
- name: enable supervisord config for scheduler
file: >
src="{{ supervisor_available_dir }}/notifier-scheduler.conf"
dest="{{ supervisor_cfg_dir }}/notifier-scheduler.conf"
state=link
force=yes
- name: Enable supervisord config for scheduler
file:
src: "{{ supervisor_available_dir }}/notifier-scheduler.conf"
dest: "{{ supervisor_cfg_dir }}/notifier-scheduler.conf"
state: link
force: yes
become_user: "{{ supervisor_user }}"
notify: restart notifier-scheduler
notify:
- restart notifier-scheduler
when: not disable_edx_services
- include: deploy.yml tags=deploy
- include: deploy.yml
tags:
- deploy
---
# oraclejdk
#
# Dependencies:
......@@ -12,42 +11,52 @@
# - common
# - oraclejdk
- name: install debian needed pkgs
apt: pkg={{ item }}
with_items: oraclejdk_debian_pkgs
- name: download Oracle Java
shell: >
curl -b gpw_e24=http%3A%2F%2Fwww.oracle.com -b oraclelicense=accept-securebackup-cookie -O -L {{ oraclejdk_url }}
executable=/bin/bash
chdir=/var/tmp
creates=/var/tmp/{{ oraclejdk_file }}
- name: create jvm dir
file: >
path=/usr/lib/jvm
state=directory
owner=root
group=root
- name: untar Oracle Java
shell: >
tar -C /usr/lib/jvm -zxvf /var/tmp/{{ oraclejdk_file }}
executable=/bin/bash
creates=/usr/lib/jvm/{{ oraclejdk_base }}
- name: create symlink expected by elasticsearch
file: src=/usr/lib/jvm/{{ oraclejdk_base }} dest={{ oraclejdk_link }} state=link force=yes
- name: update alternatives java
alternatives: >
name={{ item }}
link="/usr/bin/{{ item }}"
path="/usr/lib/jvm/{{ oraclejdk_base }}/bin/{{ item }}"
- name: Install debian needed pkgs
apt:
name: "{{ item }}"
with_items: "{{ oraclejdk_debian_pkgs }}"
- name: Download Oracle Java
shell: "curl -b gpw_e24=http%3A%2F%2Fwww.oracle.com -b oraclelicense=accept-securebackup-cookie -O -L {{ oraclejdk_url }}"
args:
executable: /bin/bash
chdir: /var/tmp
creates: "/var/tmp/{{ oraclejdk_file }}"
- name: Create jvm dir
file:
path: /usr/lib/jvm
state: directory
owner: root
group: root
- name: Untar Oracle Java
shell: "tar -C /usr/lib/jvm -zxvf /var/tmp/{{ oraclejdk_file }}"
args:
executable: /bin/bash
creates: "/usr/lib/jvm/{{ oraclejdk_base }}"
- name: Create symlink expected by elasticsearch
file:
src: "/usr/lib/jvm/{{ oraclejdk_base }}"
dest: "{{ oraclejdk_link }}"
state: link
force: yes
- name: Update alternatives java
alternatives:
name: "{{ item }}"
link: "/usr/bin/{{ item }}"
path: "/usr/lib/jvm/{{ oraclejdk_base }}/bin/{{ item }}"
with_items:
- java
- javac
- javaws
- name: add JAVA_HOME for Oracle Java
template: src=java.sh.j2 dest=/etc/profile.d/java.sh owner=root group=root mode=0755
- name: Add JAVA_HOME for Oracle Java
template:
src: "java.sh.j2"
dest: "/etc/profile.d/java.sh"
owner: root
group: root
mode: "0755"
......@@ -22,5 +22,4 @@
#
- include: security-ubuntu.yml
when:
- ansible_distribution == 'Ubuntu'
when: ansible_distribution == 'Ubuntu'
---
#### Enable periodic security updates
- name: Install security packages
apt:
name: "{{ item }}"
state: latest
update_cache: yes
with_items: "{{ security_debian_pkgs }}"
- name: install security packages
apt: name={{ item }} state=latest update_cache=yes
with_items: security_debian_pkgs
- name: update all system packages
apt: upgrade=safe
- name: Update all system packages
apt:
upgrade: safe
when: SECURITY_UPGRADE_ON_ANSIBLE
- name: configure periodic unattended-upgrades
template: >
src=etc/apt/apt.conf.d/10periodic
dest=/etc/apt/apt.conf.d/10periodic
owner=root group=root mode=0644
- name: Configure periodic unattended-upgrades
template:
src: "etc/apt/apt.conf.d/10periodic"
dest: "/etc/apt/apt.conf.d/10periodic"
owner: root
group: root
mode: "0644"
when: SECURITY_UNATTENDED_UPGRADES
- name: disable unattended-upgrades
file: path=/etc/apt/apt.conf.d/10periodic state=absent
- name: Disable unattended-upgrades
file:
path: "/etc/apt/apt.conf.d/10periodic"
state: absent
when: not SECURITY_UNATTENDED_UPGRADES
- name: only unattended-upgrade from security repo
template: >
src=etc/apt/apt.conf.d/20unattended-upgrade
dest=/etc/apt/apt.conf.d/20unattended-upgrade
owner=root group=root mode=0644
- name: Only unattended-upgrade from security repo
template:
src: "etc/apt/apt.conf.d/20unattended-upgrade"
dest: "/etc/apt/apt.conf.d/20unattended-upgrade"
owner: root
group: root
mode: "0644"
when: SECURITY_UNATTENDED_UPGRADES and not SECURITY_UPDATE_ALL_PACKAGES
- name: disable security only updates on unattended-upgrades
file: path=/etc/apt/apt.conf.d/20unattended-upgrade state=absent
- name: Disable security only updates on unattended-upgrades
file:
path: "/etc/apt/apt.conf.d/20unattended-upgrade"
state: absent
when: SECURITY_UPDATE_ALL_PACKAGES or not SECURITY_UNATTENDED_UPGRADES
#### Bash security vulnerability
- name: Check if we are vulnerable
shell: executable=/bin/bash chdir=/tmp foo='() { echo vulnerable; }' bash -c foo
shell: "executable=/bin/bash chdir=/tmp foo='() { echo vulnerable; }' bash -c foo"
register: test_vuln
ignore_errors: yes
- name: Apply bash security update if we are vulnerable
apt: name=bash state=latest update_cache=true
apt:
name: bash
state: latest
update_cache: yes
when: "'vulnerable' in test_vuln.stdout"
- name: Check again and fail if we are still vulnerable
shell: executable=/bin/bash foo='() { echo vulnerable; }' bash -c foo
shell: "executable=/bin/bash foo='() { echo vulnerable; }' bash -c foo"
when: "'vulnerable' in test_vuln.stdout"
register: test_vuln
failed_when: "'vulnerable' in test_vuln.stdout"
......@@ -52,20 +66,23 @@
#### GHOST security vulnerability
- name: GHOST.c
copy: >
src=tmp/GHOST.c
dest=/tmp/GHOST.c
owner=root group=root
copy:
src: "tmp/GHOST.c"
dest: "/tmp/GHOST.c"
owner: root
group: root
- name: compile GHOST
shell: gcc -o /tmp/GHOST /tmp/GHOST.c
- name: Compile GHOST
shell: "gcc -o /tmp/GHOST /tmp/GHOST.c"
- name: Check if we are vulnerable
shell: /tmp/GHOST
shell: "/tmp/GHOST"
register: test_ghost_vuln
ignore_errors: yes
- name: Apply glibc security update if we are vulnerable
apt: name=libc6 state=latest update_cache=true
when: "'vulnerable' in test_ghost_vuln.stdout"
apt:
name: libc6
state: latest
update_cache: yes
when: "'vulnerable' in test_ghost_vuln.stdout"
\ No newline at end of file
......@@ -35,3 +35,6 @@ user_rbash_links:
# will take precedence over the parameter
user_info: []
user_debian_pkgs:
# This is needed for the uri module to work correctly.
- python-httplib2
......@@ -70,6 +70,14 @@
# want to provide more binaries add them to user_rbash_links
# which can be passed in as a parameter to the role.
#
- name: Install debian packages user role needs
apt:
name: "{{ item }}"
install_recommends: yes
state: present
update_cache: yes
with_items: "{{ user_debian_pkgs }}"
when: ansible_distribution in common_debian_variants
- debug:
var: user_info
......@@ -116,13 +124,16 @@
- name: Check the ssh key(s) for user(s) over github
uri:
url: "https://github.com/{{ item.name }}.keys"
# We don't care if absent users lack ssh keys
when: item.get('state', 'present') == 'present'
with_items: "{{ user_info }}"
register: github_users_return
- fail:
- debug:
msg: "User {{ item.item.name }} doesn't have an SSH key associated with their account"
with_items: "{{ github_users_return.results | default([]) }}"
when: item.content_length == "0"
# We skip users in the previous task, and they end up with no content_length
when: item.get('content_length') and item.content_length == "0"
- name: Get github key(s) and update the authorized_keys file
authorized_key:
......
......@@ -3,91 +3,98 @@
# Tasks related to deploying the code jail for the XQWatcher
#
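# For orientation, a hypothetical XQWATCHER_COURSES entry shaped to match the
# fields the tasks below reference (all values are placeholders):
#
#   XQWATCHER_COURSES:
#     - COURSE: "exampleX-101"
#       GIT_REPO: "git@github.com:example/exampleX-graders.git"
#       GIT_REF: "master"
#       QUEUE_CONFIG:
#         HANDLERS:
#           - CODEJAIL:
#               name: "exampleX-101"
#               user: "exampleX-101"
#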
- name: Create sandboxed user
user: >
name="{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}"
shell=/bin/false
home="/dev/null"
with_items: XQWATCHER_COURSES
user:
name: "{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}"
shell: /bin/false
home: "/dev/null"
with_items: "{{ XQWATCHER_COURSES }}"
tags:
- install
- install:base
#
# Need to disable aa to update the virtualenv
- name: write out apparmor config
template: >
src=etc/apparmor.d/code.jail.j2
dest="/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
mode=0644 owner=root group=root
with_items: XQWATCHER_COURSES
# Need to disable apparmor to update the virtualenv
- name: Write out apparmor config
template:
src: "etc/apparmor.d/code.jail.j2"
dest: "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
owner: root
group: root
mode: "0644"
with_items: "{{ XQWATCHER_COURSES }}"
tags:
- install
- install:configuration
- name: write out sudoers for watcher
template: >
src=etc/sudoers.d/95-xqwatcher.j2
dest=/etc/sudoers.d/95-xqwatcher-{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user|replace('.', '') }}
mode=0440 owner=root group=root validate='visudo -c -f %s'
with_items: XQWATCHER_COURSES
- name: Write out sudoers for watcher
template:
src: "etc/sudoers.d/95-xqwatcher.j2"
dest: "/etc/sudoers.d/95-xqwatcher-{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user|replace('.', '') }}"
owner: root
group: root
mode: "0440"
validate: 'visudo -c -f %s'
with_items: "{{ XQWATCHER_COURSES }}"
tags:
- install
- install:configuration
# see comment below as to why this is skipped.
- name: put code jail into aa-complain
- name: Put code jail into aa-complain
command: /usr/sbin/aa-complain "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
with_items: XQWATCHER_COURSES
with_items: "{{ XQWATCHER_COURSES }}"
tags:
- manage
- manage:sandbox
- name: create jail virtualenv
shell: >
/usr/local/bin/virtualenv --no-site-packages {{ xqwatcher_app_dir }}/venvs/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}
with_items: XQWATCHER_COURSES
- name: Create jail virtualenv
shell: "/usr/local/bin/virtualenv --no-site-packages {{ xqwatcher_app_dir }}/venvs/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
with_items: "{{ XQWATCHER_COURSES }}"
tags:
- install
- install:code
- name: write out requirements.txt
template: >
src=edx/app/xqwatcher/data/requirements.txt.j2
dest={{ xqwatcher_app_dir }}/data/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}-requirements.txt
mode=0440 owner=root group=root
with_items: XQWATCHER_COURSES
- name: Write out requirements.txt
template:
src: "edx/app/xqwatcher/data/requirements.txt.j2"
dest: "{{ xqwatcher_app_dir }}/data/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}-requirements.txt"
owner: root
group: root
mode: "0440"
with_items: "{{ XQWATCHER_COURSES }}"
tags:
- install
- install:code
- name: install course specific python requirements
pip: >
requirements="{{ xqwatcher_app_data }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}-requirements.txt"
virtualenv="{{ xqwatcher_app_dir }}/venvs/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
state=present
extra_args="{{ XQWATCHER_PIP_EXTRA_ARGS }}"
with_items: XQWATCHER_COURSES
- name: Install course specific python requirements
pip:
requirements: "{{ xqwatcher_app_data }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}-requirements.txt"
virtualenv: "{{ xqwatcher_app_dir }}/venvs/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
state: present
extra_args: "{{ XQWATCHER_PIP_EXTRA_ARGS }}"
with_items: "{{ XQWATCHER_COURSES }}"
tags:
- install
- install:code
- name: give other read permissions to the virtualenv
shell: >
chown -R {{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }} {{ xqwatcher_app_dir }}/venvs/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}
with_items: XQWATCHER_COURSES
- name: Give other read permissions to the virtualenv
shell: "chown -R {{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }} {{ xqwatcher_app_dir }}/venvs/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
with_items: "{{ XQWATCHER_COURSES }}"
tags:
- install
- install:code
- name: start apparmor service
service: name=apparmor state=started
- name: Start apparmor service
service:
name: apparmor
state: started
tags:
- manage
- manage:sandbox
- name: load code sandbox profile
- name: Load code sandbox profile
command: apparmor_parser -r "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
with_items: XQWATCHER_COURSES
with_items: "{{ XQWATCHER_COURSES }}"
tags:
- manage
- manage:sandbox
......@@ -96,20 +103,20 @@
# Leaves AppArmor in either complain or enforce mode depending upon the value of the
# CODE_JAIL_COMPLAIN var. Complain mode should never be run in an
# environment where untrusted users can submit code.
- name: put code jail into aa-complain
- name: Put code jail into aa-complain
command: /usr/sbin/aa-complain "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
when: CODE_JAIL_COMPLAIN|bool
with_items: XQWATCHER_COURSES
with_items: "{{ XQWATCHER_COURSES }}"
# AA having issues on 14.04
# https://github.com/edx/codejail/issues/38
tags:
- manage
- manage:sandbox
- name: put code sandbox into aa-enforce
- name: Put code sandbox into aa-enforce
command: /usr/sbin/aa-enforce "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
when: not CODE_JAIL_COMPLAIN|bool
with_items: XQWATCHER_COURSES
with_items: "{{ XQWATCHER_COURSES }}"
tags:
- manage
- manage:sandbox
- name: install courses ssh key
copy: >
content="{{ XQWATCHER_GIT_IDENTITY }}"
dest={{ xqwatcher_app_dir }}/.ssh/{{ xqwatcher_service_name }}-courses
owner={{ xqwatcher_user }} group={{ xqwatcher_user }} mode=0600
- name: Install courses ssh key
copy:
content: "{{ XQWATCHER_GIT_IDENTITY }}"
dest: "{{ xqwatcher_app_dir }}/.ssh/{{ xqwatcher_service_name }}-courses"
owner: "{{ xqwatcher_user }}"
group: "{{ xqwatcher_user }}"
mode: "0600"
tags:
- install
- install:code
#TODO: remove once xqwatcher.json can be pulled out into /edx/etc/
- name: write out watcher config file
template: >
src=edx/app/xqwatcher/xqwatcher.json.j2
dest={{ xqwatcher_conf_dir }}/xqwatcher.json
mode=0644 owner={{ xqwatcher_user }} group={{ xqwatcher_user }}
- name: Write out watcher config file
template:
src: "edx/app/xqwatcher/xqwatcher.json.j2"
dest: "{{ xqwatcher_conf_dir }}/xqwatcher.json"
owner: "{{ xqwatcher_user }}"
group: "{{ xqwatcher_user }}"
mode: "0644"
tags:
- install
- install:configuration
......
......@@ -2,12 +2,13 @@
# checking out the grader code from the repository specified on
# a per queue basis.
- name: checkout grader code
git_2_0_1: >
dest={{ xqwatcher_app_dir }}/data/{{ item.COURSE }} repo={{ item.GIT_REPO }}
version={{ item.GIT_REF }}
ssh_opts="{{ xqwatcher_course_git_ssh_opts }}"
with_items: XQWATCHER_COURSES
- name: Checkout grader code
git_2_0_1:
repo: "{{ item.GIT_REPO }}"
dest: "{{ xqwatcher_app_dir }}/data/{{ item.COURSE }}"
version: "{{ item.GIT_REF }}"
ssh_opts: "{{ xqwatcher_course_git_ssh_opts }}"
with_items: "{{ XQWATCHER_COURSES }}"
tags:
- install
- install:code
......@@ -2,59 +2,63 @@
# The watcher can watch one or many queues and dispatch submissions
# to the appropriate grader which lives in a separate SCM repository.
- name: install application requirements
pip: >
requirements="{{ xqwatcher_requirements_file }}"
virtualenv="{{ xqwatcher_app_dir }}/venvs/{{ xqwatcher_service_name }}" state=present
- name: Install application requirements
pip:
requirements: "{{ xqwatcher_requirements_file }}"
virtualenv: "{{ xqwatcher_app_dir }}/venvs/{{ xqwatcher_service_name }}"
state: present
become: true
become_user: "{{ xqwatcher_user }}"
tags:
- install
- install:app-requirements
- name: write out course config files
template: >
src=edx/app/xqwatcher/conf.d/course.json.j2
dest={{ xqwatcher_conf_dir }}/conf.d/{{ item.COURSE }}.json
mode=0644 owner={{ xqwatcher_user }} group={{ xqwatcher_user }}
with_items: XQWATCHER_COURSES
- name: Write out course config files
template:
src: "edx/app/xqwatcher/conf.d/course.json.j2"
dest: "{{ xqwatcher_conf_dir }}/conf.d/{{ item.COURSE }}.json"
owner: "{{ xqwatcher_user }}"
group: "{{ xqwatcher_user }}"
mode: "0644"
with_items: "{{ XQWATCHER_COURSES }}"
tags:
- install
- install:configuration
- name: write supervisord config
template: >
src=edx/app/supervisor/conf.d/xqwatcher.conf.j2
dest="{{ xqwatcher_supervisor_available_dir }}/xqwatcher.conf"
group={{ xqwatcher_user }} mode=0650
- name: Write supervisord config
template:
src: "edx/app/supervisor/conf.d/xqwatcher.conf.j2"
dest: "{{ xqwatcher_supervisor_available_dir }}/xqwatcher.conf"
group: "{{ xqwatcher_user }}"
mode: "0650"
tags:
- install
- install:configuration
- name: enable supervisor script
file: >
src={{ xqwatcher_supervisor_available_dir }}/xqwatcher.conf
dest={{ xqwatcher_supervisor_cfg_dir }}/xqwatcher.conf
state=link
force=yes
- name: Enable supervisor script
file:
src: "{{ xqwatcher_supervisor_available_dir }}/xqwatcher.conf"
dest: "{{ xqwatcher_supervisor_cfg_dir }}/xqwatcher.conf"
state: link
force: yes
when: not disable_edx_services
tags:
- install
- install:configuration
- name: update supervisor configuration
- name: Update supervisor configuration
shell: "{{ xqwatcher_supervisor_ctl }} -c {{ xqwatcher_supervisor_app_dir }}/supervisord.conf update"
when: not disable_edx_services
tags:
- manage
- manage:update
- name: restart xqwatcher
supervisorctl: >
state=restarted
supervisorctl_path={{ xqwatcher_supervisor_ctl }}
config={{ xqwatcher_supervisor_app_dir }}/supervisord.conf
name={{ xqwatcher_service_name }}
- name: Restart xqwatcher
supervisorctl:
name: "{{ xqwatcher_service_name }}"
supervisorctl_path: "{{ xqwatcher_supervisor_ctl }}"
config: "{{ xqwatcher_supervisor_app_dir }}/supervisord.conf"
state: restarted
when: not disable_edx_services
become_user: "{{ xqwatcher_user }}"
tags:
......
......@@ -86,26 +86,28 @@
# -----END RSA PRIVATE KEY-----
#
- name: create conf dir
file: >
path="{{ xqwatcher_conf_dir }}"
state=directory
owner="{{ xqwatcher_user }}"
group="{{ xqwatcher_user }}"
- name: Create conf dir
file:
path: "{{ xqwatcher_conf_dir }}"
state: directory
owner: "{{ xqwatcher_user }}"
group: "{{ xqwatcher_user }}"
tags:
- install
- install:base
- name: create conf.d dir
file: >
path="{{ xqwatcher_conf_dir }}/conf.d"
state=directory
owner="{{ xqwatcher_user }}"
group="{{ xqwatcher_user }}"
- name: Create conf.d dir
file:
path: "{{ xqwatcher_conf_dir }}/conf.d"
state: directory
owner: "{{ xqwatcher_user }}"
group: "{{ xqwatcher_user }}"
tags:
- install
- install:base
- include: code_jail.yml CODE_JAIL_COMPLAIN=false
- include: deploy.yml tags=deploy
- include: deploy.yml
tags:
- deploy
......@@ -4,7 +4,7 @@
# ansible-playbook ./run_role.yml -i "hostname," -e role=my_awesome_role
#
- hosts: all
sudo: True
become: True
gather_facts: True
roles:
- "{{role}}"
- name: Apply security role
hosts: all
sudo: yes
become: True
roles:
- security
- name: Configure instance(s)
hosts: all
sudo: True
become: True
gather_facts: True
vars:
migrate_db: 'yes'
......
- name: Configure group cluster
hosts: all
sudo: True
become: True
gather_facts: True
vars:
vagrant_cluster: yes
......
- name: Configure instance(s)
hosts: all
sudo: True
become: True
gather_facts: True
vars:
migrate_db: 'yes'
......
- name: Configure instance(s)
hosts: all
sudo: True
become: True
gather_facts: True
vars:
migrate_db: 'yes'
......
......@@ -318,7 +318,6 @@ instance_tags:
root_ebs_size: $root_ebs_size
name_tag: $name_tag
dns_zone: $dns_zone
rabbitmq_refresh: True
elb: $elb
EOF
......
......@@ -299,6 +299,11 @@ if [[ ! -x /usr/bin/git || ! -x /usr/bin/pip ]]; then
libxslt-dev curl libmysqlclient-dev --force-yes
fi
# this is missing on 14.04 (base package on 12.04)
# we need to do this on any build, since the above apt-get
# only runs on a build from scratch
/usr/bin/apt-get install -y python-httplib2 --force-yes
# upgrade setuptools early to avoid "no distribution" errors
pip install --upgrade setuptools==18.3.2
......@@ -650,7 +655,7 @@ def launch_and_configure(ec2_args):
system_start = time.time()
for _ in xrange(EC2_STATUS_TIMEOUT):
status = ec2.get_all_instance_status(inst.id)
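        # get_all_instance_status can return an empty list right after launch,
        # before EC2 starts reporting status checks, hence the guard below.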
if status[0].system_status.status == u'ok':
if status and status[0].system_status.status == u'ok':
system_delta = time.time() - system_start
run_summary.append(('EC2 Status Checks', system_delta))
print "[ OK ] {:0>2.0f}:{:0>2.0f}".format(
......