Commit b0051518 by Joe Blaylock

Merge 'origin/release' into edx-west/release-merge

Conflicts:
	playbooks/edx-west/carnegie-prod-app.yml
	playbooks/edx-west/prod-jumpbox.yml
	playbooks/edx-west/stage-all.yml
	playbooks/roles/common/tasks/create_users.yml (deleted)
	playbooks/roles/datadog/handlers/main.yml
	playbooks/roles/edxapp/defaults/main.yml
	playbooks/roles/edxapp/tasks/deploy.yml
	playbooks/roles/edxapp/tasks/main.yml
	playbooks/roles/nginx/tasks/main.yml
	playbooks/roles/nginx/templates/basic-auth.j2
	playbooks/roles/nginx/templates/edx_logrotate_nginx_access.j2
	playbooks/roles/nginx/templates/edx_logrotate_nginx_error.j2
	playbooks/roles/xqueue/tasks/main.yml
parents 775299d2 3e037702
......@@ -5,3 +5,7 @@
\#*\#
*~
.#*
vagrant/devstack/cs_comments_service
vagrant/devstack/edx-platform
vagrant/release/*/devstack/cs_comments_service
vagrant/release/*/devstack/edx-platform
......@@ -8,14 +8,14 @@ The goal of the edx/configuration project is to provide a simple, but
flexible, way for anyone to stand up an instance of Open edX that is
fully configured and ready-to-go.
Building the platform takes place to two phases:
Building the platform takes place in two phases:
* Infrastruce provisioning
* Infrastructure provisioning
* Service configuration
As much as possible, we have tried to keep a clean distinction between
provisioning and configuration. You are not obliged to use our tools
and are free to use one, but not the other. The provisioing phase
and are free to use one, but not the other. The provisioning phase
stands-up the required resources and tags them with role identifiers
so that the configuration tool can come in and complete the job.
......
---
# Creates a new ansible role
# Usage:
# ansible-playbook -c local --limit "localhost," ./create_role.yml -i "localhost," -e role_name=my_awesome_role
#
- hosts: localhost
gather_facts: False
roles:
- ansible-role
......@@ -269,7 +269,8 @@ class Ec2Inventory(object):
reservations = conn.get_all_instances()
for reservation in reservations:
for instance in reservation.instances:
instances = sorted(reservation.instances)
for instance in instances:
self.add_instance(instance, region)
except boto.exception.BotoServerError as e:
......@@ -363,6 +364,7 @@ class Ec2Inventory(object):
for k, v in instance.tags.iteritems():
key = self.to_safe("tag_" + k + "=" + v)
self.push(self.inventory, key, dest)
self.keep_first(self.inventory, 'first_in_' + key, dest)
# Inventory: Group by Route53 domain names if enabled
if self.route53_enabled:
......@@ -532,6 +534,9 @@ class Ec2Inventory(object):
else:
my_dict[key] = [element]
def keep_first(self, my_dict, key, element):
if key not in my_dict:
my_dict[key] = [element]
def get_inventory_from_cache(self):
''' Reads the inventory from the cache file and returns it as a JSON
......
- name: Deploy certs
hosts: all
sudo: True
gather_facts: True
roles:
- common
- supervisor
- certs
- name: Deploy common
hosts: all
sudo: True
gather_facts: True
roles:
- gh_users
- common
- name: Deploy devpi
hosts: all
sudo: True
gather_facts: True
roles:
- common
- supervisor
- devpi
- name: Deploy discern
hosts: all
sudo: True
gather_facts: True
roles:
- common
- supervisor
- discern
......@@ -3,4 +3,6 @@
sudo: True
gather_facts: True
roles:
- common
- supervisor
- edxapp
......@@ -3,4 +3,6 @@
sudo: True
gather_facts: True
roles:
- common
- supervisor
- forum
......@@ -3,4 +3,6 @@
sudo: True
gather_facts: True
roles:
- common
- supervisor
- ora
- name: Deploy rabbitmq
hosts: all
sudo: True
gather_facts: False
roles:
- rabbitmq
......@@ -3,4 +3,7 @@
sudo: True
gather_facts: True
roles:
- xqueue
- common
- supervisor
- role: xqueue
tags: ['xqueue']
......@@ -3,4 +3,7 @@
sudo: True
gather_facts: True
roles:
- xserver
- common
- supervisor
- role: xserver
tags: ['xserver']
......@@ -7,33 +7,27 @@
migrate_db: "yes"
openid_workaround: True
roles:
- ansible_debug
- common
- role: nginx
nginx_sites:
- cms
- lms
- lms-preview
- ora
- xqueue
- xserver
#- discern
- edxlocal
- supervisor
- mongo
- edxapp
- role: demo
tags: ['demo']
- { role: 'rabbitmq', rabbitmq_ip: '127.0.0.1' }
- { role: 'edxapp', celery_worker: True }
- oraclejdk
- elasticsearch
- role: rbenv
rbenv_user: "{{ forum_user }}"
rbenv_user_home: "{{ forum_home }}"
rbenv_ruby_version: "{{ forum_ruby_version }}"
- forum
- role: virtualenv
virtualenv_user: "{{ xqueue_user }}"
virtualenv_user_home: "{{ xqueue_user_home }}"
virtualenv_name: "{{ xqueue_user }}"
- { role: "xqueue", update_users: True }
- xserver
- ora
#- discern
- discern
- certs
......@@ -15,6 +15,7 @@
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- datadog
- role: nginx
nginx_sites:
......@@ -34,6 +35,7 @@
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- datadog
- role: nginx
nginx_sites:
......
---
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_group_edxapp
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_edxapp
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
......@@ -7,17 +7,18 @@
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- datadog
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- lms-preview
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
edx_platform_commit: 'release'
- splunkforwarder
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_group_worker
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_worker
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
......@@ -25,30 +26,32 @@
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- datadog
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- lms-preview
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
celery_worker: True
edx_platform_commit: 'release'
- splunkforwarder
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_group_xserver
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_xserver
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- role: nginx
nginx_sites:
- xserver
- xserver
- xserver
- splunkforwarder
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_group_rabbitmq
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_rabbitmq
serial: 1
sudo: True
vars_files:
......@@ -56,17 +59,28 @@
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- rabbitmq
- splunkforwarder
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_group_xqueue
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_xqueue
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- role: nginx
nginx_sites:
- xqueue
- xqueue
- splunkforwarder
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_mongo
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- role: 'mongo'
mongo_clustered: true
......@@ -5,6 +5,7 @@
gather_facts: False
roles:
- common
- supervisor
- role: nginx
nginx_sites:
- devpi
......@@ -17,4 +18,4 @@
tags: ['r_devpi']
- role: gh_mirror
tags: ['r_gh_mirror']
......@@ -7,6 +7,7 @@
gather_facts: True
roles:
- common
- supervisor
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
......@@ -22,6 +23,7 @@
gather_facts: True
roles:
- common
- supervisor
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
......@@ -37,6 +39,7 @@
gather_facts: True
roles:
- common
- supervisor
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
......@@ -52,6 +55,7 @@
gather_facts: True
roles:
- common
- supervisor
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
......@@ -68,6 +72,7 @@
vars:
roles:
- common
- supervisor
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
......
......@@ -19,8 +19,16 @@
- name: Configure instance(s)
hosts: launched
sudo: True
gather_facts: True
gather_facts: False
pre_tasks:
- name: Wait for cloud-init to finish
wait_for: >
path=/var/log/cloud-init.log
timeout=15
search_regex="final-message"
roles:
# rerun common to set the hostname
- common
# gh_users hash must be passed
# in as a -e variable
- gh_users
......@@ -13,6 +13,7 @@
mysql5_workaround: True
roles:
- common
- supervisor
- role: nginx
nginx_sites:
- lms
......@@ -27,6 +28,6 @@
- { role: 'edxapp', celery_worker: True }
- role: rbenv
rbenv_user: "{{ forum_user }}"
rbenv_user_home: "{{ forum_home }}"
rbenv_dir: "{{ forum_home }}"
rbenv_ruby_version: "{{ forum_ruby_version }}"
- forum
---
- hosts: first_in_tag_role_mongo
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- role: 'mongo'
mongo_create_users: yes
#- hosts: tag_role_mongo:!first_in_tag_role_mongo
# sudo: True
# vars_files:
# - "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
# - "{{ secure_dir }}/vars/users.yml"
# roles:
# - common
# - mongo
- hosts: first_in_tag_role_edxapp
sudo: True
serial: 1
vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- datadog
- supervisor
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
migrate_db: 'yes'
openid_workaround: 'yes'
edx_platform_commit: 'HEAD'
- splunkforwarder
- hosts: tag_role_edxapp:!first_in_tag_role_edxapp
sudo: True
serial: 1
vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- datadog
- supervisor
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
edx_platform_commit: 'HEAD'
- splunkforwarder
- hosts: tag_role_worker
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- datadog
- supervisor
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
celery_worker: True
edx_platform_commit: 'HEAD'
- splunkforwarder
- hosts: tag_role_xserver
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- role: nginx
nginx_sites:
- xserver
- xserver
- splunkforwarder
- hosts: tag_role_rabbitmq
serial: 1
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- rabbitmq
- splunkforwarder
- hosts: first_in_tag_role_xqueue
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- role: nginx
nginx_sites:
- xqueue
- role: xqueue
migrate_db: 'yes'
- splunkforwarder
- hosts: tag_role_xqueue:!first_in_tag_role_xqueue
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- role: nginx
nginx_sites:
- xqueue
- xqueue
- splunkforwarder
- hosts: tag_role_forum
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- oraclejdk
- elasticsearch
- forum
......@@ -6,6 +6,7 @@
- "{{ secure_dir }}/vars/mlapi_prod_users.yml"
roles:
- common
- supervisor
- discern
sudo: True
- hosts:
......@@ -17,4 +18,4 @@
- "{{ secure_dir }}/vars/mlapi_prod_users.yml"
roles:
- common
sudo: True
\ No newline at end of file
sudo: True
......@@ -6,6 +6,7 @@
- "{{ secure_dir }}/vars/mlapi_sandbox_users.yml"
roles:
- common
- supervisor
- discern
sudo: True
- hosts:
......
......@@ -6,6 +6,7 @@
- "{{ secure_dir }}/vars/mlapi_stage_users.yml"
roles:
- common
- supervisor
- discern
sudo: True
- hosts:
......
- name: restarts supervisor
hosts: all
sudo: True
gather_facts: False
vars_files:
- roles/common/defaults/main.yml
- roles/supervisor/defaults/main.yml
tasks:
- name: supervisor | restart supervisor
service: >
name={{ supervisor_service }}
state=restarted
......@@ -22,6 +22,7 @@
- "{{ secure_dir }}/vars/datadog_carn.yml"
roles:
- common
- supervisor
# - php
- role: nginx
nginx_sites:
......@@ -30,7 +31,8 @@
- lms-preview
# - phpmyadmin
nginx_template_directory: "{{local_dir}}/nginx/templates/carnegie/"
- {'role': 'edxapp', 'openid_workaround': true}
nginx_conf: true
- {'role': 'edxapp', 'openid_workaround': true, 'template_subdir': 'carnegie'}
- datadog
#- splunkforwarder
# run this role last
......
......@@ -16,6 +16,7 @@
- "{{ secure_dir }}/vars/datadog_carn.yml"
roles:
- common
- supervisor
- { role: 'edxapp', celery_worker: True }
- datadog
#- splunkforwarder
......
......@@ -36,6 +36,7 @@
roles:
#- ansible_debug
- common
- supervisor
- role: nginx
nginx_conf: true
nginx_sites:
......
......@@ -16,6 +16,7 @@
- "{{ secure_dir }}/vars/datadog_cme.yml"
roles:
- common
- supervisor
- { role: 'edxapp', celery_worker: True }
- datadog
#- splunkforwarder
......
# ansible-playbook -v --user=ubuntu edxapp_rolling_example.yml -i ./ec2.py --private-key=/path/to/deployment.pem
# ansible-playbook -v --user=ubuntu edxapp_rolling_example.yml -i ./ec2.py --private-key=/path/to/deployment.pem
- hosts: tag_Group_anothermulti
serial: 2
......@@ -6,8 +6,8 @@
- "{{ secure_dir }}/vars/edxapp_stage_vars.yml"
- "{{ secure_dir }}/vars/users.yml"
pre_tasks:
- name: Gathering ec2 facts
ec2_facts:
- name: Gathering ec2 facts
ec2_facts:
- name: Removing instance from the ELB
local_action: ec2_elb
args:
......@@ -15,7 +15,8 @@
state: 'absent'
roles:
- common
- role: nginx
- supervisor
- role: nginx
nginx_sites:
- lms
- cms
......
......@@ -28,6 +28,7 @@
- "{{ secure_dir }}/vars/datadog_prod.yml"
roles:
- common
- supervisor
- role: nginx
nginx_sites:
- lms
......
......@@ -8,4 +8,4 @@
local_dir: '../../../configuration-secure/ansible/local'
roles:
- common
- datadog
- supervisor
......@@ -18,6 +18,7 @@
- "{{ secure_dir }}/vars/datadog_prod.yml"
roles:
- common
- supervisor
- role: nginx
nginx_sites:
- ora
......
......@@ -17,6 +17,7 @@
- "{{ secure_dir }}/vars/datadog_prod.yml"
roles:
- common
- supervisor
- { role: 'edxapp', celery_worker: True }
- datadog
#- splunkforwarder
......
......@@ -16,6 +16,7 @@
- "{{ secure_dir }}/vars/datadog_prod.yml"
roles:
- common
- supervisor
- role: nginx
nginx_sites:
- xqueue
......
......@@ -13,5 +13,5 @@
- "{{ secure_dir }}/vars/datadog_stage.yml"
roles:
- common
- datadog
- supervisor
......@@ -18,6 +18,7 @@
- "{{ secure_dir }}/vars/datadog_stage.yml"
roles:
- common
- supervisor
- role: nginx
nginx_sites:
- lms
......
......@@ -13,6 +13,7 @@
- "{{ secure_dir }}/vars/datadog_stage.yml"
roles:
- common
- supervisor
- role: nginx
nginx_sites:
- lms
......
......@@ -8,6 +8,7 @@
- "{{ secure_dir }}/vars/edxapp_stage_vars.yml"
- "{{ secure_dir }}/vars/notifier_stage_vars.yml"
roles:
- supervisor
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
......
......@@ -11,6 +11,7 @@
- "{{ secure_dir }}/vars/datadog_stage.yml"
roles:
- common
- supervisor
- role: nginx
nginx_sites:
- ora
......
......@@ -10,6 +10,7 @@
- "{{ secure_dir }}/vars/edxapp_stage_users.yml"
roles:
- common
- supervisor
- rabbitmq
#- hosts: tag_aws_cloudformation_stack-name_feanilpractice:&tag_group_edxapp
......
......@@ -16,6 +16,7 @@
- "{{ secure_dir }}/vars/datadog_stage.yml"
roles:
- common
- supervisor
- { role: 'edxapp', celery_worker: True }
- datadog
#- splunkforwarder
......
......@@ -11,6 +11,7 @@
- "{{ secure_dir }}/vars/datadog_stage.yml"
roles:
- common
- supervisor
- role: nginx
nginx_sites:
- xqueue
......
......@@ -23,11 +23,13 @@
openid_workaround: True
roles:
- common
- supervisor
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- xqueue
- ora
- edxlocal
- mongo
......@@ -36,18 +38,6 @@
- { role: 'edxapp', celery_worker: True }
- oraclejdk
- elasticsearch
- role: rbenv
rbenv_user: "{{ forum_user }}"
rbenv_user_home: "{{ forum_home }}"
rbenv_ruby_version: "{{ forum_ruby_version }}"
- forum
- role: virtualenv
virtualenv_user: "{{ xqueue_user }}"
virtualenv_user_home: "{{ xqueue_user_home }}"
virtualenv_name: "{{ xqueue_user }}"
- { role: "xqueue", update_users: True }
- role: virtualenv
virtualenv_user: "{{ ora_user }}"
virtualenv_user_home: "{{ ora_user_home }}"
virtualenv_name: "{{ ora_user }}"
- role: ora
- ora
---
# This should only have variables
# that are applicable to all edX roles
storage_base_dir: /mnt
app_base_dir: /opt/wwc
log_base_dir: "{{ storage_base_dir }}/logs"
venv_dir: /opt/edx
os_name: ubuntu
ENV_NAME: 'default_env'
ENV_TYPE: 'default_type'
# these pathes are relative to the playbook dir
# directory for secret settings (keys, etc)
secure_dir: 'secure_example'
#
secure_dir: 'path/to/secure_example'
# this indicates the path to site-specific (with precedence)
# things like nginx template files
local_dir: '../../ansible_local'
# include http/https
PYPI_MIRROR_URL: 'https://pypi.python.org/simple'
# do not include http/https
GIT_MIRROR: 'github.com'
local_dir: 'path/to/ansible_local'
......@@ -6,5 +6,7 @@
hosts: jenkins_master
sudo: True
gather_facts: True
vars:
COMMON_DATA_DIR: "/mnt"
roles:
- jenkins_master
......@@ -7,4 +7,8 @@
sudo: True
gather_facts: True
roles:
- common
- edxlocal
- mongo
- browsers
- jenkins_worker
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Matt Wright <matt@nobien.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import os
DOCUMENTATION = '''
---
module: supervisorctl
short_description: Manage the state of a program or group of programs running via Supervisord
description:
- Manage the state of a program or group of programs running via I(Supervisord)
version_added: "0.7"
options:
name:
description:
- The name of the I(supervisord) program/process to manage
required: true
default: null
config:
description:
- configuration file path, passed as -c to supervisorctl
required: false
default: null
version_added: "1.3"
server_url:
description:
- URL on which supervisord server is listening, passed as -s to supervisorctl
required: false
default: null
version_added: "1.3"
username:
description:
- username to use for authentication with server, passed as -u to supervisorctl
required: false
default: null
version_added: "1.3"
password:
description:
- password to use for authentication with server, passed as -p to supervisorctl
required: false
default: null
version_added: "1.3"
state:
description:
- The state of service
required: true
default: null
choices: [ "present", "started", "stopped", "restarted" ]
supervisorctl_path:
description:
- Path to supervisorctl executable to use
required: false
default: null
version_added: "1.4"
requirements:
- supervisorctl
requirements: [ ]
author: Matt Wright
'''
EXAMPLES = '''
# Manage the state of program to be in 'started' state.
- supervisorctl: name=my_app state=started
# Restart my_app, reading supervisorctl configuration from a specified file.
- supervisorctl: name=my_app state=restarted config=/var/opt/my_project/supervisord.conf
# Restart my_app, connecting to supervisord with credentials and server URL.
- supervisorctl: name=my_app state=restarted username=test password=testpass server_url=http://localhost:9001
'''
def main():
arg_spec = dict(
name=dict(required=True),
config=dict(required=False),
server_url=dict(required=False),
username=dict(required=False),
password=dict(required=False),
supervisorctl_path=dict(required=False),
state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped'])
)
module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
name = module.params['name']
state = module.params['state']
config = module.params.get('config')
server_url = module.params.get('server_url')
username = module.params.get('username')
password = module.params.get('password')
supervisorctl_path = module.params.get('supervisorctl_path')
if supervisorctl_path:
supervisorctl_path = os.path.expanduser(supervisorctl_path)
if os.path.exists(supervisorctl_path) and module.is_executable(supervisorctl_path):
supervisorctl_args = [ supervisorctl_path ]
else:
module.fail_json(msg="Provided path to supervisorctl does not exist or isn't executable: %s" % supervisorctl_path)
else:
supervisorctl_args = [ module.get_bin_path('supervisorctl', True) ]
if config:
supervisorctl_args.extend(['-c', os.path.expanduser(config)])
if server_url:
supervisorctl_args.extend(['-s', server_url])
if username:
supervisorctl_args.extend(['-u', username])
if password:
supervisorctl_args.extend(['-p', password])
def run_supervisorctl(cmd, name=None, **kwargs):
args = list(supervisorctl_args) # copy the master args
args.append(cmd)
if name:
args.append(name)
return module.run_command(args, **kwargs)
rc, out, err = run_supervisorctl('status')
present = name in out
if state == 'present':
if not present:
if module.check_mode:
module.exit_json(changed=True)
run_supervisorctl('reread', check_rc=True)
rc, out, err = run_supervisorctl('add', name)
if '%s: added process group' % name in out:
module.exit_json(changed=True, name=name, state=state)
else:
module.fail_json(msg=out, name=name, state=state)
module.exit_json(changed=False, name=name, state=state)
rc, out, err = run_supervisorctl('status', name)
running = 'RUNNING' in out
if running and state == 'started':
module.exit_json(changed=False, name=name, state=state)
if running and state == 'stopped':
if module.check_mode:
module.exit_json(changed=True)
rc, out, err = run_supervisorctl('stop', name)
if '%s: stopped' % name in out:
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg=out)
elif state == 'restarted':
if module.check_mode:
module.exit_json(changed=True)
rc, out, err = run_supervisorctl('update', name)
rc, out, err = run_supervisorctl('restart', name)
if '%s: started' % name in out:
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg=out)
elif not running and state == 'started':
if module.check_mode:
module.exit_json(changed=True)
rc, out, err = run_supervisorctl('start',name)
if '%s: started' % name in out:
module.exit_json(changed=True, name=name, state=state)
elif '%s: ERROR (already started)' % name in out:
# addresses a race condition if update is called
# immediately before started and the service is set
# to start automatically
module.exit_json(changed=False, name=name, state=state)
module.fail_json(msg=out)
module.exit_json(changed=False, name=name, state=state)
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import socket
import datetime
import time
import sys
import re
DOCUMENTATION = '''
---
module: wait_for
short_description: Waits for a condition before continuing.
description:
- Waiting for a port to become available is useful for when services
are not immediately available after their init scripts return -
which is true of certain Java application servers. It is also
useful when starting guests with the M(virt) module and
needing to pause until they are ready. This module can
also be used to wait for a file to be available on the filesystem
or with a regex match a string to be present in a file.
version_added: "0.7"
options:
host:
description:
- hostname or IP address to wait for
required: false
default: "127.0.0.1"
aliases: []
timeout:
description:
- maximum number of seconds to wait for
required: false
default: 300
delay:
description:
- number of seconds to wait before starting to poll
required: false
default: 0
port:
description:
- port number to poll
required: false
state:
description:
- either C(present), C(started), or C(stopped)
- When checking a port C(started) will ensure the port is open, C(stopped) will check that it is closed
- When checking for a file or a search string C(present) or C(started) will ensure that the file or string is present before continuing
choices: [ "present", "started", "stopped" ]
default: "started"
path:
version_added: "1.4"
required: false
description:
- path to a file on the filesytem that must exist before continuing
search_regex:
version_added: "1.4"
required: false
description:
- with the path option can be used match a string in the file that must match before continuing. Defaults to a multiline regex.
notes: []
requirements: []
author: Jeroen Hoekx, John Jarvis
'''
EXAMPLES = '''
# wait 300 seconds for port 8000 to become open on the host, don't start checking for 10 seconds
- wait_for: port=8000 delay=10"
# wait until the file /tmp/foo is present before continuing
- wait_for: path=/tmp/foo
# wait until the string "completed" is in the file /tmp/foo before continuing
- wait_for: path=/tmp/foo search_regex=completed
'''
def main():
module = AnsibleModule(
argument_spec = dict(
host=dict(default='127.0.0.1'),
timeout=dict(default=300),
connect_timeout=dict(default=5),
delay=dict(default=0),
port=dict(default=None),
path=dict(default=None),
search_regex=dict(default=None),
state=dict(default='started', choices=['started', 'stopped', 'present']),
),
)
params = module.params
host = params['host']
timeout = int(params['timeout'])
connect_timeout = int(params['connect_timeout'])
delay = int(params['delay'])
if params['port']:
port = int(params['port'])
else:
port = None
state = params['state']
path = params['path']
search_regex = params['search_regex']
if port and path:
module.fail_json(msg="port and path parameter can not both be passed to wait_for")
if path and state == 'stopped':
module.fail_json(msg="state=stopped should only be used for checking a port in the wait_for module")
start = datetime.datetime.now()
if delay:
time.sleep(delay)
if state == 'stopped':
### first wait for the stop condition
end = start + datetime.timedelta(seconds=timeout)
while datetime.datetime.now() < end:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(connect_timeout)
try:
s.connect( (host, port) )
s.shutdown(socket.SHUT_RDWR)
s.close()
time.sleep(1)
except:
break
else:
elapsed = datetime.datetime.now() - start
module.fail_json(msg="Timeout when waiting for %s:%s to stop." % (host, port), elapsed=elapsed.seconds)
elif state in ['started', 'present']:
### wait for start condition
end = start + datetime.timedelta(seconds=timeout)
while datetime.datetime.now() < end:
if path:
try:
with open(path) as f:
if search_regex:
if re.search(search_regex, f.read(), re.MULTILINE):
break
else:
time.sleep(1)
else:
break
except IOError:
time.sleep(1)
pass
elif port:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(connect_timeout)
try:
s.connect( (host, port) )
s.shutdown(socket.SHUT_RDWR)
s.close()
break
except:
time.sleep(1)
pass
else:
elapsed = datetime.datetime.now() - start
if port:
module.fail_json(msg="Timeout when waiting for %s:%s" % (host, port), elapsed=elapsed.seconds)
elif path:
if search_regex:
module.fail_json(msg="Timeout when waiting for search string %s in %s" % (search_regex, path), elapsed=elapsed.seconds)
else:
module.fail_json(msg="Timeout when waiting for file %s" % (path), elapsed=elapsed.seconds)
elapsed = datetime.datetime.now() - start
module.exit_json(state=state, port=port, search_regex=search_regex, path=path, elapsed=elapsed.seconds)
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
......@@ -10,13 +10,15 @@
#
#
# Handlers for role analytics-server
#
#
# Overview:
#
#
#
- name: analytics-server | stop the analytics service
service: name=analytics state=stopped
tags: deploy
- name: analytics-server | start the analytics service
service: name=analytics state=started
tags: deploy
......@@ -10,13 +10,15 @@
#
#
# Handlers for role analytics
#
#
# Overview:
#
#
#
- name: analytics | stop the analytics service
service: name=analytics state=stopped
tags: deploy
- name: analytics | start the analytics service
service: name=analytics state=started
tags: deploy
---
- name: apache | restart apache
service: name=apache2 state=restarted
tags: deploy
WSGIPythonHome {{venv_dir}}
WSGIPythonHome {{ edxapp_venv_dir }}
WSGIRestrictEmbedded On
<VirtualHost *:{{apache_port}}>
......@@ -15,9 +15,9 @@ WSGIRestrictEmbedded On
SetEnv SERVICE_VARIANT lms
WSGIScriptAlias / {{edx_platform_code_dir}}/lms/wsgi_apache_lms.py
WSGIScriptAlias / {{ edxapp_code_dir }}/lms/wsgi_apache_lms.py
<Directory {{edx_platform_code_dir}}/lms>
<Directory {{ edxapp_code_dir }}/lms>
<Files wsgi_apache_lms.py>
Order deny,allow
Allow from all
......@@ -39,7 +39,7 @@ WSGIRestrictEmbedded On
require valid-user
</Location>
WSGIDaemonProcess lms user=www-data group=adm processes=1 python-path={{edx_platform_code_dir}}:{{venv_dir}}/lib/python2.7/site-packages display-name=%{GROUP}
WSGIDaemonProcess lms user=www-data group=adm processes=1 python-path={{ edxapp_code_dir }}:{{ edxapp_venv_dir }}/lib/python2.7/site-packages display-name=%{GROUP}
WSGIProcessGroup lms
WSGIApplicationGroup %{GLOBAL}
......@@ -48,4 +48,4 @@ WSGIRestrictEmbedded On
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\" %D" apache-edx
CustomLog ${APACHE_LOG_DIR}/apache-edx-access.log apache-edx
</VirtualHost>
\ No newline at end of file
</VirtualHost>
browser_deb_pkgs:
- xvfb
- dbus-x11
- libgconf2-4
- libxss1
- libnss3-1d
- libcurl3
- xdg-utils
- gdebi
# Debian packages we host in S3 to ensure correct browser version
# Both Chrome and FireFox update their apt repos with the latest version,
# which often causes spurious acceptance test failures.
browser_s3_deb_pkgs:
- { name: "google-chrome-stable_30.0.1599.114-1_amd64.deb", url: "https://s3.amazonaws.com/vagrant.testeng.edx.org/google-chrome-stable_30.0.1599.114-1_amd64.deb" }
- { name: "firefox_25.0+build3-0ubuntu0.12.04.1_amd64.deb", url: "https://s3.amazonaws.com/vagrant.testeng.edx.org/firefox_25.0%2Bbuild3-0ubuntu0.12.04.1_amd64.deb" }
# Chrome and ChromeDriver
chromedriver_version: 2.6
chromedriver_url: "http://chromedriver.storage.googleapis.com/{{ chromedriver_version }}/chromedriver_linux64.zip"
browser_xvfb_display: ":1"
# Install browsers required to run the JavaScript
# and acceptance test suite locally without a display
---
- name: browsers | install system packages
  apt: pkg={{ ','.join(browser_deb_pkgs) }}
       state=present update_cache=yes

- name: browsers | download browser debian packages from S3
  get_url: dest="/tmp/{{ item.name }}" url="{{ item.url }}"
  register: download_deb
  with_items: "{{ browser_s3_deb_pkgs }}"

# gdebi installs a local .deb together with its apt dependencies.
# Only reinstall when a fresh package was actually downloaded above.
- name: browsers | install browser debian packages
  shell: gdebi -nq /tmp/{{ item.name }}
  when: download_deb.changed
  with_items: "{{ browser_s3_deb_pkgs }}"

- name: browsers | Install ChromeDriver
  get_url:
    url={{ chromedriver_url }}
    dest=/var/tmp/chromedriver_{{ chromedriver_version }}.zip

- name: browsers | Install ChromeDriver 2
  shell: unzip /var/tmp/chromedriver_{{ chromedriver_version }}.zip
         chdir=/var/tmp

- name: browsers | Install ChromeDriver 3
  shell: mv /var/tmp/chromedriver /usr/local/bin/chromedriver

- name: browsers | Install Chromedriver 4
  file: path=/usr/local/bin/chromedriver mode=0755

- name: browsers | create xvfb upstart script
  template: src=xvfb.conf.j2 dest=/etc/init/xvfb.conf owner=root group=root

# upstart's "start" exits non-zero when the job is already running,
# which is expected on re-runs, so errors are ignored.
- name: browsers | start xvfb
  shell: start xvfb
  ignore_errors: true
# upstart job running a virtual framebuffer X server for headless browsers.
# (Merge-conflict duplicates of the start/exec/respawn stanzas removed;
# the templated display variant is kept.)
description "Xvfb X Server"

start on (net-device-up and local-filesystems and runlevel [2345])
stop on runlevel [016]

exec /usr/bin/Xvfb {{ browser_xvfb_display }} -screen 0 1024x768x24

respawn
respawn limit 15 5
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role certs
#
CERTS_QUEUE_URL: "http://localhost:18040"
CERTS_BUCKET: ""
# basic auth credentials for connecting
# to the xqueue server
CERTS_XQUEUE_AUTH_USER: "edx"
CERTS_XQUEUE_AUTH_PASS: "edx"
# credentials for connecting to the xqueue server
CERTS_QUEUE_USER: "lms"
CERTS_QUEUE_PASS: "password"
# AWS credentials for certificate upload
CERTS_AWS_KEY: ""
CERTS_AWS_ID: ""
# GPG key ID, defaults to the dummy key
CERTS_KEY_ID: "FEF8D954"
# Path to git identity file for pull access to
# the edX certificates repo - REQUIRED
# Example - {{ secure_dir }}/files/git-identity
CERTS_LOCAL_GIT_IDENTITY: !!null
# Path to public and private gpg key for signing
# the edX certificate. Default is a dummy key
CERTS_LOCAL_PRIVATE_KEY: "example-private-key.txt"

########## Internal role vars below
certs_user: certs
certs_app_dir: "{{ COMMON_APP_DIR }}/certs"
certs_code_dir: "{{ certs_app_dir }}/certificates"
certs_venvs_dir: "{{ certs_app_dir }}/venvs"
certs_venv_dir: "{{ certs_venvs_dir }}/certs"
certs_venv_bin: "{{ certs_venv_dir }}/bin"
certs_git_ssh: /tmp/git_ssh.sh
certs_git_identity: "{{ certs_app_dir }}/git-identity"
certs_requirements_file: "{{ certs_code_dir }}/requirements.txt"
certs_repo: "git@github.com:/edx/certificates"
certs_version: 'master'
certs_gpg_dir: "{{ certs_app_dir }}/gnupg"

# Rendered to env.json for the certificate agent.
# Uses "{{ var }}" interpolation; the legacy $VARNAME form is
# deprecated and removed in newer Ansible releases.
certs_env_config:
  # CERTS_DATA is legacy, not used
  CERT_DATA: {}
  QUEUE_NAME: "certificates"
  QUEUE_URL: "{{ CERTS_QUEUE_URL }}"
  CERT_BUCKET: "{{ CERTS_BUCKET }}"
  # gnupg signing key
  CERT_KEY_ID: "{{ CERTS_KEY_ID }}"
  LOGGING_ENV: ""
  CERT_GPG_DIR: "{{ certs_gpg_dir }}"

# Rendered to auth.json (credentials kept out of env.json).
certs_auth_config:
  QUEUE_USER: "{{ CERTS_QUEUE_USER }}"
  QUEUE_PASS: "{{ CERTS_QUEUE_PASS }}"
  QUEUE_AUTH_USER: "{{ CERTS_XQUEUE_AUTH_USER }}"
  QUEUE_AUTH_PASS: "{{ CERTS_XQUEUE_AUTH_PASS }}"
  CERT_KEY_ID: "{{ CERTS_KEY_ID }}"
  CERT_AWS_ID: "{{ CERTS_AWS_ID }}"
  CERT_AWS_KEY: "{{ CERTS_AWS_KEY }}"
-----BEGIN PGP PRIVATE KEY BLOCK-----
Version: GnuPG v1.4.11 (GNU/Linux)
lQOYBFJwVOkBCAC4heT6+P1sGgITAB5C+hKNr4RACS47K1nxgIiEqiFMIycluDmM
4kdqFInzDK8GHF2W5KijZmYf7LrWIg4+PmnyYAB7cO+eJUDfTE7n7bjGQL3LohJN
FTlRsXKOKGWoBqlytE3D16lQIIp0JkqB9sHO3Y9yOgEsSy3cMWKtT8U6qx40xV+e
t0FYmqL7pBE7OFfvCIe7+kthsTqFys/jkRNFvbSo5fjA1m9ubjEJqqfnhuvLaL5O
YHGe1nKQRLi45gmZ1JYvxfZrWUO2BeulNY/mvAFQnRNRRiWfM3Ic4Ya9Wv62wS3p
dYY4HEtDQDyKpOkJ2R31+1FhZYIKJTYR89jxABEBAAEAB/wOApyQMbeMLa1ao/eo
PjSKbXktI4VPGMuLeqbi68f7b+/Y/VPhToz9kPGocp4XaK/ydQoY3f2DDwZgm9VZ
BIQm0wM2XCzVZR631aNoGLSe2OuQOo4JLENd4ItCH+8YAul6vBXreMRyQQZCK2Yc
2A9/FXN+yMiuBEdHILjNT/E5swNm0J85YlXpIW6Jm3aR6OjzfFS/j+7AEDSL5MZX
JotfGjYXuC1MOw5YJZKWkQBz+5IaVceOd9s8TlZFq/eYrN5sqAWh06CBY+Zye3fg
/WiWFUdTgpG81lbAXGxHrjQ5f22saOzkbv0FjdEfx1M9Wcj6OAIRXI7k8EkZJia1
IYEBBADYRvRE2zyR5M72MfCX1XUOpx/9OZCrVsYoKqp1BORXjt58Szs7UFdeAXE+
bPzbpcjENiVYVjoeQKCNTU78gzjD+NzkfTdsF7rrvXObo6NpChTCOdQfg56Ll+3y
3nUDKIcFXsYP1NIC2SL0APcpUtDLPIWb0XRnlvBQlakmnyb7bQQA2mnn15LVRK0J
1wYZiSwrRIcE7X+zy6t8iERr+E0jIyQQV4vaOYItCDTP8fzNiiX10Nkt5imRqML3
NBPs0jInjmYxMmzvjVxyUDv4rGGbiXXeh+W1v/mweMH2pbiItjhyeSVt4U2l6GKI
Ob/K+khx1ftfOTktLTZVMPg6NzPRfRUD/jyLL92V7eoHshreqFIUcBmUdQnMVyz2
NBhci7RHGn3J84TTMCXBJL8MLUu9PKxfFcZjw1Wet8avX5oPjgl3OiQpjjx69sO5
S/UWpGaEOrz87j4VRGPb6zmegkZs44sQEfwhDJk6O1eQ5dYzniRC46nzMpETFfIF
U8m8bJrrus4HPEy0HmV4YW1wbGUga2V5IDx0ZXN0QGV4YW1wbGUuY29tPokBOAQT
AQIAIgUCUnBU6QIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQBECJtv74
2VSWKQf/f0PWmbGxdiBIK5gf2pmaK0aDDM09v7vedysIn/URnj26BMN/YEyDYnZS
BN+iuU6VartvEYlNeYiRAnaG/6gl7DJh2l2X/iuDn0xKT0GjqDpjh7n6964OKAz7
RHWADXqsr5BWms1EPFtDVnAJfN2A3cxTeA5vUUl41WvCJQa7L8Bw7SezkS0yn8Rn
u4icNKyew7TrFofIydws6LTM1DhHpCB32z6b7HHt85OOzpuUm07HP83S59lxBp6x
x3NH9AH/WPeXiS5QRh1jP6qzUAHoHQpsV2XonmC4JXl+ZFxNyZeJ000ldDFfEHrO
RLg3d5GkZ1pDVDn3HlZ+SKqYilRXCJ0DmARScFTpAQgA1KGTRGcqwla5/VOuHwxw
ABpLYdpsetYoOORjJQvHakG8QBchxsJVniBijD09gFmHYpdSJaeHnvqkeHGO1fJa
E4QxS4AYt/HVoi86RhBLD/Gr0/DWC/0XUV5613PSmWkYCCTgWLaxT9MpPjtGVd4v
L6Iv/d8Go/Wrq55zCl82PTA7ao4PxSSxlforfZOZqsJ/pzjCRkF6Z7co+LO24KSl
Lt4iN2vwJ2VhvOrMFuV91WQeEJWdTX+yx035eU/MFu9u243CE0UGNzWHjYLpgBxl
Pg0W5GFRZM/LYkXAfHAM4/Ic2ex2LQ0RLiH4i0FbzoSwjvz586v2Sagc5nsYMoGu
gQARAQABAAf/W6W+23taJ0SJSuLACJrsRWcP+b/TBQj8cjUidKvEioyFztwJj2lg
zNSplUeqFAHCxGBzpE42uvPOYymTBq08XPAb7S5ruREP4yVXCS7po5gnVyUVpToz
zDscWHQQIFZ3aL84QZSRDVZ3Dt8unEE1dmMCK3rvGkl/8mtLq3tJXgp7/wdsK4G0
3AuJVQ918XlozNTayGfdCPhWicE7fv5peUlWRWlSuSNmTrHiAbysd2xwXnMq/OdQ
Q+z7ogQHhUvQQ+31msGlcCJQqqWr250/HBrTATrRJNIVvvzCgpw3/6r99MNwlSWV
ZhDotwf745fdzZiwdgJ04nhEj9QmKynKXwQA29p7DmMWMZg61qU481YNWgc5RMjL
ADUS2iC5nr/Y5HuAsGWj9ZkDvRXKSyexkZ+OXi6EonGCGjCNONPaB8JWRO7LssEc
VG+lPp4mwASE38cjfFy7DdEGpxn3eZPDsNwv7vnWhNyGSh8FXKoYXyZiJ6F3zvkU
aWwfaTtxVplfn88EAPeXHLkwl+D8zkk8ILYnsJKEKjcqUwiQ6L7JMEhd+GVo5xR+
WUDdhnmEkH/QZZt8zTpYL3Hl3JsqQYidq0uzy39qg+cVvD9yJkHP4KMAqe9QcRYR
eQvpopMYt5va7pyaebZbpxfP9M7Y2/5VT59GBO6uHy4CMR1uM4Z50QA0kZCvA/oD
D/9qEaWzwqLtXjN1iRxOv7ioor8ExvA/8HY8xtCsCLuFuo9P44xtYzSCzLdoOYCE
4Lrn7DeE2hXEoq/2VEWoRS4+kU1vUBIJEAxfHk6HozA0apFPqm8ODH44s68VRTce
pGwORxsFhMHw1/m5A1RBZF8UP7VXFxluYuwx6S5NyjSfiQEfBBgBAgAJBQJScFTp
AhsMAAoJEARAibb++NlUojkIAKHwS1VSeW6fgWv7H2qaTjdMeNG7vXUYKUE7KMpQ
UmvdHobMfbO9SEgihwG+WdgPy96RlYx5PuVfeWkPVdVsbrU9BuR+9qdYyGGH4FvP
qAaruT3dFLRFvDj/ta94gDFGCH1LrtGI/t78wjjIEd8QOGIj+8Uo1Z6HKExSsNuG
+8usut6je50a2CsAyoZtrPmybZdkU6eOuM5ZSGDpgfTlFNpeK3sf7CTnYA5NTLPC
wWbyCxUb7EUrch+StmJWsIzS4mClMd6nB4480FwwhGbdFejSF20z64c6hbxuwgfS
nyXklWktEX0d5T7wdAi+UOvNsdoigzUMWpBoo07VOlzjMFU=
=iNqX
-----END PGP PRIVATE KEY BLOCK-----
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1.4.11 (GNU/Linux)
mQENBFJwVOkBCAC4heT6+P1sGgITAB5C+hKNr4RACS47K1nxgIiEqiFMIycluDmM
4kdqFInzDK8GHF2W5KijZmYf7LrWIg4+PmnyYAB7cO+eJUDfTE7n7bjGQL3LohJN
FTlRsXKOKGWoBqlytE3D16lQIIp0JkqB9sHO3Y9yOgEsSy3cMWKtT8U6qx40xV+e
t0FYmqL7pBE7OFfvCIe7+kthsTqFys/jkRNFvbSo5fjA1m9ubjEJqqfnhuvLaL5O
YHGe1nKQRLi45gmZ1JYvxfZrWUO2BeulNY/mvAFQnRNRRiWfM3Ic4Ya9Wv62wS3p
dYY4HEtDQDyKpOkJ2R31+1FhZYIKJTYR89jxABEBAAG0HmV4YW1wbGUga2V5IDx0
ZXN0QGV4YW1wbGUuY29tPokBOAQTAQIAIgUCUnBU6QIbAwYLCQgHAwIGFQgCCQoL
BBYCAwECHgECF4AACgkQBECJtv742VSWKQf/f0PWmbGxdiBIK5gf2pmaK0aDDM09
v7vedysIn/URnj26BMN/YEyDYnZSBN+iuU6VartvEYlNeYiRAnaG/6gl7DJh2l2X
/iuDn0xKT0GjqDpjh7n6964OKAz7RHWADXqsr5BWms1EPFtDVnAJfN2A3cxTeA5v
UUl41WvCJQa7L8Bw7SezkS0yn8Rnu4icNKyew7TrFofIydws6LTM1DhHpCB32z6b
7HHt85OOzpuUm07HP83S59lxBp6xx3NH9AH/WPeXiS5QRh1jP6qzUAHoHQpsV2Xo
nmC4JXl+ZFxNyZeJ000ldDFfEHrORLg3d5GkZ1pDVDn3HlZ+SKqYilRXCLkBDQRS
cFTpAQgA1KGTRGcqwla5/VOuHwxwABpLYdpsetYoOORjJQvHakG8QBchxsJVniBi
jD09gFmHYpdSJaeHnvqkeHGO1fJaE4QxS4AYt/HVoi86RhBLD/Gr0/DWC/0XUV56
13PSmWkYCCTgWLaxT9MpPjtGVd4vL6Iv/d8Go/Wrq55zCl82PTA7ao4PxSSxlfor
fZOZqsJ/pzjCRkF6Z7co+LO24KSlLt4iN2vwJ2VhvOrMFuV91WQeEJWdTX+yx035
eU/MFu9u243CE0UGNzWHjYLpgBxlPg0W5GFRZM/LYkXAfHAM4/Ic2ex2LQ0RLiH4
i0FbzoSwjvz586v2Sagc5nsYMoGugQARAQABiQEfBBgBAgAJBQJScFTpAhsMAAoJ
EARAibb++NlUojkIAKHwS1VSeW6fgWv7H2qaTjdMeNG7vXUYKUE7KMpQUmvdHobM
fbO9SEgihwG+WdgPy96RlYx5PuVfeWkPVdVsbrU9BuR+9qdYyGGH4FvPqAaruT3d
FLRFvDj/ta94gDFGCH1LrtGI/t78wjjIEd8QOGIj+8Uo1Z6HKExSsNuG+8usut6j
e50a2CsAyoZtrPmybZdkU6eOuM5ZSGDpgfTlFNpeK3sf7CTnYA5NTLPCwWbyCxUb
7EUrch+StmJWsIzS4mClMd6nB4480FwwhGbdFejSF20z64c6hbxuwgfSnyXklWkt
EX0d5T7wdAi+UOvNsdoigzUMWpBoo07VOlzjMFU=
=WP59
-----END PGP PUBLIC KEY BLOCK-----
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role certs
#
# Overview:
#
# Restarts the "certs" program through supervisorctl whenever a task
# in this role notifies it (config, code, or key changes).
#
- name: certs | restart certs
  supervisorctl: >
    name=certs
    supervisorctl_path={{ supervisor_ctl }}
    config={{ supervisor_cfg }}
    state=restarted
  tags: deploy
---
# Deploy tasks for role certs: render configs, check out the certificates
# repo over a temporary read-only deploy key, and (re)start the agent
# under supervisor.

- name: certs | create certificate application config
  template: >
    src=certs.env.json.j2
    dest={{ certs_app_dir }}/env.json
  sudo_user: "{{ certs_user }}"
  notify: certs | restart certs
  tags: deploy

- name: certs | create certificate auth file
  template: >
    src=certs.auth.json.j2
    dest={{ certs_app_dir }}/auth.json
  sudo_user: "{{ certs_user }}"
  notify: certs | restart certs
  tags: deploy

- name: certs | writing supervisor script for certificates
  template: >
    src=certs.conf.j2 dest={{ supervisor_cfg_dir }}/certs.conf
    owner={{ supervisor_user }} mode=0644
  notify: certs | restart certs
  tags: deploy

- name: certs | create ssh script for git
  template: >
    src={{ certs_git_ssh|basename }}.j2 dest={{ certs_git_ssh }}
    owner={{ certs_user }} mode=750
  notify: certs | restart certs
  tags: deploy

# The deploy key is only on disk for the duration of the checkout;
# it is removed again two tasks below.
- name: certs | install read-only ssh key for the certs repo
  copy: >
    src={{ CERTS_LOCAL_GIT_IDENTITY }} dest={{ certs_git_identity }}
    force=yes owner={{ certs_user }} mode=0600
  notify: certs | restart certs
  tags: deploy

- name: certs | checkout certificates repo into {{ certs_code_dir }}
  git: dest={{ certs_code_dir }} repo={{ certs_repo }} version={{ certs_version }}
  sudo_user: "{{ certs_user }}"
  environment:
    GIT_SSH: "{{ certs_git_ssh }}"
  notify: certs | restart certs
  tags: deploy

- name: certs | remove read-only ssh key for the certs repo
  file: path={{ certs_git_identity }} state=absent
  notify: certs | restart certs
  tags: deploy

# (was "- name : install python requirements": stray space before the colon
# and missing the "certs |" prefix used by every other task in this role)
- name: certs | install python requirements
  pip: requirements="{{ certs_requirements_file }}" virtualenv="{{ certs_venv_dir }}" state=present
  sudo_user: "{{ certs_user }}"
  notify: certs | restart certs
  tags: deploy

# call supervisorctl update. this reloads
# the supervisorctl config and restarts
# the services if any of the configurations
# have changed.
#
- name: certs | update supervisor configuration
  shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
  register: supervisor_update
  sudo_user: "{{ supervisor_service_user }}"
  changed_when: supervisor_update.stdout != ""

- name: certs | ensure certs has started
  supervisorctl: >
    name=certs
    supervisorctl_path={{ supervisor_ctl }}
    config={{ supervisor_cfg }}
    state=started
  sudo_user: "{{ supervisor_service_user }}"
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role certs
#
# Overview:
#
# Installs the edX certificate server.
#
# The certificates repo is currently *not* public
# due to sensitive information in it, it may be made
# public in the future.
#
# Dependencies:
# - common
# - supervisor
#
#
# Example play:
#
# - roles:
#   - common
#   - supervisor
#   - certs
#

# Fail fast: the private repo cannot be cloned without a deploy key.
# (Task name now carries the "certs |" prefix for consistency with the
# rest of this role.)
- name: certs | checking to see if git identity is set
  fail: msg="You must set CERTS_LOCAL_GIT_IDENTITY var for this role!"
  when: not CERTS_LOCAL_GIT_IDENTITY

- name: certs | create application user
  user: >
    name="{{ certs_user }}"
    home="{{ certs_app_dir }}"
    createhome=no
    shell=/bin/false
  notify: certs | restart certs

- name: certs | create certs app and data dirs
  file: >
    path="{{ item }}"
    state=directory
    owner="{{ certs_user }}"
    group="{{ common_web_group }}"
  notify: certs | restart certs
  with_items:
    - "{{ certs_app_dir }}"
    - "{{ certs_venvs_dir }}"

# gnupg requires its home directory to be private to the owner.
- name: certs | create certs gpg dir
  file: >
    path="{{ certs_gpg_dir }}" state=directory
    owner="{{ certs_user }}" group="{{ certs_user }}"
    mode=0700
  notify: certs | restart certs

- name: certs | copy the private gpg signing key
  copy: >
    src={{ CERTS_LOCAL_PRIVATE_KEY }}
    dest={{ certs_app_dir }}/{{ CERTS_LOCAL_PRIVATE_KEY|basename }}
    owner={{ certs_user }} mode=0600
  notify: certs | restart certs
  register: certs_gpg_key

# Only re-import when the key file actually changed.
- name: certs | load the gpg key
  shell: >
    /usr/bin/gpg --homedir {{ certs_gpg_dir }} --import {{ certs_app_dir }}/{{ CERTS_LOCAL_PRIVATE_KEY|basename }}
  sudo_user: "{{ certs_user }}"
  when: certs_gpg_key.changed
  notify: certs | restart certs

- include: deploy.yml

# Expose the venv interpreter/pip as {{ COMMON_BIN_DIR }}/python.certs etc.
- name: certs | create a symlink for venv python
  file: >
    src="{{ certs_venv_bin }}/{{ item }}"
    dest={{ COMMON_BIN_DIR }}/{{ item }}.certs
    state=link
  notify: certs | restart certs
  with_items:
    - python
    - pip
{{ certs_auth_config | to_nice_json }}
[program:certs]
command={{ certs_venv_bin }}/python {{ certs_code_dir }}/certificate_agent.py
priority=999
environment=SERVICE_VARIANT="certs",HOME="/"
user={{ common_web_user }}
; %(program_name)s requires the trailing "s" conversion character;
; "%(program_name)-stdout.log" was parsed as a left-justified "%(...)s"
; followed by the literal "tdout.log", yielding "certstdout.log".
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
killasgroup=true
stopasgroup=true
{{ certs_env_config | to_nice_json }}
#!/bin/sh
# GIT_SSH wrapper forcing the certs deploy key; the non-templated
# /etc/git-identity duplicate (merge-conflict residue) is removed —
# deploy.yml installs the key at {{ certs_git_identity }}.
exec /usr/bin/ssh -o StrictHostKeyChecking=no -i {{ certs_git_identity }} "$@"
# Override these variables
# to change the base directory
# where edX is installed
COMMON_BASE_DIR: /edx
COMMON_DATA_DIR: "{{ COMMON_BASE_DIR}}/var"
COMMON_APP_DIR: "{{ COMMON_BASE_DIR}}/app"
COMMON_LOG_DIR: "{{ COMMON_DATA_DIR }}/log"
# these directories contain
# symlinks for convenience
COMMON_BIN_DIR: "{{ COMMON_BASE_DIR }}/bin"
COMMON_CFG_DIR: "{{ COMMON_BASE_DIR }}/etc"
COMMON_ENV_NAME: 'default_env'
COMMON_ENV_TYPE: 'default_type'
COMMON_PYPI_MIRROR_URL: 'https://pypi.python.org/simple'
# do not include http/https
COMMON_GIT_MIRROR: 'github.com'
# override this var to set a different hostname
COMMON_HOSTNAME: !!null
common_debian_pkgs:
- ack-grep
- lynx-cur
......@@ -8,3 +31,16 @@ common_debian_pkgs:
- tree
- git
- unzip
- python2.7
- python-pip
- python2.7-dev
common_pip_pkgs:
- virtualenv
- virtualenvwrapper
common_web_user: www-data
common_web_group: www-data
common_log_user: syslog
common_git_ppa: "ppa:git-core/ppa"
......@@ -2,3 +2,4 @@
- name: common | restart rsyslogd
  service: name=rsyslog state=restarted
  # lowercase boolean per YAML 1.2 / yamllint "truthy" (was: True)
  sudo: true
  tags: deploy
---
# Overview:
#
# Creates OS accounts for users based on their github credential.
# Expects to find a list in scope named GITHUB_USERS with
# the following structure:
#
# GITHUB_USERS:
#   - user: me_at_github
#     groups:
#       - adm
#   - user: otheruser
#     groups:
#       - users
#
# NOTE(review): "groups" here replaces the account's secondary groups
# (no append flag), so GITHUB_USERS must list every desired group — confirm.
- name: common | create local user for github user
  user:
    name={{ item.user }}
    groups={{ ",".join(item.groups) }}
    shell=/bin/bash
  with_items: GITHUB_USERS
  tags:
    - users
    - update

- name: common | create .ssh directory
  file:
    path=/home/{{ item.user }}/.ssh state=directory mode=0700
    owner={{ item.user }} group={{ item.user }}
  with_items: GITHUB_USERS
  tags:
    - users
    - update

# Fetches https://github.com/<user>.keys into authorized_keys.
# NOTE(review): this writes the fetched keys as the whole authorized_keys
# file, presumably replacing manually added entries — verify intended.
- name: common | copy github key[s] to .ssh/authorized_keys
  get_url:
    url=https://github.com/{{ item.user }}.keys
    dest=/home/{{ item.user }}/.ssh/authorized_keys mode=0600
    owner={{ item.user }} group={{ item.user }}
  with_items: GITHUB_USERS
  tags:
    - users
    - update
---
# create the 'edx' virtual environment in /opt so that roles can populate it
#
# NOTE(review): these tasks reference PYPI_MIRROR_URL and venv_dir, which are
# not defined in this role's visible defaults (the common defaults define
# COMMON_PYPI_MIRROR_URL) — confirm they are supplied by the calling playbook.
- name: common | Install python and pip
  apt: pkg={{item}} install_recommends=yes state=present update_cache=yes
  with_items:
    - python2.7
    - python-pip
    - python2.7-dev
  tags:
    - pre_install
    - install

- name: common | pip install virtualenv
  pip: >
    name=virtualenv
    state=present
    extra_args="-i {{ PYPI_MIRROR_URL }}"
  tags:
    - venv_base
    - install

- name: common | pip install virtualenvwrapper
  pip: >
    name=virtualenvwrapper
    state=present
    extra_args="-i {{ PYPI_MIRROR_URL }}"
  tags:
    - venv_base
    - install

# setgid (2775) so files created inside inherit the adm group
- name: common | create edx virtualenv directory
  file: path={{ venv_dir }} owner=ubuntu group=adm mode=2775 state=directory
  tags:
    - venv_base
    - install

# "creates=" makes this idempotent: skipped once the venv is bootstrapped
- name: common | create the edx virtualenv directory initial contents
  command: /usr/local/bin/virtualenv {{ venv_dir }} --distribute creates=$venv_dir/bin/activate
  tags:
    - venv_base
    - install

- name: common | pip install gunicorn
  pip: >
    name=gunicorn
    virtualenv="{{venv_dir}}"
    state=present
    extra_args="-i {{ PYPI_MIRROR_URL }}"
  tags:
    - gunicorn
    - install
---
# Sets up edX logging plumbing: rsyslog routing, logrotate policies,
# and the tracking log file.

# Disabled: rsyslog config for ansible run logging (kept for reference).
#- name: common | Install rsyslog configuration for ansible runs
#  template: dest=/etc/rsyslog.d/90-edx.conf src=ansible_rsyslog.j2 owner=root group=root mode=644
#  notify: common | restart rsyslogd
#  tags:
#  - lms-env
#  - cms-env
#  - logging
#  - update

- name: common | Install rsyslog configuration for edX
  template: dest=/etc/rsyslog.d/99-edx.conf src=edx_rsyslog.j2 owner=root group=root mode=644
  notify: common | restart rsyslogd
  tags:
    - logging
    - update

- name: common | Install logrotate configuration for edX
  template: dest=/etc/logrotate.d/edx-services src=edx_logrotate.j2 owner=root group=root mode=644
  tags:
    - logging
    - update

# "touch -a" + "creates=" keeps this idempotent: only runs when missing
- name: common | Touch tracking file into existence
  command: touch -a {{log_base_dir}}/tracking.log creates={{log_base_dir}}/tracking.log
  tags:
    - logging
    - update

- name: common | Set permissions on tracking file
  file: path={{log_base_dir}}/tracking.log owner=syslog group=adm mode=640
  tags:
    - logging
    - update

- name: common | Install logrotate configuration for tracking file
  template: dest=/etc/logrotate.d/tracking.log src=edx_logrotate_tracking_log.j2 owner=root group=root mode=644
  tags:
    - logging
    - update
---
- include: create_users.yml
- include: create_github_users.yml
when: GITHUB_USERS is defined
- name: common | Add user www-data
# This user should be created on the system by default
user: name=www-data
tags:
- pre_install
- update
# This is the default user for nginx
user: >
name="{{ common_web_user }}"
shell=/bin/false
- name: common | Create common directories
file: >
path={{ item }} state=directory owner=root
group=root mode=0755
with_items:
- "{{ COMMON_DATA_DIR }}"
- "{{ COMMON_APP_DIR }}"
- "{{ COMMON_BIN_DIR }}"
- "{{ COMMON_CFG_DIR }}"
- name: common | Create the base directory for storage
- name: common | Create common log directory
file: >
path={{ storage_base_dir }}
state=directory
owner=root
group=root
mode=0755
path={{ COMMON_LOG_DIR }} state=directory owner=syslog
group=syslog mode=0755
- name: common | Create application root
# In the future consider making group edx r/t adm
file: path={{ app_base_dir }} state=directory owner=root group=adm mode=2775
tags:
- pre_install
- update
# Need to install python-pycurl to use Ansible's apt_repository module
- name: common | Install python-pycurl
apt: pkg=python-pycurl state=present update_cache=yes
- name: common | Create upload directory
file: path={{ app_base_dir }}/uploads mode=2775 state=directory owner=root group=adm
tags:
- pre_install
- update
# Ensure that we get a current version of Git
# GitHub requires version 1.7.10 or later
# https://help.github.com/articles/https-cloning-errors
- name: common | Add git apt repository
apt_repository: repo="{{ common_git_ppa }}"
- name: common | Create data dir
file: path={{ app_base_dir }}/data state=directory owner=www-data group=root
tags:
- pre_install
- update
- name: common | Create staticfiles dir
file: path={{ app_base_dir }}/staticfiles state=directory owner=www-data group=adm mode=2775
tags:
- pre_install
- update
- name: common | Install role-independent useful system packages
# do this before log dir setup; rsyslog package guarantees syslog user present
apt: pkg={{','.join(common_debian_pkgs)}} install_recommends=yes state=present update_cache=yes
tags:
- pre_install
- update
apt: >
pkg={{','.join(common_debian_pkgs)}} install_recommends=yes
state=present update_cache=yes
- name: common | Create log directory
file: path={{log_base_dir}} state=directory mode=2755 group=adm owner=syslog
tags:
- pre_install
- update
- name: common | upload sudo config for key forwarding as root
copy: >
src=ssh_key_forward dest=/etc/sudoers.d/ssh_key_forward
validate='visudo -c -f %s' owner=root group=root mode=0440
- name: common | Create alias from app_base_dir to the log_base_dir
file: state=link src={{log_base_dir}} path={{app_base_dir}}/log
tags:
- pre_install
- logging
- update
- name: common | pip install virtualenv
pip: >
name="{{ item }}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
with_items: common_pip_pkgs
- name: common | Create convenience link from log_base_dir to system logs
file: state=link src=/var/log path=$log_base_dir/system
tags:
- pre_install
- logging
- update
- name: common | Install rsyslog configuration for edX
template: dest=/etc/rsyslog.d/99-edx.conf src=edx_rsyslog.j2 owner=root group=root mode=644
notify: common | restart rsyslogd
- name: common | Touch edx log file into place
# This is done for the benefit of the rake commands, which expect it
command: touch -a {{log_base_dir}}/edx.log creates={{log_base_dir}}/edx.log
tags:
- pre_install
- logging
- install
- name: common | Set permissions on edx log file
# This is done for the benefit of the rake commands, which expect it
file: path={{log_base_dir}}/edx.log owner=syslog group=adm mode=640
tags:
- pre_install
- logging
- update
- name: common | Install logrotate configuration for edX
template: dest=/etc/logrotate.d/edx-services src=edx_logrotate.j2 owner=root group=root mode=644
- name: common | upload sudo config for key forwarding as root
copy: src=ssh_key_forward dest=/etc/sudoers.d/ssh_key_forward validate='visudo -c -f %s' owner=root group=root mode=0440
- include: create_venv.yml
- include: edx_logging_base.yml
- name: common | update /etc/hosts
template: src=hosts.j2 dest=/etc/hosts
when: COMMON_HOSTNAME
register: etc_hosts
- name: common | update /etc/hostname
template: src=hostname.j2 dest=/etc/hostname
when: COMMON_HOSTNAME
register: etc_hostname
- name: common | run hostname
shell: >
hostname -F /etc/hostname
when: COMMON_HOSTNAME and (etc_hosts.changed or etc_hostname.changed)
---
# Installs a convenience script for manually updating lms/cms on a box.
- name: common | edx-update.sh, manual lms/cms update script
  template: src=edx-update.sh.j2 dest=/usr/local/bin/edx-update.sh owner=ubuntu group=adm mode=0775
  tags:
    - release
    - update
{{log_base_dir}}/*/edx.log {
{{ COMMON_LOG_DIR }}/*/edx.log {
create
compress
copytruncate
......
......@@ -27,12 +27,12 @@ auth,authpriv.* /var/log/auth.log
$template tracking,"%syslogtag%%msg%\n"
# looks for [service_name=<name>] in the beginning of the log message,
# if it exists the log will go into {{log_base_dir}}/<name>/edx.log, otherwise
# it will go into {{log_base_dir}}/edx.log
$template DynaFile,"{{log_base_dir}}/%syslogtag:R,ERE,1,BLANK:\[service_variant=([a-zA-Z_-]*)\].*--end%/edx.log"
# if it exists the log will go into {{ COMMON_LOG_DIR }}/<name>/edx.log, otherwise
# it will go into {{ COMMON_LOG_DIR }}/edx.log
$template DynaFile,"{{ COMMON_LOG_DIR }}/%syslogtag:R,ERE,1,BLANK:\[service_variant=([a-zA-Z_-]*)\].*--end%/edx.log"
local0.* -?DynaFile
local1.* {{log_base_dir}}/tracking.log;tracking
local1.* {{ COMMON_LOG_DIR }}/tracking.log;tracking
#cron.* /var/log/cron.log
#daemon.* -/var/log/daemon.log
kern.* -/var/log/kern.log
......
127.0.0.1 {{ COMMON_HOSTNAME }} localhost
# The following lines are desirable for IPv6 capable hosts
::1 ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
ff02::3 ip6-allhosts
......@@ -2,3 +2,4 @@
- name: datadog | restart the datadog service
service: name=datadog-agent state=restarted
tags: deploy
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role demo
#
demo_app_dir: "{{ COMMON_APP_DIR }}/demo"
demo_code_dir: "{{ demo_app_dir }}/edx-demo-course"
demo_repo: "https://{{ COMMON_GIT_MIRROR }}/edx/edx-demo-course.git"
demo_course_id: 'edX/Open_DemoX/edx_demo_course'
demo_version: "master"
# Dummy accounts seeded into the demo course by deploy.yml, one per
# enrollment mode. Obviously not for production use.
demo_test_users:
  - email: 'honor@example.com'
    mode: honor
    password: edx
  - email: 'audit@example.com'
    mode: audit
    password: edx
  - email: 'verified@example.com'
    mode: verified
    password: edx
---
# Deploy tasks for role demo: import the demo course into studio and
# seed test/staff users. Import and seeding only re-run when the
# edx-demo-course checkout actually changed.
- name: demo | check out the demo course
  git: dest={{ demo_code_dir }} repo={{ demo_repo }} version={{ demo_version }}
  sudo_user: "{{ edxapp_user }}"
  register: demo_checkout
  tags: deploy

- name: demo | import demo course
  shell: >
    {{ edxapp_venv_bin }}/python ./manage.py cms --settings=aws import {{ edxapp_course_data_dir }} {{ demo_code_dir }}
    chdir={{ edxapp_code_dir }}
  sudo_user: "{{ common_web_user }}"
  when: demo_checkout.changed
  tags: deploy

- name: demo | create some test users and enroll them in the course
  shell: >
    {{ edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms create_user -e {{ item.email }} -p {{ item.password }} -m {{ item.mode }} -c {{ demo_course_id }}
    chdir={{ edxapp_code_dir }}
  sudo_user: "{{ common_web_user }}"
  with_items: demo_test_users
  when: demo_checkout.changed
  tags: deploy

# -s grants the staff flag
- name: demo | create staff user
  shell: >
    {{ edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms create_user -e staff@example.com -p edx -s -c {{ demo_course_id }}
    chdir={{ edxapp_code_dir }}
  sudo_user: "{{ common_web_user }}"
  when: demo_checkout.changed
  tags: deploy

- name: demo | add test users to the certificate whitelist
  shell: >
    {{ edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms cert_whitelist -a {{ item.email }} -c {{ demo_course_id }}
    chdir={{ edxapp_code_dir }}
  with_items: demo_test_users
  when: demo_checkout.changed
  tags: deploy
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role demo
#
# Overview:
#
# Imports the demo course into studio
# https://github.com/edx/edx-demo-course
#
# Once imported this role will only re-import the course
# if the edx-demo-course repo has been updated
#
# Dependencies:
# - common
# - edxapp
#
# Example play:
#
# roles:
# - common
# - edxapp
# - demo

# Owned by the edxapp user but group-readable by the web group,
# since the web user runs the import (see deploy.yml).
- name: demo | create demo app and data dirs
  file: >
    path="{{ demo_app_dir }}" state=directory
    owner="{{ edxapp_user }}" group="{{ common_web_group }}"

- include: deploy.yml
---
# Defaults for role devpi (caching PyPI mirror).
#
# Duplicate keys removed (YAML last-wins silently): the stale
# app_base_dir-based devpi_venv_dir and the hard-coded
# devpi_data_dir: /var/devpi/data are superseded by the
# COMMON_*-based layout below.
devpi_app_dir: "{{ COMMON_APP_DIR }}/devpi"
devpi_data_dir: "{{ COMMON_DATA_DIR }}/devpi"
devpi_mirror_dir: "{{ devpi_data_dir }}/data"
devpi_log_dir: "{{ COMMON_LOG_DIR }}/devpi"
devpi_venvs_dir: "{{ devpi_app_dir }}/venvs"
devpi_venv_dir: "{{ devpi_venvs_dir }}/devpi"
devpi_venv_bin: "{{ devpi_venv_dir }}/bin"
devpi_pip_pkgs:
  - devpi-server
  - eventlet
devpi_nginx_port: 80
devpi_port: 4040
devpi_user: devpi
devpi_group: devpi
# NOTE(review): 'pypy.*' looks like a typo for 'pypi.*' — confirm against
# the nginx server_name it feeds before changing.
devpi_server_name: 'pypy.*'

# devpi runs under its own supervisord instance, isolated from the
# system-wide one (see meta/main.yml dependencies).
devpi_supervisor_user: devpi.supervisor
devpi_supervisor_app_dir: "{{ devpi_app_dir }}/supervisor"
devpi_supervisor_cfg_dir: "{{ devpi_supervisor_app_dir }}/conf.d"
devpi_supervisor_data_dir: "{{ devpi_data_dir }}/supervisor"
devpi_supervisor_cfg: "{{ devpi_supervisor_app_dir }}/supervisord.conf"
devpi_supervisor_log_dir: "{{ devpi_log_dir }}/supervisor"
devpi_supervisor_venv_dir: "{{ devpi_app_dir }}/venvs/supervisor"
devpi_supervisor_venv_bin: "{{ devpi_supervisor_venv_dir }}/bin"
devpi_supervisor_ctl: "{{ devpi_supervisor_venv_bin }}/supervisorctl"
......@@ -13,13 +13,9 @@
---
# Handlers for role devpi.
#
# Reconstructed from unresolved merge-conflict residue: the duplicate
# state/config lines and the half-deleted "start devpi" handler are
# removed, keeping the devpi-specific supervisor paths.
- name: devpi | restart devpi
  supervisorctl: >
    state=restarted
    supervisorctl_path={{ devpi_supervisor_ctl }}
    config={{ devpi_supervisor_cfg }}
    name=devpi-server
  sudo_user: "{{ devpi_supervisor_user }}"
  tags: deploy
---
# Role dependencies: run a dedicated supervisord instance for devpi,
# on its own port and under its own service user, isolated from the
# system-wide supervisor used by other roles.
dependencies:
  - role: supervisor
    supervisor_app_dir: "{{ devpi_supervisor_app_dir }}"
    supervisor_data_dir: "{{ devpi_supervisor_data_dir }}"
    supervisor_log_dir: "{{ devpi_supervisor_log_dir }}"
    supervisor_venv_dir: "{{ devpi_supervisor_venv_dir }}"
    supervisor_service_user: "{{ devpi_supervisor_user }}"
    supervisor_service: "supervisor.devpi"
    supervisor_http_bind_port: '9002'
......@@ -33,36 +33,83 @@
# Tasks for role devpi.
#
# Reconstructed from unresolved merge-conflict residue: interleaved
# pre-merge duplicates (second "- name:", duplicate path/group/state
# lines, and the stale system-supervisor tasks) are removed, keeping
# the devpi-specific supervisor variant throughout.

- name: devpi | create devpi user
  user: >
    name={{ devpi_user }}
    shell=/bin/false createhome=no
  notify: devpi | restart devpi

- name: devpi | create devpi application directories
  file: >
    path={{ item }}
    state=directory
    owner={{ devpi_user }}
    group={{ devpi_supervisor_user }}
  with_items:
    - "{{ devpi_app_dir }}"
    - "{{ devpi_venv_dir }}"
  notify: devpi | restart devpi

- name: devpi | create the devpi data directory, needs write access by the service user
  file: >
    path={{ item }}
    state=directory
    owner={{ devpi_supervisor_user }}
    group={{ devpi_user }}
  with_items:
    - "{{ devpi_data_dir }}"
    - "{{ devpi_mirror_dir }}"
  notify: devpi | restart devpi

- name: devpi | install devpi pip pkgs
  pip: >
    name={{ item }}
    state=present
    virtualenv={{ devpi_venv_dir }}
  sudo_user: "{{ devpi_user }}"
  with_items: devpi_pip_pkgs
  notify: devpi | restart devpi

- name: devpi | writing supervisor script
  template: >
    src=devpi.conf.j2 dest={{ devpi_supervisor_cfg_dir }}/devpi.conf
    owner={{ devpi_user }} group={{ devpi_user }} mode=0644
  notify: devpi | restart devpi

- name: devpi | create a symlink for venv python, pip
  file: >
    src="{{ devpi_venv_bin }}/{{ item }}"
    dest={{ COMMON_BIN_DIR }}/{{ item }}.devpi
    state=link
  notify: devpi | restart devpi
  with_items:
    - python
    - pip

# (was dest={{ COMMON_BIN_DIR }}/{{ item }}.devpi with no loop,
# leaving "item" undefined — this task has a single fixed target)
- name: devpi | create a symlink for venv supervisor
  file: >
    src="{{ devpi_supervisor_venv_bin }}/supervisorctl"
    dest={{ COMMON_BIN_DIR }}/supervisorctl.devpi
    state=link

- name: devpi | create a symlink for supervisor config
  file: >
    src="{{ devpi_supervisor_app_dir }}/supervisord.conf"
    dest={{ COMMON_CFG_DIR }}/supervisord.conf.devpi
    state=link

# call supervisorctl update. this reloads
# the supervisorctl config and restarts
# the services if any of the configurations
# have changed.
#
- name: devpi | update devpi supervisor configuration
  shell: "{{ devpi_supervisor_ctl }} -c {{ devpi_supervisor_cfg }} update"
  register: supervisor_update
  changed_when: supervisor_update.stdout != ""
  tags: deploy

- name: devpi | ensure devpi is started
  supervisorctl: >
    state=started
    supervisorctl_path={{ devpi_supervisor_ctl }}
    config={{ devpi_supervisor_cfg }}
    name=devpi-server
  sudo_user: "{{ devpi_supervisor_user }}"
; supervisord program definition for devpi-server (rendered by Ansible).
[program:devpi-server]
command={{ devpi_venv_bin }}/devpi-server --port {{ devpi_port }} --serverdir {{ devpi_mirror_dir }}
user={{ devpi_supervisor_user }}
priority=999
; %(program_name)s needs the trailing "s": supervisord expands these names with
; Python %-interpolation, and without a conversion character the "-" is parsed
; as a format flag and the next literal character is consumed, mangling the
; log filename.
stdout_logfile={{ devpi_supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ devpi_supervisor_log_dir }}/%(program_name)s-stderr.log
autostart=True
killasgroup=true
stopasgroup=true
DISCERN_NGINX_PORT: 18070
DISCERN_BASIC_AUTH: False
DISCERN_MEMCACHE: [ 'localhost:11211' ]
DISCERN_AWS_ACCESS_KEY_ID: ""
DISCERN_AWS_SECRET_ACCESS_KEY: ""
DISCERN_BROKER_URL: ""
DISCERN_RESULT_BACKEND: ""
DISCERN_GOOGLE_ANALYTICS_PROPERTY_ID: ""
DISCERN_MYSQL_DB_NAME: 'discern'
DISCERN_MYSQL_USER: 'root'
DISCERN_MYSQL_PASSWORD: ''
DISCERN_MYSQL_HOST: 'localhost'
DISCERN_MYSQL_PORT: '3306'
DISCERN_LANG: "en_US.UTF-8"
discern_app_dir: "{{ COMMON_APP_DIR }}/discern"
discern_code_dir: "{{ discern_app_dir }}/discern"
discern_data_dir: "{{ COMMON_DATA_DIR }}/discern"
discern_venvs_dir: "{{ discern_app_dir }}/venvs"
discern_venv_dir: "{{ discern_venvs_dir }}/discern"
discern_venv_bin: "{{ discern_venv_dir }}/bin"
discern_pre_requirements_file: "{{ discern_code_dir }}/pre-requirements.txt"
discern_post_requirements_file: "{{ discern_code_dir }}/requirements.txt"
discern_user: "discern"
discern_ease_venv_dir: "{{ discern_venv_dir }}"
discern_ease_code_dir: "{{ discern_app_dir }}/ease"
discern_ease_source_repo: https://github.com/edx/ease.git
discern_ease_version: 'HEAD'
discern_ease_pre_requirements_file: "{{ discern_ease_code_dir }}/pre-requirements.txt"
discern_ease_post_requirements_file: "{{ discern_ease_code_dir }}/requirements.txt"
discern_nltk_data_dir: "{{ discern_data_dir}}/nltk_data"
discern_source_repo: https://github.com/edx/discern.git
ease_source_repo: https://github.com/edx/ease.git
ease_dir: $app_base_dir/ease
discern_dir: $app_base_dir/discern
discern_settings: discern.aws
nltk_data_dir: /usr/share/nltk_data
ease_branch: master
discern_branch: dev
discern_branch: master
discern_gunicorn_port: 8070
discern_gunicorn_host: 127.0.0.1
discern_user: discern
site_name: discern
discern_worker_mult: 2
discern_env_config:
ACCOUNT_EMAIL_VERIFICATION: "mandatory"
AWS_SES_REGION_NAME: "us-east-1"
DEFAULT_FROM_EMAIL: "registration@example.com"
DNS_HOSTNAME: ""
ELB_HOSTNAME: ""
EMAIL_BACKEND: "django.core.mail.backends.smtp.EmailBackend"
S3_BUCKETNAME: ""
USE_S3_TO_STORE_MODELS: false
discern_auth_config:
AWS_ACCESS_KEY_ID: $DISCERN_AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY: $DISCERN_AWS_SECRET_ACCESS_KEY
BROKER_URL: $DISCERN_BROKER_URL
CACHES:
default:
BACKEND: 'django.core.cache.backends.memcached.MemcachedCache'
LOCATION: $DISCERN_MEMCACHE
CELERY_RESULT_BACKEND: $DISCERN_RESULT_BACKEND
DATABASES:
default:
ENGINE: django.db.backends.mysql
HOST: $DISCERN_MYSQL_HOST
NAME: $DISCERN_MYSQL_DB_NAME
PASSWORD: $DISCERN_MYSQL_PASSWORD
PORT: $DISCERN_MYSQL_PORT
USER: $DISCERN_MYSQL_USER
GOOGLE_ANALYTICS_PROPERTY_ID: $DISCERN_GOOGLE_ANALYTICS_PROPERTY_ID
discern_debian_pkgs:
- policykit-1
- python-virtualenv
- gcc
- g++
- build-essential
- python-dev
- gfortran
- libfreetype6-dev
- libpng12-dev
- libxml2-dev
- libxslt1-dev
- libreadline6
- libreadline6-dev
- redis-server
- python-pip
- ipython
- nginx
- libmysqlclient-dev
- libblas3gf
- libblas-dev
- liblapack3gf
- liblapack-dev
- libatlas-base-dev
- curl
- yui-compressor
discern_ease_debian_pkgs:
- python-pip
- gcc
- g++
- gfortran
- libblas3gf
- libblas-dev
- liblapack3gf
- liblapack-dev
- libatlas-base-dev
- libxml2-dev
- libxslt1-dev
- aspell
- python
---
- name: discern | restart discern
service: name=discern state=restarted
- name: discern | restart celery
service: name=celery state=restarted
supervisorctl: >
name=discern
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
with_items:
- discern
- discern_celery
tags: deploy
---
- name: discern | create supervisor scripts - discern, discern_celery
template: >
src={{ item }}.conf.j2 dest={{ supervisor_cfg_dir }}/{{ item }}.conf
owner={{ supervisor_user }} mode=0644
sudo_user: "{{ supervisor_user }}"
with_items: ['discern', 'discern_celery']
#Upload config files for django (auth and env)
- name: discern | create discern application config env.json file
template: src=env.json.j2 dest={{ discern_app_dir }}/env.json
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
tags:
- deploy
- name: discern | create discern auth file auth.json
template: src=auth.json.j2 dest={{ discern_app_dir }}/auth.json
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
tags:
- deploy
- name: discern | git checkout discern repo into discern_code_dir
git: dest={{ discern_code_dir }} repo={{ discern_source_repo }} version={{ discern_branch }}
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
tags:
- deploy
- name: discern | git checkout ease repo into discern_ease_code_dir
git: dest={{ discern_ease_code_dir}} repo={{ discern_ease_source_repo }} version={{ discern_ease_version }}
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
tags:
- deploy
#Numpy has to be a pre-requirement in order for scipy to build
- name: discern | install python pre-requirements for discern and ease
pip: requirements={{item}} virtualenv={{ discern_venv_dir }} state=present
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
with_items:
- "{{ discern_pre_requirements_file }}"
- "{{ discern_ease_pre_requirements_file }}"
tags:
- deploy
- name: discern | install python requirements for discern and ease
pip: requirements={{item}} virtualenv={{ discern_venv_dir }} state=present
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
with_items:
- "{{ discern_post_requirements_file }}"
- "{{ discern_ease_post_requirements_file }}"
tags:
- deploy
- name: discern | install ease python package
shell: >
{{ discern_venv_dir }}/bin/activate; cd {{ discern_ease_code_dir }}; python setup.py install
notify:
- discern | restart discern
tags:
- deploy
#Needed for the ease package to work
- name: discern | install nltk data using rendered shell script
shell: >
{{ discern_venv_dir }}/bin/python -m nltk.downloader -d {{ discern_nltk_data_dir }} all
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
tags:
- deploy
#Run this instead of using the ansible module because the ansible module only support syncdb of these three, and does not
#support virtualenvs as of this comment
- name: discern | django syncdb migrate and collectstatic for discern
shell: >
{{ discern_venv_dir }}/bin/python {{discern_code_dir}}/manage.py {{item}} --noinput --settings={{discern_settings}} --pythonpath={{discern_code_dir}}
chdir={{ discern_code_dir }}
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
with_items:
- syncdb
- migrate
- collectstatic
tags:
- deploy
#Have this separate from the other three because it doesn't take the noinput flag
- name: discern | django update_index for discern
shell: >
{{ discern_venv_dir}}/bin/python {{discern_code_dir}}/manage.py update_index --settings={{discern_settings}} --pythonpath={{discern_code_dir}}
chdir={{ discern_code_dir }}
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
tags:
- deploy
# call supervisorctl update. this reloads
# the supervisorctl config and restarts
# the services if any of the configurations
# have changed.
#
- name: discern | update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
sudo_user: "{{ supervisor_service_user }}"
changed_when: supervisor_update.stdout != ""
tags: deploy
- name: discern | ensure discern, discern_celery has started
supervisorctl: >
name={{ item }}
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=started
with_items:
- discern
- discern_celery
tags: deploy
---
#Create the templates for upstart services
- name: discern | render celery service from template
template: src=celery.conf.j2 dest=/etc/init/celery.conf owner=root group=edx mode=0664
notify: discern | restart celery
- name: discern | render discern service from template
template: src=discern.conf.j2 dest=/etc/init/discern.conf owner=root group=edx mode=0664
notify: discern | restart discern
#Allows us to recover from a bad sudoers file
- name: discern | Install policykit
apt: pkg=policykit-1 install_recommends=yes state=present update_cache=yes
#Discern user is admin
- name: discern | Create discern user
user: name={{ discern_user }} append=yes groups="adm,edx" shell=/bin/bash
- name: discern | upload sudoers template to /tmp/sudoers
copy: src=sudoers-discern dest=/tmp/{{site_name}} owner=root group=root mode=0440
#Verify file and move to sudoers.d folder
- name: discern | move temp file to sudoers.d
shell: visudo -q -c -f /tmp/{{site_name}} && cp /tmp/{{site_name}} /etc/sudoers.d/{{site_name}}
#Upload config files for django (auth and env)
- name: discern | create discern application config env.json file
template: src=env.json.j2 dest={{app_base_dir}}/env.json owner={{discern_user}} group=edx mode=0640
- name: discern | create application user
user: >
name="{{ discern_user }}"
home="{{ discern_app_dir }}"
createhome=no
shell=/bin/false
notify:
- discern | restart celery
- discern | restart discern
- name: discern | create discern auth file auth.json
template: src=auth.json.j2 dest={{app_base_dir}}/auth.json owner={{discern_user}} group=edx mode=0640
- name: discern | create discern app dirs owned by discern
file: >
path="{{ item }}"
state=directory
owner="{{ discern_user }}"
group="{{ common_web_group }}"
notify:
- discern | restart celery
- discern | restart discern
#Needed if using redis to prevent memory issues
- name: discern | change memory commit settings -- needed for redis
command: sysctl vm.overcommit_memory=1
- name: discern | set permissions on app_base_dir sgid for edx
file: path={{app_base_dir}} owner=root group=edx mode=2775 state=directory
file: path={{venv_dir}} owner=root group=edx mode=2775 state=directory
- name: discern | Install git so that we can clone repos
apt: pkg=git install_recommends=yes state=present
#Create directories for repos
- name: discern | create discern and ease directories and set permissions
file: path={{item}} owner={{discern_user}} group=edx mode=2775 state=directory
with_items:
- ${discern_dir}
- ${ease_dir}
#Grab both repos or update
- name: discern | git checkout discern repo into discern_dir
git: dest={{discern_dir}} repo={{discern_source_repo}} version={{discern_branch}}
- "{{ discern_app_dir }}"
- "{{ discern_venvs_dir }}"
- name: discern | create discern data dir, owned by {{ common_web_user }}
file: >
path="{{ discern_data_dir }}" state=directory
owner="{{ common_web_user }}" group="{{ discern_user }}"
mode=0775
notify:
- discern | restart celery
- discern | restart discern
- name: discern | git checkout ease repo into ease_dir
git: dest={{ease_dir}} repo={{ease_source_repo}} version={{ease_branch}}
- name: discern | install debian packages that discern needs
apt: pkg={{ item }} state=present
notify:
- discern | restart celery
- discern | restart discern
with_items: discern_debian_pkgs
#Install system packages
- name: discern | install discern and ease apt packages
command: xargs -a {{item}}/apt-packages.txt apt-get install -y
with_items:
- ${discern_dir}
- ${ease_dir}
#Numpy has to be a pre-requirement in order for scipy to build
- name : install python pre-requirements for discern and ease
pip: requirements="{{item}}/pre-requirements.txt" virtualenv="{{venv_dir}}" state=present
with_items:
- ${discern_dir}
- ${ease_dir}
- name : install python requirements for discern and ease
pip: requirements="{{item}}/requirements.txt" virtualenv="{{venv_dir}}" state=present
with_items:
- ${discern_dir}
- ${ease_dir}
- name: discern | install ease python package
shell: command="{{venv_dir}}/bin/activate; cd {{ease_dir}}; python setup.py install"
- name: discern | install debian packages for ease that discern needs
apt: pkg={{ item }} state=present
notify:
- discern | restart discern
with_items: discern_ease_debian_pkgs
#Needed for the ease package to work
- name: discern | install nltk data using rendered shell script
shell: command="{{venv_dir}}/bin/python -m nltk.downloader -d {{nltk_data_dir}} all"
- name: discern | copy sudoers file for discern
copy: >
src=sudoers-discern dest=/etc/sudoers.d/discern
mode=0440 validate='visudo -cf %s' owner=root group=root
notify:
- discern | restart discern
- name: discern | set permissions on nltk data directory
file: path={{nltk_data_dir}} owner={{discern_user}} group=edx mode=2775 state=directory
#Needed if using redis to prevent memory issues
- name: discern | change memory commit settings -- needed for redis
command: sysctl vm.overcommit_memory=1
notify:
- discern | restart discern
#Run this instead of using the ansible module because the ansible module only support syncdb of these three, and does not
#support virtualenvs as of this comment
- name: discern | django syncdb, migrate, and collectstatic for discern
shell: ${venv_dir}/bin/python {{discern_dir}}/manage.py {{item}} --noinput --settings={{discern_settings}} --pythonpath={{discern_dir}}
with_items:
- syncdb
- migrate
- collectstatic
- include: deploy.yml
#Have this separate from the other three because it doesn't take the noinput flag
- name: discern | django update_index for discern
shell: ${venv_dir}/bin/python {{discern_dir}}/manage.py update_index --settings={{discern_settings}} --pythonpath={{discern_dir}}
- name: discern | create a symlink for venv python
file: >
src="{{ discern_venv_bin }}/python"
dest={{ COMMON_BIN_DIR }}/python.discern
state=link
{{ auth_config | to_nice_json }}
\ No newline at end of file
{{ discern_auth_config | to_nice_json }}
......@@ -11,7 +11,7 @@ respawn limit 3 30
env DJANGO_SETTINGS_MODULE={{discern_settings}}
chdir {{discern_dir}}
chdir {{ discern_code_dir }}
setuid {{discern_user}}
exec {{venv_dir}}/bin/python {{discern_dir}}/manage.py celeryd --loglevel=info --settings={{discern_settings}} --pythonpath={{discern_dir}} -B --autoscale={{ ansible_processor_cores * 2 }},1
exec {{ discern_venv_dir }}/bin/python {{ discern_code_dir }}/manage.py celeryd --loglevel=info --settings={{ discern_settings }} --pythonpath={{ discern_code_dir }} -B --autoscale={{ ansible_processor_cores * 2 }},1
# gunicorn
# Upstart job that serves the discern Django app via gunicorn.
description "ML API Server"
author "Vik Paruchuri <vik@edx.org>"
# Start once the system reaches a normal multi-user runlevel; stop on shutdown.
start on runlevel [2345]
stop on runlevel [!2345]
respawn
# Give up if the job respawns more than 3 times within 30 seconds.
respawn limit 3 30
# NOTE(review): PID is exported but not referenced below — presumably consumed
# elsewhere or vestigial; confirm before removing.
env PID=/var/run/gunicorn/discern.pid
env WORKERS={{ ansible_processor_cores * 2 }}
env PORT={{ discern_gunicorn_port }}
env ADDRESS={{ discern_gunicorn_host }}
env LANG=en_US.UTF-8
env DJANGO_SETTINGS_MODULE={{discern_settings}}
chdir {{discern_dir}}
setuid {{discern_user}}
exec {{venv_dir}}/bin/gunicorn --preload -b $ADDRESS:$PORT -w $WORKERS --timeout=30 --pythonpath={{discern_dir}} discern.wsgi
; supervisord program definitions for the discern web app and its celery
; worker (rendered by Ansible).
[program:discern]
{% if ansible_processor|length > 0 %}
command={{ discern_venv_bin }}/gunicorn --preload -b {{ discern_gunicorn_host }}:{{ discern_gunicorn_port }} -w {{ ansible_processor|length * discern_worker_mult }} --timeout=30 --pythonpath={{ discern_code_dir }} discern.wsgi
{% else %}
; Fallback worker count when Ansible could not detect any processors.
command={{ discern_venv_bin }}/gunicorn --preload -b {{ discern_gunicorn_host }}:{{ discern_gunicorn_port }} -w {{ discern_worker_mult }} --timeout=30 --pythonpath={{ discern_code_dir }} discern.wsgi
{% endif %}
user={{ common_web_user }}
directory={{ discern_code_dir }}
environment=LANG={{ DISCERN_LANG }},DJANGO_SETTINGS_MODULE={{ discern_settings }},SERVICE_VARIANT=discern
; %(program_name)s needs the trailing "s": supervisord expands these names with
; Python %-interpolation, and without a conversion character the "-" is parsed
; as a format flag and the next literal character is consumed, mangling the
; log filename.
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
killasgroup=true
stopasgroup=true

[program:discern_celery]
command={{ discern_venv_bin }}/python {{ discern_code_dir }}/manage.py celeryd --loglevel=info --settings=discern.aws --pythonpath={{ discern_code_dir }} -B --autoscale=4,1 --schedule={{ discern_data_dir }}/celerybeat-schedule
user={{ common_web_user }}
directory={{ discern_code_dir }}
environment=DJANGO_SETTINGS_MODULE=discern.aws,SERVICE_VARIANT=discern
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
killasgroup=true
stopasgroup=true
{{ env_config | to_nice_json }}
\ No newline at end of file
{{ discern_env_config | to_nice_json }}
---
- name: start edxapp
service: name=edxapp state=started
tags:
- lms
- lms-preview
- cms
- deploy
- name: edxapp | restart edxapp
supervisorctl: >
state=restarted
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
name="edxapp:{{ item }}"
when: not devstack
sudo_user: "{{ supervisor_service_user }}"
with_items: service_variants_enabled
tags: deploy
- name: stop edxapp
service: name=edxapp state=stopped
tags:
- lms
- lms-preview
- cms
- deploy
- name: restart edxapp
service: name=edxapp state=restarted
tags:
- lms
- lms-preview
- cms
- deploy
- name: edxapp | restart edxapp_workers
supervisorctl: >
name="edxapp_worker:{{ item.service_variant }}_{{ item.queue }}_{{ item.concurrency }}"
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
when: celery_worker is defined and not devstack
with_items: edxapp_workers
sudo_user: "{{ common_web_user }}"
tags: deploy
---
dependencies:
- role: rbenv
rbenv_user: "{{ edxapp_user }}"
rbenv_dir: "{{ edxapp_app_dir }}"
rbenv_ruby_version: "{{ edxapp_ruby_version }}"
- devpi
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment