Commit 0ba07131 by Feanil Patel

Merge pull request #923 from edx/feanil/release_himbasha

Feanil/release himbasha
parents 7bc2c211 4e1593eb
# Travis CI configuration file for running tests
language: python
python:
- "2.7"
install:
- "sudo apt-get install -y npm python-demjson"
- "pip install --allow-all-external -r requirements.txt"
- "pip install --allow-all-external demjson"
- "sudo npm install -g js-yaml"
script:
- |
for yml in $(find . -name "*.yml"); do
js-yaml $yml >/dev/null
if [[ $? -ne 0 ]]; then
echo "ERROR parsing $yml"
exit 1
fi
done
- |
for json in $(find . -name "*.json"); do
jsonlint -v $json
if [[ $? -ne 0 ]]; then
echo "ERROR parsing $json"
exit 1
fi
done
- |
plays="aws bastion certs commoncluster common demo devpi discern edx_ansible edxapp elasticsearch forum ora rabbitmq worker xqueue xserver"
set -e
cd playbooks/edx-east
for play in $plays; do
ansible-playbook -i localhost, --syntax-check ${play}.yml
done
......@@ -711,6 +711,11 @@
},
{
"IpProtocol":"tcp",
"ToPort":"80",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":{ "Ref": "JenkinsServerPort" },
"ToPort":{ "Ref": "JenkinsServerPort" },
"CidrIp":"0.0.0.0/0"
......
......@@ -156,15 +156,19 @@ class Ec2Inventory(object):
data_to_print = self.get_inventory_from_cache()
else:
data_to_print = self.json_format_dict(self.inventory, True)
print data_to_print
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
if self.args.tags_only:
to_check = self.cache_path_tags
else:
to_check = self.cache_path_cache
if os.path.isfile(to_check):
mod_time = os.path.getmtime(to_check)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
......@@ -215,15 +219,18 @@ class Ec2Inventory(object):
# Cache related
cache_path = config.get('ec2', 'cache_path')
self.cache_path_cache = cache_path + "/ansible-ec2.cache"
self.cache_path_tags = cache_path + "/ansible-ec2.tags.cache"
self.cache_path_index = cache_path + "/ansible-ec2.index"
self.cache_max_age = config.getint('ec2', 'cache_max_age')
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
parser.add_argument('--tags-only', action='store_true', default=False,
help='only return tags (default: False)')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
......@@ -247,9 +254,12 @@ class Ec2Inventory(object):
self.get_instances_by_region(region)
self.get_rds_instances_by_region(region)
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
if self.args.tags_only:
self.write_to_cache(self.inventory, self.cache_path_tags)
else:
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
def get_instances_by_region(self, region):
''' Makes an AWS EC2 API call to the list of instances in a particular
......@@ -266,13 +276,13 @@ class Ec2Inventory(object):
if conn is None:
print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
sys.exit(1)
reservations = conn.get_all_instances()
for reservation in reservations:
instances = sorted(reservation.instances)
for instance in instances:
self.add_instance(instance, region)
except boto.exception.BotoServerError as e:
if not self.eucalyptus:
print "Looks like AWS is down again:"
......@@ -349,7 +359,7 @@ class Ec2Inventory(object):
# Inventory: Group by key pair
if instance.key_name:
self.push(self.inventory, self.to_safe('key_' + instance.key_name), dest)
# Inventory: Group by security group
try:
for group in instance.groups:
......@@ -403,10 +413,10 @@ class Ec2Inventory(object):
# Inventory: Group by availability zone
self.push(self.inventory, instance.availability_zone, dest)
# Inventory: Group by instance type
self.push(self.inventory, self.to_safe('type_' + instance.instance_class), dest)
# Inventory: Group by security group
try:
if instance.security_group:
......@@ -541,8 +551,10 @@ class Ec2Inventory(object):
def get_inventory_from_cache(self):
''' Reads the inventory from the cache file and returns it as a JSON
object '''
cache = open(self.cache_path_cache, 'r')
if self.args.tags_only:
cache = open(self.cache_path_tags, 'r')
else:
cache = open(self.cache_path_cache, 'r')
json_inventory = cache.read()
return json_inventory
......@@ -556,7 +568,9 @@ class Ec2Inventory(object):
def write_to_cache(self, data, filename):
''' Writes data in JSON format to a file '''
'''
Writes data in JSON format to a file
'''
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
......@@ -574,7 +588,8 @@ class Ec2Inventory(object):
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if self.args.tags_only:
data = [key for key in data.keys() if 'tag_' in key]
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
......
- name: Deploy aws
hosts: all
sudo: True
gather_facts: True
roles:
- aws
- name: Deploy bastion
hosts: all
sudo: True
gather_facts: True
roles:
- bastion
......@@ -5,9 +5,8 @@
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- aws
- certs
- role: datadog
when: enable_datadog
......
......@@ -5,8 +5,6 @@
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- common
- role: datadog
......
# ansible-playbook -i ec2.py commoncluster.yml --extra-vars "deployment=edge env=stage" -e@/path/to/vars/env-deployment.yml -T 30 --list-hosts
- hosts: tag_play_commoncluster:&tag_environment_{{ env }}:&tag_deployment_{{ deployment }}
sudo: True
serial: 1
roles:
- oraclejdk
- elasticsearch
- rabbitmq
- datadog
- splunkforwarder
#
# In order to reconfigure the host resolution we are issuing a
# reboot.
- hosts: tag_play_commoncluster:&tag_environment_{{ env }}:&tag_deployment_{{ deployment }}
sudo: True
serial: 1
vars:
reboot: False
tasks:
- name: reboot
command: /sbin/shutdown -r now "Reboot is triggered by Ansible"
when: reboot
# This is a test play that creates all supported user
# types using the user role. Example only, not meant
# to be run on a real system
- name: Create all user types (test play)
hosts: all
sudo: True
gather_facts: False
vars_files:
- 'roles/edxapp/defaults/main.yml'
- 'roles/common/defaults/main.yml'
- 'roles/analytics-server/defaults/main.yml'
- 'roles/analytics/defaults/main.yml'
pre_tasks:
- fail: msg="You must pass a user into this play"
when: user is not defined
- name: give access with no sudo
set_fact:
CUSTOM_USER_INFO:
- name: "{{ user }}"
github: true
- name: test-admin-user
type: admin
- name: test-normal-user
- name: test-restricted-user-edxapp
type: restricted
sudoers_template: 99-edxapp-manage-cmds.j2
- name: test-restricted-user-anayltics
type: restricted
sudoers_template: 99-analytics-manage-cmds.j2
roles:
- role: user
user_info: "{{ CUSTOM_USER_INFO }}"
......@@ -33,27 +33,19 @@
vars_prompt:
# passwords use vars_prompt so they aren't in the
# bash history
- name: "edxapp_db_root_pass"
prompt: "Password for edxapp root mysql user (enter to skip)"
default: "None"
private: True
- name: "xqueue_db_root_pass"
prompt: "Password for xqueue root mysql user (enter to skip)"
default: "None"
private: True
- name: "ora_db_root_pass"
prompt: "Password for ora root mysql user (enter to skip)"
default: "None"
private: True
- name: "discern_db_root_pass"
prompt: "Password for discern root mysql user (enter to skip)"
default: "None"
- name: "db_root_pass"
prompt: "Password for root mysql user"
private: True
tasks:
- fail: msg="COMMON_ENVIRONMENT and COMMON_DEPLOYMENT need to be defined to use this play"
when: COMMON_ENVIRONMENT is not defined or COMMON_DEPLOYMENT is not defined
- name: install python mysqldb module
apt: pkg={{item}} install_recommends=no state=present update_cache=yes
sudo: yes
with_items:
- python-mysqldb
- name: create mysql databases for the edX stack
mysql_db: >
db={{ item[0] }}{{ item[1].db_name }}
......@@ -72,17 +64,58 @@
- db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ edxapp_db_root_pass }}"
db_pass: "{{ db_root_pass }}"
- db_name: "{{ XQUEUE_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ XQUEUE_MYSQL_HOST|default('None') }}"
db_user: "{{ xqueue_db_root_user }}"
db_pass: "{{ xqueue_db_root_pass }}"
db_pass: "{{ db_root_pass }}"
- db_name: "{{ ORA_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ ORA_MYSQL_HOST|default('None') }}"
db_user: "{{ ora_db_root_user }}"
db_pass: "{{ ora_db_root_pass }}"
db_pass: "{{ db_root_pass }}"
- name: assign mysql user permissions for read_only user
mysql_user:
name: "{{ COMMON_MYSQL_READ_ONLY_USER }}"
priv: "*.*:SELECT"
password: "{{ COMMON_MYSQL_READ_ONLY_PASS }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
host: '%'
with_items:
- db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ db_root_pass }}"
- db_host: "{{ XQUEUE_MYSQL_HOST|default('None') }}"
db_user: "{{ xqueue_db_root_user }}"
db_pass: "{{ db_root_pass }}"
- db_host: "{{ ORA_MYSQL_HOST|default('None') }}"
db_user: "{{ ora_db_root_user }}"
db_pass: "{{ db_root_pass }}"
- name: assign mysql user permissions for admin user
mysql_user:
name: "{{ COMMON_MYSQL_ADMIN_USER }}"
priv: "*.*:CREATE USER"
password: "{{ COMMON_MYSQL_ADMIN_PASS }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
host: '%'
with_items:
- db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ db_root_pass }}"
- db_host: "{{ XQUEUE_MYSQL_HOST|default('None') }}"
db_user: "{{ xqueue_db_root_user }}"
db_pass: "{{ db_root_pass }}"
- db_host: "{{ ORA_MYSQL_HOST|default('None') }}"
db_user: "{{ ora_db_root_user }}"
db_pass: "{{ db_root_pass }}"
- name: assign mysql user permissions for db user
- name: assign mysql user permissions for db users
mysql_user:
name: "{{ item.db_user_to_modify }}"
priv: "{{ item.db_name }}.*:SELECT,INSERT,UPDATE,DELETE"
......@@ -99,19 +132,19 @@
- db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user|default('None') }}"
db_pass: "{{ edxapp_db_root_pass|default('None') }}"
db_pass: "{{ db_root_pass|default('None') }}"
db_user_to_modify: "{{ EDXAPP_MYSQL_USER }}"
db_user_to_modify_pass: "{{ EDXAPP_MYSQL_PASSWORD }}"
- db_name: "{{ XQUEUE_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ XQUEUE_MYSQL_HOST|default('None') }}"
db_user: "{{ xqueue_db_root_user|default('None') }}"
db_pass: "{{ xqueue_db_root_pass|default('None') }}"
db_pass: "{{ db_root_pass|default('None') }}"
db_user_to_modify: "{{ XQUEUE_MYSQL_USER }}"
db_user_to_modify_pass: "{{ XQUEUE_MYSQL_PASSWORD }}"
- db_name: "{{ ORA_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ ORA_MYSQL_HOST|default('None') }}"
db_user: "{{ ora_db_root_user|default('None') }}"
db_pass: "{{ ora_db_root_pass|default('None') }}"
db_pass: "{{ db_root_pass|default('None') }}"
db_user_to_modify: "{{ ORA_MYSQL_USER }}"
db_user_to_modify_pass: "{{ ORA_MYSQL_PASSWORD }}"
......@@ -139,18 +172,18 @@
- db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user|default('None') }}"
db_pass: "{{ edxapp_db_root_pass|default('None') }}"
db_pass: "{{ db_root_pass|default('None') }}"
db_user_to_modify: "{{ EDXAPP_MYSQL_USER }}"
db_user_to_modify_pass: "{{ EDXAPP_MYSQL_PASSWORD }}"
- db_name: "{{ XQUEUE_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ XQUEUE_MYSQL_HOST|default('None') }}"
db_user: "{{ xqueue_db_root_user|default('None') }}"
db_pass: "{{ xqueue_db_root_pass|default('None') }}"
db_pass: "{{ db_root_pass|default('None') }}"
db_user_to_modify: "{{ XQUEUE_MYSQL_USER }}"
db_user_to_modify_pass: "{{ XQUEUE_MYSQL_PASSWORD }}"
- db_name: "{{ ORA_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ ORA_MYSQL_HOST|default('None') }}"
db_user: "{{ ora_db_root_user|default('None') }}"
db_pass: "{{ ora_db_root_pass|default('None') }}"
db_pass: "{{ db_root_pass|default('None') }}"
db_user_to_modify: "{{ ORA_MYSQL_USER }}"
db_user_to_modify_pass: "{{ ORA_MYSQL_PASSWORD }}"
# Creates a single user on a server
# By default no super-user privileges
# Example: ansible-playbook -i "jarv.m.sandbox.edx.org," ./create_user.yml -e "user=jarv"
# Create a user with sudo privileges
# Example: ansible-playbook -i "jarv.m.sandbox.edx.org," ./create_user.yml -e "user=jarv" -e "give_sudo=true"
- name: Create a single user
hosts: all
sudo: True
gather_facts: False
pre_tasks:
- fail: msg="You must pass a user into this play"
when: not user
- set_fact:
gh_users:
- "{{ user }}"
when: user is not defined
- name: give access with no sudo
set_fact:
CUSTOM_USER_INFO:
- name: "{{ user }}"
github: true
when: give_sudo is not defined
- name: give access with sudo
set_fact:
CUSTOM_USER_INFO:
- name: "{{ user }}"
type: admin
github: true
when: give_sudo is defined
roles:
- gh_users
- role: user
user_info: "{{ CUSTOM_USER_INFO }}"
......@@ -5,10 +5,8 @@
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- demo
- demo
- role: datadog
when: enable_datadog
- role: splunkforwarder
......
......@@ -2,7 +2,5 @@
hosts: all
sudo: True
gather_facts: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- devpi
......@@ -5,9 +5,8 @@
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- aws
- role: nginx
nginx_sites:
- discern
......
---
# dummy var file
# This file is needed as a fall through
# for vars_files
dummy_var: True
......@@ -7,6 +7,7 @@
migrate_db: "yes"
openid_workaround: True
roles:
- aws
- role: nginx
nginx_sites:
- cms
......
---
- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_bastion
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/dev2.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_edxapp
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/dev2.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- datadog
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
nginx_default_sites:
- lms
- role: 'edxapp'
EDXAPP_LMS_NGINX_PORT: 80
EDXAPP_CMS_NGINX_PORT: 80
edxapp_lms_env: 'lms.envs.load_test'
edx_platform_version: 'sarina/install-datadog'
- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_worker
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/dev2.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- datadog
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
nginx_default_sites:
- lms
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
celery_worker: True
edx_platform_version: 'sarina/install-datadog'
#- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_xserver
# sudo: True
# vars_files:
# - "{{ secure_dir }}/vars/dev/dev2.yml"
# - "{{ secure_dir }}/vars/users.yml"
# roles:
# - nginx
# - xserver
#- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_rabbitmq
# serial: 1
# sudo: True
# vars_files:
# - "{{ secure_dir }}/vars/dev/dev2.yml"
# - "{{ secure_dir }}/vars/users.yml"
# roles:
# - rabbitmq
#- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_xqueue
# sudo: True
# vars_files:
# - "{{ secure_dir }}/vars/dev/dev2.yml"
# - "{{ secure_dir }}/vars/users.yml"
# roles:
# - nginx
# - xqueue
---
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_edxapp
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- datadog
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
nginx_default_sites:
- lms
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
edx_platform_version: 'release'
- splunkforwarder
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_worker
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- datadog
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
nginx_default_sites:
- lms
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
celery_worker: True
edx_platform_version: 'release'
- splunkforwarder
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_xserver
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- role: nginx
nginx_sites:
- xserver
- xserver
- splunkforwarder
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_rabbitmq
serial: 1
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- rabbitmq
- splunkforwarder
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_xqueue
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- role: nginx
nginx_sites:
- xqueue
- xqueue
- splunkforwarder
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_mongo
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- role: 'mongo'
mongo_clustered: true
......@@ -67,9 +67,6 @@
- forum
nginx_default_sites:
- lms
# gh_users hash must be passed
# in as a -e variable
- gh_users
post_tasks:
- name: get instance id for elb registration
local_action:
......
......@@ -5,7 +5,7 @@
- "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
roles:
- gh_users
- user
- role: 'mongo'
mongo_create_users: yes
#- hosts: tag_role_mongo:!first_in_tag_role_mongo
......@@ -14,7 +14,7 @@
# - "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
# - "{{ secure_dir }}/vars/common/common.yml"
# roles:
# - gh_users
# - user
# - mongo
- hosts: first_in_tag_role_edxapp
sudo: True
......@@ -23,7 +23,7 @@
- "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
roles:
- gh_users
- user
- datadog
- role: nginx
nginx_sites:
......@@ -44,7 +44,7 @@
- "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
roles:
- gh_users
- user
- datadog
- role: nginx
nginx_sites:
......@@ -62,7 +62,7 @@
- "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
roles:
- gh_users
- user
- datadog
- role: nginx
nginx_sites:
......@@ -81,7 +81,7 @@
- "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
roles:
- gh_users
- user
- role: nginx
nginx_sites:
- xserver
......@@ -94,7 +94,7 @@
- "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
roles:
- gh_users
- user
- rabbitmq
- splunkforwarder
- hosts: first_in_tag_role_xqueue
......@@ -103,7 +103,7 @@
- "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
roles:
- gh_users
- user
- role: nginx
nginx_sites:
- xqueue
......@@ -116,7 +116,7 @@
- "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
roles:
- gh_users
- user
- role: nginx
nginx_sites:
- xqueue
......@@ -128,7 +128,7 @@
- "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
roles:
- gh_users
- user
- oraclejdk
- elasticsearch
- forum
......@@ -5,9 +5,8 @@
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- aws
- role: nginx
nginx_sites:
- lms
......
......@@ -5,6 +5,5 @@
- "{{ secure_dir }}/vars/stage/stage-edx.yml"
roles:
- common
- gh_users
- oraclejdk
- elasticsearch
......@@ -5,9 +5,9 @@
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
enable_newrelic: True
roles:
- aws
- role: nginx
nginx_sites:
- forum
......@@ -16,3 +16,5 @@
when: enable_datadog
- role: splunkforwarder
when: enable_splunkforwarder
- role: newrelic
when: enable_newrelic
# Configure an admin instance with jenkins and asgard.
- name: Configure instance(s)
hosts: all
sudo: True
gather_facts: True
roles:
- aws
- edx_ansible
- user
- jenkins_admin
- hotg
- newrelic
......@@ -10,5 +10,4 @@
COMMON_DATA_DIR: "/mnt"
roles:
- common
- gh_users
- jenkins_master
# ansible-playbook -i ec2.py --limit="tag_group_grader:&tag_environment_stage" legacy_ora.yml -e "COMMON_ENV_TYPE=stage secure_dir=/path/to/secure/dir"
# ansible-playbook -i ec2.py --limit="tag_group_grader:&tag_environment_stage" legacy_ora.yml -e "COMMON_ENVIRONMENT=stage COMMON_DEPLOYMENT=edx secure_dir=/path/to/secure/dir"
- name: Deploy legacy_ora
hosts: all
sudo: True
gather_facts: True
vars:
ora_app_dir: '/opt/wwc'
ora_user: 'www-data'
serial: 1
vars_files:
- "{{secure_dir}}/vars/{{COMMON_ENVIRONMENT}}/legacy-ora.yml"
roles:
- legacy_ora
- splunkforwarder
- newrelic
......@@ -2,8 +2,6 @@
hosts: all
sudo: True
gather_facts: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- role: nginx
nginx_sites:
......
......@@ -2,7 +2,6 @@
hosts: all
sudo: True
gather_facts: False
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- aws
- rabbitmq
# ansible-playbook -c ssh -vvvv --user=ubuntu -i ec2.py deployer.yml -e "@gh_users.yml" -e "@/path/to/secure/ansible/vars/hotg.yml" -e "@/path/to/configuration-secure/ansible/vars/common/common.yml" --limit="tag_aws_cloudformation_stack-name_<admin_stack_name>"
# ansible-playbook -c ssh -vvvv --user=ubuntu -i ec2.py deployer.yml -e "@/path/to/secure/ansible/vars/edx_admin.yml" --limit="tag_aws_cloudformation_stack-name_<admin_stack_name>"
# You will need to create a gh_users.yml that contains the github names of users that should have login access to the machines.
# Setup user login on the bastion
- name: Configure Bastion
hosts: tag_role_bastion
hosts: tag_play_bastion
sudo: True
gather_facts: False
roles:
- gh_users
- aws
# Configure an admin instance with jenkins and asgard.
- name: Configure instance(s)
hosts: tag_role_admin
hosts: tag_play_admin
sudo: True
gather_facts: True
roles:
- common
- gh_users
- jenkins_master
- aws
- edx_ansible
- jenkins_admin
- hotg
- newrelic
......@@ -5,9 +5,8 @@
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- aws
- role: edxapp
celery_worker: True
- role: datadog
......
......@@ -5,9 +5,8 @@
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- aws
- role: nginx
nginx_sites:
- xqueue
......
......@@ -5,9 +5,8 @@
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- aws
- role: nginx
nginx_sites:
- xserver
......
Readme
------
# Stanford Ansible Configuration Files
This directory has the live playbooks that we use here at Stanford to
maintain our instance of OpenEdX at [class.stanford.edu][c]. We check
......@@ -23,25 +22,34 @@ Other install docs:
[1]: https://docs.google.com/document/d/1ZDx51Jxa-zffyeKvHmTp_tIskLW9D9NRg9NytPTbnrA/edit#heading=h.iggugvghbcpf
Ansible Commands - Prod
-----------------------
## Ansible Commands - Prod
Generally we do installs as the "ubuntu" user. You want to make
sure that the stanford-deploy-20130415 ssh key is in your ssh agent.
ANSIBLE_EC2_INI=ec2.ini ansible-playbook prod-log.yml -u ubuntu -c ssh -i ./ec2.py
ANSIBLE_CONFIG=prod-ansible.cfg ANSIBLE_EC2_INI=prod-ec2.ini ansible-playbook prod-app.yml -e "machine=app4" -u ubuntu -c ssh -i ./ec2.py
Some specifics:
Ansible Commands - Stage
------------------------
* To do database migrations, include this: ```-e "migrate_db=yes"```. The default
behavior is to not do migrations.
Verify that you're doing something reasonable:
* To hit multiple machines, use this: ```-e "machine=app(1|2|4)"```.
Use multiple separate "-e" options to specify multiple vars on the
command line.
ANSIBLE_CONFIG=stage-ansible.cfg ANSIBLE_EC2_INI=ec2.ini ansible-playbook stage-app.yml -u ubuntu -c ssh -i ./ec2.py --list-hosts
* Usually I run with the ```--list-hosts``` option first to verify that I'm
doing something sane before actually running.
Verify that you're doing something reasonable:
* To install the utility machines, substitute ```prod-worker.yml```. Those
are also parameterized on the machine variable (util1, util(1|2),
and so forth).
ANSIBLE_CONFIG=stage-ansible.cfg ANSIBLE_EC2_INI=ec2.ini ansible-playbook stage-app.yml -u ubuntu -c ssh -i ./ec2.py
## Ansible Commands - Stage
Command is:
ANSIBLE_CONFIG=stage-ansible.cfg ANSIBLE_EC2_INI=stage-ec2.ini ansible-playbook stage-app.yml -e "machine=app1" -u ubuntu -c ssh -i ./ec2.py
- hosts: ~tag_Name_app(10|20)_carn
# This uses variable expansion so you can select machine(s) from the command line
# using the -e flag. See README for instructions on how to use.
- hosts: ~tag_Name_{{machine}}_carn
pre_tasks:
- fail: msg="This playbook only runnable on 'app' machines"
when: "'app' not in machine"
sudo: True
vars_prompt:
- name: "migrate_db"
prompt: "Should this playbook run database migrations? (Type 'yes' to run, anything else to skip migrations)"
default: "no"
private: no
vars:
secure_dir: '../../../configuration-secure/ansible'
# this indicates the path to site-specific (with precedence)
......
# this gets all running prod webservers
- hosts: tag_environment_prod_carn:&tag_function_util
# or we can get subsets of them by name
#- hosts: ~tag_Name_util(10)_carn
- name: Basic util setup on carnegie workers
# This uses variable expansion so you can select machine(s) from the command line
# using the -e flag. See README for instructions on how to use.
hosts: ~tag_Name_{{machine}}_carn
pre_tasks:
- fail: msg="This playbook only runnable on 'util' machines"
when: "'util' not in machine"
sudo: True
gather_facts: True
vars:
secure_dir: '../../../edx-secret/ansible'
# this indicates the path to site-specific (with precedence)
......
......@@ -9,17 +9,13 @@
# - apt: pkg=libzmq-dev,python-zmq state=present
# - action: fireball
# this gets all running prod webservers
#- hosts: tag_environment_prod:&tag_function_webserver
# or we can get subsets of them by name
- hosts: ~tag_Name_app(10|20)_cme
# This uses variable expansion so you can select machine(s) from the command line
# using the -e flag. See README for instructions on how to use.
- hosts: ~tag_Name_{{machine}}_cme
pre_tasks:
- fail: msg="This playbook only runnable on 'app' machines"
when: "'app' not in machine"
sudo: True
vars_prompt:
- name: "migrate_db"
prompt: "Should this playbook run database migrations? (Type 'yes' to run, anything else to skip migrations)"
default: "no"
private: no
vars:
secure_dir: '../../../edx-secret/ansible'
# this indicates the path to site-specific (with precedence)
......
# this gets all running prod webservers
- hosts: tag_environment_prod_cme:&tag_function_util
# or we can get subsets of them by name
#- hosts: ~tag_Name_util(10)_cme
- name: Basic util setup on cme hosts
# This uses variable expansion so you can select machine(s) from the command line
# using the -e flag. See README for instructions on how to use.
hosts: ~tag_Name_{{machine}}_cme
pre_tasks:
- fail: msg="This playbook only runnable on 'util' machines"
when: "'util' not in machine"
sudo: True
vars:
secure_dir: '../../../edx-secret/ansible'
......
# this gets all running prod webservers
#- hosts: tag_environment_prod:&tag_function_webserver
# or we can get subsets of them by name
#- hosts: ~tag_Name_app(10|20)_prod
- hosts: ~tag_Name_app(11|21)_prod
## this is the test box
#- hosts: ~tag_Name_app4_prod
## you can also do security group, but don't do that
#- hosts: security_group_edx-prod-EdxappServerSecurityGroup-NSKCQTMZIPQB
# This uses variable expansion so you can select machine(s) from the command line
# using the -e flag. See README for instructions on how to use.
- hosts: ~tag_Name_{{machine}}_prod
pre_tasks:
- fail: msg="This playbook only runnable on 'app' machines"
when: "'app' not in machine"
sudo: True
vars_prompt:
- name: "migrate_db"
prompt: "Should this playbook run database migrations? (Type 'yes' to run, anything else to skip migrations)"
default: "no"
private: no
vars:
secure_dir: '../../../configuration-secure/ansible'
# this indicates the path to site-specific (with precedence)
......
......@@ -8,3 +8,35 @@
roles:
- common
- supervisor
- role: user
USER_INFO:
- name: sefk
github: true
type: admin
- name: jbau
github: true
type: admin
- name: jrbl
github: true
type: admin
- name: ali123
github: true
type: admin
- name: caesar2164
github: true
type: admin
- name: dcadams
github: true
type: admin
- name: nparlante
github: true
type: admin
- name: jinpa
github: true
- name: gbruhns
github: true
- name: paepcke
github: true
- name: akshayak
github: true
tags: users
# For all util machines
- hosts: tag_environment_prod:&tag_function_util
# or we can get subsets of them by name
#- hosts: ~tag_Name_util(1|2)_prod
- name: Basic util setup on all hosts
# This uses variable expansion so you can select machine(s) from the command line
# using the -e flag. See README for instructions on how to use.
hosts: ~tag_Name_{{machine}}_prod
pre_tasks:
- fail: msg="This playbook only runnable on 'util' machines"
when: "'util' not in machine"
sudo: True
vars:
secure_dir: '../../../configuration-secure/ansible'
......
- hosts: tag_environment_stage:&tag_function_webserver
#- hosts: tag_Name_app1_stage
# This uses variable expansion so you can select machine(s) from the command line
# using the -e flag. See README for instructions on how to use.
- hosts: ~tag_Name_{{ machine }}_stage
pre_tasks:
- fail: msg="This playbook only runnable on 'app' machines"
when: "'app' not in machine"
sudo: True
vars_prompt:
- name: "migrate_db"
prompt: "Should this playbook run database migrations? (Type 'yes' to run, anything else to skip migrations)"
default: "no"
private: no
vars:
not_prod: true
secure_dir: ../../../edx-secret/ansible
......
# this gets all running stage util machines
- hosts: tag_environment_stage:&tag_function_util
# or we can get subsets of them by name
#- hosts: ~tag_Name_util(1|2)_stage
---
- name: Basic util setup on all hosts
# This uses variable expansion so you can select machine(s) from the command line
# using the -e flag. See README for instructions on how to use.
hosts: ~tag_Name_{{machine}}_stage
pre_tasks:
- fail: msg="This playbook only runnable on 'util' machines"
when: "'util' not in machine"
sudo: True
vars:
secure_dir: ../../../edx-secret/ansible
......
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rds_subnet_group
version_added: "1.5"
short_description: manage RDS database subnet groups
description:
- Creates, modifies, and deletes RDS database subnet groups. This module has a dependency on python-boto >= 2.5.
options:
state:
description:
- Specifies whether the subnet should be present or absent.
required: true
default: present
aliases: []
choices: [ 'present' , 'absent' ]
name:
description:
- Database subnet group identifier.
required: true
default: null
aliases: []
description:
description:
- Database subnet group description. Only set when a new group is added.
required: false
default: null
aliases: []
subnets:
description:
- List of subnet IDs that make up the database subnet group.
required: false
default: null
aliases: []
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: true
default: null
aliases: [ 'aws_region', 'ec2_region' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
requirements: [ "boto" ]
author: Scott Anderson
'''
EXAMPLES = '''
# Add or change a subnet group
- local_action:
module: rds_subnet_group
state: present
name: norwegian-blue
description: My Fancy Ex Parrot Subnet Group
subnets:
- subnet-aaaaaaaa
- subnet-bbbbbbbb
# Remove a parameter group
- rds_param_group: >
state=absent
name=norwegian-blue
'''
import sys
import time
try:
import boto.rds
from boto.exception import BotoServerError
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state = dict(required=True, choices=['present', 'absent']),
name = dict(required=True),
description = dict(required=False),
subnets = dict(required=False, type='list'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
state = module.params.get('state')
group_name = module.params.get('name').lower()
group_description = module.params.get('description')
group_subnets = module.params.get('subnets') or {}
if state == 'present':
for required in ['name', 'description', 'subnets']:
if not module.params.get(required):
module.fail_json(msg = str("Parameter %s required for state='present'" % required))
else:
for not_allowed in ['description', 'subnets']:
if module.params.get(not_allowed):
module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed))
# Retrieve any AWS settings from the environment.
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
if not region:
module.fail_json(msg = str("region not specified and unable to determine region from EC2_REGION."))
try:
conn = boto.rds.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key)
except boto.exception.BotoServerError, e:
module.fail_json(msg = e.error_message)
try:
changed = False
exists = False
try:
matching_groups = conn.get_all_db_subnet_groups(group_name, max_records=100)
exists = len(matching_groups) > 0
except BotoServerError, e:
if e.error_code != 'DBSubnetGroupNotFoundFault':
module.fail_json(msg = e.error_message)
if state == 'absent':
if exists:
conn.delete_db_subnet_group(group_name)
changed = True
else:
if not exists:
new_group = conn.create_db_subnet_group(group_name, desc=group_description, subnet_ids=group_subnets)
else:
changed_group = conn.modify_db_subnet_group(group_name, description=group_description, subnet_ids=group_subnets)
except BotoServerError, e:
module.fail_json(msg = e.error_message)
module.exit_json(changed=changed)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
......@@ -19,6 +19,13 @@ AS_SERVER_PORT: '9000'
AS_ENV_LANG: 'en_US.UTF-8'
AS_LOG_LEVEL: 'INFO'
AS_WORKERS: '4'
# add public keys to enable the automator user
# for running manage.py commands
AS_AUTOMATOR_NAME: automator
AS_AUTOMATOR_AUTHORIZED_KEYS: []
AS_AUTOMATOR_SUDO_CMDS:
- "ALL=({{ analytics_web_user }}) NOPASSWD:SETENV:{{ analytics_venv_dir }}/bin/django-admin.py run_all_queries *"
DATABASES:
default: &databases_default
......@@ -43,7 +50,7 @@ analytics_auth_config:
MONGO_STORED_QUERIES_COLLECTION: $AS_DB_RESULTS_COLLECTION
as_role_name: "analytics-server"
as_user: "analytics-server"
as_user: "analytics-server"
as_home: "/opt/wwc/analytics-server"
as_venv_dir: "{{ as_home }}/virtualenvs/analytics-server"
as_source_repo: "git@github.com:edx/analytics-server.git"
......@@ -63,14 +70,6 @@ as_env_vars:
ANALYTICS_SERVER_LOG_LEVEL: "{{ AS_LOG_LEVEL }}"
#
# Used by the included role, automated.
# See meta/main.yml
#
as_automated_rbash_links:
- /usr/bin/sudo
- /usr/bin/scp
#
# OS packages
#
......
automator ALL=(www-data) NOPASSWD:SETENV:/opt/wwc/analytics-server/virtualenvs/analytics-server/bin/django-admin.py run_all_queries *
---
dependencies:
- {
role: automated,
automated_rbash_links: $as_automated_rbash_links,
automated_sudoers_dest: '99-automator-analytics-server',
automated_sudoers_template: 'roles/analytics-server/templates/etc/sudoers.d/99-automator-analytics-server.j2'
}
- role: user
user_info:
- name: "{{ AS_AUTOMATOR_NAME }}"
type: restricted
sudo_cmds: "{{ AS_AUTOMATOR_SUDO_CMDS }}"
authorized_keys: "{{ AS_AUTOMATOR_AUTHORIZED_KEYS }}"
user_rbash_links:
- /usr/bin/sudo
- /usr/bin/scp
when: AS_AUTOMATOR_AUTHORIZED_KEYS|length != 0
......@@ -21,7 +21,7 @@
#
# common role
#
# Depends upon the automated role
# Depends upon the user role
#
# Example play:
#
......
......@@ -43,7 +43,7 @@ analytics_auth_config:
MONGO_STORED_QUERIES_COLLECTION: $ANALYTICS_DB_RESULTS_COLLECTION
analytics_role_name: "analytics"
analytics_user: "analytics"
analytics_user: "analytics"
analytics_home: "/opt/wwc/analytics"
analytics_venv_dir: "{{ analytics_home }}/virtualenvs/analytics"
analytics_source_repo: "git@github.com:edx/analytics-server.git"
......@@ -63,7 +63,7 @@ analytics_env_vars:
ANALYTICS_LOG_LEVEL: "{{ ANALYTICS_LOG_LEVEL }}"
#
# Used by the included role, automated.
# Used by the included role, user.
# See meta/main.yml
#
analytics_automated_rbash_links:
......
......@@ -21,7 +21,7 @@
#
# common role
#
# Depends upon the automated role
# user role to set up a restricted user
#
# Example play:
#
......
automator ALL=({{ analytics_web_user }}) NOPASSWD:SETENV:{{ analytics_venv_dir }}/bin/django-admin.py run_all_queries *
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
# Tasks for role automated
#
# Overview:
#
# This role is included as a dependency by other roles which provide
# automated jobs. Automation occurs over ssh. The automator user
# is assigned to a managed rbash shell and is, potentially, allowed to run
# explicitly listed commands via sudo. Both the commands that are
# allowed via rbash and the sudoers file are provided by the
# including role.
#
# Dependencies:
#
# This role depends upon variables provided by an including role
# via the my_role/meta/main.yml file. Includes take the following forms:
#
# dependencies:
# - {
# role: automated,
# automated_rbash_links: $as_automated_rbash_links,
# automated_sudoers_dest: '99-my_role'
# automated_sudoers_file: 'roles/my_role/files/etc/sudoers.d/99-my_role'
# }
#
# or
#
# dependencies:
# - {
# role: automated,
# automated_rbash_links: $as_automated_rbash_links,
# automated_sudoers_dest: '99-my_role'
# automated_sudoers_template: 'roles/my_role/templates/etc/sudoers.d/99-my_role.j2'
# }
#
# The sudoers file is optional. Note that for sudo to work it must be
# included in the rbash links list.
#
# That list should be provided via my_role's defaults
#
# role_automated_rbash_links:
# - /usr/bin/sudo
# - /usr/bin/scp
#
- fail: msg="automated_rbash_links is required for this role"
when: automated_rbash_links is not defined
- fail: msg="automated_sudoers_dest is required for this role"
when: automated_sudoers_dest is not defined
- name: create automated user
user:
name={{ automated_user }} state=present shell=/bin/rbash
home={{ automated_home }} createhome=yes
- name: create sudoers file from file
copy:
dest=/etc/sudoers.d/{{ automated_sudoers_dest }}
src={{ automated_sudoers_file }} owner="root"
group="root" mode=0440 validate='visudo -cf %s'
when: automated_sudoers_file
- name: create sudoers file from template
template:
dest=/etc/sudoers.d/{{ automated_sudoers_dest }}
src={{ automated_sudoers_template }} owner="root"
group="root" mode=0440 validate='visudo -cf %s'
when: automated_sudoers_template
#
# Prevent user from updating their PATH and
# environment.
#
- name: update shell file mode
file:
path={{ automated_home }}/{{ item }} mode=0640
state=file owner="root" group={{ automated_user }}
with_items:
- .bashrc
- .profile
- .bash_logout
- name: change ~automated ownership
file:
path={{ automated_home }} mode=0750 state=directory
owner="root" group={{ automated_user }}
#
# This ensures that the links are updated with each run
# and that links that were remove from the role are
# removed.
#
- name: remove ~automated/bin directory
file:
path={{ automated_home }}/bin state=absent
ignore_errors: yes
- name: create ~automated/bin directory
file:
path={{ automated_home }}/bin state=directory mode=0750
owner="root" group={{ automated_user }}
- name: re-write .profile
copy:
src=home/automator/.profile
dest={{ automated_home }}/.profile
owner="root"
group={{ automated_user }}
mode="0744"
- name: re-write .bashrc
copy:
src=home/automator/.bashrc
dest={{ automated_home }}/.bashrc
owner="root"
group={{ automated_user }}
mode="0744"
- name: create .ssh directory
file:
path={{ automated_home }}/.ssh state=directory mode=0700
owner={{ automated_user }} group={{ automated_user }}
- name: build authorized_keys file
template:
src=home/automator/.ssh/authorized_keys.j2
dest={{ automated_home }}/.ssh/authorized_keys mode=0600
owner={{ automated_user }} group={{ automated_user }}
- name: create allowed command links
file:
src={{ item }} dest={{ automated_home }}/bin/{{ item.split('/').pop() }}
state=link
with_items: automated_rbash_links
\ No newline at end of file
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role aws
#
#
# Rotate logs to S3
# Only for when edX is running in AWS since it organizes
# logs by security group.
# !! The buckets defined below MUST exist prior to enabling !!
# this feature and the instance IAM role must have write permissions
# to the buckets
AWS_S3_LOGS: false
# If there are any issues with the s3 sync an error
# log will be sent to the following address.
# This relies on your server being able to send mail
AWS_S3_LOGS_NOTIFY_EMAIL: dummy@example.com
AWS_S3_LOGS_FROM_EMAIL: dummy@example.com
# Separate buckets for tracking logs and everything else
# You should be overriding the environment and deployment vars
# Order of precedence is left to right for exclude and include options
AWS_S3_LOG_PATHS:
- bucket: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-app-logs"
path: "{{ COMMON_LOG_DIR }}/!(*tracking*)"
- bucket: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-app-logs"
path: "/var/log/*"
- bucket: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-tracking-logs"
path: "{{ COMMON_LOG_DIR }}/*tracking*"
#
# vars are namespace with the module name.
#
aws_role_name: aws
aws_data_dir: "{{ COMMON_DATA_DIR }}/aws"
aws_app_dir: "{{ COMMON_APP_DIR }}/aws"
aws_s3_sync_script: "{{ aws_app_dir }}/send-logs-to-s3"
aws_s3_logfile: "{{ aws_log_dir }}/s3-log-sync.log"
aws_log_dir: "{{ COMMON_LOG_DIR }}/aws"
# default path to the aws binary
aws_cmd: "{{ COMMON_BIN_DIR }}/s3cmd"
#
# OS packages
#
aws_debian_pkgs:
- python-setuptools
aws_pip_pkgs:
- https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz
- awscli
aws_redhat_pkgs: []
aws_s3cmd_version: s3cmd-1.5.0-beta1
aws_s3cmd_url: "http://files.edx.org/s3cmd/{{ aws_s3cmd_version }}.tar.gz"
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Role includes for role aws
#
# Example:
#
# dependencies:
# - {
# role: my_role
# my_role_var0: "foo"
# my_role_var1: "bar"
# }
dependencies:
- common
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role aws
#
# Overview:
#
#
# Dependencies:
#
#
# Example play:
#
#
- name: create data directories
file: >
path={{ item }}
state=directory
owner=root
group=root
mode=0700
with_items:
- "{{ aws_data_dir }}"
- "{{ aws_log_dir }}"
- name: create app directory
file: >
path={{ item }}
state=directory
owner=root
group=root
mode=0755
with_items:
- "{{ aws_app_dir }}"
- name: install system packages
apt: >
pkg={{','.join(aws_debian_pkgs)}}
state=present
update_cache=yes
- name: install aws python packages
pip: >
name="{{ item }}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
with_items: aws_pip_pkgs
- name: get s3cmd
get_url: >
url={{ aws_s3cmd_url }}
dest={{ aws_data_dir }}/
- name: untar s3cmd
shell: >
tar xf {{ aws_data_dir }}/{{ aws_s3cmd_version }}.tar.gz
creates={{ aws_app_dir }}/{{ aws_s3cmd_version }}/s3cmd
chdir={{ aws_app_dir }}
- name: create symlink for s3cmd
file: >
src={{ aws_app_dir }}/{{ aws_s3cmd_version }}/s3cmd
dest={{ COMMON_BIN_DIR }}/s3cmd
state=link
- name: create s3 log sync script
template: >
dest={{ aws_s3_sync_script }}
src=send-logs-to-s3.j2 mode=0755 owner=root group=root
when: AWS_S3_LOGS
- name: create symlink for s3 log sync script
file: >
state=link
src={{ aws_s3_sync_script }}
dest={{ COMMON_BIN_DIR }}/{{ aws_s3_sync_script|basename }}
when: AWS_S3_LOGS
- name: run s3 log sync script on shutdown
file: >
state=link
src={{ COMMON_BIN_DIR }}/send-logs-to-s3
path=/etc/rc0.d/S00send-logs-to-s3
when: AWS_S3_LOGS
# cron job runs the aws s3 sync script
- name: cronjob for s3 log sync
cron: >
name="cronjob for s3 log sync"
user=root
minute=0
job={{ aws_s3_sync_script }}
when: AWS_S3_LOGS
{% set lb = '{' %}
{% set rb = '}' %}
#!/bin/bash
#
# This script can be called from logrotate
# to sync logs to s3
if (( $EUID != 0 )); then
echo "Please run as the root user"
exit 1
fi
S3_LOGFILE="{{ aws_s3_logfile }}"
NOTIFY_EMAIL={{ AWS_S3_LOGS_NOTIFY_EMAIL }}
FROM_EMAIL={{ AWS_S3_LOGS_FROM_EMAIL }}
AWS_CMD={{ aws_cmd }}
exec > >(tee $S3_LOGFILE)
exec 2>&1
shopt -s extglob
usage() {
cat<<EO
A wrapper of s3cmd sync that will sync files to
an s3 bucket, will send mail to {{ AWS_S3_LOGS_NOTIFY_EMAIL }}
on failures.
Usage: $PROG
-v add verbosity (set -x)
-n echo what will be done
-h this
EO
}
while getopts "vhn" opt; do
case $opt in
v)
set -x
shift
;;
h)
usage
exit 0
;;
n)
noop="echo Would have run: "
shift
;;
esac
done
# grab the first security group for the instance
# which will be used as a directory name in the s3
# bucket
# If there are any errors from this point
# send mail to $NOTIFY_EMAIL
set -e
sec_grp=unset
instance_id=unset
s3_path=unset
onerror() {
if [[ -z $noop ]]; then
message_file=/var/tmp/message-$$.json
message_string="Error syncing $s3_path: inst_id=$instance_id ip=$ip region=$region"
if [[ -r $S3_LOGFILE ]]; then
python -c "import json; d={'Subject':{'Data':'$message_string'},'Body':{'Text':{'Data':open('$S3_LOGFILE').read()}}};print json.dumps(d)" > $message_file
else
cat << EOF > $message_file
{"Subject": { "Data": "$message_string" }, "Body": { "Text": { "Data": "!! ERROR !! no logfile" } } }
EOF
fi
echo "ERROR: syncing $s3_path on $instance_id"
$AWS_CMD ses send-email --from $FROM_EMAIL --to $NOTIFY_EMAIL --message file://$message_file --region $region
else
echo "Error syncing $s3_path on $instance_id"
fi
}
trap onerror ERR SIGHUP SIGINT SIGTERM
# first security group is used as the directory name in the bucket
sec_grp=$(ec2metadata --security-groups | head -1)
instance_id=$(ec2metadata --instance-id)
ip=$(ec2metadata --local-ipv4)
availability_zone=$(ec2metadata --availability-zone)
# region isn't available via the metadata service
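# The lb/rb variables set at the top of this template expand to literal
# braces, so the next line renders as:
#   region=${availability_zone:0:${#availability_zone} - 1}
# i.e. the availability zone minus its trailing letter (us-east-1b -> us-east-1)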
region=${availability_zone:0:${{lb}}#availability_zone{{rb}} - 1}
s3_path="${2}/$sec_grp/"
{% for item in AWS_S3_LOG_PATHS -%}
$noop $AWS_CMD sync {{ item['path'] }} "s3://{{ item['bucket'] }}/$sec_grp/${instance_id}-${ip}/"
{% endfor %}
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
#
# Defaults for role bastion
#
# These users are given access
# to the databases from the bastion
# box; the list must be a subset of the
# users created on the box, which is
# COMMON_USER_INFO + BASTION_USER_INFO
BASTION_REPLICA_USERS: []
# These users are created on the bastion
# server.
BASTION_USER_INFO: []
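As a sketch, a deployment could populate both lists from its secure vars; the name below is a placeholder, and every entry in BASTION_REPLICA_USERS must also be among the users created on the box (COMMON_USER_INFO + BASTION_USER_INFO) so the generated helper scripts land in an existing home directory:

BASTION_USER_INFO:
  - name: jarv
    github: true
BASTION_REPLICA_USERS:
  - jarv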
#
# vars are namespace with the module name.
#
bastion_role_name: bastion
#
# OS packages
#
bastion_debian_pkgs:
# for running ansible mysql module
- mysql-client-core-5.5
- libmysqlclient-dev
# for connecting to mongo
- mongodb-clients
bastion_pip_pkgs:
# for running ansible mysql
- mysql-python
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Role includes for role bastion
#
dependencies:
- role: user
user_info: "{{ BASTION_USER_INFO }}"
- aws
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role bastion
#
# Overview:
#
#
# Dependencies:
# - common
#
- name: install system packages
apt: >
pkg={{','.join(bastion_debian_pkgs)}}
state=present
- name: install bastion python packages
pip: >
name="{{ item }}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
with_items: bastion_pip_pkgs
- template: >
src=mysql.sh.j2
dest=/home/{{ item[0] }}/{{ item[1].script_name }}
mode=0700 owner={{ item[0] }} group=root
with_nested:
- "{{ BASTION_REPLICA_USERS }}"
-
- db_host: "{{ EDXAPP_MYSQL_HOST }}"
db_name: "{{ EDXAPP_MYSQL_DB_NAME }}"
script_name: edxapp-rds.sh
- db_host: "{{ XQUEUE_MYSQL_HOST }}"
db_name: "{{ XQUEUE_MYSQL_DB_NAME }}"
script_name: xqueue-rds.sh
- db_host: "{{ ORA_MYSQL_HOST }}"
db_name: "{{ ORA_MYSQL_DB_NAME }}"
script_name: ora-rds.sh
- template: >
src=mongo.sh.j2
dest=/home/{{ item[0] }}/{{ item[1].script_name }}
mode=0700 owner={{ item[0] }} group=root
with_nested:
- "{{ BASTION_REPLICA_USERS }}"
-
- db_host: "{{ EDXAPP_MONGO_HOSTS[1] }}"
db_name: "{{ EDXAPP_MONGO_DB_NAME }}"
db_port: "{{ EDXAPP_MONGO_PORT }}"
script_name: edxapp-mongo.sh
- db_host: "{{ FORUM_MONGO_HOSTS[1] }}"
db_name: "{{ FORUM_MONGO_DATABASE }}"
db_port: "{{ FORUM_MONGO_PORT }}"
script_name: forum-mongo.sh
#!/usr/bin/env bash
mongo {{ item[1].db_host }}:{{ item[1].db_port }}/{{ item[1].db_name }} -u {{ COMMON_MONGO_READ_ONLY_USER }} -p"{{ COMMON_MONGO_READ_ONLY_PASS }}"
#!/usr/bin/env bash
mysql -u {{ COMMON_MYSQL_READ_ONLY_USER }} -h {{ item[1].db_host }} -p"{{ COMMON_MYSQL_READ_ONLY_PASS }}" {{ item[1].db_name }}
......@@ -28,7 +28,7 @@ CERTS_KEY_ID: "FEF8D954"
# Path to git identity file for pull access to
# the edX certificates repo - REQUIRED
# Example - {{ secure_dir }}/files/git-identity
CERTS_LOCAL_GIT_IDENTITY: !!null
CERTS_GIT_IDENTITY: !!null
# Path to public and private gpg key for signing
# the edX certificate. Default is a dummy key
CERTS_LOCAL_PRIVATE_KEY: "example-private-key.txt"
......@@ -42,7 +42,7 @@ certs_venvs_dir: "{{ certs_app_dir }}/venvs"
certs_venv_dir: "{{ certs_venvs_dir }}/certs"
certs_venv_bin: "{{ certs_venv_dir }}/bin"
certs_git_ssh: /tmp/git_ssh.sh
certs_git_identity: "{{ certs_app_dir }}/git-identity"
certs_git_identity: "{{ certs_app_dir }}/certs-git-identity"
certs_requirements_file: "{{ certs_code_dir }}/requirements.txt"
certs_repo: "git@github.com:/edx/certificates"
certs_version: 'master'
......
......@@ -20,4 +20,4 @@
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
when: certs_installed is defined
when: certs_installed is defined and not disable_edx_services
......@@ -16,9 +16,19 @@
- name: writing supervisor script for certificates
template: >
src=certs.conf.j2 dest={{ supervisor_cfg_dir }}/certs.conf
src=certs.conf.j2 dest={{ supervisor_available_dir }}/certs.conf
owner={{ supervisor_user }} mode=0644
- name: enable supervisor script for certificates
file: >
src={{ supervisor_available_dir }}/certs.conf
dest={{ supervisor_cfg_dir }}/certs.conf
owner={{ supervisor_user }}
state=link
force=yes
mode=0644
notify: restart certs
when: not disable_edx_services
- name: create ssh script for git
template: >
......@@ -28,7 +38,7 @@
- name: install read-only ssh key for the certs repo
copy: >
src={{ CERTS_LOCAL_GIT_IDENTITY }} dest={{ certs_git_identity }}
content="{{ CERTS_GIT_IDENTITY }}" dest={{ certs_git_identity }}
force=yes owner={{ certs_user }} mode=0600
notify: restart certs
......@@ -58,6 +68,7 @@
register: supervisor_update
sudo_user: "{{ supervisor_service_user }}"
changed_when: supervisor_update.stdout != ""
when: not disable_edx_services
- name: ensure certs has started
supervisorctl_local: >
......@@ -66,6 +77,7 @@
config={{ supervisor_cfg }}
state=started
sudo_user: "{{ supervisor_service_user }}"
when: not disable_edx_services
- name: create a symlink for venv python
file: >
......
......@@ -32,8 +32,8 @@
# - certs
#
- name: Checking to see if git identity is set
fail: msg="You must set CERTS_LOCAL_GIT_IDENTITY var for this role!"
when: not CERTS_LOCAL_GIT_IDENTITY
fail: msg="You must set CERTS_GIT_IDENTITY var for this role!"
when: not CERTS_GIT_IDENTITY
- name: create application user
user: >
......
......@@ -7,7 +7,9 @@ COMMON_BASE_DIR: /edx
COMMON_DATA_DIR: "{{ COMMON_BASE_DIR}}/var"
COMMON_APP_DIR: "{{ COMMON_BASE_DIR}}/app"
COMMON_LOG_DIR: "{{ COMMON_DATA_DIR }}/log"
# Override this to create
# common users in all roles
COMMON_USER_INFO: []
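A minimal sketch of overriding this; entries follow the same user_info format the user role consumes elsewhere in this change, and the name is a placeholder:

COMMON_USER_INFO:
  - name: jarv
    github: true
    type: admin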
# these directories contain
# symlinks for convenience
COMMON_BIN_DIR: "{{ COMMON_BASE_DIR }}/bin"
......@@ -25,7 +27,22 @@ COMMON_HOSTNAME: !!null
COMMON_CUSTOM_DHCLIENT_CONFIG: false
# uncomment and specify your domains.
# COMMON_DHCLIENT_DNS_SEARCH: ["ec2.internal","example.com"]
COMMON_MOTD_TEMPLATE: "motd.tail.j2"
# These are two special accounts across all databases
# the read only user is granted select privs on all dbs
# the admin user is granted create user privs on all dbs
COMMON_MYSQL_READ_ONLY_USER: 'read_only'
COMMON_MYSQL_READ_ONLY_PASS: 'password'
COMMON_MYSQL_ADMIN_USER: 'admin'
COMMON_MYSQL_ADMIN_PASS: 'password'
COMMON_MONGO_READ_ONLY_USER: 'read_only'
COMMON_MONGO_READ_ONLY_PASS: 'password'
common_debian_pkgs:
- ntp
- ack-grep
......@@ -40,9 +57,13 @@ common_debian_pkgs:
- python2.7
- python-pip
- python2.7-dev
# Not installed by default on vagrant ubuntu
# boxes
- curl
common_pip_pkgs:
- virtualenv==1.10.1
- pip==1.5.4
- virtualenv==1.11.4
- virtualenvwrapper
common_web_user: www-data
......@@ -52,7 +73,14 @@ common_log_user: syslog
common_git_ppa: "ppa:git-core/ppa"
# Skip supervisor tasks
# Useful when supervisor is not installed (local dev)
# When set to true, this flag will allow you to install everything but keep
# supervisor from starting any of the services.
# Service files will be placed in supervisor's conf.available.d but not linked
# to supervisor's 'conf.d' directory.
disable_edx_services: False
# Some apps run differently in dev mode (forums)
# so different start scripts are generated in dev mode.
devstack: False
common_debian_variants:
......
#!/bin/sh
test -x /usr/sbin/logrotate || exit 0
/usr/sbin/logrotate /etc/logrotate.d/hourly
......@@ -2,3 +2,6 @@
- name: restart rsyslogd
service: name=rsyslog state=restarted
sudo: True
- name: restart ssh
service: name=ssh state=restarted
sudo: True
---
dependencies:
- role: user
user_info: "{{ COMMON_USER_INFO }}"
......@@ -48,13 +48,38 @@
with_items: common_pip_pkgs
- name: Install rsyslog configuration for edX
template: dest=/etc/rsyslog.d/99-edx.conf src=edx_rsyslog.j2 owner=root group=root mode=644
template: >
dest=/etc/rsyslog.d/99-edx.conf
src=edx_rsyslog.j2
owner=root group=root mode=644
notify: restart rsyslogd
- name: Install logrotate configuration for edX
template: dest=/etc/logrotate.d/edx-services src=edx_logrotate.j2 owner=root group=root mode=644
template: >
dest=/etc/logrotate.d/edx-services
src=etc/logrotate.d/edx_logrotate.j2
owner=root group=root mode=644
# This is in common to keep all logrotate config
# in the same role
- name: Create hourly subdirectory in logrotate.d
file: path=/etc/logrotate.d/hourly state=directory
- name: Install logrotate configuration for tracking file
template: >
dest=/etc/logrotate.d/hourly/tracking.log
src=etc/logrotate.d/hourly/edx_logrotate_tracking_log.j2
owner=root group=root mode=644
- name: Add logrotate for tracking.log to cron.hourly
copy: >
dest=/etc/cron.hourly/logrotate
src=etc/cron.hourly/logrotate
owner=root group=root mode=555
# This can be removed after a new release of edX
- name: Remove old tracking.log config from /etc/logrotate.d
file: path=/etc/logrotate.d/tracking.log state=absent
- name: update /etc/hosts
template: src=hosts.j2 dest=/etc/hosts
......@@ -73,4 +98,28 @@
- name: update /etc/dhcp/dhclient.conf
template: src=etc/dhcp/dhclient.conf.j2 dest=/etc/dhcp/dhclient.conf
when: COMMON_CUSTOM_DHCLIENT_CONFIG
\ No newline at end of file
when: COMMON_CUSTOM_DHCLIENT_CONFIG
# Remove some of the default motd display on ubuntu
# and add a custom motd. These do not require an
# ssh restart
- name: update the ssh motd on Ubuntu
file: >
mode=0644
path={{ item }}
with_items:
- "/etc/update-motd.d/10-help-text"
- "/usr/share/landscape/50-landscape-sysinfo"
- "/etc/update-motd.d/51-cloudguest"
- "/etc/update-motd.d/91-release-upgrade"
- name: add ssh-warning banner motd
template: >
dest=/etc/motd.tail
src={{ COMMON_MOTD_TEMPLATE }} mode=0755 owner=root group=root
- name: update ssh config
template: >
dest=/etc/ssh/sshd_config
src=sshd_config.j2 mode=0644 owner=root group=root
notify: restart ssh
*******************************************************************
* *
* _ _| |\ \/ / *
* / -_) _` | > < *
* \___\__,_|/_/\_\ *
* *
* Instructions and troubleshooting: *
* https://github.com/edx/configuration/wiki/edX-Developer-Stack *
*******************************************************************
{{ COMMON_LOG_DIR }}/tracking.log {
create
compress
delaycompress
create
dateext
dateformat -%Y%m%d-%s
missingok
nodelaycompress
notifempty
daily
rotate 365000
rotate 16000
size 1M
postrotate
/usr/bin/killall -HUP rsyslogd
endscript
}
*******************************************************************
* _ __ __ *
* _ _| |\ \/ / This system is for the use of authorized *
* / -_) _` | > < users only. Usage of this system may be *
* \___\__,_|/_/\_\ monitored and recorded by system personnel. *
* *
* Anyone using this system expressly consents to such monitoring *
* and is advised that if such monitoring reveals possible *
* evidence of criminal activity, system personnel may provide the *
* evidence from such monitoring to law enforcement officials. *
* *
*******************************************************************
# {{ ansible_managed }}
#
# Changes from the default Ubuntu ssh config:
# - LogLevel set to VERBOSE
#
# What ports, IPs and protocols we listen for
Port 22
# Use these options to restrict which interfaces/protocols sshd will bind to
#ListenAddress ::
#ListenAddress 0.0.0.0
Protocol 2
# HostKeys for protocol version 2
HostKey /etc/ssh/ssh_host_rsa_key
HostKey /etc/ssh/ssh_host_dsa_key
HostKey /etc/ssh/ssh_host_ecdsa_key
#Privilege Separation is turned on for security
UsePrivilegeSeparation yes
# Lifetime and size of ephemeral version 1 server key
KeyRegenerationInterval 3600
ServerKeyBits 768
# Logging
SyslogFacility AUTH
LogLevel VERBOSE
# Authentication:
LoginGraceTime 120
PermitRootLogin yes
StrictModes yes
RSAAuthentication yes
PubkeyAuthentication yes
#AuthorizedKeysFile %h/.ssh/authorized_keys
# Don't read the user's ~/.rhosts and ~/.shosts files
IgnoreRhosts yes
# For this to work you will also need host keys in /etc/ssh_known_hosts
RhostsRSAAuthentication no
# similar for protocol version 2
HostbasedAuthentication no
# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication
#IgnoreUserKnownHosts yes
# To enable empty passwords, change to yes (NOT RECOMMENDED)
PermitEmptyPasswords no
# Change to yes to enable challenge-response passwords (beware issues with
# some PAM modules and threads)
ChallengeResponseAuthentication no
# Change to no to disable tunnelled clear text passwords
PasswordAuthentication no
# Kerberos options
#KerberosAuthentication no
#KerberosGetAFSToken no
#KerberosOrLocalPasswd yes
#KerberosTicketCleanup yes
# GSSAPI options
#GSSAPIAuthentication no
#GSSAPICleanupCredentials yes
X11Forwarding yes
X11DisplayOffset 10
PrintMotd no
PrintLastLog yes
TCPKeepAlive yes
#UseLogin no
#MaxStartups 10:30:60
#Banner /etc/issue
# Allow client to pass locale environment variables
AcceptEnv LANG LC_*
Subsystem sftp /usr/lib/openssh/sftp-server
# Set this to 'yes' to enable PAM authentication, account processing,
# and session processing. If this is enabled, PAM authentication will
# be allowed through the ChallengeResponseAuthentication and
# PasswordAuthentication. Depending on your PAM configuration,
# PAM authentication via ChallengeResponseAuthentication may bypass
# the setting of "PermitRootLogin without-password".
# If you just want the PAM account and session checks to run without
# PAM authentication, then enable this but set PasswordAuthentication
# and ChallengeResponseAuthentication to 'no'.
UsePAM yes
......@@ -5,7 +5,7 @@
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
when: discern_installed is defined
when: discern_installed is defined and not disable_edx_services
with_items:
- discern
- discern_celery
......@@ -2,11 +2,23 @@
- name: create supervisor scripts - discern, discern_celery
template: >
src={{ item }}.conf.j2 dest={{ supervisor_cfg_dir }}/{{ item }}.conf
src={{ item }}.conf.j2 dest={{ supervisor_available_dir }}/{{ item }}.conf
owner={{ supervisor_user }} mode=0644
sudo_user: "{{ supervisor_user }}"
with_items: ['discern', 'discern_celery']
- name: enable supervisor scripts - discern, discern_celery
file: >
src={{ supervisor_available_dir }}/{{ item }}.conf
dest={{ supervisor_cfg_dir }}/{{ item }}.conf
owner={{ supervisor_user }}
state=link
force=yes
mode=0644
sudo_user: "{{ supervisor_user }}"
with_items: ['discern', 'discern_celery']
when: not disable_edx_services
#Upload config files for django (auth and env)
- name: create discern application config env.json file
template: src=env.json.j2 dest={{ discern_app_dir }}/env.json
......@@ -104,6 +116,7 @@
register: supervisor_update
sudo_user: "{{ supervisor_service_user }}"
changed_when: supervisor_update.stdout != ""
when: not disable_edx_services
- name: ensure discern, discern_celery has started
supervisorctl_local: >
......@@ -114,6 +127,7 @@
with_items:
- discern
- discern_celery
when: not disable_edx_services
- name: create a symlink for venv python
file: >
......
......@@ -13,6 +13,12 @@
#
# OS packages
#
# set this to true to dump all extra vars
# this is currently broken when extra vars
# contain references to vars that are not
# included in the play.
EDX_ANSIBLE_DUMP_VARS: false
edx_ansible_debian_pkgs:
- python-pip
......@@ -32,6 +38,6 @@ edx_ansible_venv_bin: "{{ edx_ansible_venv_dir }}/bin"
edx_ansible_user: "edx-ansible"
edx_ansible_source_repo: https://github.com/edx/configuration.git
edx_ansible_requirements_file: "{{ edx_ansible_code_dir }}/requirements.txt"
edx_ansible_var_file: "{{ edx_ansible_data_dir }}/server-vars.yml"
# edX configuration repo
configuration_version: master
edx_ansible_var_file: "{{ edx_ansible_app_dir }}/server-vars.yml"
......@@ -11,3 +11,4 @@
# Role includes for role edx_ansible
dependencies:
- common
- aws
......@@ -18,31 +18,29 @@
dest={{ COMMON_BIN_DIR }}/update
state=link
- name: create a symlink for ansible-playbook
file: >
src={{ edx_ansible_venv_bin }}/ansible-playbook
dest={{ COMMON_BIN_DIR }}/ansible-playbook
state=link
- name: create a symlink for the playbooks dir
file: >
src={{ edx_ansible_code_dir }}/playbooks
dest={{ COMMON_CFG_DIR }}/playbooks
state=link
- name: dump all vars to yaml
template: src=dumpall.yml.j2 dest={{ edx_ansible_var_file }} mode=0600
when: EDX_ANSIBLE_DUMP_VARS
- name: clean up var file, removing all version vars
shell: sed -i -e "/{{item}}/d" {{ edx_ansible_var_file }}
with_items:
# deploy versions
- "^edx_platform_version:"
- "^edx_platform_commit:"
- "^xqueue_version:"
- "^forum_version:"
- "^xserver_version:"
- "^discern_ease_version:"
- "^ora_ease_version:"
- "^discern_version:"
- "^ora_version:"
- "^configuration_version:"
- "^ease_version:"
- "^certs_version:"
# other misc vars
- "^tags:"
- "^_original_file:"
- name: create a symlink for var file
- name: create symlink for config file
file: >
src={{ edx_ansible_var_file }}
dest={{ COMMON_CFG_DIR }}/{{ edx_ansible_var_file|basename }}
state=link
when: EDX_ANSIBLE_DUMP_VARS
- name: clean up var file, removing all version vars and internal ansible vars (anything not caps)
shell: python -c "import yaml; y=yaml.load(open('{{ edx_ansible_var_file }}')); f=open('{{ edx_ansible_var_file }}', 'wb'); f.write(yaml.safe_dump({key:value for key,value in y.iteritems() if key.isupper()}, default_flow_style=False)); f.close();"
when: EDX_ANSIBLE_DUMP_VARS
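# The inline python above rewrites the dumped var file in place, keeping only
# keys whose names are all upper-case and dropping version/internal vars.
# A readable sketch of the same logic (python 2, illustrative only; var_file
# stands in for {{ edx_ansible_var_file }}):
#   import yaml
#   data = yaml.load(open(var_file))
#   keep = dict((k, v) for k, v in data.iteritems() if k.isupper())
#   open(var_file, 'wb').write(yaml.safe_dump(keep, default_flow_style=False))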
......@@ -31,7 +31,13 @@ EDXAPP_MONGO_DB_NAME: 'edxapp'
EDXAPP_MYSQL_DB_NAME: 'edxapp'
EDXAPP_MYSQL_USER: 'edxapp001'
EDXAPP_MYSQL_USER_ADMIN: 'root'
EDXAPP_MYSQL_USER_MIGRATE: 'migrate'
EDXAPP_MYSQL_PASSWORD: 'password'
EDXAPP_MYSQL_PASSWORD_READ_ONLY: 'password'
EDXAPP_MYSQL_PASSWORD_ADMIN: 'password'
EDXAPP_MYSQL_PASSWORD_MIGRATE: 'password'
EDXAPP_MYSQL_HOST: 'localhost'
EDXAPP_MYSQL_PORT: '3306'
......@@ -57,6 +63,11 @@ EDXAPP_CELERY_PASSWORD: 'celery'
EDXAPP_PLATFORM_NAME: 'edX'
EDXAPP_CAS_SERVER_URL: ''
EDXAPP_CAS_EXTRA_LOGIN_PARAMS: ''
EDXAPP_CAS_ATTRIBUTE_CALLBACK: ''
EDXAPP_CAS_ATTRIBUTE_PACKAGE: ''
EDXAPP_FEATURES:
AUTH_USE_OPENID_PROVIDER: true
CERTIFICATES_ENABLED: true
......@@ -104,6 +115,7 @@ EDXAPP_BULK_EMAIL_DEFAULT_FROM_EMAIL: 'no-reply@example.com'
EDXAPP_ENV_EXTRA: {}
EDXAPP_AUTH_EXTRA: {}
EDXAPP_MKTG_URL_LINK_MAP: {}
EDXAPP_MKTG_URLS: {}
# This sets the url for static files
# Override this var to use a CDN
# Example: xxxxx.cloudfront.net/static/
......@@ -123,21 +135,39 @@ EDXAPP_PYTHON_SANDBOX: false
# it puts the sandbox in 'complain' mode, for reporting but not enforcement
EDXAPP_SANDBOX_ENFORCE: true
# Supply authorized keys used for remote management via the automated
# role, see meta/main.yml. Ensure you know what this does before
# enabling. The boolean flag determines whether the role is included.
# This is done to make it possible to disable remote access easily by
# setting the flag to true and providing an empty array.
EDXAPP_INCLUDE_AUTOMATOR_ROLE: false
# Supply authorized keys used for remote management via the user
# role.
EDXAPP_AUTOMATOR_NAME: automator
EDXAPP_AUTOMATOR_AUTHORIZED_KEYS: []
# These are the commands allowed by the automator role.
# The --settings parameter must be set at the end so that
# it is caught by the glob.
# Example: sudo -u www-data /edx/bin/python.edxapp /edx/bin/manage.edxapp lms migrate --settings=aws
EDXAPP_AUTOMATOR_SUDO_CMDS:
- "ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ COMMON_BIN_DIR }}/python.edxapp {{ COMMON_BIN_DIR }}/manage.edxapp lms migrate *"
- "ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ COMMON_BIN_DIR }}/python.edxapp {{ COMMON_BIN_DIR }}/manage.edxapp cms migrate *"
- "ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ COMMON_BIN_DIR }}/python.edxapp {{ COMMON_BIN_DIR }}/manage.edxapp lms syncdb *"
- "ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ COMMON_BIN_DIR }}/python.edxapp {{ COMMON_BIN_DIR }}/manage.edxapp cms syncdb *"
- "ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ COMMON_BIN_DIR }}/python.edxapp {{ COMMON_BIN_DIR }}/manage.edxapp lms seed_permissions_roles *"
- "ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ COMMON_BIN_DIR }}/python.edxapp {{ COMMON_BIN_DIR }}/manage.edxapp lms set_staff *"
- "ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ COMMON_BIN_DIR }}/python.edxapp {{ COMMON_BIN_DIR }}/manage.edxapp lms transfer_students *"
EDXAPP_USE_GIT_IDENTITY: false
# Example: "{{ secure_dir }}/files/git-identity"
EDXAPP_LOCAL_GIT_IDENTITY: !!null
# Paste the contents of the git identity
# into this var
EDXAPP_GIT_IDENTITY: !!null
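# In a private extra-vars file this could be supplied as a YAML block scalar,
# for example (placeholder key material only):
#   EDXAPP_GIT_IDENTITY: |
#     -----BEGIN RSA PRIVATE KEY-----
#     ...
#     -----END RSA PRIVATE KEY-----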
# Configuration for database migration
EDXAPP_TEST_MIGRATE_DB_NAME: "{{ COMMON_ENVIRONMENT }}_{{ COMMON_DEPLOYMENT }}_test_{{ EDXAPP_MYSQL_DB_NAME }}"
EDXAPP_UPDATE_STATIC_FILES_KEY: false
# Set this to true if you want to install the private pip
# requirements in the edx-platform repo.
# This will use EDXAPP_GIT_IDENTITY; EDXAPP_USE_GIT_IDENTITY
# must be set to true if EDXAPP_INSTALL_PRIVATE_REQUIREMENTS is
# set to true.
EDXAPP_INSTALL_PRIVATE_REQUIREMENTS: false
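# Sketch of the combination described above (placeholders, not defaults):
#   EDXAPP_USE_GIT_IDENTITY: true
#   EDXAPP_GIT_IDENTITY: "<contents of the read-only deploy key>"
#   EDXAPP_INSTALL_PRIVATE_REQUIREMENTS: true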
#-------- Everything below this line is internal to the role ------------
#Use YAML references (& and *) and hash merge <<: to factor out shared settings
......@@ -161,7 +191,7 @@ edxapp_staticfile_dir: "{{ edxapp_data_dir }}/staticfiles"
edxapp_course_data_dir: "{{ edxapp_data_dir }}/data"
edxapp_upload_dir: "{{ edxapp_data_dir }}/uploads"
edxapp_theme_dir: "{{ edxapp_data_dir }}/themes"
edxapp_git_identity: "{{ edxapp_app_dir }}/{{ EDXAPP_LOCAL_GIT_IDENTITY|basename }}"
edxapp_git_identity: "{{ edxapp_app_dir }}/edxapp-git-identity"
edxapp_git_ssh: "/tmp/edxapp_git_ssh.sh"
edxapp_pypi_local_mirror: "http://localhost:{{ devpi_port }}/root/pypi/+simple"
edxapp_workers:
......@@ -322,6 +352,7 @@ generic_env_config: &edxapp_generic_env
FEEDBACK_SUBMISSION_EMAIL: $EDXAPP_FEEDBACK_SUBMISSION_EMAIL
TIME_ZONE: $EDXAPP_TIME_ZONE
MKTG_URL_LINK_MAP: $EDXAPP_MKTG_URL_LINK_MAP
MKTG_URLS: $EDXAPP_MKTG_URLS
# repo root for courses
GITHUB_REPO_ROOT: $edxapp_course_data_dir
CACHES:
......@@ -361,7 +392,9 @@ generic_env_config: &edxapp_generic_env
DEFAULT_FEEDBACK_EMAIL: $EDXAPP_DEFAULT_FEEDBACK_EMAIL
SERVER_EMAIL: $EDXAPP_DEFAULT_SERVER_EMAIL
BULK_EMAIL_DEFAULT_FROM_EMAIL: $EDXAPP_BULK_EMAIL_DEFAULT_FROM_EMAIL
CAS_SERVER_URL: $EDXAPP_CAS_SERVER_URL
CAS_EXTRA_LOGIN_PARAMS: $EDXAPP_CAS_EXTRA_LOGIN_PARAMS
CAS_ATTRIBUTE_CALLBACK: $EDXAPP_CAS_ATTRIBUTE_CALLBACK
lms_auth_config:
<<: *edxapp_generic_auth
......@@ -467,6 +500,7 @@ post_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/post.txt"
base_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/base.txt"
github_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/github.txt"
repo_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/repo.txt"
private_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/edx-private.txt"
sandbox_base_requirements: "{{ edxapp_code_dir }}/requirements/edx-sandbox/base.txt"
sandbox_local_requirements: "{{ edxapp_code_dir }}/requirements/edx-sandbox/local.txt"
......@@ -516,10 +550,3 @@ edxapp_cms_variant: cms
# Worker Settings
worker_django_settings_module: 'aws'
# This array is used by the automator role to provide
# access to a limited set of commands via rbash. The
# commands listed here will be symlinked to ~/bin/ for
# the automator user.
edxapp_automated_rbash_links:
- /usr/bin/sudo
......@@ -5,7 +5,7 @@
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
name="edxapp:{{ item }}"
when: edxapp_installed is defined and celery_worker is not defined and not devstack
when: edxapp_installed is defined and celery_worker is not defined and not disable_edx_services
sudo_user: "{{ supervisor_service_user }}"
with_items: service_variants_enabled
......@@ -15,6 +15,6 @@
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
when: edxapp_installed is defined and celery_worker is defined and not devstack
when: edxapp_installed is defined and celery_worker is defined and not disable_edx_services
with_items: edxapp_workers
sudo_user: "{{ common_web_user }}"
......@@ -6,9 +6,10 @@ dependencies:
rbenv_dir: "{{ edxapp_app_dir }}"
rbenv_ruby_version: "{{ edxapp_ruby_version }}"
- devpi
- role: automated
automated_rbash_links: "{{ edxapp_automated_rbash_links }}"
automated_sudoers_dest: '99-automator-edxapp-server'
automated_sudoers_template: 'roles/edxapp/templates/etc/sudoers.d/99-automator-edxapp-server.j2'
automated_authorized_keys: "{{ EDXAPP_AUTOMATOR_AUTHORIZED_KEYS }}"
when: EDXAPP_INCLUDE_AUTOMATOR_ROLE
- role: user
user_info:
- name: "{{ EDXAPP_AUTOMATOR_NAME }}"
sudo_cmds: "{{ EDXAPP_AUTOMATOR_SUDO_CMDS }}"
type: restricted
authorized_keys: "{{ EDXAPP_AUTOMATOR_AUTHORIZED_KEYS }}"
when: EDXAPP_AUTOMATOR_AUTHORIZED_KEYS|length != 0
......@@ -22,7 +22,7 @@
- name: install read-only ssh key
copy: >
src={{ EDXAPP_LOCAL_GIT_IDENTITY }} dest={{ edxapp_git_identity }}
content="{{ EDXAPP_GIT_IDENTITY }}" dest={{ edxapp_git_identity }}
force=yes owner={{ edxapp_user }} mode=0600
when: EDXAPP_USE_GIT_IDENTITY
......@@ -54,10 +54,6 @@
- "restart edxapp"
- "restart edxapp_workers"
- name: remove read-only ssh key
file: path={{ edxapp_git_identity }} state=absent
when: EDXAPP_USE_GIT_IDENTITY
- name: create checksum for requirements, package.json and Gemfile
shell: >
/usr/bin/md5sum {{ " ".join(edxapp_chksum_req_files) }} 2>/dev/null > /var/tmp/edxapp.req.new
......@@ -186,6 +182,37 @@
- "restart edxapp"
- "restart edxapp_workers"
# Private requirements require an ssh key to install; use the same key as the private key for edx-platform.
# If EDXAPP_INSTALL_PRIVATE_REQUIREMENTS is set to true, EDXAPP_USE_GIT_IDENTITY must also be true.
- name : install python private requirements
# Need to use shell rather than pip so that we can maintain the context of our current working directory; some
# requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
# installs everything into that virtual environment.
shell: >
{{ edxapp_venv_dir }}/bin/pip install -i {{ edxapp_pypi_local_mirror }} --exists-action w --use-mirrors -r {{ item }}
chdir={{ edxapp_code_dir }}
with_items:
- "{{ private_requirements_file }}"
sudo_user: "{{ edxapp_user }}"
environment:
GIT_SSH: "{{ edxapp_git_ssh }}"
when: EDXAPP_INSTALL_PRIVATE_REQUIREMENTS
notify:
- "restart edxapp"
- "restart edxapp_workers"
# If using CAS and you have a function for mapping attributes, install
# the module here. The next few tasks set up the python code sandbox
- name: install CAS attribute module
pip: >
name="{{ EDXAPP_CAS_ATTRIBUTE_PACKAGE }}"
virtualenv="{{edxapp_venv_dir}}"
state=present
extra_args="-i {{ edxapp_pypi_local_mirror }} --exists-action w --use-mirrors"
sudo_user: "{{ edxapp_user }}"
when: EDXAPP_CAS_ATTRIBUTE_PACKAGE|length > 0
notify: "restart edxapp"
# Install the sandbox python modules into {{ edxapp_venv_dir }}
- name : install sandbox requirements into regular venv
......@@ -253,7 +280,7 @@
- edxapp-sandbox
- name: compiling all py files in the edx-platform repo
shell: "{{ edxapp_venv_bin }}/python -m compileall {{ edxapp_code_dir }}"
shell: "{{ edxapp_venv_bin }}/python -m compileall -x .git/.* {{ edxapp_code_dir }}"
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
......@@ -300,7 +327,7 @@
register: supervisor_update
sudo_user: "{{ supervisor_service_user }}"
changed_when: supervisor_update.stdout != ""
when: not devstack
when: not disable_edx_services
- name: ensure edxapp has started
supervisorctl_local: >
......@@ -309,7 +336,7 @@
config={{ supervisor_cfg }}
name="edxapp:{{ item }}"
sudo_user: "{{ supervisor_service_user }}"
when: celery_worker is not defined and not devstack
when: celery_worker is not defined and not disable_edx_services
with_items: service_variants_enabled
- name: ensure edxapp_workers has started
......@@ -318,18 +345,30 @@
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=started
when: celery_worker is defined and not devstack
when: celery_worker is defined and not disable_edx_services
with_items: edxapp_workers
sudo_user: "{{ supervisor_service_user }}"
- name: create a symlink for venv python
- name: create symlinks from the venv bin dir
file: >
src="{{ edxapp_venv_bin }}/{{ item }}"
dest={{ COMMON_BIN_DIR }}/{{ item }}.edxapp
dest={{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.edxapp
state=link
with_items:
- python
- pip
- django-admin.py
- name: create symlinks from the repo dir
file: >
src="{{ edxapp_code_dir }}/{{ item }}"
dest={{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.edxapp
state=link
with_items:
- manage.py
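# With the item.split('.')[0] naming above, these two tasks produce links such
# as python.edxapp, pip.edxapp, django-admin.edxapp and manage.edxapp in
# COMMON_BIN_DIR (/edx/bin with the common role defaults).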
- name: remove read-only ssh key
file: path={{ edxapp_git_identity }} state=absent
when: EDXAPP_USE_GIT_IDENTITY
- set_fact: edxapp_installed=true
......@@ -2,14 +2,6 @@
# - group_vars/all
# - common/tasks/main.yml
---
- name: Install logrotate configuration for tracking file
template: dest=/etc/logrotate.d/tracking.log src=edx_logrotate_tracking_log.j2 owner=root group=root mode=644
notify:
- "restart edxapp"
- "restart edxapp_workers"
- name: create application user
user: >
name="{{ edxapp_user }}" home="{{ edxapp_app_dir }}"
......
......@@ -3,6 +3,7 @@
src={{ item }}.env.json.j2
dest={{ edxapp_app_dir }}/{{ item }}.env.json
sudo_user: "{{ edxapp_user }}"
tags: edxapp_cfg
with_items: service_variants_enabled
notify:
- "restart edxapp"
......@@ -13,6 +14,7 @@
src={{ item }}.auth.json.j2
dest={{ edxapp_app_dir }}/{{ item }}.auth.json
sudo_user: "{{ edxapp_user }}"
tags: edxapp_cfg
notify:
- "restart edxapp"
- "restart edxapp_workers"
......@@ -22,26 +24,55 @@
- name: "writing {{ item }} supervisor script"
template: >
src={{ item }}.conf.j2 dest={{ supervisor_cfg_dir }}/{{ item }}.conf
src={{ item }}.conf.j2 dest={{ supervisor_available_dir }}/{{ item }}.conf
owner={{ supervisor_user }}
group={{ supervisor_user }}
with_items: service_variants_enabled
when: celery_worker is not defined and not devstack
sudo_user: "{{ supervisor_user }}"
- name: writing edxapp supervisor script
template: >
src=edxapp.conf.j2 dest={{ supervisor_cfg_dir }}/edxapp.conf
src=edxapp.conf.j2 dest={{ supervisor_available_dir }}/edxapp.conf
owner={{ supervisor_user }}
when: celery_worker is not defined and not devstack
group={{ supervisor_user }}
sudo_user: "{{ supervisor_user }}"
# write the supervisor script for celery workers
- name: writing celery worker supervisor script
template: >
src=workers.conf.j2 dest={{ supervisor_cfg_dir }}/workers.conf
src=workers.conf.j2 dest={{ supervisor_available_dir }}/workers.conf
owner={{ supervisor_user }}
when: celery_worker is defined and not devstack
group={{ supervisor_user }}
sudo_user: "{{ supervisor_user }}"
# Enable the supervisor jobs
- name: "enable {{ item }} supervisor script"
file: >
src={{ supervisor_available_dir }}/{{ item }}.conf
dest={{ supervisor_cfg_dir }}/{{ item }}.conf
state=link
force=yes
with_items: service_variants_enabled
when: celery_worker is not defined and not disable_edx_services
sudo_user: "{{ supervisor_user }}"
- name: "enable edxapp supervisor script"
file: >
src={{ supervisor_available_dir }}/edxapp.conf
dest={{ supervisor_cfg_dir }}/edxapp.conf
state=link
force=yes
when: celery_worker is not defined and not disable_edx_services
sudo_user: "{{ supervisor_user }}"
- name: "enable celery worker supervisor script"
file: >
src={{ supervisor_available_dir }}/workers.conf
dest={{ supervisor_cfg_dir }}/workers.conf
state=link
force=yes
when: celery_worker is defined and not disable_edx_services
sudo_user: "{{ supervisor_user }}"
# Fake syncdb with migrate, only when fake_migrations is defined
......@@ -66,7 +97,8 @@
{{ edxapp_venv_bin}}/python manage.py lms syncdb --migrate --noinput --settings=aws_migrate
when: fake_migrations is not defined and migrate_db is defined and migrate_db|lower == "yes"
environment:
DB_MIGRATION_PASS: "{{ EDXAPP_MYSQL_PASSWORD }}"
DB_MIGRATION_USER: "{{ EDXAPP_MYSQL_USER_MIGRATE }}"
DB_MIGRATION_PASS: "{{ EDXAPP_MYSQL_PASSWORD_MIGRATE }}"
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
......@@ -95,6 +127,9 @@
chdir={{ edxapp_code_dir }}
{{ edxapp_venv_bin}}/python manage.py lms migrate --noinput --settings=aws_migrate
when: fake_migrations is not defined and migrate_only is defined and migrate_only|lower == "yes"
environment:
DB_MIGRATION_USER: "{{ EDXAPP_MYSQL_USER_MIGRATE }}"
DB_MIGRATION_PASS: "{{ EDXAPP_MYSQL_PASSWORD_MIGRATE }}"
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
......
{% do cms_env_config.update(EDXAPP_ENV_EXTRA) %}
{% if EDXAPP_UPDATE_STATIC_FILES_KEY %}
{%- do cms_env_config['CACHES']['staticfiles'].update({'KEY_PREFIX': edxapp_dynamic_cache_key}) %}
{% endif %}
{{ cms_env_config | to_nice_json }}
automator ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ edxapp_venv_dir }}/bin/django-admin.py migrate *
automator ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ edxapp_venv_dir }}/bin/django-admin.py seed_permissions_roles *
automator ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ edxapp_venv_dir }}/bin/django-admin.py set_staff *
automator ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ edxapp_venv_dir }}/bin/django-admin.py transfer_students *
{% do lms_preview_env_config.update(EDXAPP_ENV_EXTRA) %}
{% if EDXAPP_UPDATE_STATIC_FILES_KEY %}
{%- do lms_preview_env_config['CACHES']['staticfiles'].update({'KEY_PREFIX': edxapp_dynamic_cache_key}) %}
{% endif %}
{{ lms_preview_env_config | to_nice_json }}
{% do lms_env_config.update(EDXAPP_ENV_EXTRA) %}
{% if EDXAPP_UPDATE_STATIC_FILES_KEY %}
{%- do lms_env_config['CACHES']['staticfiles'].update({'KEY_PREFIX': edxapp_dynamic_cache_key}) %}
{% endif %}
{{ lms_env_config | to_nice_json }}
......@@ -13,6 +13,12 @@
- name: install packages needed for single server
apt: pkg={{','.join(edxlocal_debian_pkgs)}} install_recommends=yes state=present
- name: setup the migration db user
mysql_user: >
name={{ EDXAPP_MYSQL_USER_MIGRATE }}
password={{ EDXAPP_MYSQL_PASSWORD_MIGRATE}}
priv='{{EDXAPP_MYSQL_DB_NAME}}.*:ALL'
- name: setup the edxapp db user
mysql_user: >
name={{ EDXAPP_MYSQL_USER }}
......@@ -31,14 +37,14 @@
name={{ XQUEUE_MYSQL_USER }}
password={{ XQUEUE_MYSQL_PASSWORD }}
priv='{{XQUEUE_MYSQL_DB_NAME}}.*:ALL'
when: XQUEUE_MYSQL_USER is defined and not devstack
when: XQUEUE_MYSQL_USER is defined and not disable_edx_services
- name: create a database for xqueue
mysql_db: >
db=xqueue
state=present
encoding=utf8
when: XQUEUE_MYSQL_USER is defined and not devstack
when: XQUEUE_MYSQL_USER is defined and not disable_edx_services
- name: setup the ora db user
mysql_user: >
......@@ -58,7 +64,7 @@
name={{ DISCERN_MYSQL_USER }}
password={{ DISCERN_MYSQL_PASSWORD }}
priv='{{DISCERN_MYSQL_DB_NAME}}.*:ALL'
when: DISCERN_MYSQL_USER is defined and not devstack
when: DISCERN_MYSQL_USER is defined and not disable_edx_services
- name: create a database for discern
......@@ -66,7 +72,7 @@
db=discern
state=present
encoding=utf8
when: DISCERN_MYSQL_USER is defined and not devstack
when: DISCERN_MYSQL_USER is defined and not disable_edx_services
- name: install memcached
......
......@@ -12,5 +12,5 @@ elasticsearch_group: "elasticsearch"
#
# Defaults for a single server installation.
ELASTICSEARCH_CLUSTERED: true
ELASTICSEARCH_HEAP_SIZE: "512m"
\ No newline at end of file
ELASTICSEARCH_CLUSTERED: false
ELASTICSEARCH_HEAP_SIZE: "512m"
......@@ -83,4 +83,8 @@
when: bigdesk.stat.isdir is not defined
- name: Ensure elasticsearch is enabled and started
service: name=elasticsearch state=restarted enabled=yes
\ No newline at end of file
service: name=elasticsearch state=started enabled=yes
- name: Restart elastic when there has been an upgrade
service: name=elasticsearch state=restarted enabled=yes
when: elasticsearch_reinstall.changed
......@@ -18,7 +18,7 @@ FORUM_MONGO_HOSTS:
FORUM_MONGO_TAGS: !!null
FORUM_MONGO_PORT: "27017"
FORUM_MONGO_DATABASE: "cs_comments_service"
FORUM_MONGO_URL: "mongodb://{{ FORUM_MONGO_USER }}:{{ FORUM_MONGO_PASSWORD }}@{%- for host in FORUM_MONGO_HOSTS -%}{{host}}:{{ FORUM_MONGO_PORT }}{%- if not loop.last -%},{%- endif -%}{%- endfor -%}/{{ FORUM_MONGO_DATABASE }}{%- if FORUM_MONGO_TAGS -%}?{{ FORUM_MONGO_TAGS }}{%- endif -%}"
FORUM_MONGO_URL: "mongodb://{{ FORUM_MONGO_USER }}:{{ FORUM_MONGO_PASSWORD }}@{%- for host in FORUM_MONGO_HOSTS -%}{{host}}:{{ FORUM_MONGO_PORT }}{%- if not loop.last -%},{%- endif -%}{%- endfor -%}/{{ FORUM_MONGO_DATABASE }}{%- if FORUM_MONGO_TAGS -%}?tags={{ FORUM_MONGO_TAGS }}{%- endif -%}"
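# Rendered with the defaults above, the URL looks roughly like (placeholder
# credentials):
#   mongodb://<user>:<password>@host1:27017,host2:27017/cs_comments_service?tags=<FORUM_MONGO_TAGS>
# where the "?tags=..." suffix is only added when FORUM_MONGO_TAGS is set.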
FORUM_SINATRA_ENV: "development"
FORUM_RACK_ENV: "development"
FORUM_NGINX_PORT: "18080"
......@@ -29,6 +29,9 @@ FORUM_ELASTICSEARCH_URL: "http://{{ FORUM_ELASTICSEARCH_HOST }}:{{ FORUM_ELASTIC
FORUM_NEW_RELIC_LICENSE_KEY: "new-relic-license-key"
FORUM_NEW_RELIC_APP_NAME: "forum-newrelic-app"
FORUM_WORKER_PROCESSES: "4"
FORUM_LISTEN_HOST: "0.0.0.0"
FORUM_LISTEN_PORT: "4567"
FORUM_USE_TCP: false
forum_environment:
RBENV_ROOT: "{{ forum_rbenv_root }}"
......@@ -45,20 +48,13 @@ forum_environment:
NEW_RELIC_LICENSE_KEY: "{{ FORUM_NEW_RELIC_LICENSE_KEY }}"
WORKER_PROCESSES: "{{ FORUM_WORKER_PROCESSES }}"
DATA_DIR: "{{ forum_data_dir }}"
FORUM_LISTEN_HOST: "{{ FORUM_LISTEN_HOST }}"
FORUM_LISTEN_PORT: "{{ FORUM_LISTEN_PORT }}"
forum_user: "forum"
forum_ruby_version: "1.9.3-p448"
forum_source_repo: "https://github.com/edx/cs_comments_service.git"
# Currently we are installing a branch of the comments service
# that configures unicorn to listen on a unix socket and get the
# worker count configuration from the environment. We are not
# merging to master of the comments service yet as this will have
# some incompatibilities with our Heroku deployments.
#
# https://github.com/edx/cs_comments_service/pull/83
#
forum_version: "e0d/unicorn-config"
forum_unicorn_port: "4567"
forum_version: "master"
#
# test config
......
......@@ -5,4 +5,4 @@
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
when: forum_installed is defined and not devstack
when: forum_installed is defined and not disable_edx_services
......@@ -2,11 +2,23 @@
- name: create the supervisor config
template: >
src=forum.conf.j2 dest={{ supervisor_cfg_dir }}/forum.conf
src=forum.conf.j2 dest={{ supervisor_available_dir }}/forum.conf
owner={{ supervisor_user }}
group={{ supervisor_user }}
mode=0644
sudo_user: "{{ supervisor_user }}"
when: not devstack
register: forum_supervisor
- name: enable the supervisor config
file: >
src={{ supervisor_available_dir }}/forum.conf
dest={{ supervisor_cfg_dir }}/forum.conf
owner={{ supervisor_user }}
state=link
force=yes
mode=0644
sudo_user: "{{ supervisor_user }}"
when: not disable_edx_services
register: forum_supervisor
- name: create the supervisor wrapper
......@@ -15,7 +27,6 @@
dest={{ forum_supervisor_wrapper }}
mode=0755
sudo_user: "{{ forum_user }}"
when: not devstack
notify: restart the forum service
- name: git checkout forum repo into {{ forum_code_dir }}
......@@ -41,7 +52,7 @@
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
changed_when: supervisor_update.stdout != ""
when: not devstack
when: not disable_edx_services
- name: ensure forum is started
supervisorctl_local: >
......@@ -49,7 +60,7 @@
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=started
when: not devstack
when: not disable_edx_services
- include: test.yml tags=deploy
......
......@@ -3,9 +3,9 @@
- name: test that the required services are listening
wait_for: port={{ item.port }} host={{ item.host }} timeout=30
with_items: forum_services
when: not devstack
when: not disable_edx_services
- name: test that mongo replica set members are listening
wait_for: port={{ FORUM_MONGO_PORT }} host={{ item }} timeout=30
with_items: FORUM_MONGO_HOSTS
when: not devstack
\ No newline at end of file
when: not disable_edx_services
......@@ -5,6 +5,8 @@ cd {{ forum_code_dir }}
{% if devstack %}
{{ forum_rbenv_shims }}/ruby app.rb
{% elif FORUM_USE_TCP %}
{{ forum_gem_bin }}/unicorn -c config/unicorn_tcp.rb
{% else %}
{{ forum_gem_bin }}/unicorn -c config/unicorn.rb
{% endif %}