Commit 6f83b1ba by Joe Blaylock

Merge remote-tracking branch 'origin/release' into edx-west/release

Conflicts:
	playbooks/roles/edxapp/defaults/main.yml
parents f68a2efd 6d5c5677
John Jarvis <jarv@edx.org>
Sef Kloninger <sef@kloninger.com>
Joe Blaylock <jrbl@jrbl.org>
Joe Blaylock <jrbl@stanford.edu>
Vik Paruchuri <vik@edx.org>
Jason Bau <jbau@stanford.edu>
Ed Zarecor <ed@edx.org>
John Kern <kern3020@gmail.com>
Will Daly <will@edx.org>
Bethany LaPenta <lapentab@mit.edu>
Jay Zoldak <zoldak@edx.org>
Jay Zoldak <jzoldak@edx.org>
Philippe Chiu <philippe.chiu@gmail.com>
Marko Seric <marko.seric@math.uzh.ch>
Feanil Patel <feanil@edx.org>
Calen Pennington <calen.pennington@gmail.com>
David Baumgold <david@davidbaumgold.com>
Kevin Luo <kevluo@edx.org>
Carson Gee <x@carsongee.com>
Xavier Antoviaque <xavier@antoviaque.org>
James Tauber <jtauber@jtauber.com>
......@@ -5,8 +5,8 @@
**This project is currently in alpha**
The goal of the edx/configuration project is to provide a simple, but
flexible, way for anyone to stand up an instance of the edX platform
that is fully configured and ready-to-go.
flexible, way for anyone to stand up an instance of Open edX that is
fully configured and ready-to-go.
Building the platform takes place in two phases:
......
Jinja2==2.6
PyYAML==3.10
ansible==1.3.1
argparse==1.2.1
boto==2.10.0
paramiko==1.10.1
pycrypto==2.6
wsgiref==0.1.2
......@@ -1602,6 +1602,12 @@
},
{
"IpProtocol":"tcp",
"FromPort":"9997",
"ToPort":"9997",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":"10016",
"ToPort":"10016",
"CidrIp":"0.0.0.0/0"
......@@ -1636,6 +1642,12 @@
},
{
"IpProtocol":"tcp",
"FromPort":"9997",
"ToPort":"9997",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":"9418",
"ToPort":"9418",
"CidrIp":"0.0.0.0/0"
......
......@@ -6,3 +6,4 @@
jinja2_extensions=jinja2.ext.do
hash_behaviour=merge
host_key_checking = False
# Ansible EC2 external inventory script settings
#
[ec2]
regions=all
destination_variable=public_dns_name
vpc_destination_variable=private_ip_address
cache_path=/tmp
cache_max_age=300
# to talk to a private eucalyptus instance uncomment these lines
# and edit eucalyptus_host to be the host name of your cloud controller
#eucalyptus = True
#eucalyptus_host = clc.cloud.domain.org
# AWS regions to make calls to. Set this to 'all' to make requests to all regions
# in AWS and merge the results together. Alternatively, set this to a comma
# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2'
regions = all
regions_exclude = us-gov-west-1
# When generating inventory, Ansible needs to know how to address a server.
# Each EC2 instance has a lot of variables associated with it. Here is the list:
# http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance
# Below are 2 variables that are used as the address of a server:
# - destination_variable
# - vpc_destination_variable
# This is the normal destination variable to use. If you are running Ansible
# from outside EC2, then 'public_dns_name' makes the most sense. If you are
# running Ansible from within EC2, then perhaps you want to use the internal
# address, and should set this to 'private_dns_name'.
destination_variable = public_dns_name
# For servers inside a VPC, using DNS names may not make sense. When an instance
# has 'subnet_id' set, this variable is used. If the subnet is public, setting
# this to 'ip_address' will return the public IP address. For instances in a
# private subnet, this should be set to 'private_ip_address', and Ansible must
# be run from within EC2.
vpc_destination_variable = private_ip_address
# To tag instances on EC2 with the resource records that point to them from
# Route53, set 'route53' to True.
route53 = False
# Additionally, you can specify the list of zones to exclude looking up in
# 'route53_excluded_zones' as a comma-separated list.
# route53_excluded_zones = samplezone1.com, samplezone2.com
# API calls to EC2 are slow. For this reason, we cache the results of an API
# call. Set this to the path you want cache files to be written to. Two files
# will be written to this directory:
# - ansible-ec2.cache
# - ansible-ec2.index
cache_path = /tmp
# The number of seconds a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
cache_max_age = 300
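For readers skimming the settings above, here is a minimal, illustrative Python sketch of the two behaviors those comments describe: how a destination address is chosen for an instance, and when the cache file is considered stale. The function and variable names are illustrative, not the inventory script's exact internals.

import os
import time


def choose_destination(instance, destination_variable='public_dns_name',
                       vpc_destination_variable='private_ip_address'):
    # Instances launched into a VPC carry a subnet_id; for those, use the
    # VPC-specific variable, otherwise fall back to the normal one.
    if getattr(instance, 'subnet_id', None):
        return getattr(instance, vpc_destination_variable)
    return getattr(instance, destination_variable)


def cache_is_valid(cache_path='/tmp/ansible-ec2.cache', cache_max_age=300):
    # The cache is only trusted while it is younger than cache_max_age seconds;
    # otherwise a fresh API call is made and the cache file is rewritten.
    if not os.path.isfile(cache_path):
        return False
    return (time.time() - os.path.getmtime(cache_path)) < cache_max_age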
#!/usr/bin/env python
import sys
import os
'''
EC2 external inventory script
=================================
......@@ -118,6 +115,8 @@ import re
from time import time
import boto
from boto import ec2
from boto import rds
from boto import route53
import ConfigParser
try:
......@@ -191,11 +190,13 @@ class Ec2Inventory(object):
# Regions
self.regions = []
configRegions = config.get('ec2', 'regions')
configRegions_exclude = config.get('ec2', 'regions_exclude')
if (configRegions == 'all'):
if self.eucalyptus_host:
self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)
else:
for regionInfo in ec2.regions():
if regionInfo.name not in configRegions_exclude:
self.regions.append(regionInfo.name)
else:
self.regions = configRegions.split(",")
......@@ -204,6 +205,13 @@ class Ec2Inventory(object):
self.destination_variable = config.get('ec2', 'destination_variable')
self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
# Route53
self.route53_enabled = config.getboolean('ec2', 'route53')
self.route53_excluded_zones = []
if config.has_option('ec2', 'route53_excluded_zones'):
self.route53_excluded_zones.extend(
config.get('ec2', 'route53_excluded_zones', '').split(','))
# Cache related
cache_path = config.get('ec2', 'cache_path')
self.cache_path_cache = cache_path + "/ansible-ec2.cache"
......@@ -232,8 +240,12 @@ class Ec2Inventory(object):
def do_api_calls_update_cache(self):
''' Do API calls to each region, and save data in cache files '''
if self.route53_enabled:
self.get_route53_records()
for region in self.regions:
self.get_instances_by_region(region)
self.get_rds_instances_by_region(region)
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
......@@ -250,6 +262,11 @@ class Ec2Inventory(object):
else:
conn = ec2.connect_to_region(region)
# connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
if conn is None:
print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
sys.exit(1)
reservations = conn.get_all_instances()
for reservation in reservations:
for instance in reservation.instances:
......@@ -261,6 +278,20 @@ class Ec2Inventory(object):
print e
sys.exit(1)
def get_rds_instances_by_region(self, region):
''' Makes an AWS API call to get the list of RDS instances in a particular
region '''
try:
conn = rds.connect_to_region(region)
if conn:
instances = conn.get_all_dbinstances()
for instance in instances:
self.add_rds_instance(instance, region)
except boto.exception.BotoServerError as e:
print "Looks like AWS RDS is down: "
print e
sys.exit(1)
def get_instance(self, region, instance_id):
''' Gets details about a specific instance '''
......@@ -270,6 +301,11 @@ class Ec2Inventory(object):
else:
conn = ec2.connect_to_region(region)
# connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
if conn is None:
print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
sys.exit(1)
reservations = conn.get_all_instances([instance_id])
for reservation in reservations:
for instance in reservation.instances:
......@@ -328,6 +364,111 @@ class Ec2Inventory(object):
key = self.to_safe("tag_" + k + "=" + v)
self.push(self.inventory, key, dest)
# Inventory: Group by Route53 domain names if enabled
if self.route53_enabled:
route53_names = self.get_instance_route53_names(instance)
for name in route53_names:
self.push(self.inventory, name, dest)
def add_rds_instance(self, instance, region):
''' Adds an RDS instance to the inventory and index, as long as it is
addressable '''
# Only want available instances
if instance.status != 'available':
return
# Select the best destination address
#if instance.subnet_id:
#dest = getattr(instance, self.vpc_destination_variable)
#else:
#dest = getattr(instance, self.destination_variable)
dest = instance.endpoint[0]
if not dest:
# Skip instances we cannot address (e.g. private VPC subnet)
return
# Add to index
self.index[dest] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
self.inventory[instance.id] = [dest]
# Inventory: Group by region
self.push(self.inventory, region, dest)
# Inventory: Group by availability zone
self.push(self.inventory, instance.availability_zone, dest)
# Inventory: Group by instance type
self.push(self.inventory, self.to_safe('type_' + instance.instance_class), dest)
# Inventory: Group by security group
try:
if instance.security_group:
key = self.to_safe("security_group_" + instance.security_group.name)
self.push(self.inventory, key, dest)
except AttributeError:
print 'Package boto seems a bit older.'
print 'Please upgrade boto >= 2.3.0.'
sys.exit(1)
# Inventory: Group by engine
self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest)
# Inventory: Group by parameter group
self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest)
def get_route53_records(self):
''' Get and store the map of resource records to domain names that
point to them. '''
r53_conn = route53.Route53Connection()
all_zones = r53_conn.get_zones()
route53_zones = [ zone for zone in all_zones if zone.name[:-1]
not in self.route53_excluded_zones ]
self.route53_records = {}
for zone in route53_zones:
rrsets = r53_conn.get_all_rrsets(zone.id)
for record_set in rrsets:
record_name = record_set.name
if record_name.endswith('.'):
record_name = record_name[:-1]
for resource in record_set.resource_records:
self.route53_records.setdefault(resource, set())
self.route53_records[resource].add(record_name)
def get_instance_route53_names(self, instance):
''' Check if an instance is referenced in the records we have from
Route53. If it is, return the list of domain names pointing to said
instance. If nothing points to it, return an empty list. '''
instance_attributes = [ 'public_dns_name', 'private_dns_name',
'ip_address', 'private_ip_address' ]
name_list = set()
for attrib in instance_attributes:
try:
value = getattr(instance, attrib)
except AttributeError:
continue
if value in self.route53_records:
name_list.update(self.route53_records[value])
return list(name_list)
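# Illustrative only: a hypothetical fragment of the inventory dict that the
# grouping calls above produce for a single instance when Route53 support is
# enabled. Every group name maps to a list of destination addresses; the
# instance id, tag, and zone below are made up.
#
# {
#     "i-0badcafe":              ["ec2-203-0-113-10.compute-1.amazonaws.com"],
#     "us-east-1":               ["ec2-203-0-113-10.compute-1.amazonaws.com"],
#     "us-east-1a":              ["ec2-203-0-113-10.compute-1.amazonaws.com"],
#     "type_m1_small":           ["ec2-203-0-113-10.compute-1.amazonaws.com"],
#     "tag_Name_app1_stage":     ["ec2-203-0-113-10.compute-1.amazonaws.com"],
#     "app1.stage.example.com":  ["ec2-203-0-113-10.compute-1.amazonaws.com"]
# }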
def get_host_info(self):
''' Get variables about a specific host '''
......@@ -387,7 +528,7 @@ class Ec2Inventory(object):
the dict '''
if key in my_dict:
my_dict[key].append(element)
my_dict[key].append(element);
else:
my_dict[key] = [element]
......@@ -438,4 +579,3 @@ class Ec2Inventory(object):
# Run the script
Ec2Inventory()
- name: Deploy edxapp
hosts: all
sudo: True
gather_facts: True
roles:
- edxapp
- name: Deploy forum
hosts: all
sudo: True
gather_facts: True
roles:
- forum
- name: Deploy ora
hosts: all
sudo: True
gather_facts: True
roles:
- ora
- name: Deploy xqueue
hosts: all
sudo: True
gather_facts: True
roles:
- xqueue
- name: Deploy xserver
hosts: all
sudo: True
gather_facts: True
roles:
- xserver
- name: Configure instance(s)
hosts: all
sudo: True
gather_facts: True
serial: 10
vars:
migrate_db: "yes"
openid_workaround: True
roles:
- ansible_debug
- common
- role: nginx
nginx_sites:
- cms
- lms
- lms-preview
- ora
- xqueue
- xserver
#- discern
- edxlocal
- edxapp
- { role: 'rabbitmq', rabbitmq_ip: '127.0.0.1' }
- { role: 'edxapp', celery_worker: True }
- oraclejdk
- elasticsearch
- role: rbenv
rbenv_user: "{{ forum_user }}"
rbenv_user_home: "{{ forum_home }}"
rbenv_ruby_version: "{{ forum_ruby_version }}"
- forum
- role: virtualenv
virtualenv_user: "{{ xqueue_user }}"
virtualenv_user_home: "{{ xqueue_user_home }}"
virtualenv_name: "{{ xqueue_user }}"
- { role: "xqueue", update_users: True }
- xserver
- ora
#- discern
---
- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_bastion
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/dev2.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_edxapp
sudo: True
vars_files:
......@@ -8,10 +16,14 @@
roles:
- common
- datadog
- nginx
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- role: 'edxapp'
lms_nginx_port: 80
cms_nginx_port: 80
EDXAPP_LMS_NGINX_PORT: 80
EDXAPP_CMS_NGINX_PORT: 80
edxapp_lms_env: 'lms.envs.load_test'
edx_platform_commit: 'sarina/install-datadog'
- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_worker
......@@ -23,7 +35,11 @@
roles:
- common
- datadog
- nginx
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
celery_worker: True
......
......@@ -8,12 +8,15 @@
roles:
- common
- datadog
- nginx
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- role: 'edxapp'
lms_nginx_port: 80
cms_nginx_port: 80
edxapp_lms_env: 'lms.envs.load_test'
edx_platform_commit: 'master'
edx_platform_commit: 'release'
- splunkforwarder
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_group_worker
sudo: True
vars_files:
......@@ -23,11 +26,16 @@
roles:
- common
- datadog
- nginx
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
celery_worker: True
edx_platform_commit: 'master'
edx_platform_commit: 'release'
- splunkforwarder
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_group_xserver
sudo: True
vars_files:
......@@ -35,8 +43,11 @@
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- nginx
- role: nginx
nginx_sites:
- xserver
- xserver
- splunkforwarder
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_group_rabbitmq
serial: 1
sudo: True
......@@ -46,6 +57,7 @@
roles:
- common
- rabbitmq
- splunkforwarder
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_group_xqueue
sudo: True
vars_files:
......@@ -53,5 +65,8 @@
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- nginx
- role: nginx
nginx_sites:
- xqueue
- xqueue
- splunkforwarder
# ansible-playbook --limit tag_Name_mirror edx_mirror.yml --user ubuntu -i ec2.py
- name: Configure instance(s)
hosts: all
sudo: True
gather_facts: False
roles:
- common
- role: nginx
nginx_sites:
- devpi
- gh_mirror
tags: ['r_nginx']
- role: supervisor
supervisor_servers:
- devpi
- role: devpi
tags: ['r_devpi']
- role: gh_mirror
tags: ['r_gh_mirror']
- name: Create ec2 instance
hosts: localhost
connection: local
gather_facts: False
pre_tasks:
roles:
- role: launch_ec2
keypair: "{{ keypair }}"
instance_type: "{{ instance_type }}"
security_group: "{{ security_group }}"
ami_image: "{{ ami }}"
region: "{{ region }}"
instance_tags: "{{ instance_tags }}"
root_ebs_size: "{{ root_ebs_size }}"
dns_name: "{{ dns_name }}"
dns_zone: "{{ dns_zone }}"
terminate_instance: true
- name: Configure instance(s)
hosts: launched
sudo: True
gather_facts: True
roles:
# gh_users hash must be passed
# in as a -e variable
- gh_users
......@@ -13,8 +13,13 @@
mysql5_workaround: True
roles:
- common
- nginx
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- edxlocal
- mongo
- edxapp
- rabbitmq
- oraclejdk
......
../inventory.ini
\ No newline at end of file
../library
\ No newline at end of file
......@@ -32,7 +32,12 @@
- "{{ secure_dir }}/vars/edxapp_prod_users.yml"
roles:
- common
- nginx
- {'role': 'edxapp', 'openid_workaround': true}
- role: nginx
nginx_conf: true
nginx_sites:
- lms
- cms
- lms-preview
- {'role': 'edxapp', 'openid_workaround': true, 'template_subdir': 'cme'}
# run this role last
# - in_production
[ec2]
regions=us-west-1
regions_exclude = us-gov-west-1
destination_variable=public_dns_name
vpc_destination_variable=private_dns_name
cache_path=/tmp
cache_max_age=300
route53=False
......@@ -15,7 +15,11 @@
state: 'absent'
roles:
- common
- nginx
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- edxapp
- ruby
post_tasks:
......
......@@ -26,7 +26,11 @@
- "{{ secure_dir }}/vars/shib_prod_vars.yml"
roles:
- common
- nginx
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- edxapp
- apache
- shibboleth
......
# this gets all running prod webservers
#- hosts: tag_environment_prod:&tag_function_ora
# or we can get subsets of them by name
- hosts: ~tag_Name_ora(10|11)_prod
#- hosts: ~tag_Name_ora(10|11)_prod
- hosts: ~tag_Name_ora10_prod
#- hosts: ~tag_Name_ora11_prod
#- hosts: security_group_edx-prod-EdxappServerSecurityGroup-NSKCQTMZIPQB
sudo: True
vars:
......@@ -16,5 +18,7 @@
- "{{ secure_dir }}/vars/edxapp_prod_users.yml"
roles:
- common
- nginx
- role: nginx
nginx_sites:
- ora
- ora
......@@ -15,5 +15,7 @@
- "{{ secure_dir }}/vars/edxapp_prod_users.yml"
roles:
- common
- nginx
- role: nginx
nginx_sites:
- xqueue
- xqueue
......@@ -14,10 +14,14 @@
- "{{ secure_dir }}/vars/edxapp_stage_vars.yml"
- "{{ secure_dir }}/vars/users.yml"
- "{{ secure_dir }}/vars/edxapp_stage_users.yml"
#- "{{ secure_dir }}/vars/shib_stage_vars.yml"
- "{{ secure_dir }}/vars/shib_stage_vars.yml"
roles:
- common
- nginx
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- edxapp
#- apache
#- shibboleth
- apache
- shibboleth
- hosts: localhost
#- hosts: tag_Name_app1_stage
vars:
migrate_db: "no"
not_prod: true
secure_dir: ../../../edx-secret/ansible
local_dir: ../../../edx-secret/ansible/local
vars_files:
- "{{ secure_dir }}/vars/edxapp_stage_vars.yml"
- "{{ secure_dir }}/vars/users.yml"
- "{{ secure_dir }}/vars/edxapp_stage_users.yml"
#- "{{ secure_dir }}/vars/shib_stage_vars.yml"
roles:
- common
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- edxapp
- ansible_debug
#- apache
#- shibboleth
# run the notifier on the first util machine only
- hosts: ~tag_Name_util10_stage
sudo: True
vars:
secure_dir: '../../../configuration-secure/ansible'
migrate_db: "no"
vars_files:
- "{{ secure_dir }}/vars/edxapp_stage_vars.yml"
- "{{ secure_dir }}/vars/notifier_stage_vars.yml"
roles:
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
virtualenv_name: "notifier"
- notifier
......@@ -10,6 +10,8 @@
- "{{ secure_dir }}/vars/edxapp_stage_users.yml"
roles:
- common
- nginx
- role: nginx
nginx_sites:
- ora
- ora
# this gets all running stage util machines
- hosts: tag_environment_stage:&tag_function_util
# or we can get subsets of them by name
#- hosts: ~tag_Name_util(1|2)_stage
......
......@@ -10,7 +10,9 @@
- "{{ secure_dir }}/vars/edxapp_stage_users.yml"
roles:
- common
- nginx
- role: nginx
nginx_sites:
- xqueue
- xqueue
#- hosts: tag_aws_cloudformation_stack-name_feanilpractice:&tag_group_edxapp
......
......@@ -14,8 +14,16 @@
- "{{ secure_dir }}/vars/edx_jenkins_tests.yml"
roles:
- common
- nginx
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- xqueue
- xserver
- ora
- edxlocal
- mongo
- edxapp
- xqueue
- xserver
......
......@@ -26,8 +26,15 @@
- "{{ secure_dir }}/vars/edx_jenkins_tests.yml"
roles:
- common
- nginx
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- xserver
- xqueue
- edxlocal
- mongo
- edxapp
- xqueue
- xserver
......
......@@ -12,7 +12,7 @@
# lms listening on port 80 - example.com
# lms-preview listening on port 80 - preview.example.com
#
# ansible-playbook -c local --limit "localhost:127.0.0.1" path/to/configuration/playbooks/edx_sandbox.yml -i "localhost," -e "cms_nginx_port=80 lms_preview_nginx_port=80 c_lms_base=example.com c_preview_lms_base=preview.example.com"
# ansible-playbook -c local --limit "localhost:127.0.0.1" path/to/configuration/playbooks/edx_sandbox.yml -i "localhost," -e "EDXAPP_CMS_NGINX_PORT=80 EDXAPP_LMS_PREVIEW_NGINX_PORT=80 EDXAPP_LMS_BASE=example.com EDXAPP_PREVIEW_LMS_BASE=preview.example.com"
#
- name: Configure instance(s)
hosts: localhost
......@@ -23,8 +23,14 @@
openid_workaround: True
roles:
- common
- nginx
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- ora
- edxlocal
- mongo
- edxapp
- { role: 'rabbitmq', rabbitmq_ip: '127.0.0.1' }
- { role: 'edxapp', celery_worker: True }
......
......@@ -5,7 +5,11 @@
- "{{ secure_dir }}/vars/edxapp_ref_users.yml"
roles:
- common
- nginx
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- gunicorn
- edxapp
- ruby
......
......@@ -15,7 +15,11 @@
state: 'absent'
roles:
- common
- nginx
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- edxapp
- ruby
post_tasks:
......
......@@ -2,11 +2,15 @@
# This should only have variables
# that are applicable to all edX roles
storage_base_dir: /mnt
app_base_dir: /opt/wwc
log_base_dir: /mnt/logs
log_base_dir: "{{ storage_base_dir }}/logs"
venv_dir: /opt/edx
os_name: ubuntu
ENV_NAME: 'default_env'
ENV_TYPE: 'default_type'
# these paths are relative to the playbook dir
# directory for secret settings (keys, etc)
secure_dir: 'secure_example'
......@@ -14,3 +18,7 @@ secure_dir: 'secure_example'
# this indicates the path to site-specific (with precedence)
# things like nginx template files
local_dir: '../../ansible_local'
# include http/https
PYPI_MIRROR_URL: 'https://pypi.python.org/simple'
# do not include http/https
GIT_MIRROR: 'github.com'
- name: Configure instance(s)
hosts: jenkins
sudo: True
gather_facts: True
roles:
- common
- edxlocal
- role: rbenv
rbenv_user: "{{ jenkins_user }}"
rbenv_user_home: "{{ jenkins_user_home }}"
rbenv_ruby_version: "{{ jenkins_ruby_version }}"
- jenkins
# Configure a Jenkins master instance
# This has the Jenkins Java app, but none of the requirements
# to run the tests.
- name: Configure instance(s)
hosts: jenkins_master
sudo: True
gather_facts: True
roles:
- jenkins_master
# Configure a Jenkins worker instance
# This has all the requirements to run test jobs,
# but not the Jenkins Java app.
- name: Configure instance(s)
hosts: jenkins_worker
sudo: True
gather_facts: True
roles:
- jenkins_worker
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_lookup
short_description: returns a list of ec2 instances that meet search criteria
description:
- Returns a list of ec2 instances that meet search criteria
version_added: "1.4"
options:
region:
description:
- The AWS region to use. Must be specified if ec2_url
is not used. If not specified then the value of the
EC2_REGION environment variable, if any, is used.
required: false
default: null
aliases: [ 'aws_region', 'ec2_region' ]
aws_secret_key:
description:
- AWS secret key. If not set then the value of
the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the
AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
tags:
description:
- tags to lookup
required: false
default: null
type: dict
aliases: []
requirements: [ "boto" ]
author: John Jarvis
'''
EXAMPLES = '''
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Return all instances that match the tag "Name: foo"
- local_action:
module: ec2_lookup
tags:
Name: foo
'''
import sys
AWS_REGIONS = ['ap-northeast-1',
'ap-southeast-1',
'ap-southeast-2',
'eu-west-1',
'sa-east-1',
'us-east-1',
'us-west-1',
'us-west-2']
try:
import boto.ec2
from boto.ec2 import connect_to_region
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def main():
module=AnsibleModule(
argument_spec=dict(
ec2_url=dict(),
region=dict(aliases=['aws_region', 'ec2_region'],
choices=AWS_REGIONS),
aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'],
no_log=True),
aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
tags=dict(default=None, type='dict'),
)
)
tags = module.params.get('tags')
aws_secret_key = module.params.get('aws_secret_key')
aws_access_key = module.params.get('aws_access_key')
region = module.params.get('region')
ec2_url = module.params.get('ec2_url')
# If we have a region specified, connect to its endpoint.
if region:
try:
ec2 = connect_to_region(region, aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
# If we specified an ec2_url then try connecting to it
elif ec2_url:
try:
ec2 = boto.connect_ec2_endpoint(ec2_url, aws_access_key,
aws_secret_key)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="Either region or ec2_url must be specified")
instances = []
instance_ids = []
for res in ec2.get_all_instances(filters={'tag:' + tag: value
for tag, value in tags.iteritems()}):
for inst in res.instances:
if inst.state == "running":
instances.append({k: v for k, v in inst.__dict__.iteritems()
if isinstance(v, (basestring))})
instance_ids.append(inst.id)
module.exit_json(changed=False, instances=instances,
instance_ids=instance_ids)
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
# Vars for role analytics-server
# vars are namespaced with the module name.
#
AS_DB_ANALYTICS_PASSWORD: 'CHANGEME!'
AS_DB_ANALYTICS_USER: 'analytics001'
AS_DB_ANALYTICS_HOST: 'localhost'
AS_SERVER_PORT: '9000'
AS_ENV_LANG: 'en_US.UTF-8'
AS_LOG_LEVEL: 'INFO'
AS_WORKERS: '4'
DATABASES:
default: &databases_default
ENGINE: 'django.db.backends.mysql'
NAME: 'wwc'
USER: 'analytics001'
PASSWORD: 'CHANGEME!'
HOST: 'CHANGEME'
PORT: 3306
analytics_auth_config:
DATABASES:
analytics:
<<: *databases_default
USER: $AS_DB_ANALYTICS_USER
PASSWORD: $AS_DB_ANALYTICS_PASSWORD
HOST: $AS_DB_ANALYTICS_HOST
ANALYTICS_API_KEY: $AS_API_KEY
ANALYTICS_RESULTS_DB:
MONGO_URI: $AS_DB_RESULTS_URL
MONGO_DB: $AS_DB_RESULTS_DB
MONGO_STORED_QUERIES_COLLECTION: $AS_DB_RESULTS_COLLECTION
as_role_name: "analytics-server"
as_user: "analytics-server"
as_home: "/opt/wwc/analytics-server"
as_venv_dir: "{{ as_home }}/virtualenvs/analytics-server"
as_source_repo: "git@github.com:edx/analytics-server.git"
as_code_dir: "{{ as_home }}/src"
as_version: "master"
as_git_identity_path: "{{ secure_dir }}/files/git-identity"
as_git_identity_dest: "/etc/{{ as_role_name }}.git-identity"
as_git_ssh: "/tmp/{{ as_role_name }}.git_ssh.sh"
as_requirements_file: "{{ as_code_dir }}/requirements.txt"
as_rsyslog_enabled: "yes"
as_web_user: "www-data"
as_env: "analytics-server_env"
as_service_variant: 'analytics'
as_django_settings: 'anserv.settings'
as_env_vars:
ANALYTICS_SERVER_LOG_LEVEL: "{{ AS_LOG_LEVEL }}"
#
# Used by the included role, automated.
# See meta/main.yml
#
as_automated_rbash_links:
- /usr/bin/sudo
- /usr/bin/scp
#
# OS packages
#
as_debian_pkgs:
- mongodb-clients
- zip
- libmysqlclient-dev
as_redhat_pkgs:
- zip
- community-mysql-libs
#
# Installed via pip to get the IAM role feature.
#
as_pip_pkgs:
- git+https://github.com/s3tools/s3cmd.git#egg=s3cmd
\ No newline at end of file
automator ALL=(www-data) NOPASSWD:SETENV:/opt/wwc/analytics-server/virtualenvs/analytics-server/bin/django-admin.py run_all_queries *
#!/bin/sh
exec /usr/bin/ssh -o StrictHostKeyChecking=no -i /etc/git-identity "$@"
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role analytics-server
#
# Overview:
#
#
- name: analytics-server | stop the analytics service
service: name=analytics state=stopped
- name: analytics-server | start the analytics service
service: name=analytics state=started
---
dependencies:
- {
role: automated,
automated_rbash_links: $as_automated_rbash_links,
automated_sudoers_dest: '99-automator-analytics-server',
automated_sudoers_template: 'roles/analytics-server/templates/etc/sudoers.d/99-automator-analytics-server.j2'
}
\ No newline at end of file
#
# TODO: Needed while this repo is private
#
- name: analytics-server | upload ssh script
template:
src=tmp/{{ as_role_name }}.git_ssh.sh.j2 dest={{ as_git_ssh }}
force=yes owner=root group=adm mode=750
tags:
- analytics-server
- deploy
- install
- update
#
# TODO: Needed while this repo is private
#
- name: analytics-server | install read-only ssh key required for checkout
copy:
src={{ as_git_identity_path }} dest={{ as_git_identity_dest }}
force=yes owner=ubuntu group=adm mode=0600
tags:
- analytics-server
- deploy
- install
- update
- name: analytics-server | checkout code
git:
dest={{ as_code_dir }} repo={{ as_source_repo }}
version={{ as_version }} force=true
environment:
GIT_SSH: $as_git_ssh
notify: analytics-server | restart the analytics service
notify: analytics-server | start the analytics service
tags:
- analytics-server
- deploy
- install
- update
#
# TODO: Needed while this repo is private
#
- name: analytics-server | update src permissions
file:
path={{ as_code_dir }} state=directory owner={{ as_user }}
group={{ as_web_user }} mode=2750 recurse=yes
tags:
- analytics-server
- deploy
- install
- update
#
# TODO: Needed while this repo is private
#
- name: analytics-server | remove read-only ssh key for the content repo
file: path={{ as_git_identity_dest }} state=absent
tags:
- analytics-server
- deploy
- install
- update
#
# TODO: Needed while this repo is private
#
- name: analytics-server | remove ssh script
file: path={{ as_git_ssh }} state=absent
tags:
- analytics-server
- deploy
- install
- update
- name: analytics-server | install application requirements
pip:
requirements={{ as_requirements_file }}
virtualenv={{ as_venv_dir }} state=present
sudo: true
sudo_user: "{{ as_user }}"
notify: analytics-server | start the analytics service
tags:
- analytics-server
- deploy
- install
- update
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role analytics-server
#
# Overview:
#
# Installs the edX analytics-server Django application which provides
# basic analytics to the LMS instructor dashboard via service calls.
#
# Dependencies:
#
# common role
#
# Depends upon the automated role
#
# Example play:
#
# - name: Configure analytics instance(s)
# hosts: analytics-servers
# sudo: True
# vars_files:
# - "{{ secure_dir }}/vars/common/common.yml"
# - "{{ secure_dir }}/vars/stage/analytics-server.yml"
# - "{{ secure_dir }}/vars/users.yml"
# gather_facts: True
# roles:
# - common
# - analytics-server
#
- name: analytics-server | install system packages
apt: pkg={{','.join(as_debian_pkgs)}} state=present
tags:
- analytics-server
- install
- update
- name: analytics-server | create analytics-server user {{ as_user }}
user:
name={{ as_user }} state=present shell=/bin/bash
home={{ as_home }} createhome=yes
tags:
- analytics-server
- install
- update
- name: analytics-server | setup the analytics-server env
template:
src=opt/wwc/analytics-server/{{ as_env }}.j2
dest={{ as_home }}/{{ as_env }}
owner="{{ as_user }}" group="{{ as_user }}"
tags:
- analytics-server
- install
- update
- name: analytics-server | drop a bash_profile
copy: >
src=../../common/files/bash_profile
dest={{ as_home }}/.bash_profile
owner={{ as_user }}
group={{ as_user }}
# Awaiting next ansible release.
#- name: analytics-server | ensure .bashrc exists
# file: path={{ as_home }}/.bashrc state=touch
# sudo: true
# sudo_user: "{{ as_user }}"
# tags:
# - analytics-server
# - install
# - update
- name: analytics-server | ensure .bashrc exists
shell: touch {{ as_home }}/.bashrc
sudo: true
sudo_user: "{{ as_user }}"
tags:
- analytics-server
- install
- update
- name: analytics-server | add source of analytics-server_env to .bashrc
lineinfile:
dest={{ as_home }}/.bashrc
regexp='. {{ as_home }}/analytics-server_env'
line='. {{ as_home }}/analytics-server_env'
tags:
- analytics-server
- install
- update
- name: analytics-server | add source venv to .bashrc
lineinfile:
dest={{ as_home }}/.bashrc
regexp='. {{ as_venv_dir }}/bin/activate'
line='. {{ as_venv_dir }}/bin/activate'
tags:
- analytics-server
- install
- update
- name: analytics-server | install global python requirements
pip: name={{ item }}
with_items: as_pip_pkgs
tags:
- analytics-server
- install
- update
- name: analytics-server | create config
template:
src=opt/wwc/analytics.auth.json.j2
dest=/opt/wwc/analytics.auth.json
mode=0600
owner="{{ as_web_user }}" group="{{ as_web_user }}"
tags:
- analytics-server
- install
- update
- name: analytics-server | install service
template:
src=etc/init/analytics.conf.j2 dest=/etc/init/analytics.conf
owner=root group=root
- include: deploy.yml
\ No newline at end of file
# {{ ansible_managed }}
description "Analytics server under gunicorn"
start on runlevel [2345]
stop on runlevel [!2345]
respawn
respawn limit 3 30
env SERVICE_VARIANT={{ as_service_variant }}
env PID=/var/tmp/analytics.pid
env WORKERS={{ AS_WORKERS }}
env PORT={{ AS_SERVER_PORT }}
env LANG={{ AS_ENV_LANG }}
env DJANGO_SETTINGS_MODULE={{ as_django_settings }}
chdir {{ as_code_dir }}
setuid {{ as_web_user }}
exec {{ as_venv_dir }}/bin/gunicorn -b 0.0.0.0:$PORT -w $WORKERS --pythonpath={{ as_code_dir }}/anserv anserv.wsgi
# {{ ansible_managed }}
{% for name,value in as_env_vars.items() %}
{% if value %}
export {{ name }}="{{ value }}"
{% endif %}
{% endfor %}
\ No newline at end of file
#!/bin/sh
exec /usr/bin/ssh -o StrictHostKeyChecking=no -i {{ as_git_identity_dest }} "$@"
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
# Vars for role analytics
# vars are namespaced with the module name.
#
ANALYTICS_DB_ANALYTICS_PASSWORD: 'CHANGEME!'
ANALYTICS_DB_ANALYTICS_USER: 'analytics001'
ANALYTICS_DB_ANALYTICS_HOST: 'localhost'
ANALYTICS_SERVER_PORT: '9000'
ANALYTICS_ENV_LANG: 'en_US.UTF-8'
ANALYTICS_LOG_LEVEL: 'INFO'
ANALYTICS_WORKERS: '4'
DATABASES:
default: &databases_default
ENGINE: 'django.db.backends.mysql'
NAME: 'wwc'
USER: 'analytics001'
PASSWORD: 'CHANGEME!'
HOST: 'CHANGEME'
PORT: 3306
analytics_auth_config:
DATABASES:
analytics:
<<: *databases_default
USER: $ANALYTICS_DB_ANALYTICS_USER
PASSWORD: $ANALYTICS_DB_ANALYTICS_PASSWORD
HOST: $ANALYTICS_DB_ANALYTICS_HOST
ANALYTICS_API_KEY: $ANALYTICS_API_KEY
ANALYTICS_RESULTS_DB:
MONGO_URI: $ANALYTICS_DB_RESULTS_URL
MONGO_DB: $ANALYTICS_DB_RESULTS_DB
MONGO_STORED_QUERIES_COLLECTION: $ANALYTICS_DB_RESULTS_COLLECTION
analytics_role_name: "analytics"
analytics_user: "analytics"
analytics_home: "/opt/wwc/analytics"
analytics_venv_dir: "{{ analytics_home }}/virtualenvs/analytics"
analytics_source_repo: "git@github.com:edx/analytics-server.git"
analytics_code_dir: "{{ analytics_home }}/src"
analytics_version: "master"
analytics_git_identity_path: "{{ secure_dir }}/files/git-identity"
analytics_git_identity_dest: "/etc/{{ analytics_role_name }}.git-identity"
analytics_git_ssh: "/tmp/{{ analytics_role_name }}.git_ssh.sh"
analytics_requirements_file: "{{ analytics_code_dir }}/requirements.txt"
analytics_rsyslog_enabled: "yes"
analytics_web_user: "www-data"
analytics_env: "analytics_env"
analytics_service_variant: 'analytics'
analytics_django_settings: 'anserv.settings'
analytics_env_vars:
ANALYTICS_LOG_LEVEL: "{{ ANALYTICS_LOG_LEVEL }}"
#
# Used by the included role, automated.
# See meta/main.yml
#
analytics_automated_rbash_links:
- /usr/bin/sudo
- /usr/bin/scp
#
# OS packages
#
analytics_debian_pkgs:
- mongodb-clients
- zip
- libmysqlclient-dev
analytics_redhat_pkgs:
- zip
- community-mysql-libs
#
# Installed via pip to get the IAM role feature.
#
analytics_pip_pkgs:
- git+https://github.com/s3tools/s3cmd.git#egg=s3cmd
\ No newline at end of file
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role analytics
#
# Overview:
#
#
- name: analytics | stop the analytics service
service: name=analytics state=stopped
- name: analytics | start the analytics service
service: name=analytics state=started
#
# TODO: Needed while this repo is private
#
- name: analytics | upload ssh script
template:
src=tmp/{{ analytics_role_name }}.git_ssh.sh.j2 dest={{ analytics_git_ssh }}
force=yes owner=root group=adm mode=750
tags:
- analytics
- deploy
- install
- update
#
# TODO: Needed while this repo is private
#
- name: analytics | install read-only ssh key required for checkout
copy:
src={{ analytics_git_identity_path }} dest={{ analytics_git_identity_dest }}
force=yes owner=ubuntu group=adm mode=0600
tags:
- analytics
- deploy
- install
- update
- name: analytics | checkout code
git:
dest={{ analytics_code_dir }} repo={{ analytics_source_repo }}
version={{ analytics_version }} force=true
environment:
GIT_SSH: $analytics_git_ssh
notify: analytics | restart the analytics service
notify: analytics | start the analytics service
tags:
- analytics
- deploy
- install
- update
#
# TODO: Needed while this repo is private
#
- name: analytics | update src permissions
file:
path={{ analytics_code_dir }} state=directory owner={{ analytics_user }}
group={{ analytics_web_user }} mode=2750 recurse=yes
tags:
- analytics
- deploy
- install
- update
#
# TODO: Needed while this repo is private
#
- name: analytics | remove read-only ssh key for the content repo
file: path={{ analytics_git_identity_dest }} state=absent
tags:
- analytics
- deploy
- install
- update
#
# TODO: Needed while this repo is private
#
- name: analytics | remove ssh script
file: path={{ analytics_git_ssh }} state=absent
tags:
- analytics
- deploy
- install
- update
- name: analytics | install application requirements
pip:
requirements={{ analytics_requirements_file }}
virtualenv={{ analytics_venv_dir }} state=present
sudo: true
sudo_user: "{{ analytics_user }}"
notify: analytics | start the analytics service
tags:
- analytics
- deploy
- install
- update
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role analytics
#
# Overview:
#
# Installs the edX analytics Django application which provides
# basic analytics to the LMS instructor dashboard via service calls.
#
# Dependencies:
#
# common role
#
# Depends upon the automated role
#
# Example play:
#
# - name: Configure analytics instance(s)
# hosts: analytics
# sudo: True
# vars_files:
# - "{{ secure_dir }}/vars/common/common.yml"
# - "{{ secure_dir }}/vars/stage/analytics.yml"
# - "{{ secure_dir }}/vars/users.yml"
# gather_facts: True
# roles:
# - common
# - analytics
#
- name: analytics | install system packages
apt: pkg={{','.join(analytics_debian_pkgs)}} state=present
tags:
- analytics
- install
- update
- name: analytics | create analytics user {{ analytics_user }}
user:
name={{ analytics_user }} state=present shell=/bin/bash
home={{ analytics_home }} createhome=yes
tags:
- analytics
- install
- update
- name: analytics | setup the analytics env
template:
src=opt/wwc/analytics/{{ analytics_env }}.j2
dest={{ analytics_home }}/{{ analytics_env }}
owner="{{ analytics_user }}" group="{{ analytics_user }}"
tags:
- analytics
- install
- update
- name: analytics | drop a bash_profile
copy: >
src=../../common/files/bash_profile
dest={{ analytics_home }}/.bash_profile
owner={{ analytics_user }}
group={{ analytics_user }}
# Awaiting next ansible release.
#- name: analytics | ensure .bashrc exists
# file: path={{ analytics_home }}/.bashrc state=touch
# sudo: true
# sudo_user: "{{ analytics_user }}"
# tags:
# - analytics
# - install
# - update
- name: analytics | ensure .bashrc exists
shell: touch {{ analytics_home }}/.bashrc
sudo: true
sudo_user: "{{ analytics_user }}"
tags:
- analytics
- install
- update
- name: analytics | add source of analytics_env to .bashrc
lineinfile:
dest={{ analytics_home }}/.bashrc
regexp='. {{ analytics_home }}/analytics_env'
line='. {{ analytics_home }}/analytics_env'
tags:
- analytics
- install
- update
- name: analytics | add source venv to .bashrc
lineinfile:
dest={{ analytics_home }}/.bashrc
regexp='. {{ analytics_venv_dir }}/bin/activate'
line='. {{ analytics_venv_dir }}/bin/activate'
tags:
- analytics
- install
- update
- name: analytics | install global python requirements
pip: name={{ item }}
with_items: analytics_pip_pkgs
tags:
- analytics
- install
- update
- name: analytics | create config
template:
src=opt/wwc/analytics.auth.json.j2
dest=/opt/wwc/analytics.auth.json
mode=0600
owner="{{ analytics_web_user }}" group="{{ analytics_web_user }}"
tags:
- analytics
- install
- update
- name: analytics | install service
template:
src=etc/init/analytics.conf.j2 dest=/etc/init/analytics.conf
owner=root group=root
- include: deploy.yml
\ No newline at end of file
# {{ ansible_managed }}
description "Analytics gunicorn"
start on runlevel [2345]
stop on runlevel [!2345]
respawn
respawn limit 3 30
env SERVICE_VARIANT={{ analytics_service_variant }}
env PID=/var/tmp/analytics.pid
env WORKERS={{ ANALYTICS_WORKERS }}
env PORT={{ ANALYTICS_SERVER_PORT }}
env LANG={{ ANALYTICS_ENV_LANG }}
env DJANGO_SETTINGS_MODULE={{ analytics_django_settings }}
chdir {{ analytics_code_dir }}
setuid {{ analytics_web_user }}
exec {{ analytics_venv_dir }}/bin/gunicorn -b 0.0.0.0:$PORT -w $WORKERS --pythonpath={{ analytics_code_dir }}/anserv anserv.wsgi
automator ALL=({{ analytics_web_user }}) NOPASSWD:SETENV:{{ analytics_venv_dir }}/bin/django-admin.py run_all_queries *
# {{ ansible_managed }}
{% for name,value in analytics_env_vars.items() %}
{% if value %}
export {{ name }}="{{ value }}"
{% endif %}
{% endfor %}
#!/bin/sh
exec /usr/bin/ssh -o StrictHostKeyChecking=no -i {{ analytics_git_identity_dest }} "$@"
---
- name: ansible-role | check if the role exists
command: test -d roles/{{ role_name }}
register: role_exists
ignore_errors: yes
- name: ansible-role | prompt for overwrite
pause: prompt="Role {{ role_name }} exists. Overwrite? Press any key to continue or <CTRL>-c, then a, to abort."
when: role_exists | success
- name: ansible-role | create role directories
file: path=roles/{{role_name}}/{{ item }} state=directory
with_items:
- tasks
- meta
- handlers
- vars
- defaults
- templates
- files
......@@ -13,5 +23,6 @@
template: src={{ item }}/main.yml.j2 dest=roles/{{ role_name }}/{{ item }}/main.yml
with_items:
- tasks
- meta
- defaults
- handlers
- vars
\ No newline at end of file
---
{% include 'roles/ansible-role/templates/header.j2' %}
#
# Vars for role {{ role_name }}
# Defaults for role {{ role_name }}
#
#
......
---
{% include 'roles/ansible-role/templates/header.j2' %}
#
# Role includes for role {{ role_name }}
#
# Example:
#
# dependencies:
# - {
# role: my_role
# my_role_var0: "foo"
# my_role_var1: "bar"
# }
---
- name: Dump all vars to json
template: src=dumpall.json.j2 dest=/tmp/ansible.all.json mode=0600
tags:
- dumpall
- debug
- name: Dump lms auth|env file
template: src=../../edxapp/templates/lms.{{item}}.json.j2 dest=/tmp/lms.{{item}}.json mode=0600
with_items:
- env
- auth
when: "'lms' in service_variants_enabled"
tags:
- dumpall
- debug
- name: Dump lms-preview auth|env file
template: src=../../edxapp/templates/lms-preview.{{item}}.json.j2 dest=/tmp/lms-preview.{{item}}.json mode=0600
with_items:
- env
- auth
when: "'lms-preview' in service_variants_enabled"
tags:
- dumpall
- debug
- name: Dump cms auth|env file
template: src=../../edxapp/templates/cms.{{item}}.json.j2 dest=/tmp/cms.{{item}}.json mode=0600
with_items:
- env
- auth
when: "'cms' in service_variants_enabled"
tags:
- dumpall
- debug
- name: Dump all vars to yaml
template: src=dumpall.yml.j2 dest=/tmp/ansible.all.yml mode=0600
tags:
- dumpall
- debug
- name: fetch remote files
# fetch is fail-safe for remote files that don't exist
# setting mode is not an option
fetch: src=/tmp/{{item}} dest=/tmp/{{ansible_hostname}}-{{item}} flat=True
with_items:
- ansible.all.json
- ansible.all.yml
- lms.env.json
- lms.auth.json
- lms-preview.env.json
- lms-preview.auth.json
- cms.env.json
- cms.auth.json
tags:
- dumpall
- debug
Module Variables ("vars"):
--------------------------------
{{ vars | to_nice_json }}
Environment Variables ("environment"):
--------------------------------
{{ environment | to_nice_json }}
GROUP NAMES Variables ("group_names"):
--------------------------------
{{ group_names | to_nice_json }}
GROUPS Variables ("groups"):
--------------------------------
{{ groups | to_nice_json }}
HOST Variables ("hostvars"):
--------------------------------
{{ hostvars | to_nice_json }}
{% if lms_env_config %}
LMS env variables:
---------------
{{ lms_env_config | to_nice_json }}
{% endif %}
{% if lms_auth_config %}
LMS auth variables:
---------------
{{ lms_auth_config | to_nice_json }}
{% endif %}
{% if lms_preview_env_config %}
Preview env variables:
---------------
{{ lms_preview_env_config | to_nice_json }}
{% endif %}
{% if lms_preview_auth_config %}
Preview auth variables:
---------------
{{ lms_preview_auth_config | to_nice_json }}
{% endif %}
{% if cms_env_config %}
CMS env variables:
---------------
{{ cms_env_config | to_nice_json }}
{% endif %}
{% if cms_auth_config %}
CMS auth variables:
---------------
{{ cms_auth_config | to_nice_json }}
{% endif %}
Module Variables ("vars"):
--------------------------------
{{ vars | to_nice_yaml }}
Environment Variables ("environment"):
--------------------------------
{{ environment | to_nice_yaml }}
GROUP NAMES Variables ("group_names"):
--------------------------------
{{ group_names | to_nice_yaml }}
GROUPS Variables ("groups"):
--------------------------------
{{ groups | to_nice_yaml }}
{% if lms_env_config %}
LMS env variables:
---------------
{{ lms_env_config | to_nice_yaml }}
{% endif %}
{% if lms_auth_config %}
LMS auth variables:
---------------
{{ lms_auth_config | to_nice_yaml }}
{% endif %}
{% if lms_preview_env_config %}
Preview env variables:
---------------
{{ lms_preview_env_config | to_nice_yaml }}
{% endif %}
{% if lms_preview_auth_config %}
Preview auth variables:
---------------
{{ lms_preview_auth_config | to_nice_yaml }}
{% endif %}
{% if cms_env_config %}
CMS env variables:
---------------
{{ cms_env_config | to_nice_yaml }}
{% endif %}
{% if cms_auth_config %}
CMS auth variables:
---------------
{{ cms_auth_config | to_nice_yaml }}
{% endif %}
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Vars for role automated
#
#
# vars are namespaced with the module name.
#
automated_role_name: automated
automated_user: "automator"
automated_home: "/home/automator"
automated_rbash_links: !!null
automated_sudoers_template: !!null
automated_sudoers_file: !!null
#
# OS packages
#
automated_debian_pkgs: []
automated_redhat_pkgs: []
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6noLNy7YVFNK6OSOFgPbnGGovgZqLsvJxjhs82jT7tZIsYOjVVCAMk0kkSnBt0etDjGSJlJ664r1aBhubZrujzxns0oOzA7J+tWQ3CiaOBLtOSffeh8a3dTWWNPCAGg9KflPaufXdd31Bf96g9ACGZR7uLYgWUP/J0jOPMCPE1RBfRNFeZ7cHlh3t/pI+JzTcyZTka4AAEsCejBKHngYxVoOk+gfxe+Qo703st0MFuoxVAMymeBGi/1lCwKsV6r9BijzuvIFyQCl2vThjoF32yHmmP8by//hmgpo5UNqG7jbmSrCJhkdh+My3SgEebn5c2QLJepOrUfrZFwz1BQ1l task@edx.org
\ No newline at end of file
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
# Tasks for role automated
#
# Overview:
#
# This role is included as a dependency by other roles which provide
# automated jobs. Automation occurs over ssh. The automator user
# is assigned to a managed rbash shell and is, potentially, allowed to run
# explicitly listed commands via sudo. Both the commands that are
# allowed via rbash and the sudoers file are provided by the
# including role.
#
# Dependencies:
#
# This role depends upon variables provided by an including role
# via the my_role/meta/main.yml file. Includes take the following forms:
#
# dependencies:
# - {
# role: automated,
# automated_rbash_links: $as_automated_rbash_links,
# automated_sudoers_dest: '99-my_role'
# automated_sudoers_file: 'roles/my_role/files/etc/sudoers.d/99-my_role'
# }
#
# or
#
# dependencies:
# - {
# role: automated,
# automated_rbash_links: $as_automated_rbash_links,
# automated_sudoers_dest: '99-my_role'
# automated_sudoers_template: 'roles/my_role/templates/etc/sudoers.d/99-my_role.j2'
# }
#
# The sudoers file is optional. Note that for sudo to work it must be
# included in the rbash links list.
#
# That list should be provided via my_role's defaults
#
# role_automated_rbash_links:
# - /usr/bin/sudo
# - /usr/bin/scp
#
- fail: automated_rbash_links required for role
when: automated_rbash_links is not defined
- fail: automated_sudoers_dest required for role
when: automated_sudoers_dest is not defined
- name: automated | create automated user
user:
name={{ automated_user }} state=present shell=/bin/rbash
home={{ automated_home }} createhome=yes
tags:
- automated
- install
- update
- name: automated | create sudoers file from file
copy:
dest=/etc/sudoers.d/{{ automated_sudoers_dest }}
src={{ automated_sudoers_file }} owner="root"
group="root" mode=0440 validate='visudo -cf %s'
when: automated_sudoers_file
tags:
- automated
- install
- update
- name: automated | create sudoers file from template
template:
dest=/etc/sudoers.d/{{ automated_sudoers_dest }}
src={{ automated_sudoers_template }} owner="root"
group="root" mode=0440 validate='visudo -cf %s'
when: automated_sudoers_template
tags:
- automated
- install
- update
#
# Prevent user from updating their PATH and
# environment.
#
- name: automated | update shell file mode
file:
path={{ automated_home }}/{{ item }} mode=0640
state=file owner="root" group={{ automated_user }}
tags:
- automated
- install
- update
with_items:
- .bashrc
- .profile
- .bash_logout
- name: automated | change ~automated ownership
file:
path={{ automated_home }} mode=0750 state=directory
owner="root" group={{ automated_user }}
tags:
- automated
- install
- update
#
# This ensures that the links are updated with each run
# and that links that were removed from the role are
# removed.
#
- name: automated | remove ~automated/bin directory
file:
path={{ automated_home }}/bin state=absent
ignore_errors: yes
tags:
- automated
- install
- update
- name: automated | create ~automated/bin directory
file:
path={{ automated_home }}/bin state=directory mode=0750
owner="root" group={{ automated_user }}
tags:
- automated
- install
- update
- name: automated | re-write .profile
copy:
src=home/automator/.profile
dest={{ automated_home }}/.profile
owner="root"
group={{ automated_user }}
mode="0744"
tags:
- automated
- install
- update
- name: automated | re-write .bashrc
copy:
src=home/automator/.bashrc
dest={{ automated_home }}/.bashrc
owner="root"
group={{ automated_user }}
mode="0744"
tags:
- automated
- install
- update
- name: automated | create .ssh directory
file:
path={{ automated_home }}/.ssh state=directory mode=0700
owner={{ automated_user }} group={{ automated_user }}
tags:
- automated
- install
- update
- name: automated | copy key to .ssh/authorized_keys
copy:
src=home/automator/.ssh/authorized_keys
dest={{ automated_home }}/.ssh/authorized_keys mode=0600
owner={{ automated_user }} group={{ automated_user }}
tags:
- automated
- install
- update
- name: automated | create allowed command links
file:
src={{ item }} dest={{ automated_home }}/bin/{{ item.split('/').pop() }}
state=link
with_items: automated_rbash_links
tags:
- automated
- install
- update
......@@ -3,10 +3,10 @@
# Overview:
#
# Creates OS accounts for users based on their github credential.
# Expects to find a list in scope named github_users with
# Expects to find a list in scope named GITHUB_USERS with
# the following structure:
#
# github_users:
# GITHUB_USERS:
# - user: me_at_github
# groups:
# - adm
......@@ -20,7 +20,7 @@
name={{ item.user }}
groups={{ ",".join(item.groups) }}
shell=/bin/bash
with_items: github_users
with_items: GITHUB_USERS
tags:
- users
- update
......@@ -29,7 +29,7 @@
file:
path=/home/{{ item.user }}/.ssh state=directory mode=0700
owner={{ item.user }} group={{ item.user }}
with_items: github_users
with_items: GITHUB_USERS
tags:
- users
- update
......@@ -39,7 +39,7 @@
url=https://github.com/{{ item.user }}.keys
dest=/home/{{ item.user }}/.ssh/authorized_keys mode=0600
owner={{ item.user }} group={{ item.user }}
with_items: github_users
with_items: GITHUB_USERS
tags:
- users
- update
......@@ -36,8 +36,8 @@
- name: common | Creating env users
user: name={{ item.user }} {% if item.groups %}groups={{ ",".join(item.groups) }}{% endif %} shell=/bin/bash
with_items: env_users
when: env_users is defined
with_items: ENV_USERS
when: ENV_USERS is defined
tags:
- users
- update
......
......@@ -11,13 +11,19 @@
- install
- name: common | pip install virtualenv
pip: name=virtualenv state=present
pip: >
name=virtualenv
state=present
extra_args="-i {{ PYPI_MIRROR_URL }}"
tags:
- venv_base
- install
- name: common | pip install virtualenvwrapper
pip: name=virtualenvwrapper state=present
pip: >
name=virtualenvwrapper
state=present
extra_args="-i {{ PYPI_MIRROR_URL }}"
tags:
- venv_base
- install
......@@ -35,7 +41,11 @@
- install
- name: common | pip install gunicorn
pip: name=gunicorn virtualenv="{{venv_dir}}" state=present
pip: >
name=gunicorn
virtualenv="{{venv_dir}}"
state=present
extra_args="-i {{ PYPI_MIRROR_URL }}"
tags:
- gunicorn
- install
---
- include: create_users.yml
- include: create_github_users.yml
when: github_users is defined
when: GITHUB_USERS is defined
- name: common | Add user www-data
# This user should be created on the system by default
......@@ -10,6 +10,14 @@
- pre_install
- update
- name: common | Create the base directory for storage
file: >
path={{ storage_base_dir }}
state=directory
owner=root
group=root
mode=0755
- name: common | Create application root
# In the future consider making group edx r/t adm
file: path={{ app_base_dir }} state=directory owner=root group=adm mode=2775
......@@ -43,13 +51,13 @@
- update
- name: common | Create log directory
file: path=$log_base_dir state=directory mode=2770 group=adm owner=syslog
file: path={{log_base_dir}} state=directory mode=2755 group=adm owner=syslog
tags:
- pre_install
- update
- name: common | Create alias from app_base_dir to the log_base_dir
file: state=link src=$log_base_dir path=$app_base_dir/log
file: state=link src={{log_base_dir}} path={{app_base_dir}}/log
tags:
- pre_install
- logging
......
---
devpi_venv_dir: "{{ app_base_dir }}/devpi/venvs/devpi"
devpi_pip_pkgs:
- devpi-server
- eventlet
devpi_nginx_port: 80
devpi_port: 4040
devpi_data_dir: /var/devpi/data
devpi_user: devpi
devpi_group: devpi
devpi_server_name: 'pypy.*'
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Defaults for role devpi
#
---
- name: devpi | restart devpi
supervisorctl: >
state=restarted
config={{ supervisor_cfg }}
name=devpi-server
- name: devpi | start devpi
supervisorctl: >
state=started
config={{ supervisor_cfg }}
name=devpi-server
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role devpi
#
# Overview:
# Creates a pypi caching server
#
# Dependencies:
# - common
# - nginx
# - supervisor
#
# Example play:
# roles:
# - common
# - role: nginx
# nginx_sites:
# - devpi
# - role: supervisor
# supervisor_servers:
# - devpi
# - devpi
---
- name: devpi | create devpi user
user: >
name={{ devpi_user }}
state=present
- name: devpi | create virtualenv directory
file: >
path={{ devpi_venv_dir }}
state=directory
owner={{ devpi_user }}
group={{ devpi_group }}
notify: devpi | restart devpi
- name: devpi | create the devpi data directory
file: >
path={{ devpi_data_dir }}
state=directory
owner={{ devpi_user }}
group={{ devpi_group }}
- name: devpi | install devpi pip pkgs
pip: >
name={{ item }}
state=present
virtualenv={{ devpi_venv_dir }}
with_items: devpi_pip_pkgs
notify: devpi | restart devpi
- name: supervisor | ensure supervisor is started
service: name=supervisor state=started
- name: devpi | ensure devpi is running
supervisorctl: >
state=started
config={{ supervisor_cfg }}
name=devpi-server
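The overview comment above describes this role as a PyPI caching server. A hedged sketch of how a client play might install through the cache once it is running; the host name is hypothetical, and the port and index path are assumptions based on the defaults above (devpi_port: 4040) and devpi's standard root/pypi index:

# Hypothetical client-side task; not part of this role.
- name: example | install a package through the devpi cache
  pip: >
    name=requests
    state=present
    extra_args="-i http://devpi.example.com:4040/root/pypi/+simple/"

Pointing PYPI_MIRROR_URL at the same URL would route the mirrored pip tasks elsewhere in this commit through the cache as well.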
DISCERN_NGINX_PORT: 18070
DISCERN_BASIC_AUTH: False
discern_source_repo: https://github.com/edx/discern.git
ease_source_repo: https://github.com/edx/ease.git
ease_dir: $app_base_dir/ease
......@@ -6,7 +9,7 @@ discern_settings: discern.aws
nltk_data_dir: /usr/share/nltk_data
ease_branch: master
discern_branch: dev
nginx_listen_port: 80
gunicorn_port: 7999
discern_gunicorn_port: 8070
discern_gunicorn_host: 127.0.0.1
discern_user: discern
site_name: discern
......@@ -4,6 +4,3 @@
- name: discern | restart celery
service: name=celery state=restarted
- name: discern | restart nginx
service: name=nginx state=restarted
......@@ -109,20 +109,3 @@
# Kept separate from the other three because it doesn't take the --noinput flag
- name: discern | django update_index for discern
shell: ${venv_dir}/bin/python {{discern_dir}}/manage.py update_index --settings={{discern_settings}} --pythonpath={{discern_dir}}
- name: discern | create nginx directory and set perms
file: path=/etc/nginx/sites-available owner=root group=edx mode=2775 state=directory
#Install nginx sites available
#remove default link, render template, link template to sites-enabled to activate
- name: discern | Removing default nginx config
file: path=/etc/nginx/sites-enabled/default state=absent
notify: discern | restart nginx
- name: discern | render nginx sites available
template: src=nginx-discern.j2 dest=/etc/nginx/sites-available/{{ site_name }}
notify: discern | restart nginx
- name: discern | Creating nginx config link {{ site_name }}
file: src=/etc/nginx/sites-available/{{ site_name }} dest=/etc/nginx/sites-enabled/{{ site_name }} state=link owner=root group=root
notify: discern | restart nginx
......@@ -11,11 +11,12 @@ respawn limit 3 30
env PID=/var/run/gunicorn/discern.pid
env WORKERS={{ ansible_processor_cores * 2 }}
env PORT={{ gunicorn_port }}
env PORT={{ discern_gunicorn_port }}
env ADDRESS={{ discern_gunicorn_host }}
env LANG=en_US.UTF-8
env DJANGO_SETTINGS_MODULE={{discern_settings}}
chdir {{discern_dir}}
setuid {{discern_user}}
exec {{venv_dir}}/bin/gunicorn --preload -b 127.0.0.1:$PORT -w $WORKERS --timeout=30 --pythonpath={{discern_dir}} discern.wsgi
\ No newline at end of file
exec {{venv_dir}}/bin/gunicorn --preload -b $ADDRESS:$PORT -w $WORKERS --timeout=30 --pythonpath={{discern_dir}} discern.wsgi
......@@ -3,7 +3,6 @@
service: name=edxapp state=started
tags:
- lms
- lms-xml
- lms-preview
- cms
- deploy
......@@ -12,7 +11,6 @@
service: name=edxapp state=stopped
tags:
- lms
- lms-xml
- lms-preview
- cms
- deploy
......@@ -21,7 +19,6 @@
service: name=edxapp state=restarted
tags:
- lms
- lms-xml
- lms-preview
- cms
- deploy
# requires:
# - group_vars/all
# - common/tasks/main.yml
# - nginx/tasks/main.yml
---
- name: create cms application config
template: src=cms.env.json.j2 dest=$app_base_dir/cms.env.json mode=640 owner=www-data group=adm
......@@ -9,6 +8,7 @@
- cms-env
- cms
- update
- deploy
- name: create cms auth file
template: src=cms.auth.json.j2 dest=$app_base_dir/cms.auth.json mode=640 owner=www-data group=adm
......@@ -16,20 +16,16 @@
- cms-env
- cms
- update
- include: ../../nginx/tasks/nginx_site.yml state=link site_name=cms
when: celery_worker is not defined
- include: ../../nginx/tasks/nginx_site.yml state=link site_name=cms-backend
when: celery_worker is not defined
- deploy
- name: Create CMS log target directory
file: path={{log_base_dir}}/cms state=directory owner=syslog group=adm mode=2770
file: path={{log_base_dir}}/cms state=directory owner=syslog group=syslog mode=2750
tags:
- cms
- cms-env
- logging
- update
- deploy
# Creates CMS upstart file
- include: upstart.yml basename=cms
......
......@@ -4,7 +4,6 @@
when: celery_worker is not defined
tags:
- lms
- lms-xml
- lms-preview
- cms
- deploy
......@@ -15,9 +14,20 @@
tags:
- deploy
# update json configs for the application
- include: lms.yml
when: "'lms' in service_variants_enabled"
- include: cms.yml
when: "'cms' in service_variants_enabled"
- include: lms-preview.yml
when: "'lms-preview' in service_variants_enabled"
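The conditional includes above key off a service_variants_enabled collection. A minimal sketch of how it might be defined; the values are examples, and only 'lms', 'cms', and 'lms-preview' are referenced by these tasks:

# Hypothetical group_vars entry; each include above runs only when its variant is listed.
service_variants_enabled:
  - lms
  - cms
  - lms-preview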
# Do A Checkout
- name: edxapp | checkout edx-platform repo into {{edx_platform_code_dir}}
git: dest={{edx_platform_code_dir}} repo={{edx_platform_repo}} version={{edx_platform_commit}}
register: edx_platform_checkout
tags:
- lms
- cms
......@@ -26,6 +36,7 @@
- name: git clean after checking out edx-platform
shell: cd {{edx_platform_code_dir}} && git clean -xdf
when: edx_platform_checkout.changed
tags:
- lms
- cms
......@@ -46,7 +57,6 @@
when: edxapp_theme_name != ''
tags:
- cms
- lms-xml
- lms-preview
- lms
- update
......@@ -60,6 +70,7 @@
file: path={{edx_platform_code_dir}} state=directory owner=www-data group=www-data recurse=yes
# Post Checkout tasks will get run as handlers when the {{ edx_platform_code_dir }} is ready.
# Look at the handlers/main.yml in this role for a description of the tasks stated below.
when: edx_platform_checkout.changed
tags:
- lms
- cms
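The comment above relies on Ansible's notify/handler mechanism. A generic sketch of that pattern for readers unfamiliar with it; the task and handler names are illustrative and are not the actual entries in this role's handlers/main.yml:

# tasks/main.yml (illustrative)
- name: example | check out a repo
  git: dest=/opt/example/repo repo=https://github.com/example/example.git
  notify: example | post-checkout step

# handlers/main.yml (illustrative)
- name: example | post-checkout step
  shell: echo "runs at the end of the play, only if a notifying task reported changed"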
......@@ -68,14 +79,18 @@
# Ruby plays that need to be run after platform updates.
- name: gem | gem install bundler
shell: RBENV_ROOT={{ rbenv_root }} GEM_HOME={{ gem_home }} {{ rbenv_root }}/shims/gem install bundle chdir={{ edx_platform_code_dir }}
shell: >
RBENV_ROOT={{ rbenv_root }} GEM_HOME={{ gem_home }} {{ rbenv_root }}/shims/gem install bundle
chdir={{ edx_platform_code_dir }}
tags:
- ruby
- deploy
- install
- name: bundle | bundle install
shell: RBENV_ROOT={{ rbenv_root }} GEM_HOME={{ gem_home }} {{ gem_home }}/bin/bundle install --binstubs chdir={{ edx_platform_code_dir }}
shell: >
RBENV_ROOT={{ rbenv_root }} GEM_HOME={{ gem_home }} {{ gem_home }}/bin/bundle install --binstubs
chdir={{ edx_platform_code_dir }}
tags:
- ruby
- deploy
......@@ -91,9 +106,33 @@
# Python plays that need to be run after platform updates.
# Substitute github mirror in all requirements files
#
- name: Updating requirement files for git mirror
command: |
/bin/sed -i -e 's/github\.com/{{ GIT_MIRROR }}/g' {{ item }}
with_items:
- "{{ pre_requirements_file }}"
- "{{ post_requirements_file }}"
- "{{ repo_requirements_file }}"
- "{{ github_requirements_file }}"
- "{{ local_requirements_file }}"
- "{{ sandbox_base_requirements }}"
- "{{ sandbox_local_requirements }}"
- "{{ sandbox_post_requirements }}"
tags:
- lms
- cms
- install
- deploy
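The sed task above rewrites github.com references in the requirements files so they resolve against an internal mirror. An illustration of the effect with a hypothetical GIT_MIRROR value; the requirement shown is an example, not taken from the repo:

# Hypothetical group_vars entry; the host name is an example only.
GIT_MIRROR: "git-mirror.example.com"

# Effect on a typical pinned requirement (illustrative):
#   before: git+https://github.com/edx/XBlock.git@master#egg=XBlock
#   after:  git+https://git-mirror.example.com/edx/XBlock.git@master#egg=XBlock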
# Install the python pre requirements into {{ venv_dir }}
- name : install python pre-requirements
pip: requirements="{{pre_requirements_file}}" virtualenv="{{venv_dir}}" state=present
pip: >
requirements="{{pre_requirements_file}}"
virtualenv="{{venv_dir}}"
state=present
extra_args="-i {{ PYPI_MIRROR_URL }}"
tags:
- lms
- cms
......@@ -105,7 +144,7 @@
# Need to use shell rather than pip so that we can maintain the context of our current working directory; some
# requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
# installs everything into that virtual environment.
shell: cd {{ edx_platform_code_dir }} && {{ venv_dir }}/bin/pip install --exists-action w --use-mirrors -r {{ base_requirements_file }}
shell: cd {{ edx_platform_code_dir }} && {{ venv_dir }}/bin/pip install -i {{ PYPI_MIRROR_URL }} --exists-action w --use-mirrors -r {{ base_requirements_file }}
tags:
- lms
- cms
......@@ -114,7 +153,11 @@
# Install the python post requirements into {{ venv_dir }}
- name : install python post-requirements
pip: requirements="{{post_requirements_file}}" virtualenv="{{venv_dir}}" state=present
pip: >
requirements="{{post_requirements_file}}"
virtualenv="{{venv_dir}}"
state=present
extra_args="-i {{ PYPI_MIRROR_URL }}"
tags:
- lms
- cms
......@@ -126,7 +169,7 @@
# Need to use shell rather than pip so that we can maintain the context of our current working directory; some
# requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
# installs everything into that virtual environment.
shell: cd {{ edx_platform_code_dir }} && {{ venv_dir }}/bin/pip install --exists-action w --use-mirrors -r {{ item }}
shell: cd {{ edx_platform_code_dir }} && {{ venv_dir }}/bin/pip install -i {{ PYPI_MIRROR_URL }} --exists-action w --use-mirrors -r {{ item }}
with_items:
- "{{ repo_requirements_file }}"
- "{{ github_requirements_file }}"
......@@ -143,7 +186,7 @@
# Need to use shell rather than pip so that we can maintain the context of our current working directory; some
# requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
# installs everything into that virtual environment.
shell: cd {{ edx_platform_code_dir }} && {{ venv_dir }}/bin/pip install --exists-action w --use-mirrors -r {{ item }}
shell: cd {{ edx_platform_code_dir }} && {{ venv_dir }}/bin/pip install -i {{ PYPI_MIRROR_URL }} --exists-action w --use-mirrors -r {{ item }}
with_items:
- "{{ sandbox_base_requirements }}"
- "{{ sandbox_local_requirements }}"
......@@ -158,36 +201,6 @@
- name: changing group ownership to www-data for everything in the venv (workaround)
shell: chgrp -R www-data {{ venv_dir }}
# Gather lms assets using rake if possible
- name: gather lms static assets with rake
shell: executable=/bin/bash chdir={{ edx_platform_code_dir }} SERVICE_VARIANT={{ lms_variant }} rake lms:gather_assets:aws
notify:
- restart edxapp
sudo: yes
sudo_user: www-data
when: celery_worker is not defined
environment: "{{ deploy_environment }}"
tags:
- lms
- lms-preview
- lms-xml
- deploy
# Gather cms assets using rake if possible
- name: gather cms static assets with rake
shell: executable=/bin/bash chdir={{ edx_platform_code_dir }} SERVICE_VARIANT={{ cms_variant }} rake cms:gather_assets:aws
notify:
- restart edxapp
sudo: yes
sudo_user: www-data
when: celery_worker is not defined
environment: "{{ deploy_environment }}"
tags:
- cms
- deploy
# https://code.launchpad.net/~wligtenberg/django-openid-auth/mysql_fix/+merge/22726
# This is necessary for when syncdb is run and the django_openid_auth module is installed,
# not sure if this fix will ever get merged
......@@ -198,7 +211,6 @@
tags:
- deploy
- lms
- lms-xml
- lms-preview
- cms
- syncdb
......@@ -209,7 +221,6 @@
tags:
- deploy
- lms
- lms-xml
- lms-preview
- cms
- syncdb
......@@ -220,17 +231,44 @@
tags:
- deploy
- lms
- lms-xml
- lms-preview
- cms
- syncdb
- migrate
# Gather lms assets using rake if possible
- name: gather lms static assets with rake
shell: executable=/bin/bash chdir={{ edx_platform_code_dir }} SERVICE_VARIANT={{ lms_variant }} rake lms:gather_assets:aws
notify:
- restart edxapp
sudo: yes
sudo_user: www-data
when: celery_worker is not defined
environment: "{{ deploy_environment }}"
tags:
- lms
- lms-preview
- deploy
# Gather cms assets using rake if possible
- name: gather cms static assets with rake
shell: executable=/bin/bash chdir={{ edx_platform_code_dir }} SERVICE_VARIANT={{ cms_variant }} rake cms:gather_assets:aws
notify:
- restart edxapp
sudo: yes
sudo_user: www-data
when: celery_worker is not defined
environment: "{{ deploy_environment }}"
tags:
- cms
- deploy
- name: restart edxapp
service: name=edxapp state=restarted
when: celery_worker is not defined
tags:
- lms
- lms-xml
- lms-preview
- cms
- deploy
......
# requires:
# - group_vars/all
# - common/tasks/main.yml
# - nginx/tasks/main.yml
---
- name: create lms application config
template: src=lms-preview.env.json.j2 dest=$app_base_dir/lms-preview.env.json mode=640 owner=www-data group=adm
tags:
- lms-preview
- lms-preview-env
- deploy
- name: create lms auth file
template: src=lms-preview.auth.json.j2 dest=$app_base_dir/lms-preview.auth.json mode=640 owner=www-data group=adm
tags:
- lms-preview
- lms-preview-env
- deploy
- name: Create lms-preview log target directory
file: path={{log_base_dir}}/lms-preview state=directory owner=syslog group=adm mode=2770
file: path={{log_base_dir}}/lms-preview state=directory owner=syslog group=syslog mode=2750
tags:
- lms-preview
- lms-preview-env
- logging
- update
- include: ../../nginx/tasks/nginx_site.yml state=link site_name=lms-preview
when: celery_worker is not defined
- include: ../../nginx/tasks/nginx_site.yml state=link site_name=lms-preview-backend
when: celery_worker is not defined
- deploy
# Creates LMS Preview upstart file
- include: upstart.yml basename=lms-preview
# requires:
# - group_vars/all
# - common/tasks/main.yml
# - nginx/tasks/main.yml
---
- name: create lms-xml application config
template: src=lms-xml.env.json.j2 dest=$app_base_dir/lms-xml.env.json mode=640 owner=www-data group=adm
tags:
- lms-xml-env
- lms-xml
- update
- name: create lms-xml auth file
template: src=lms-xml.auth.json.j2 dest=$app_base_dir/lms-xml.auth.json mode=640 owner=www-data group=adm
tags:
- lms-xml-env
- lms-xml
- update
- name: Create lms-xml log target directory
file: path={{log_base_dir}}/lms-xml state=directory owner=syslog group=adm mode=2770
tags:
- lms-xml
- lms-xml-env
- logging
- update
- include: ../../nginx/tasks/nginx_site.yml state=link site_name=lms-xml
when: celery_worker is not defined
- include: ../../nginx/tasks/nginx_site.yml state=link site_name=lms-xml-backend
when: celery_worker is not defined
# Creates upstart file
- include: upstart.yml basename=lms-xml
when: celery_worker is not defined
- include: upstart.yml basename=edx-worker-lms-xml
when: celery_worker is defined
......@@ -5,6 +5,7 @@
- lms
- lms-env
- update
- deploy
- name: create lms auth file
template: src=lms.auth.json.j2 dest=$app_base_dir/lms.auth.json mode=640 owner=www-data group=adm
......@@ -12,20 +13,16 @@
- lms
- lms-env
- update
- deploy
- name: Create lms log target directory
file: path={{log_base_dir}}/lms state=directory owner=syslog group=adm mode=2770
file: path={{log_base_dir}}/lms state=directory owner=syslog group=syslog mode=2750
tags:
- lms
- lms-env
- logging
- update
- include: ../../nginx/tasks/nginx_site.yml state=link site_name=lms
when: celery_worker is not defined
- include: ../../nginx/tasks/nginx_site.yml state=link site_name=lms-backend
when: celery_worker is not defined
- deploy
# Creates LMS upstart file
- include: upstart.yml basename=lms
......
# requires:
# - group_vars/all
# - common/tasks/main.yml
# - nginx/tasks/main.yml
---
- name: Change permissions on datadir
file: path={{ app_base_dir }}/data state=directory owner=www-data group=www-data
......@@ -35,16 +34,6 @@
- cms
- install
- include: lms.yml
when: "'lms' in service_variants_enabled"
- include: lms-xml.yml
when: "'lms-xml' in service_variants_enabled"
- include: cms.yml
when: "'cms' in service_variants_enabled"
- include: lms-preview.yml
when: "'lms-preview' in service_variants_enabled"
- name: creating edxapp upstart script
sudo: True
template: src=edxapp.conf.j2 dest=/etc/init/edxapp.conf owner=root group=root
......
......@@ -6,3 +6,4 @@
- upstart
- gunicorn
- update
- deploy
......@@ -16,8 +16,8 @@ env WORKERS={{ ansible_processor|length * worker_core_mult.cms }}
{% else %}
env WORKERS={{ worker_core_mult.cms }}
{% endif %}
env PORT={{edxapp_cms_app_port}}
env ADDRESS={{edxapp_cms_app_address}}
env PORT={{edxapp_cms_gunicorn_port}}
env ADDRESS={{edxapp_cms_gunicorn_host}}
env LANG=en_US.UTF-8
env DJANGO_SETTINGS_MODULE=cms.envs.aws
env SERVICE_VARIANT="cms"
......
# gunicorn
# Templated and placed by ansible from jinja2 source
# cms Celery Worker Upstart Script
description "cms celery worker"
stop on stopping edx-workers
respawn
instance edx.${SERVICE_VARIANT}.core.${QUEUE}
#env NEW_RELIC_CONFIG_FILE=/opt/wwc/newrelic.ini
#env NEWRELIC={{venv_dir}}/bin/newrelic-admin
env CONCURRENCY=${CONCURRENCY}
env LOGLEVEL=info
env DJANGO_SETTINGS_MODULE={{worker_django_settings_module}}
env PYTHONPATH={{edx_platform_code_dir}}
env SERVICE_VARIANT=${SERVICE_VARIANT}
setuid www-data
chdir {{edx_platform_code_dir}}
exec {{venv_dir}}/bin/python {{edx_platform_code_dir}}/manage.py lms --service-variant=$SERVICE_VARIANT --settings=$DJANGO_SETTINGS_MODULE celery worker --loglevel=$LOGLEVEL --queues=edx.${SERVICE_VARIANT}.core.${QUEUE} --hostname=edx.${SERVICE_VARIANT}.core.${QUEUE}.`hostname` --concurrency=$CONCURRENCY
......@@ -20,10 +20,4 @@ pre-start script
start edx-worker-lms QUEUE=high CONCURRENCY=4 SERVICE_VARIANT=lms
{% endif %}
{% if 'lms-xml' in service_variants_enabled %}
start edx-worker-lms-xml QUEUE=low CONCURRENCY=1 SERVICE_VARIANT=lms-xml
start edx-worker-lms-xml QUEUE=default CONCURRENCY=3 SERVICE_VARIANT=lms-xml
start edx-worker-lms-xml QUEUE=high CONCURRENCY=4 SERVICE_VARIANT=lms-xml
{% endif %}
end script
......@@ -11,12 +11,6 @@ stop on runlevel [!2345]
##
pre-start script
{% if 'lms-xml' in service_variants_enabled %}
if [ -e /etc/init/lms-xml.conf ]; then
start wait-for-state WAIT_FOR=lms-xml WAITER=$UPSTART_JOB
fi
{% endif %}
{% if 'lms' in service_variants_enabled %}
if [ -e /etc/init/lms.conf ]; then
start wait-for-state WAIT_FOR=lms WAITER=$UPSTART_JOB
......@@ -51,12 +45,6 @@ end script
pre-stop script
{% if 'lms-xml' in service_variants_enabled %}
if [ -e /etc/init/lms-xml.conf ]; then
start wait-for-state WAIT_FOR=lms-xml WAITER=$UPSTART_JOB TARGET_GOAL="stop"
fi
{% endif %}
{% if 'lms' in service_variants_enabled %}
if [ -e /etc/init/lms.conf ]; then
start wait-for-state WAIT_FOR=lms WAITER=$UPSTART_JOB TARGET_GOAL="stop"
......
......@@ -17,8 +17,8 @@ env WORKERS={{ ansible_processor|length * worker_core_mult.lms_preview }}
{% else %}
env WORKERS={{ worker_core_mult.lms_preview }}
{% endif %}
env PORT={{edxapp_lms_preview_app_port}}
env ADDRESS={{edxapp_lms_preview_app_address}}
env PORT={{edxapp_lms_preview_gunicorn_port}}
env ADDRESS={{edxapp_lms_preview_gunicorn_host}}
env LANG=en_US.UTF-8
env DJANGO_SETTINGS_MODULE=lms.envs.aws
env SERVICE_VARIANT="lms-preview"
......