Commit 6f83b1ba by Joe Blaylock

Merge remote-tracking branch 'origin/release' into edx-west/release

Conflicts:
	playbooks/roles/edxapp/defaults/main.yml
parents f68a2efd 6d5c5677
John Jarvis <jarv@edx.org>
Sef Kloninger <sef@kloninger.com>
Joe Blaylock <jrbl@jrbl.org> Joe Blaylock <jrbl@stanford.edu>
Vik Paruchuri <vik@edx.org>
Jason Bau <jbau@stanford.edu>
Ed Zarecor <ed@edx.org>
John Kern <kern3020@gmail.com>
Will Daly <will@edx.org>
Bethany LaPenta <lapentab@mit.edu>
Jay Zoldak <zoldak@edx.org> Jay Zoldak <jzoldak@edx.org>
Philippe Chiu <philippe.chiu@gmail.com>
Marko Seric <marko.seric@math.uzh.ch>
Feanil Patel <feanil@edx.org>
Calen Pennington <calen.pennington@gmail.com>
David Baumgold <david@davidbaumgold.com>
Kevin Luo <kevluo@edx.org>
Carson Gee <x@carsongee.com>
Xavier Antoviaque <xavier@antoviaque.org>
James Tauber <jtauber@jtauber.com>
...@@ -5,8 +5,8 @@
**This project is currently in alpha**
The goal of the edx/configuration project is to provide a simple, but
-flexible, way for anyone to stand up an instance of the edX platform
-that is fully configured and ready-to-go.
+flexible, way for anyone to stand up an instance of Open edX that is
+fully configured and ready-to-go.
Building the platform takes place to two phases:
......
Jinja2==2.6
PyYAML==3.10
ansible==1.3.1
argparse==1.2.1
boto==2.10.0
paramiko==1.10.1
pycrypto==2.6
wsgiref==0.1.2
...@@ -1602,6 +1602,12 @@
},
{
"IpProtocol":"tcp",
"FromPort":"9997",
"ToPort":"9997",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":"10016",
"ToPort":"10016",
"CidrIp":"0.0.0.0/0"
...@@ -1636,6 +1642,12 @@
},
{
"IpProtocol":"tcp",
"FromPort":"9997",
"ToPort":"9997",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":"9418",
"ToPort":"9418",
"CidrIp":"0.0.0.0/0"
......
...@@ -6,3 +6,4 @@
jinja2_extensions=jinja2.ext.do
hash_behaviour=merge
host_key_checking = False
# Ansible EC2 external inventory script settings
#
[ec2]
regions=all
destination_variable=public_dns_name
vpc_destination_variable=private_ip_address
cache_path=/tmp
cache_max_age=300
# to talk to a private eucalyptus instance uncomment these lines
# and edit eucalyptus_host to be the host name of your cloud controller
#eucalyptus = True
#eucalyptus_host = clc.cloud.domain.org
# AWS regions to make calls to. Set this to 'all' to make request to all regions
# in AWS and merge the results together. Alternatively, set this to a comma
# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2'
regions = all
regions_exclude = us-gov-west-1
# When generating inventory, Ansible needs to know how to address a server.
# Each EC2 instance has a lot of variables associated with it. Here is the list:
# http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance
# Below are 2 variables that are used as the address of a server:
# - destination_variable
# - vpc_destination_variable
# This is the normal destination variable to use. If you are running Ansible
# from outside EC2, then 'public_dns_name' makes the most sense. If you are
# running Ansible from within EC2, then perhaps you want to use the internal
# address, and should set this to 'private_dns_name'.
destination_variable = public_dns_name
# For servers inside a VPC, using DNS names may not make sense. When an instance
# has 'subnet_id' set, this variable is used. If the subnet is public, setting
# this to 'ip_address' will return the public IP address. For instances in a
# private subnet, this should be set to 'private_ip_address', and Ansible must
# be run from within EC2.
vpc_destination_variable = private_ip_address
# To tag instances on EC2 with the resource records that point to them from
# Route53, uncomment and set 'route53' to True.
route53 = False
# Additionally, you can specify the list of zones to exclude looking up in
# 'route53_excluded_zones' as a comma-separated list.
# route53_excluded_zones = samplezone1.com, samplezone2.com
# API calls to EC2 are slow. For this reason, we cache the results of an API
# call. Set this to the path you want cache files to be written to. Two files
# will be written to this directory:
# - ansible-ec2.cache
# - ansible-ec2.index
cache_path = /tmp
# The number of seconds a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
cache_max_age = 300
#!/usr/bin/env python #!/usr/bin/env python
import sys
import os
''' '''
EC2 external inventory script EC2 external inventory script
================================= =================================
...@@ -118,6 +115,8 @@ import re ...@@ -118,6 +115,8 @@ import re
from time import time from time import time
import boto import boto
from boto import ec2 from boto import ec2
from boto import rds
from boto import route53
import ConfigParser import ConfigParser
try: try:
...@@ -191,11 +190,13 @@ class Ec2Inventory(object): ...@@ -191,11 +190,13 @@ class Ec2Inventory(object):
# Regions # Regions
self.regions = [] self.regions = []
configRegions = config.get('ec2', 'regions') configRegions = config.get('ec2', 'regions')
configRegions_exclude = config.get('ec2', 'regions_exclude')
if (configRegions == 'all'): if (configRegions == 'all'):
if self.eucalyptus_host: if self.eucalyptus_host:
self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name) self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)
else: else:
for regionInfo in ec2.regions(): for regionInfo in ec2.regions():
if regionInfo.name not in configRegions_exclude:
self.regions.append(regionInfo.name) self.regions.append(regionInfo.name)
else: else:
self.regions = configRegions.split(",") self.regions = configRegions.split(",")
...@@ -204,6 +205,13 @@ class Ec2Inventory(object): ...@@ -204,6 +205,13 @@ class Ec2Inventory(object):
self.destination_variable = config.get('ec2', 'destination_variable') self.destination_variable = config.get('ec2', 'destination_variable')
self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable') self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
# Route53
self.route53_enabled = config.getboolean('ec2', 'route53')
self.route53_excluded_zones = []
if config.has_option('ec2', 'route53_excluded_zones'):
self.route53_excluded_zones.extend(
config.get('ec2', 'route53_excluded_zones', '').split(','))
# Cache related # Cache related
cache_path = config.get('ec2', 'cache_path') cache_path = config.get('ec2', 'cache_path')
self.cache_path_cache = cache_path + "/ansible-ec2.cache" self.cache_path_cache = cache_path + "/ansible-ec2.cache"
...@@ -232,8 +240,12 @@ class Ec2Inventory(object): ...@@ -232,8 +240,12 @@ class Ec2Inventory(object):
def do_api_calls_update_cache(self): def do_api_calls_update_cache(self):
''' Do API calls to each region, and save data in cache files ''' ''' Do API calls to each region, and save data in cache files '''
if self.route53_enabled:
self.get_route53_records()
for region in self.regions: for region in self.regions:
self.get_instances_by_region(region) self.get_instances_by_region(region)
self.get_rds_instances_by_region(region)
self.write_to_cache(self.inventory, self.cache_path_cache) self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index) self.write_to_cache(self.index, self.cache_path_index)
...@@ -250,6 +262,11 @@ class Ec2Inventory(object): ...@@ -250,6 +262,11 @@ class Ec2Inventory(object):
else: else:
conn = ec2.connect_to_region(region) conn = ec2.connect_to_region(region)
# connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
if conn is None:
print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
sys.exit(1)
reservations = conn.get_all_instances() reservations = conn.get_all_instances()
for reservation in reservations: for reservation in reservations:
for instance in reservation.instances: for instance in reservation.instances:
...@@ -261,6 +278,20 @@ class Ec2Inventory(object): ...@@ -261,6 +278,20 @@ class Ec2Inventory(object):
print e print e
sys.exit(1) sys.exit(1)
def get_rds_instances_by_region(self, region):
''' Makes an AWS API call to the list of RDS instances in a particular
region '''
try:
conn = rds.connect_to_region(region)
if conn:
instances = conn.get_all_dbinstances()
for instance in instances:
self.add_rds_instance(instance, region)
except boto.exception.BotoServerError as e:
print "Looks like AWS RDS is down: "
print e
sys.exit(1)
def get_instance(self, region, instance_id): def get_instance(self, region, instance_id):
''' Gets details about a specific instance ''' ''' Gets details about a specific instance '''
...@@ -270,6 +301,11 @@ class Ec2Inventory(object): ...@@ -270,6 +301,11 @@ class Ec2Inventory(object):
else: else:
conn = ec2.connect_to_region(region) conn = ec2.connect_to_region(region)
# connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
if conn is None:
print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
sys.exit(1)
reservations = conn.get_all_instances([instance_id]) reservations = conn.get_all_instances([instance_id])
for reservation in reservations: for reservation in reservations:
for instance in reservation.instances: for instance in reservation.instances:
...@@ -328,6 +364,111 @@ class Ec2Inventory(object): ...@@ -328,6 +364,111 @@ class Ec2Inventory(object):
key = self.to_safe("tag_" + k + "=" + v) key = self.to_safe("tag_" + k + "=" + v)
self.push(self.inventory, key, dest) self.push(self.inventory, key, dest)
# Inventory: Group by Route53 domain names if enabled
if self.route53_enabled:
route53_names = self.get_instance_route53_names(instance)
for name in route53_names:
self.push(self.inventory, name, dest)
def add_rds_instance(self, instance, region):
''' Adds an RDS instance to the inventory and index, as long as it is
addressable '''
# Only want available instances
if instance.status != 'available':
return
# Select the best destination address
#if instance.subnet_id:
#dest = getattr(instance, self.vpc_destination_variable)
#else:
#dest = getattr(instance, self.destination_variable)
dest = instance.endpoint[0]
if not dest:
# Skip instances we cannot address (e.g. private VPC subnet)
return
# Add to index
self.index[dest] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
self.inventory[instance.id] = [dest]
# Inventory: Group by region
self.push(self.inventory, region, dest)
# Inventory: Group by availability zone
self.push(self.inventory, instance.availability_zone, dest)
# Inventory: Group by instance type
self.push(self.inventory, self.to_safe('type_' + instance.instance_class), dest)
# Inventory: Group by security group
try:
if instance.security_group:
key = self.to_safe("security_group_" + instance.security_group.name)
self.push(self.inventory, key, dest)
except AttributeError:
print 'Package boto seems a bit older.'
print 'Please upgrade boto >= 2.3.0.'
sys.exit(1)
# Inventory: Group by engine
self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest)
# Inventory: Group by parameter group
self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest)
def get_route53_records(self):
''' Get and store the map of resource records to domain names that
point to them. '''
r53_conn = route53.Route53Connection()
all_zones = r53_conn.get_zones()
route53_zones = [ zone for zone in all_zones if zone.name[:-1]
not in self.route53_excluded_zones ]
self.route53_records = {}
for zone in route53_zones:
rrsets = r53_conn.get_all_rrsets(zone.id)
for record_set in rrsets:
record_name = record_set.name
if record_name.endswith('.'):
record_name = record_name[:-1]
for resource in record_set.resource_records:
self.route53_records.setdefault(resource, set())
self.route53_records[resource].add(record_name)
def get_instance_route53_names(self, instance):
''' Check if an instance is referenced in the records we have from
Route53. If it is, return the list of domain names pointing to said
instance. If nothing points to it, return an empty list. '''
instance_attributes = [ 'public_dns_name', 'private_dns_name',
'ip_address', 'private_ip_address' ]
name_list = set()
for attrib in instance_attributes:
try:
value = getattr(instance, attrib)
except AttributeError:
continue
if value in self.route53_records:
name_list.update(self.route53_records[value])
return list(name_list)
def get_host_info(self): def get_host_info(self):
''' Get variables about a specific host ''' ''' Get variables about a specific host '''
...@@ -387,7 +528,7 @@ class Ec2Inventory(object): ...@@ -387,7 +528,7 @@ class Ec2Inventory(object):
the dict ''' the dict '''
if key in my_dict: if key in my_dict:
my_dict[key].append(element) my_dict[key].append(element);
else: else:
my_dict[key] = [element] my_dict[key] = [element]
...@@ -438,4 +579,3 @@ class Ec2Inventory(object): ...@@ -438,4 +579,3 @@ class Ec2Inventory(object):
# Run the script # Run the script
Ec2Inventory() Ec2Inventory()
- name: Deploy edxapp
hosts: all
sudo: True
gather_facts: True
roles:
- edxapp
- name: Deploy forum
hosts: all
sudo: True
gather_facts: True
roles:
- forum
- name: Deploy ora
hosts: all
sudo: True
gather_facts: True
roles:
- ora
- name: Deploy xqueue
hosts: all
sudo: True
gather_facts: True
roles:
- xqueue
- name: Deploy xserver
hosts: all
sudo: True
gather_facts: True
roles:
- xserver
- name: Configure instance(s)
hosts: all
sudo: True
gather_facts: True
serial: 10
vars:
migrate_db: "yes"
openid_workaround: True
roles:
- ansible_debug
- common
- role: nginx
nginx_sites:
- cms
- lms
- lms-preview
- ora
- xqueue
- xserver
#- discern
- edxlocal
- edxapp
- { role: 'rabbitmq', rabbitmq_ip: '127.0.0.1' }
- { role: 'edxapp', celery_worker: True }
- oraclejdk
- elasticsearch
- role: rbenv
rbenv_user: "{{ forum_user }}"
rbenv_user_home: "{{ forum_home }}"
rbenv_ruby_version: "{{ forum_ruby_version }}"
- forum
- role: virtualenv
virtualenv_user: "{{ xqueue_user }}"
virtualenv_user_home: "{{ xqueue_user_home }}"
virtualenv_name: "{{ xqueue_user }}"
- { role: "xqueue", update_users: True }
- xserver
- ora
#- discern
--- ---
- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_bastion
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/dev2.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_edxapp - hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_edxapp
sudo: True sudo: True
vars_files: vars_files:
...@@ -8,10 +16,14 @@ ...@@ -8,10 +16,14 @@
roles: roles:
- common - common
- datadog - datadog
- nginx - role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- role: 'edxapp' - role: 'edxapp'
lms_nginx_port: 80 EDXAPP_LMS_NGINX_PORT: 80
cms_nginx_port: 80 EDXAPP_CMS_NGINX_PORT: 80
edxapp_lms_env: 'lms.envs.load_test' edxapp_lms_env: 'lms.envs.load_test'
edx_platform_commit: 'sarina/install-datadog' edx_platform_commit: 'sarina/install-datadog'
- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_worker - hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_worker
...@@ -23,7 +35,11 @@ ...@@ -23,7 +35,11 @@
roles: roles:
- common - common
- datadog - datadog
- nginx - role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- role: 'edxapp' - role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test' edxapp_lms_env: 'lms.envs.load_test'
celery_worker: True celery_worker: True
......
...@@ -8,12 +8,15 @@ ...@@ -8,12 +8,15 @@
roles: roles:
- common - common
- datadog - datadog
- nginx - role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- role: 'edxapp' - role: 'edxapp'
lms_nginx_port: 80
cms_nginx_port: 80
edxapp_lms_env: 'lms.envs.load_test' edxapp_lms_env: 'lms.envs.load_test'
edx_platform_commit: 'master' edx_platform_commit: 'release'
- splunkforwarder
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_group_worker - hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_group_worker
sudo: True sudo: True
vars_files: vars_files:
...@@ -23,11 +26,16 @@ ...@@ -23,11 +26,16 @@
roles: roles:
- common - common
- datadog - datadog
- nginx - role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- role: 'edxapp' - role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test' edxapp_lms_env: 'lms.envs.load_test'
celery_worker: True celery_worker: True
edx_platform_commit: 'master' edx_platform_commit: 'release'
- splunkforwarder
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_group_xserver - hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_group_xserver
sudo: True sudo: True
vars_files: vars_files:
...@@ -35,8 +43,11 @@ ...@@ -35,8 +43,11 @@
- "{{ secure_dir }}/vars/users.yml" - "{{ secure_dir }}/vars/users.yml"
roles: roles:
- common - common
- nginx - role: nginx
nginx_sites:
- xserver - xserver
- xserver
- splunkforwarder
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_group_rabbitmq - hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_group_rabbitmq
serial: 1 serial: 1
sudo: True sudo: True
...@@ -46,6 +57,7 @@ ...@@ -46,6 +57,7 @@
roles: roles:
- common - common
- rabbitmq - rabbitmq
- splunkforwarder
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_group_xqueue - hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_group_xqueue
sudo: True sudo: True
vars_files: vars_files:
...@@ -53,5 +65,8 @@ ...@@ -53,5 +65,8 @@
- "{{ secure_dir }}/vars/users.yml" - "{{ secure_dir }}/vars/users.yml"
roles: roles:
- common - common
- nginx - role: nginx
nginx_sites:
- xqueue
- xqueue - xqueue
- splunkforwarder
# ansible-playbook --limit tag_Name_mirror edx_mirror.yml --user ubuntu -i ec2.py
- name: Configure instance(s)
hosts: all
sudo: True
gather_facts: False
roles:
- common
- role: nginx
nginx_sites:
- devpi
- gh_mirror
tags: ['r_nginx']
- role: supervisor
supervisor_servers:
- devpi
- role: devpi
tags: ['r_devpi']
- role: gh_mirror
tags: ['r_gh_mirror']
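# Note for illustration (not part of the original play): because the roles
# above are tagged ('r_nginx', 'r_devpi', 'r_gh_mirror'), a single role can be
# re-applied on its own by limiting the run to its tag, e.g.:
#
#   ansible-playbook --limit tag_Name_mirror edx_mirror.yml --user ubuntu -i ec2.py --tags r_gh_mirror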
- name: Create ec2 instance
hosts: localhost
connection: local
gather_facts: False
pre_tasks:
roles:
- role: launch_ec2
keypair: "{{ keypair }}"
instance_type: "{{ instance_type }}"
security_group: "{{ security_group }}"
ami_image: "{{ ami }}"
region: "{{ region }}"
instance_tags: "{{ instance_tags }}"
root_ebs_size: "{{ root_ebs_size }}"
dns_name: "{{ dns_name }}"
dns_zone: "{{ dns_zone }}"
terminate_instance: true
- name: Configure instance(s)
hosts: launched
sudo: True
gather_facts: True
roles:
# gh_users hash must be passed
# in as a -e variable
- gh_users
...@@ -13,8 +13,13 @@ ...@@ -13,8 +13,13 @@
mysql5_workaround: True mysql5_workaround: True
roles: roles:
- common - common
- nginx - role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- edxlocal - edxlocal
- mongo
- edxapp - edxapp
- rabbitmq - rabbitmq
- oraclejdk - oraclejdk
......
../inventory.ini
\ No newline at end of file
../library
\ No newline at end of file
...@@ -32,7 +32,12 @@ ...@@ -32,7 +32,12 @@
- "{{ secure_dir }}/vars/edxapp_prod_users.yml" - "{{ secure_dir }}/vars/edxapp_prod_users.yml"
roles: roles:
- common - common
- nginx - role: nginx
- {'role': 'edxapp', 'openid_workaround': true} nginx_conf: true
nginx_sites:
- lms
- cms
- lms-preview
- {'role': 'edxapp', 'openid_workaround': true, 'template_subdir': 'cme'}
# run this role last # run this role last
# - in_production # - in_production
[ec2] [ec2]
regions=us-west-1 regions=us-west-1
regions_exclude = us-gov-west-1
destination_variable=public_dns_name destination_variable=public_dns_name
vpc_destination_variable=private_dns_name vpc_destination_variable=private_dns_name
cache_path=/tmp cache_path=/tmp
cache_max_age=300 cache_max_age=300
route53=False
...@@ -15,7 +15,11 @@ ...@@ -15,7 +15,11 @@
state: 'absent' state: 'absent'
roles: roles:
- common - common
- nginx - role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- edxapp - edxapp
- ruby - ruby
post_tasks: post_tasks:
......
...@@ -26,7 +26,11 @@ ...@@ -26,7 +26,11 @@
- "{{ secure_dir }}/vars/shib_prod_vars.yml" - "{{ secure_dir }}/vars/shib_prod_vars.yml"
roles: roles:
- common - common
- nginx - role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- edxapp - edxapp
- apache - apache
- shibboleth - shibboleth
......
# this gets all running prod webservers # this gets all running prod webservers
#- hosts: tag_environment_prod:&tag_function_ora #- hosts: tag_environment_prod:&tag_function_ora
# or we can get subsets of them by name # or we can get subsets of them by name
- hosts: ~tag_Name_ora(10|11)_prod #- hosts: ~tag_Name_ora(10|11)_prod
- hosts: ~tag_Name_ora10_prod
#- hosts: ~tag_Name_ora11_prod
#- hosts: security_group_edx-prod-EdxappServerSecurityGroup-NSKCQTMZIPQB #- hosts: security_group_edx-prod-EdxappServerSecurityGroup-NSKCQTMZIPQB
sudo: True sudo: True
vars: vars:
...@@ -16,5 +18,7 @@ ...@@ -16,5 +18,7 @@
- "{{ secure_dir }}/vars/edxapp_prod_users.yml" - "{{ secure_dir }}/vars/edxapp_prod_users.yml"
roles: roles:
- common - common
- nginx - role: nginx
nginx_sites:
- ora
- ora - ora
...@@ -15,5 +15,7 @@ ...@@ -15,5 +15,7 @@
- "{{ secure_dir }}/vars/edxapp_prod_users.yml" - "{{ secure_dir }}/vars/edxapp_prod_users.yml"
roles: roles:
- common - common
- nginx - role: nginx
nginx_sites:
- xqueue
- xqueue - xqueue
...@@ -14,10 +14,14 @@ ...@@ -14,10 +14,14 @@
- "{{ secure_dir }}/vars/edxapp_stage_vars.yml" - "{{ secure_dir }}/vars/edxapp_stage_vars.yml"
- "{{ secure_dir }}/vars/users.yml" - "{{ secure_dir }}/vars/users.yml"
- "{{ secure_dir }}/vars/edxapp_stage_users.yml" - "{{ secure_dir }}/vars/edxapp_stage_users.yml"
#- "{{ secure_dir }}/vars/shib_stage_vars.yml" - "{{ secure_dir }}/vars/shib_stage_vars.yml"
roles: roles:
- common - common
- nginx - role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- edxapp - edxapp
#- apache - apache
#- shibboleth - shibboleth
- hosts: localhost
#- hosts: tag_Name_app1_stage
vars:
migrate_db: "no"
not_prod: true
secure_dir: ../../../edx-secret/ansible
local_dir: ../../../edx-secret/ansible/local
vars_files:
- "{{ secure_dir }}/vars/edxapp_stage_vars.yml"
- "{{ secure_dir }}/vars/users.yml"
- "{{ secure_dir }}/vars/edxapp_stage_users.yml"
#- "{{ secure_dir }}/vars/shib_stage_vars.yml"
roles:
- common
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- edxapp
- ansible_debug
#- apache
#- shibboleth
# run the notifier on the first util machine only
- hosts: ~tag_Name_util10_stage
sudo: True
vars:
secure_dir: '../../../configuration-secure/ansible'
migrate_db: "no"
vars_files:
- "{{ secure_dir }}/vars/edxapp_stage_vars.yml"
- "{{ secure_dir }}/vars/notifier_stage_vars.yml"
roles:
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
virtualenv_name: "notifier"
- notifier
...@@ -10,6 +10,8 @@ ...@@ -10,6 +10,8 @@
- "{{ secure_dir }}/vars/edxapp_stage_users.yml" - "{{ secure_dir }}/vars/edxapp_stage_users.yml"
roles: roles:
- common - common
- nginx - role: nginx
nginx_sites:
- ora
- ora - ora
# this gets all running stage util machines
- hosts: tag_environment_stage:&tag_function_util - hosts: tag_environment_stage:&tag_function_util
# or we can get subsets of them by name # or we can get subsets of them by name
#- hosts: ~tag_Name_util(1|2)_stage #- hosts: ~tag_Name_util(1|2)_stage
......
...@@ -10,7 +10,9 @@ ...@@ -10,7 +10,9 @@
- "{{ secure_dir }}/vars/edxapp_stage_users.yml" - "{{ secure_dir }}/vars/edxapp_stage_users.yml"
roles: roles:
- common - common
- nginx - role: nginx
nginx_sites:
- xqueue
- xqueue - xqueue
#- hosts: tag_aws_cloudformation_stack-name_feanilpractice:&tag_group_edxapp #- hosts: tag_aws_cloudformation_stack-name_feanilpractice:&tag_group_edxapp
......
...@@ -14,8 +14,16 @@ ...@@ -14,8 +14,16 @@
- "{{ secure_dir }}/vars/edx_jenkins_tests.yml" - "{{ secure_dir }}/vars/edx_jenkins_tests.yml"
roles: roles:
- common - common
- nginx - role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- xqueue
- xserver
- ora
- edxlocal - edxlocal
- mongo
- edxapp - edxapp
- xqueue - xqueue
- xserver - xserver
......
...@@ -26,8 +26,15 @@ ...@@ -26,8 +26,15 @@
- "{{ secure_dir }}/vars/edx_jenkins_tests.yml" - "{{ secure_dir }}/vars/edx_jenkins_tests.yml"
roles: roles:
- common - common
- nginx - role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- xserver
- xqueue
- edxlocal - edxlocal
- mongo
- edxapp - edxapp
- xqueue - xqueue
- xserver - xserver
......
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
# lms listening on port 80 - example.com # lms listening on port 80 - example.com
# lms-preview listening on port 80 - preview.example.com # lms-preview listening on port 80 - preview.example.com
# #
# ansible-playbook -c local --limit "localhost:127.0.0.1" path/to/configuration/playbooks/edx_sandbox.yml -i "localhost," -e "cms_nginx_port=80 lms_preview_nginx_port=80 c_lms_base=example.com c_preview_lms_base=preview.example.com" # ansible-playbook -c local --limit "localhost:127.0.0.1" path/to/configuration/playbooks/edx_sandbox.yml -i "localhost," -e "EDXAPP_CMS_NGINX_PORT=80 EDXAPP_LMS_PREVIEW_NGINX_PORT=80 EDXAPP_LMS_BASE=example.com EDXAPP_PREVIEW_LMS_BASE=preview.example.com"
# #
- name: Configure instance(s) - name: Configure instance(s)
hosts: localhost hosts: localhost
...@@ -23,8 +23,14 @@ ...@@ -23,8 +23,14 @@
openid_workaround: True openid_workaround: True
roles: roles:
- common - common
- nginx - role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- ora
- edxlocal - edxlocal
- mongo
- edxapp - edxapp
- { role: 'rabbitmq', rabbitmq_ip: '127.0.0.1' } - { role: 'rabbitmq', rabbitmq_ip: '127.0.0.1' }
- { role: 'edxapp', celery_worker: True } - { role: 'edxapp', celery_worker: True }
......
...@@ -5,7 +5,11 @@ ...@@ -5,7 +5,11 @@
- "{{ secure_dir }}/vars/edxapp_ref_users.yml" - "{{ secure_dir }}/vars/edxapp_ref_users.yml"
roles: roles:
- common - common
- nginx - role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- gunicorn - gunicorn
- edxapp - edxapp
- ruby - ruby
......
...@@ -15,7 +15,11 @@ ...@@ -15,7 +15,11 @@
state: 'absent' state: 'absent'
roles: roles:
- common - common
- nginx - role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- edxapp - edxapp
- ruby - ruby
post_tasks: post_tasks:
......
...@@ -2,11 +2,15 @@ ...@@ -2,11 +2,15 @@
# This should only have variables # This should only have variables
# that are applicable to all edX roles # that are applicable to all edX roles
storage_base_dir: /mnt
app_base_dir: /opt/wwc app_base_dir: /opt/wwc
log_base_dir: /mnt/logs log_base_dir: "{{ storage_base_dir }}/logs"
venv_dir: /opt/edx venv_dir: /opt/edx
os_name: ubuntu os_name: ubuntu
ENV_NAME: 'default_env'
ENV_TYPE: 'default_type'
# these pathes are relative to the playbook dir # these pathes are relative to the playbook dir
# directory for secret settings (keys, etc) # directory for secret settings (keys, etc)
secure_dir: 'secure_example' secure_dir: 'secure_example'
...@@ -14,3 +18,7 @@ secure_dir: 'secure_example' ...@@ -14,3 +18,7 @@ secure_dir: 'secure_example'
# this indicates the path to site-specific (with precedence) # this indicates the path to site-specific (with precedence)
# things like nginx template files # things like nginx template files
local_dir: '../../ansible_local' local_dir: '../../ansible_local'
# include http/https
PYPI_MIRROR_URL: 'https://pypi.python.org/simple'
# do not include http/https
GIT_MIRROR: 'github.com'
- name: Configure instance(s)
hosts: jenkins
sudo: True
gather_facts: True
roles:
- common
- edxlocal
- role: rbenv
rbenv_user: "{{ jenkins_user }}"
rbenv_user_home: "{{ jenkins_user_home }}"
rbenv_ruby_version: "{{ jenkins_ruby_version }}"
- jenkins
# Configure a Jenkins master instance
# This has the Jenkins Java app, but none of the requirements
# to run the tests.
- name: Configure instance(s)
hosts: jenkins_master
sudo: True
gather_facts: True
roles:
- jenkins_master
# Configure a Jenkins worker instance
# This has all the requirements to run test jobs,
# but not the Jenkins Java app.
- name: Configure instance(s)
hosts: jenkins_worker
sudo: True
gather_facts: True
roles:
- jenkins_worker
...@@ -30,7 +30,7 @@ options: ...@@ -30,7 +30,7 @@ options:
aliases: ['keypair'] aliases: ['keypair']
id: id:
description: description:
- identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances. - identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances. This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on. For details, see the description of client token at U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
required: false required: false
default: null default: null
aliases: [] aliases: []
...@@ -43,24 +43,24 @@ options: ...@@ -43,24 +43,24 @@ options:
group_id: group_id:
version_added: "1.1" version_added: "1.1"
description: description:
- security group id to use with the instance - security group id (or list of ids) to use with the instance
required: false required: false
default: null default: null
aliases: [] aliases: []
region: region:
version_added: "1.2" version_added: "1.2"
description: description:
- the EC2 region to use - The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false required: false
default: null default: null
aliases: [] aliases: [ 'aws_region', 'ec2_region' ]
zone: zone:
version_added: "1.2" version_added: "1.2"
description: description:
- availability zone in which to launch the instance - AWS availability zone in which to launch the instance
required: false required: false
default: null default: null
aliases: [] aliases: [ 'aws_zone', 'ec2_zone' ]
instance_type: instance_type:
description: description:
- instance type to use for the instance - instance type to use for the instance
...@@ -99,22 +99,22 @@ options: ...@@ -99,22 +99,22 @@ options:
aliases: [] aliases: []
ec2_url: ec2_url:
description: description:
- url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints) - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Must be specified if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used
required: false required: false
default: null default: null
aliases: [] aliases: []
ec2_secret_key: aws_secret_key:
description: description:
- ec2 secret key - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false required: false
default: null default: null
aliases: [] aliases: [ 'ec2_secret_key', 'secret_key' ]
ec2_access_key: aws_access_key:
description: description:
- ec2 access key - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false required: false
default: null default: null
aliases: [] aliases: [ 'ec2_access_key', 'access_key' ]
count: count:
description: description:
- number of instances to launch - number of instances to launch
...@@ -163,6 +163,13 @@ options: ...@@ -163,6 +163,13 @@ options:
required: false required: false
defualt: null defualt: null
aliases: [] aliases: []
instance_profile_name:
version_added: "1.3"
description:
- Name of the IAM instance profile to use. Boto library must be 2.5.0+
required: false
default: null
aliases: []
instance_ids: instance_ids:
version_added: "1.3" version_added: "1.3"
description: description:
...@@ -177,13 +184,22 @@ options: ...@@ -177,13 +184,22 @@ options:
required: false required: false
default: 'present' default: 'present'
aliases: [] aliases: []
root_ebs_size:
version_added: "1.4"
description:
- size of the root volume in gigabytes
required: false
default: null
aliases: []
requirements: [ "boto" ] requirements: [ "boto" ]
author: Seth Vidal, Tim Gerla, Lester Wade author: Seth Vidal, Tim Gerla, Lester Wade, John Jarvis
''' '''
EXAMPLES = ''' EXAMPLES = '''
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic provisioning example # Basic provisioning example
- local_action: - local_action:
module: ec2 module: ec2
...@@ -194,6 +210,17 @@ EXAMPLES = ''' ...@@ -194,6 +210,17 @@ EXAMPLES = '''
group: webserver group: webserver
count: 3 count: 3
# Basic provisioning example with setting the root volume size to 50GB
- local_action:
module: ec2
keypair: mykey
instance_type: c1.medium
image: emi-40603AD1
wait: yes
group: webserver
count: 3
root_ebs_size: 50
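# A hypothetical sketch (not one of the shipped examples): launching with the
# new instance_profile_name parameter to attach an IAM instance profile.
# Requires boto >= 2.5.0; the profile name below is illustrative only.
- local_action:
    module: ec2
    keypair: mykey
    instance_type: c1.medium
    image: emi-40603AD1
    wait: yes
    group: webserver
    instance_profile_name: my-app-profile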
# Advanced example with tagging and CloudWatch # Advanced example with tagging and CloudWatch
- local_action: - local_action:
module: ec2 module: ec2
...@@ -226,7 +253,7 @@ local_action: ...@@ -226,7 +253,7 @@ local_action:
instance_type: m1.small instance_type: m1.small
image: ami-6e649707 image: ami-6e649707
wait: yes wait: yes
vpc_subnet_id: subnet-29e63245' vpc_subnet_id: subnet-29e63245
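# A hedged sketch (group ids below are placeholders): group_id now also accepts
# a list of security group ids, matching the updated option documentation above.
- local_action:
    module: ec2
    keypair: mykey
    instance_type: m1.small
    image: ami-6e649707
    wait: yes
    vpc_subnet_id: subnet-29e63245
    group_id: ['sg-aaaa1111', 'sg-bbbb2222']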
# Launch instances, runs some tasks # Launch instances, runs some tasks
...@@ -244,14 +271,14 @@ local_action: ...@@ -244,14 +271,14 @@ local_action:
region: us-east-1 region: us-east-1
tasks: tasks:
- name: Launch instance - name: Launch instance
local_action: ec2 keypair=$keypair group=$security_group instance_type=$instance_type image=$image wait=true region=$region local_action: ec2 keypair={{ keypair }} group={{ security_group }} instance_type={{ instance_type }} image={{ image }} wait=true region={{ region }}
register: ec2 register: ec2
- name: Add new instance to host group - name: Add new instance to host group
local_action: add_host hostname=${item.public_ip} groupname=launched local_action: add_host hostname={{ item.public_ip }} groupname=launched
with_items: ${ec2.instances} with_items: ec2.instances
- name: Wait for SSH to come up - name: Wait for SSH to come up
local_action: wait_for host=${item.public_dns_name} port=22 delay=60 timeout=320 state=started local_action: wait_for host={{ item.public_dns_name }} port=22 delay=60 timeout=320 state=started
with_items: ${ec2.instances} with_items: ec2.instances
- name: Configure instance(s) - name: Configure instance(s)
hosts: launched hosts: launched
...@@ -269,13 +296,22 @@ local_action: ...@@ -269,13 +296,22 @@ local_action:
local_action: local_action:
module: ec2 module: ec2
state: 'absent' state: 'absent'
instance_ids: {{ec2.intance_ids}} instance_ids: {{ec2.instance_ids}}
''' '''
import sys import sys
import time import time
AWS_REGIONS = ['ap-northeast-1',
'ap-southeast-1',
'ap-southeast-2',
'eu-west-1',
'sa-east-1',
'us-east-1',
'us-west-1',
'us-west-2']
try: try:
import boto.ec2 import boto.ec2
from boto.exception import EC2ResponseError from boto.exception import EC2ResponseError
...@@ -289,9 +325,7 @@ def get_instance_info(inst): ...@@ -289,9 +325,7 @@ def get_instance_info(inst):
Retrieves instance information from an instance Retrieves instance information from an instance
ID and returns it as a dictionary ID and returns it as a dictionary
""" """
instance_info = {'id': inst.id,
return({
'id': inst.id,
'ami_launch_index': inst.ami_launch_index, 'ami_launch_index': inst.ami_launch_index,
'private_ip': inst.private_ip_address, 'private_ip': inst.private_ip_address,
'private_dns_name': inst.private_dns_name, 'private_dns_name': inst.private_dns_name,
...@@ -302,7 +336,6 @@ def get_instance_info(inst): ...@@ -302,7 +336,6 @@ def get_instance_info(inst):
'architecture': inst.architecture, 'architecture': inst.architecture,
'image_id': inst.image_id, 'image_id': inst.image_id,
'key_name': inst.key_name, 'key_name': inst.key_name,
'virtualization_type': inst.virtualization_type,
'placement': inst.placement, 'placement': inst.placement,
'kernel': inst.kernel, 'kernel': inst.kernel,
'ramdisk': inst.ramdisk, 'ramdisk': inst.ramdisk,
...@@ -311,15 +344,32 @@ def get_instance_info(inst): ...@@ -311,15 +344,32 @@ def get_instance_info(inst):
'root_device_type': inst.root_device_type, 'root_device_type': inst.root_device_type,
'root_device_name': inst.root_device_name, 'root_device_name': inst.root_device_name,
'state': inst.state, 'state': inst.state,
'hypervisor': inst.hypervisor 'hypervisor': inst.hypervisor}
}) try:
instance_info['virtualization_type'] = getattr(inst,'virtualization_type')
except AttributeError:
instance_info['virtualization_type'] = None
return instance_info
def boto_supports_profile_name_arg(ec2):
"""
Check if Boto library has instance_profile_name argument. instance_profile_name has been added in Boto 2.5.0
ec2: authenticated ec2 connection object
Returns:
True if Boto library accept instance_profile_name argument, else false
"""
run_instances_method = getattr(ec2, 'run_instances')
return 'instance_profile_name' in run_instances_method.func_code.co_varnames
def create_instances(module, ec2): def create_instances(module, ec2):
""" """
Creates new instances Creates new instances
module : AnsbileModule object module : AnsibleModule object
ec2: authenticated ec2 connection object ec2: authenticated ec2 connection object
Returns: Returns:
...@@ -345,9 +395,18 @@ def create_instances(module, ec2): ...@@ -345,9 +395,18 @@ def create_instances(module, ec2):
instance_tags = module.params.get('instance_tags') instance_tags = module.params.get('instance_tags')
vpc_subnet_id = module.params.get('vpc_subnet_id') vpc_subnet_id = module.params.get('vpc_subnet_id')
private_ip = module.params.get('private_ip') private_ip = module.params.get('private_ip')
instance_profile_name = module.params.get('instance_profile_name')
root_ebs_size = module.params.get('root_ebs_size')
if root_ebs_size:
dev_sda1 = boto.ec2.blockdevicemapping.EBSBlockDeviceType()
dev_sda1.size = root_ebs_size
bdm = boto.ec2.blockdevicemapping.BlockDeviceMapping()
bdm['/dev/sda1'] = dev_sda1
else:
bdm = None
# group_id and group_name are exclusive of each other
# Here we try to lookup the group name from the security group id - if group_id is set.
if group_id and group_name: if group_id and group_name:
module.fail_json(msg = str("Use only one type of parameter (group_name) or (group_id)")) module.fail_json(msg = str("Use only one type of parameter (group_name) or (group_id)"))
sys.exit(1) sys.exit(1)
...@@ -357,6 +416,8 @@ def create_instances(module, ec2): ...@@ -357,6 +416,8 @@ def create_instances(module, ec2):
if group_name: if group_name:
grp_details = ec2.get_all_security_groups() grp_details = ec2.get_all_security_groups()
if type(group_name) == list: if type(group_name) == list:
# FIXME: this should be a nice list comprehension
# also not py 2.4 compliant
group_id = list(filter(lambda grp: str(grp.id) if str(tmp) in str(grp) else None, grp_details) for tmp in group_name) group_id = list(filter(lambda grp: str(grp.id) if str(tmp) in str(grp) else None, grp_details) for tmp in group_name)
elif type(group_name) == str: elif type(group_name) == str:
for grp in grp_details: for grp in grp_details:
...@@ -365,9 +426,12 @@ def create_instances(module, ec2): ...@@ -365,9 +426,12 @@ def create_instances(module, ec2):
group_name = [group_name] group_name = [group_name]
# Now we try to lookup the group id testing if group exists. # Now we try to lookup the group id testing if group exists.
elif group_id: elif group_id:
#wrap the group_id in a list if it's not one already
if type(group_id) == str:
group_id = [group_id]
grp_details = ec2.get_all_security_groups(group_ids=group_id) grp_details = ec2.get_all_security_groups(group_ids=group_id)
grp_item = grp_details[0] grp_item = grp_details[0]
group_name = [grp_item.name ] group_name = [grp_item.name]
except boto.exception.NoAuthHandlerFound, e: except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg = str(e)) module.fail_json(msg = str(e))
...@@ -386,7 +450,10 @@ def create_instances(module, ec2): ...@@ -386,7 +450,10 @@ def create_instances(module, ec2):
# Both min_count and max_count equal count parameter. This means the launch request is explicit (we want count, or fail) in how many instances we want. # Both min_count and max_count equal count parameter. This means the launch request is explicit (we want count, or fail) in how many instances we want.
if count_remaining > 0: if count_remaining == 0:
changed = False
else:
changed = True
try: try:
params = {'image_id': image, params = {'image_id': image,
'key_name': key_name, 'key_name': key_name,
...@@ -401,7 +468,15 @@ def create_instances(module, ec2): ...@@ -401,7 +468,15 @@ def create_instances(module, ec2):
'ramdisk_id': ramdisk, 'ramdisk_id': ramdisk,
'subnet_id': vpc_subnet_id, 'subnet_id': vpc_subnet_id,
'private_ip_address': private_ip, 'private_ip_address': private_ip,
'user_data': user_data} 'user_data': user_data,
'block_device_map': bdm}
if boto_supports_profile_name_arg(ec2):
params['instance_profile_name'] = instance_profile_name
else:
if instance_profile_name is not None:
module.fail_json(
msg="instance_profile_name parameter requires Boto version 2.5.0 or higher")
if vpc_subnet_id: if vpc_subnet_id:
params['security_group_ids'] = group_id params['security_group_ids'] = group_id
...@@ -431,15 +506,24 @@ def create_instances(module, ec2): ...@@ -431,15 +506,24 @@ def create_instances(module, ec2):
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
# wait here until the instances are up # wait here until the instances are up
res_list = res.connection.get_all_instances(instids) this_res = []
this_res = res_list[0]
num_running = 0 num_running = 0
wait_timeout = time.time() + wait_timeout wait_timeout = time.time() + wait_timeout
while wait and wait_timeout > time.time() and num_running < len(instids): while wait_timeout > time.time() and num_running < len(instids):
res_list = res.connection.get_all_instances(instids) res_list = res.connection.get_all_instances(instids)
if len(res_list) > 0:
this_res = res_list[0] this_res = res_list[0]
num_running = len([ i for i in this_res.instances if i.state=='running' ]) num_running = len([ i for i in this_res.instances if i.state=='running' ])
else:
# got a bad response of some sort, possibly due to
# stale/cached data. Wait a second and then try again
time.sleep(1)
continue
if wait and num_running < len(instids):
time.sleep(5) time.sleep(5)
else:
break
if wait and wait_timeout <= time.time(): if wait and wait_timeout <= time.time():
# waiting took too long # waiting took too long
module.fail_json(msg = "wait for instances running timeout on %s" % time.asctime()) module.fail_json(msg = "wait for instances running timeout on %s" % time.asctime())
...@@ -454,7 +538,7 @@ def create_instances(module, ec2): ...@@ -454,7 +538,7 @@ def create_instances(module, ec2):
created_instance_ids.append(inst.id) created_instance_ids.append(inst.id)
instance_dict_array.append(d) instance_dict_array.append(d)
return (instance_dict_array, created_instance_ids) return (instance_dict_array, created_instance_ids, changed)
def terminate_instances(module, ec2, instance_ids): def terminate_instances(module, ec2, instance_ids):
...@@ -502,9 +586,9 @@ def main(): ...@@ -502,9 +586,9 @@ def main():
key_name = dict(aliases = ['keypair']), key_name = dict(aliases = ['keypair']),
id = dict(), id = dict(),
group = dict(type='list'), group = dict(type='list'),
group_id = dict(), group_id = dict(type='list'),
region = dict(choices=['eu-west-1', 'sa-east-1', 'us-east-1', 'ap-northeast-1', 'us-west-2', 'us-west-1', 'ap-southeast-1', 'ap-southeast-2']), region = dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS),
zone = dict(), zone = dict(aliases=['aws_zone', 'ec2_zone']),
instance_type = dict(aliases=['type']), instance_type = dict(aliases=['type']),
image = dict(), image = dict(),
kernel = dict(), kernel = dict(),
...@@ -513,49 +597,63 @@ def main(): ...@@ -513,49 +597,63 @@ def main():
ramdisk = dict(), ramdisk = dict(),
wait = dict(choices=BOOLEANS, default=False), wait = dict(choices=BOOLEANS, default=False),
wait_timeout = dict(default=300), wait_timeout = dict(default=300),
ec2_url = dict(aliases=['EC2_URL']), ec2_url = dict(),
ec2_secret_key = dict(aliases=['EC2_SECRET_KEY'], no_log=True), aws_secret_key = dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True),
ec2_access_key = dict(aliases=['EC2_ACCESS_KEY']), aws_access_key = dict(aliases=['ec2_access_key', 'access_key']),
placement_group = dict(), placement_group = dict(),
user_data = dict(), user_data = dict(),
instance_tags = dict(), instance_tags = dict(),
vpc_subnet_id = dict(), vpc_subnet_id = dict(),
private_ip = dict(), private_ip = dict(),
instance_profile_name = dict(),
instance_ids = dict(type='list'), instance_ids = dict(type='list'),
state = dict(default='present'), state = dict(default='present'),
root_ebs_size = dict(default=None),
) )
) )
ec2_url = module.params.get('ec2_url') ec2_url = module.params.get('ec2_url')
ec2_secret_key = module.params.get('ec2_secret_key') aws_secret_key = module.params.get('aws_secret_key')
ec2_access_key = module.params.get('ec2_access_key') aws_access_key = module.params.get('aws_access_key')
region = module.params.get('region') region = module.params.get('region')
termination_list = module.params.get('termination_list')
# allow eucarc environment variables to be used if ansible vars aren't set # allow eucarc environment variables to be used if ansible vars aren't set
if not ec2_url and 'EC2_URL' in os.environ: if not ec2_url and 'EC2_URL' in os.environ:
ec2_url = os.environ['EC2_URL'] ec2_url = os.environ['EC2_URL']
if not ec2_secret_key and 'EC2_SECRET_KEY' in os.environ:
ec2_secret_key = os.environ['EC2_SECRET_KEY'] if not aws_secret_key:
if not ec2_access_key and 'EC2_ACCESS_KEY' in os.environ: if 'AWS_SECRET_KEY' in os.environ:
ec2_access_key = os.environ['EC2_ACCESS_KEY'] aws_secret_key = os.environ['AWS_SECRET_KEY']
elif 'EC2_SECRET_KEY' in os.environ:
aws_secret_key = os.environ['EC2_SECRET_KEY']
if not aws_access_key:
if 'AWS_ACCESS_KEY' in os.environ:
aws_access_key = os.environ['AWS_ACCESS_KEY']
elif 'EC2_ACCESS_KEY' in os.environ:
aws_access_key = os.environ['EC2_ACCESS_KEY']
if not region:
if 'AWS_REGION' in os.environ:
region = os.environ['AWS_REGION']
elif 'EC2_REGION' in os.environ:
region = os.environ['EC2_REGION']
# If we have a region specified, connect to its endpoint. # If we have a region specified, connect to its endpoint.
if region: if region:
try: try:
ec2 = boto.ec2.connect_to_region(region, aws_access_key_id=ec2_access_key, aws_secret_access_key=ec2_secret_key) ec2 = boto.ec2.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key)
except boto.exception.NoAuthHandlerFound, e: except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg = str(e)) module.fail_json(msg = str(e))
# Otherwise, no region so we fallback to the old connection method # If we specified an ec2_url then try connecting to it
else: elif ec2_url:
try: try:
if ec2_url: # if we have an URL set, connect to the specified endpoint ec2 = boto.connect_ec2_endpoint(ec2_url, aws_access_key, aws_secret_key)
ec2 = boto.connect_ec2_endpoint(ec2_url, ec2_access_key, ec2_secret_key)
else: # otherwise it's Amazon.
ec2 = boto.connect_ec2(ec2_access_key, ec2_secret_key)
except boto.exception.NoAuthHandlerFound, e: except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg = str(e)) module.fail_json(msg = str(e))
else:
module.fail_json(msg="Either region or ec2_url must be specified")
if module.params.get('state') == 'absent': if module.params.get('state') == 'absent':
instance_ids = module.params.get('instance_ids') instance_ids = module.params.get('instance_ids')
...@@ -566,14 +664,13 @@ def main(): ...@@ -566,14 +664,13 @@ def main():
elif module.params.get('state') == 'present': elif module.params.get('state') == 'present':
# Changed is always set to true when provisioning new instances # Changed is always set to true when provisioning new instances
changed = True
if not module.params.get('key_name'): if not module.params.get('key_name'):
module.fail_json(msg='key_name parameter is required for new instance') module.fail_json(msg='key_name parameter is required for new instance')
if not module.params.get('image'): if not module.params.get('image'):
module.fail_json(msg='image parameter is required for new instance') module.fail_json(msg='image parameter is required for new instance')
(instance_dict_array, new_instance_ids) = create_instances(module, ec2) (instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2)
module.exit_json(changed=True, instance_ids=new_instance_ids, instances=instance_dict_array) module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array)
# this is magic, see lib/ansible/module_common.py # this is magic, see lib/ansible/module_common.py
......
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_lookup
short_description: returns a list of ec2 instances that meet search criteria
description:
- Returns a list of ec2 instances that meet search criteria
version_added: "1.4"
options:
region:
description:
- The AWS region to use. Must be specified if ec2_url
is not used. If not specified then the value of the
EC2_REGION environment variable, if any, is used.
required: false
default: null
aliases: [ 'aws_region', 'ec2_region' ]
aws_secret_key:
description:
- AWS secret key. If not set then the value of
the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the
AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
tags:
description:
- tags to lookup
required: false
default: null
type: dict
aliases: []
requirements: [ "boto" ]
author: John Jarvis
'''
EXAMPLES = '''
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Return all instances that match the tag "Name: foo"
- local_action:
module: ec2_lookup
tags:
Name: foo
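# A further sketch (assumed usage, not a shipped example): register the lookup
# so the returned instances/instance_ids can drive later tasks.
- local_action:
    module: ec2_lookup
    tags:
      Name: foo
  register: tagged
- debug: msg="matched instances {{ tagged.instance_ids | join(',') }}"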
'''
import sys
AWS_REGIONS = ['ap-northeast-1',
'ap-southeast-1',
'ap-southeast-2',
'eu-west-1',
'sa-east-1',
'us-east-1',
'us-west-1',
'us-west-2']
try:
import boto.ec2
from boto.ec2 import connect_to_region
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def main():
module=AnsibleModule(
argument_spec=dict(
ec2_url=dict(),
region=dict(aliases=['aws_region', 'ec2_region'],
choices=AWS_REGIONS),
aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'],
no_log=True),
aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
tags=dict(default=None, type='dict'),
)
)
tags = module.params.get('tags')
aws_secret_key = module.params.get('aws_secret_key')
aws_access_key = module.params.get('aws_access_key')
region = module.params.get('region')
ec2_url = module.params.get('ec2_url')
# If we have a region specified, connect to its endpoint.
if region:
try:
ec2 = connect_to_region(region, aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
# If we specified an ec2_url then try connecting to it
elif ec2_url:
try:
ec2 = boto.connect_ec2_endpoint(ec2_url, aws_access_key,
aws_secret_key)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="Either region or ec2_url must be specified")
instances = []
instance_ids = []
for res in ec2.get_all_instances(filters={'tag:' + tag: value
for tag, value in tags.iteritems()}):
for inst in res.instances:
if inst.state == "running":
instances.append({k: v for k, v in inst.__dict__.iteritems()
if isinstance(v, (basestring))})
instance_ids.append(inst.id)
module.exit_json(changed=False, instances=instances,
instance_ids=instance_ids)
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
# Vars for role analytics-server
# vars are namespaced with the module name.
#
AS_DB_ANALYTICS_PASSWORD: 'CHANGEME!'
AS_DB_ANALYTICS_USER: 'analytics001'
AS_DB_ANALYTICS_HOST: 'localhost'
AS_SERVER_PORT: '9000'
AS_ENV_LANG: 'en_US.UTF-8'
AS_LOG_LEVEL: 'INFO'
AS_WORKERS: '4'
DATABASES:
default: &databases_default
ENGINE: 'django.db.backends.mysql'
NAME: 'wwc'
USER: 'analytics001'
PASSWORD: 'CHANGEME!'
HOST: 'CHANGEME'
PORT: 3306
analytics_auth_config:
DATABASES:
analytics:
<<: *databases_default
USER: $AS_DB_ANALYTICS_USER
PASSWORD: $AS_DB_ANALYTICS_PASSWORD
HOST: $AS_DB_ANALYTICS_HOST
ANALYTICS_API_KEY: $AS_API_KEY
ANALYTICS_RESULTS_DB:
MONGO_URI: $AS_DB_RESULTS_URL
MONGO_DB: $AS_DB_RESULTS_DB
MONGO_STORED_QUERIES_COLLECTION: $AS_DB_RESULTS_COLLECTION
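# Illustrative comment (not in the original file): with the merge key above,
# the 'analytics' entry effectively expands to the anchored defaults with
# USER, PASSWORD and HOST overridden:
#
#   analytics:
#     ENGINE: 'django.db.backends.mysql'
#     NAME: 'wwc'
#     USER: $AS_DB_ANALYTICS_USER
#     PASSWORD: $AS_DB_ANALYTICS_PASSWORD
#     HOST: $AS_DB_ANALYTICS_HOST
#     PORT: 3306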
as_role_name: "analytics-server"
as_user: "analytics-server"
as_home: "/opt/wwc/analytics-server"
as_venv_dir: "{{ as_home }}/virtualenvs/analytics-server"
as_source_repo: "git@github.com:edx/analytics-server.git"
as_code_dir: "{{ as_home }}/src"
as_version: "master"
as_git_identity_path: "{{ secure_dir }}/files/git-identity"
as_git_identity_dest: "/etc/{{ as_role_name }}.git-identity"
as_git_ssh: "/tmp/{{ as_role_name }}.git_ssh.sh"
as_requirements_file: "{{ as_code_dir }}/requirements.txt"
as_rsyslog_enabled: "yes"
as_web_user: "www-data"
as_env: "analytics-server_env"
as_service_variant: 'analytics'
as_django_settings: 'anserv.settings'
as_env_vars:
ANALYTICS_SERVER_LOG_LEVEL: "{{ AS_LOG_LEVEL }}"
#
# Used by the included role, automated.
# See meta/main.yml
#
as_automated_rbash_links:
- /usr/bin/sudo
- /usr/bin/scp
#
# OS packages
#
as_debian_pkgs:
- mongodb-clients
- zip
- libmysqlclient-dev
as_redhat_pkgs:
- zip
- community-mysql-libs
#
# Installed via pip to get the IAM role feature.
#
as_pip_pkgs:
- git+https://github.com/s3tools/s3cmd.git#egg=s3cmd
\ No newline at end of file
automator ALL=(www-data) NOPASSWD:SETENV:/opt/wwc/analytics-server/virtualenvs/analytics-server/bin/django-admin.py run_all_queries *
#!/bin/sh
exec /usr/bin/ssh -o StrictHostKeyChecking=no -i /etc/git-identity "$@"
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role analytics-server
#
# Overview:
#
#
- name: analytics-server | stop the analytics service
service: name=analytics state=stopped
- name: analytics-server | start the analytics service
service: name=analytics state=started
---
dependencies:
- {
role: automated,
automated_rbash_links: $as_automated_rbash_links,
autmoated_sudoers_dest: '99-automator-analytics-server',
automated_sudoers_template: 'roles/analytics-server/templates/etc/sudoers.d/99-automator-analytics-server.j2'
}
\ No newline at end of file
#
# TODO: Needed while this repo is private
#
- name: analytics-server | upload ssh script
template:
src=tmp/{{ as_role_name }}.git_ssh.sh.j2 dest={{ as_git_ssh }}
force=yes owner=root group=adm mode=750
tags:
- analytics-server
- deploy
- install
- update
#
# TODO: Needed while this repo is private
#
- name: analytics-server | install read-only ssh key required for checkout
copy:
src={{ as_git_identity_path }} dest={{ as_git_identity_dest }}
force=yes owner=ubuntu group=adm mode=0600
tags:
- analytics-server
- deploy
- install
- update
- name: analytics-server | checkout code
git:
dest={{ as_code_dir }} repo={{ as_source_repo }}
version={{ as_version }} force=true
environment:
GIT_SSH: $as_git_ssh
notify: analytics-server | start the analytics service
tags:
- analytics-server
- deploy
- install
- update
#
# TODO: Needed while this repo is private
#
- name: analytics-server | update src permissions
file:
path={{ as_code_dir }} state=directory owner={{ as_user }}
group={{ as_web_user }} mode=2750 recurse=yes
tags:
- analytics-server
- deploy
- install
- update
#
# TODO: Needed while this repo is private
#
- name: analytics-server | remove read-only ssh key for the content repo
file: path={{ as_git_identity_dest }} state=absent
tags:
- analytics-server
- deploy
- install
- update
#
# TODO: Needed while this repo is private
#
- name: analytics-server | remove ssh script
file: path={{ as_git_ssh }} state=absent
tags:
- analytics-server
- deploy
- install
- update
- name: analytics-server | install application requirements
pip:
requirements={{ as_requirements_file }}
virtualenv={{ as_venv_dir }} state=present
sudo: true
sudo_user: "{{ as_user }}"
notify: analytics-server | start the analytics service
tags:
- analytics-server
- deploy
- install
- update
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role analytics-server
#
# Overview:
#
# Installs the edX analytics-server Django application which provides
# basic analytics to the LMS instructor dashboard via service calls.
#
# Dependencies:
#
# common role
#
# Depends upon the automated role
#
# Example play:
#
# - name: Configure analytics instance(s)
# hosts: analytics-servers
# sudo: True
# vars_files:
# - "{{ secure_dir }}/vars/common/common.yml"
# - "{{ secure_dir }}/vars/stage/analytics-server.yml"
# - "{{ secure_dir }}/vars/users.yml"
# gather_facts: True
# roles:
# - common
# - analytics-server
#
- name: analytics-server | install system packages
apt: pkg={{','.join(as_debian_pkgs)}} state=present
tags:
- analytics-server
- install
- update
- name: analytics-server | create analytics-server user {{ as_user }}
user:
name={{ as_user }} state=present shell=/bin/bash
home={{ as_home }} createhome=yes
tags:
- analytics-server
- install
- update
- name: analytics-server | setup the analytics-server env
template:
src=opt/wwc/analytics-server/{{ as_env }}.j2
dest={{ as_home }}/{{ as_env }}
owner="{{ as_user }}" group="{{ as_user }}"
tags:
- analytics-server
- install
- update
- name: analytics-server | drop a bash_profile
copy: >
src=../../common/files/bash_profile
dest={{ as_home }}/.bash_profile
owner={{ as_user }}
group={{ as_user }}
# Awaiting next ansible release.
#- name: analytics-server | ensure .bashrc exists
# file: path={{ as_home }}/.bashrc state=touch
# sudo: true
# sudo_user: "{{ as_user }}"
# tags:
# - analytics-server
# - install
# - update
- name: analytics-server | ensure .bashrc exists
shell: touch {{ as_home }}/.bashrc
sudo: true
sudo_user: "{{ as_user }}"
tags:
- analytics-server
- install
- update
- name: analytics-server | add source of analytics-server_env to .bashrc
lineinfile:
dest={{ as_home }}/.bashrc
regexp='. {{ as_home }}/analytics-server_env'
line='. {{ as_home }}/analytics-server_env'
tags:
- analytics-server
- install
- update
- name: analytics-server | add source venv to .bashrc
lineinfile:
dest={{ as_home }}/.bashrc
regexp='. {{ as_venv_dir }}/bin/activate'
line='. {{ as_venv_dir }}/bin/activate'
tags:
- analytics-server
- install
- update
- name: analytics-server | install global python requirements
pip: name={{ item }}
with_items: as_pip_pkgs
tags:
- analytics-server
- install
- update
- name: analytics-server | create config
template:
src=opt/wwc/analytics.auth.json.j2
dest=/opt/wwc/analytics.auth.json
mode=0600
owner="{{ as_web_user }}" group="{{ as_web_user }}"
tags:
- analytics-server
- install
- update
- name: analytics-server | install service
template:
src=etc/init/analytics.conf.j2 dest=/etc/init/analytics.conf
owner=root group=root
- include: deploy.yml
\ No newline at end of file
# {{ ansible_managed }}
description "Analytics server under gunicorn"
start on runlevel [2345]
stop on runlevel [!2345]
respawn
respawn limit 3 30
env SERVICE_VARIANT={{ as_service_variant }}
env PID=/var/tmp/analytics.pid
env WORKERS={{ AS_WORKERS }}
env PORT={{ AS_SERVER_PORT }}
env LANG={{ AS_ENV_LANG }}
env DJANGO_SETTINGS_MODULE={{ as_django_settings }}
chdir {{ as_code_dir }}
setuid {{ as_web_user }}
exec {{ as_venv_dir }}/bin/gunicorn -b 0.0.0.0:$PORT -w $WORKERS --pythonpath={{ as_code_dir }}/anserv anserv.wsgi
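# Once rendered to /etc/init/analytics.conf, the service can be driven with
# upstart directly, for example:
#   sudo start analytics
#   sudo status analytics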
# {{ ansible_managed }}
{% for name,value in as_env_vars.items() %}
{% if value %}
export {{ name }}="{{ value }}"
{% endif %}
{% endfor %}
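{# With the role defaults (as_env_vars maps ANALYTICS_SERVER_LOG_LEVEL to
   AS_LOG_LEVEL), this template renders a single line such as:
   export ANALYTICS_SERVER_LOG_LEVEL="INFO"
#}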
\ No newline at end of file
#!/bin/sh
exec /usr/bin/ssh -o StrictHostKeyChecking=no -i {{ as_git_identity_dest }} "$@"
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
# Vars for role analytics
# vars are namespaced with the module name.
#
ANALYTICS_DB_ANALYTICS_PASSWORD: 'CHANGEME!'
ANALYTICS_DB_ANALYTICS_USER: 'analytics001'
ANALYTICS_DB_ANALYTICS_HOST: 'localhost'
ANALYTICS_SERVER_PORT: '9000'
ANALYTICS_ENV_LANG: 'en_US.UTF-8'
ANALYTICS_LOG_LEVEL: 'INFO'
ANALYTICS_WORKERS: '4'
DATABASES:
default: &databases_default
ENGINE: 'django.db.backends.mysql'
NAME: 'wwc'
USER: 'analytics001'
PASSWORD: 'CHANGEME!'
HOST: 'CHANGEME'
PORT: 3306
analytics_auth_config:
DATABASES:
analytics:
<<: *databases_default
USER: $ANALYTICS_DB_ANALYTICS_USER
PASSWORD: $ANALYTICS_DB_ANALYTICS_PASSWORD
HOST: $ANALYTICS_DB_ANALYTICS_HOST
ANALYTICS_API_KEY: $ANALYTICS_API_KEY
ANALYTICS_RESULTS_DB:
MONGO_URI: $ANALYTICS_DB_RESULTS_URL
MONGO_DB: $ANALYTICS_DB_RESULTS_DB
MONGO_STORED_QUERIES_COLLECTION: $ANALYTICS_DB_RESULTS_COLLECTION
analytics_role_name: "analytics"
analytics_user: "analytics"
analytics_home: "/opt/wwc/analytics"
analytics_venv_dir: "{{ analytics_home }}/virtualenvs/analytics"
analytics_source_repo: "git@github.com:edx/analytics-server.git"
analytics_code_dir: "{{ analytics_home }}/src"
analytics_version: "master"
analytics_git_identity_path: "{{ secure_dir }}/files/git-identity"
analytics_git_identity_dest: "/etc/{{ analytics_role_name }}.git-identity"
analytics_git_ssh: "/tmp/{{ analytics_role_name }}.git_ssh.sh"
analytics_requirements_file: "{{ analytics_code_dir }}/requirements.txt"
analytics_rsyslog_enabled: "yes"
analytics_web_user: "www-data"
analytics_env: "analytics_env"
analytics_service_variant: 'analytics'
analytics_django_settings: 'anserv.settings'
analytics_env_vars:
ANALYTICS_LOG_LEVEL: "{{ ANALYTICS_LOG_LEVEL }}"
#
# Used by the included role, automated.
# See meta/main.yml
#
analytics_automated_rbash_links:
- /usr/bin/sudo
- /usr/bin/scp
#
# OS packages
#
analytics_debian_pkgs:
- mongodb-clients
- zip
- libmysqlclient-dev
analytics_redhat_pkgs:
- zip
- community-mysql-libs
#
# Installed via pip to get the IAM role feature.
#
analytics_pip_pkgs:
- git+https://github.com/s3tools/s3cmd.git#egg=s3cmd
\ No newline at end of file
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role analytics
#
# Overview:
#
#
- name: analytics | stop the analytics service
service: name=analytics state=stopped
- name: analytics | start the analytics service
service: name=analytics state=started
#
# TODO: Needed while this repo is private
#
- name: analytics | upload ssh script
template:
src=tmp/{{ analytics_role_name }}.git_ssh.sh.j2 dest={{ analytics_git_ssh }}
force=yes owner=root group=adm mode=750
tags:
- analytics
- deploy
- install
- update
#
# TODO: Needed while this repo is private
#
- name: analytics | install read-only ssh key required for checkout
copy:
src={{ analytics_git_identity_path }} dest={{ analytics_git_identity_dest }}
force=yes owner=ubuntu group=adm mode=0600
tags:
- analytics
- deploy
- install
- update
- name: analytics | checkout code
git:
dest={{ analytics_code_dir }} repo={{ analytics_source_repo }}
version={{ analytics_version }} force=true
environment:
GIT_SSH: $analytics_git_ssh
notify: analytics | start the analytics service
tags:
- analytics
- deploy
- install
- update
#
# TODO: Needed while this repo is private
#
- name: analytics | update src permissions
file:
path={{ analytics_code_dir }} state=directory owner={{ analytics_user }}
group={{ analytics_web_user }} mode=2750 recurse=yes
tags:
- analytics
- deploy
- install
- update
#
# TODO: Needed while this repo is private
#
- name: analytics | remove read-only ssh key for the content repo
file: path={{ analytics_git_identity_dest }} state=absent
tags:
- analytics
- deploy
- install
- update
#
# TODO: Needed while this repo is private
#
- name: analytics | remove ssh script
file: path={{ analytics_git_ssh }} state=absent
tags:
- analytics
- deploy
- install
- update
- name: analytics | install application requirements
pip:
requirements={{ analytics_requirements_file }}
virtualenv={{ analytics_venv_dir }} state=present
sudo: true
sudo_user: "{{ analytics_user }}"
notify: analytics | start the analytics service
tags:
- analytics
- deploy
- install
- update
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role analytics
#
# Overview:
#
# Installs the edX analytics Django application which provides
# basic analytics to the LMS instructor dashboard via service calls.
#
# Dependencies:
#
# common role
#
# Depends upon the automated role
#
# Example play:
#
# - name: Configure analytics instance(s)
# hosts: analytics
# sudo: True
# vars_files:
# - "{{ secure_dir }}/vars/common/common.yml"
# - "{{ secure_dir }}/vars/stage/analytics.yml"
# - "{{ secure_dir }}/vars/users.yml"
# gather_facts: True
# roles:
# - common
# - analytics
#
- name: analytics | install system packages
apt: pkg={{','.join(analytics_debian_pkgs)}} state=present
tags:
- analytics
- install
- update
- name: analytics | create analytics user {{ analytics_user }}
user:
name={{ analytics_user }} state=present shell=/bin/bash
home={{ analytics_home }} createhome=yes
tags:
- analytics
- install
- update
- name: analytics | setup the analytics env
template:
src=opt/wwc/analytics/{{ analytics_env }}.j2
dest={{ analytics_home }}/{{ analytics_env }}
owner="{{ analytics_user }}" group="{{ analytics_user }}"
tags:
- analytics
- install
- update
- name: analytics | drop a bash_profile
copy: >
src=../../common/files/bash_profile
dest={{ analytics_home }}/.bash_profile
owner={{ analytics_user }}
group={{ analytics_user }}
# Awaiting next ansible release.
#- name: analytics | ensure .bashrc exists
# file: path={{ analytics_home }}/.bashrc state=touch
# sudo: true
# sudo_user: "{{ analytics_user }}"
# tags:
# - analytics
# - install
# - update
- name: analytics | ensure .bashrc exists
shell: touch {{ analytics_home }}/.bashrc
sudo: true
sudo_user: "{{ analytics_user }}"
tags:
- analytics
- install
- update
- name: analytics | add source of analytics_env to .bashrc
lineinfile:
dest={{ analytics_home }}/.bashrc
regexp='. {{ analytics_home }}/analytics_env'
line='. {{ analytics_home }}/analytics_env'
tags:
- analytics
- install
- update
- name: analytics | add source venv to .bashrc
lineinfile:
dest={{ analytics_home }}/.bashrc
regexp='. {{ analytics_venv_dir }}/bin/activate'
line='. {{ analytics_venv_dir }}/bin/activate'
tags:
- analytics
- install
- update
- name: analytics | install global python requirements
pip: name={{ item }}
with_items: analytics_pip_pkgs
tags:
- analytics
- install
- update
- name: analytics | create config
template:
src=opt/wwc/analytics.auth.json.j2
dest=/opt/wwc/analytics.auth.json
mode=0600
owner="{{ analytics_web_user }}" group="{{ analytics_web_user }}"
tags:
- analytics
- install
- update
- name: analytics | install service
template:
src=etc/init/analytics.conf.j2 dest=/etc/init/analytics.conf
owner=root group=root
- include: deploy.yml
\ No newline at end of file
# {{ ansible_managed }}
description "Analytics gunicorn"
start on runlevel [2345]
stop on runlevel [!2345]
respawn
respawn limit 3 30
env SERVICE_VARIANT={{ analytics_service_variant }}
env PID=/var/tmp/analytics.pid
env WORKERS={{ ANALYTICS_WORKERS }}
env PORT={{ ANALYTICS_SERVER_PORT }}
env LANG={{ ANALYTICS_ENV_LANG }}
env DJANGO_SETTINGS_MODULE={{ analytics_django_settings }}
chdir {{ analytics_code_dir }}
setuid {{ analytics_web_user }}
exec {{ analytics_venv_dir }}/bin/gunicorn -b 0.0.0.0:$PORT -w $WORKERS --pythonpath={{ analytics_code_dir }}/anserv anserv.wsgi
automator ALL=({{ analytics_web_user }}) NOPASSWD:SETENV:{{ analytics_venv_dir }}/bin/django-admin.py run_all_queries *
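# Rendered with the role defaults, the entry above lets the 'automator' user run
#   sudo -u www-data /opt/wwc/analytics/virtualenvs/analytics/bin/django-admin.py run_all_queries <args>
# without a password; SETENV permits passing environment variables through sudo.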
# {{ ansible_managed }}
{% for name,value in analytics_env_vars.items() %}
{% if value %}
export {{ name }}="{{ value }}"
{% endif %}
{% endfor %}
#!/bin/sh
exec /usr/bin/ssh -o StrictHostKeyChecking=no -i {{ analytics_git_identity_dest }} "$@"
--- ---
- name: ansible-role | check if the role exists
command: test -d roles/{{ role_name }}
register: role_exists
ignore_errors: yes
- name: ansible-role | prompt for overwrite
pause: prompt="Role {{ role_name }} exists. Overwrite? Press ENTER to continue or <CTRL>-c, then a, to abort."
when: role_exists | success
- name: ansible-role | create role directories - name: ansible-role | create role directories
file: path=roles/{{role_name}}/{{ item }} state=directory file: path=roles/{{role_name}}/{{ item }} state=directory
with_items: with_items:
- tasks - tasks
- meta
- handlers - handlers
- vars - defaults
- templates - templates
- files - files
...@@ -13,5 +23,6 @@ ...@@ -13,5 +23,6 @@
template: src={{ item }}/main.yml.j2 dest=roles/{{ role_name }}/{{ item }}/main.yml template: src={{ item }}/main.yml.j2 dest=roles/{{ role_name }}/{{ item }}/main.yml
with_items: with_items:
- tasks - tasks
- meta
- defaults
- handlers - handlers
- vars
\ No newline at end of file
--- ---
{% include 'roles/ansible-role/templates/header.j2' %} {% include 'roles/ansible-role/templates/header.j2' %}
# #
# Vars for role {{ role_name }} # Defaults for role {{ role_name }}
# #
# #
......
---
{% include 'roles/ansible-role/templates/header.j2' %}
#
# Role includes for role {{ role_name }}
#
# Example:
#
# dependencies:
# - {
# role: my_role
# my_role_var0: "foo"
# my_role_var1: "bar"
# }
---
- name: Dump all vars to json
template: src=dumpall.json.j2 dest=/tmp/ansible.all.json mode=0600
tags:
- dumpall
- debug
- name: Dump lms auth|env file
template: src=../../edxapp/templates/lms.{{item}}.json.j2 dest=/tmp/lms.{{item}}.json mode=0600
with_items:
- env
- auth
when: "'lms' in service_variants_enabled"
tags:
- dumpall
- debug
- name: Dump lms-preview auth|env file
template: src=../../edxapp/templates/lms-preview.{{item}}.json.j2 dest=/tmp/lms-preview.{{item}}.json mode=0600
with_items:
- env
- auth
when: "'lms-preview' in service_variants_enabled"
tags:
- dumpall
- debug
- name: Dump cms auth|env file
template: src=../../edxapp/templates/cms.{{item}}.json.j2 dest=/tmp/cms.{{item}}.json mode=0600
with_items:
- env
- auth
when: "'cms' in service_variants_enabled"
tags:
- dumpall
- debug
- name: Dump all vars to yaml
template: src=dumpall.yml.j2 dest=/tmp/ansible.all.yml mode=0600
tags:
- dumpall
- debug
- name: fetch remote files
# fetch is fail-safe for remote files that don't exist
# setting mode is not an option
fetch: src=/tmp/{{item}} dest=/tmp/{{ansible_hostname}}-{{item}} flat=True
with_items:
- ansible.all.json
- ansible.all.yml
- lms.env.json
- lms.auth.json
- lms-preview.env.json
- lms-preview.auth.json
- cms.env.json
- cms.auth.json
tags:
- dumpall
- debug
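#
# Example invocation, assuming a play that applies this role to the target
# hosts:
#
#   ansible-playbook -i <inventory> <playbook>.yml --tags dumpall
#
# The rendered dumps are fetched back to the control machine as
# /tmp/<hostname>-ansible.all.json and /tmp/<hostname>-ansible.all.yml.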
Module Variables ("vars"):
--------------------------------
{{ vars | to_nice_json }}
Environment Variables ("environment"):
--------------------------------
{{ environment | to_nice_json }}
GROUP NAMES Variables ("group_names"):
--------------------------------
{{ group_names | to_nice_json }}
GROUPS Variables ("groups"):
--------------------------------
{{ groups | to_nice_json }}
HOST Variables ("hostvars"):
--------------------------------
{{ hostvars | to_nice_json }}
{% if lms_env_config %}
LMS env variables:
---------------
{{ lms_env_config | to_nice_json }}
{% endif %}
{% if lms_auth_config %}
LMS auth variables:
---------------
{{ lms_auth_config | to_nice_json }}
{% endif %}
{% if lms_preview_env_config %}
Preview env variables:
---------------
{{ lms_preview_env_config | to_nice_json }}
{% endif %}
{% if lms_preview_auth_config %}
Preview auth variables:
---------------
{{ lms_preview_auth_config | to_nice_json }}
{% endif %}
{% if cms_env_config %}
CMS env variables:
---------------
{{ cms_env_config | to_nice_json }}
{% endif %}
{% if cms_auth_config %}
CMS auth variables:
---------------
{{ cms_auth_config | to_nice_json }}
{% endif %}
Module Variables ("vars"):
--------------------------------
{{ vars | to_nice_yaml }}
Environment Variables ("environment"):
--------------------------------
{{ environment | to_nice_yaml }}
GROUP NAMES Variables ("group_names"):
--------------------------------
{{ group_names | to_nice_yaml }}
GROUPS Variables ("groups"):
--------------------------------
{{ groups | to_nice_yaml }}
{% if lms_env_config %}
LMS env variables:
---------------
{{ lms_env_config | to_nice_yaml }}
{% endif %}
{% if lms_auth_config %}
LMS auth variables:
---------------
{{ lms_auth_config | to_nice_yaml }}
{% endif %}
{% if lms_preview_env_config %}
Preview env variables:
---------------
{{ lms_preview_env_config | to_nice_yaml }}
{% endif %}
{% if lms_preview_auth_config %}
Preview auth variables:
---------------
{{ lms_preview_auth_config | to_nice_yaml }}
{% endif %}
{% if cms_env_config %}
CMS env variables:
---------------
{{ cms_env_config | to_nice_yaml }}
{% endif %}
{% if cms_auth_config %}
CMS auth variables:
---------------
{{ cms_auth_config | to_nice_yaml }}
{% endif %}
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Vars for role automated
#
#
# vars are namespaced with the module name.
#
automated_role_name: automated
automated_user: "automator"
automated_home: "/home/automator"
automated_rbash_links: !!null
automated_sudoers_template: !!null
automated_sudoers_file: !!null
#
# OS packages
#
automated_debian_pkgs: []
automated_redhat_pkgs: []
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6noLNy7YVFNK6OSOFgPbnGGovgZqLsvJxjhs82jT7tZIsYOjVVCAMk0kkSnBt0etDjGSJlJ664r1aBhubZrujzxns0oOzA7J+tWQ3CiaOBLtOSffeh8a3dTWWNPCAGg9KflPaufXdd31Bf96g9ACGZR7uLYgWUP/J0jOPMCPE1RBfRNFeZ7cHlh3t/pI+JzTcyZTka4AAEsCejBKHngYxVoOk+gfxe+Qo703st0MFuoxVAMymeBGi/1lCwKsV6r9BijzuvIFyQCl2vThjoF32yHmmP8by//hmgpo5UNqG7jbmSrCJhkdh+My3SgEebn5c2QLJepOrUfrZFwz1BQ1l task@edx.org
\ No newline at end of file
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
# Tasks for role automated
#
# Overview:
#
# This role is included as a dependency by other roles which provide
# automated jobs. Automation occurs over ssh. The automator user
# is assigned to a managed rbash shell and is, potentially, allowed to run
# explicitly listed commands via sudo. Both the commands that are
# allowed via rbash and the sudoers file are provided by the
# including role.
#
# Dependencies:
#
# This role depends upon variables provided by an including role
# via the my_role/meta/main.yml file. Includes take the following forms:
#
# dependencies:
# - {
# role: automated,
# automated_rbash_links: $as_automated_rbash_links,
# automated_sudoers_dest: '99-my_role',
# automated_sudoers_file: 'roles/my_role/files/etc/sudoers.d/99-my_role'
# }
#
# or
#
# dependencies:
# - {
# role: automated,
# automated_rbash_links: $as_automated_rbash_links,
# automated_sudoers_dest: '99-my_role',
# automated_sudoers_template: 'roles/my_role/templates/etc/sudoers.d/99-my_role.j2'
# }
#
# The sudoers file is optional. Note that for sudo to work it must be
# included in the rbash links list.
#
# That list should be provided via my_role's defaults
#
# role_automated_rbash_links:
# - /usr/bin/sudo
# - /usr/bin/scp
#
- fail: msg="automated_rbash_links required for role"
when: automated_rbash_links is not defined
- fail: msg="automated_sudoers_dest required for role"
when: automated_sudoers_dest is not defined
- name: automated | create automated user
user:
name={{ automated_user }} state=present shell=/bin/rbash
home={{ automated_home }} createhome=yes
tags:
- automated
- install
- update
- name: automated | create sudoers file from file
copy:
dest=/etc/sudoers.d/{{ automated_sudoers_dest }}
src={{ automated_sudoers_file }} owner="root"
group="root" mode=0440 validate='visudo -cf %s'
when: automated_sudoers_file
tags:
- automated
- install
- update
- name: automated | create sudoers file from template
template:
dest=/etc/sudoers.d/{{ automated_sudoers_dest }}
src={{ automated_sudoers_template }} owner="root"
group="root" mode=0440 validate='visudo -cf %s'
when: automated_sudoers_template
tags:
- automated
- install
- update
#
# Prevent user from updating their PATH and
# environment.
#
- name: automated | update shell file mode
file:
path={{ automated_home }}/{{ item }} mode=0640
state=file owner="root" group={{ automated_user }}
tags:
- automated
- install
- update
with_items:
- .bashrc
- .profile
- .bash_logout
- name: automated | change ~automated ownership
file:
path={{ automated_home }} mode=0750 state=directory
owner="root" group={{ automated_user }}
tags:
- automated
- install
- update
#
# This ensures that the links are updated with each run
# and that links that were removed from the role are
# removed.
#
- name: automated | remove ~automated/bin directory
file:
path={{ automated_home }}/bin state=absent
ignore_errors: yes
tags:
- automated
- install
- update
- name: automated | create ~automated/bin directory
file:
path={{ automated_home }}/bin state=directory mode=0750
owner="root" group={{ automated_user }}
tags:
- automated
- install
- update
- name: automated | re-write .profile
copy:
src=home/automator/.profile
dest={{ automated_home }}/.profile
owner="root"
group={{ automated_user }}
mode="0744"
tags:
- automated
- install
- update
- name: automated | re-write .bashrc
copy:
src=home/automator/.bashrc
dest={{ automated_home }}/.bashrc
owner="root"
group={{ automated_user }}
mode="0744"
tags:
- automated
- install
- update
- name: automated | create .ssh directory
file:
path={{ automated_home }}/.ssh state=directory mode=0700
owner={{ automated_user }} group={{ automated_user }}
tags:
- automated
- install
- update
- name: automated | copy key to .ssh/authorized_keys
copy:
src=home/automator/.ssh/authorized_keys
dest={{ automated_home }}/.ssh/authorized_keys mode=0600
owner={{ automated_user }} group={{ automated_user }}
tags:
- automated
- install
- update
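#
# Each entry in automated_rbash_links (e.g. /usr/bin/scp) is linked into
# ~automated/bin under its basename ({{ item.split('/').pop() }} yields 'scp'),
# giving the restricted shell an explicit whitelist of runnable commands.
#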
- name: automated | create allowed command links
file:
src={{ item }} dest={{ automated_home }}/bin/{{ item.split('/').pop() }}
state=link
with_items: automated_rbash_links
tags:
- automated
- install
- update
...@@ -3,10 +3,10 @@ ...@@ -3,10 +3,10 @@
# Overview: # Overview:
# #
# Creates OS accounts for users based on their github credential. # Creates OS accounts for users based on their github credential.
# Expects to find a list in scope named github_users with # Expects to find a list in scope named GITHUB_USERS with
# the following structure: # the following structure:
# #
# github_users: # GITHUB_USERS:
# - user: me_at_github # - user: me_at_github
# groups: # groups:
# - adm # - adm
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
name={{ item.user }} name={{ item.user }}
groups={{ ",".join(item.groups) }} groups={{ ",".join(item.groups) }}
shell=/bin/bash shell=/bin/bash
with_items: github_users with_items: GITHUB_USERS
tags: tags:
- users - users
- update - update
...@@ -29,7 +29,7 @@ ...@@ -29,7 +29,7 @@
file: file:
path=/home/{{ item.user }}/.ssh state=directory mode=0700 path=/home/{{ item.user }}/.ssh state=directory mode=0700
owner={{ item.user }} group={{ item.user }} owner={{ item.user }} group={{ item.user }}
with_items: github_users with_items: GITHUB_USERS
tags: tags:
- users - users
- update - update
...@@ -39,7 +39,7 @@ ...@@ -39,7 +39,7 @@
url=https://github.com/{{ item.user }}.keys url=https://github.com/{{ item.user }}.keys
dest=/home/{{ item.user }}/.ssh/authorized_keys mode=0600 dest=/home/{{ item.user }}/.ssh/authorized_keys mode=0600
owner={{ item.user }} group={{ item.user }} owner={{ item.user }} group={{ item.user }}
with_items: github_users with_items: GITHUB_USERS
tags: tags:
- users - users
- update - update
...@@ -36,8 +36,8 @@ ...@@ -36,8 +36,8 @@
- name: common | Creating env users - name: common | Creating env users
user: name={{ item.user }} {% if item.groups %}groups={{ ",".join(item.groups) }}{% endif %} shell=/bin/bash user: name={{ item.user }} {% if item.groups %}groups={{ ",".join(item.groups) }}{% endif %} shell=/bin/bash
with_items: env_users with_items: ENV_USERS
when: env_users is defined when: ENV_USERS is defined
tags: tags:
- users - users
- update - update
......
...@@ -11,13 +11,19 @@ ...@@ -11,13 +11,19 @@
- install - install
- name: common | pip install virtualenv - name: common | pip install virtualenv
pip: name=virtualenv state=present pip: >
name=virtualenv
state=present
extra_args="-i {{ PYPI_MIRROR_URL }}"
tags: tags:
- venv_base - venv_base
- install - install
- name: common | pip install virtualenvwrapper - name: common | pip install virtualenvwrapper
pip: name=virtualenvwrapper state=present pip: >
name=virtualenvwrapper
state=present
extra_args="-i {{ PYPI_MIRROR_URL }}"
tags: tags:
- venv_base - venv_base
- install - install
...@@ -35,7 +41,11 @@ ...@@ -35,7 +41,11 @@
- install - install
- name: common | pip install gunicorn - name: common | pip install gunicorn
pip: name=gunicorn virtualenv="{{venv_dir}}" state=present pip: >
name=gunicorn
virtualenv="{{venv_dir}}"
state=present
extra_args="-i {{ PYPI_MIRROR_URL }}"
tags: tags:
- gunicorn - gunicorn
- install - install
--- ---
- include: create_users.yml - include: create_users.yml
- include: create_github_users.yml - include: create_github_users.yml
when: github_users is defined when: GITHUB_USERS is defined
- name: common | Add user www-data - name: common | Add user www-data
# This user should be created on the system by default # This user should be created on the system by default
...@@ -10,6 +10,14 @@ ...@@ -10,6 +10,14 @@
- pre_install - pre_install
- update - update
- name: common | Create the base directory for storage
file: >
path={{ storage_base_dir }}
state=directory
owner=root
group=root
mode=0755
- name: common | Create application root - name: common | Create application root
# In the future consider making group edx r/t adm # In the future consider making group edx r/t adm
file: path={{ app_base_dir }} state=directory owner=root group=adm mode=2775 file: path={{ app_base_dir }} state=directory owner=root group=adm mode=2775
...@@ -43,13 +51,13 @@ ...@@ -43,13 +51,13 @@
- update - update
- name: common | Create log directory - name: common | Create log directory
file: path=$log_base_dir state=directory mode=2770 group=adm owner=syslog file: path={{log_base_dir}} state=directory mode=2755 group=adm owner=syslog
tags: tags:
- pre_install - pre_install
- update - update
- name: common | Create alias from app_base_dir to the log_base_dir - name: common | Create alias from app_base_dir to the log_base_dir
file: state=link src=$log_base_dir path=$app_base_dir/log file: state=link src={{log_base_dir}} path={{app_base_dir}}/log
tags: tags:
- pre_install - pre_install
- logging - logging
......
---
devpi_venv_dir: "{{ app_base_dir }}/devpi/venvs/devpi"
devpi_pip_pkgs:
- devpi-server
- eventlet
devpi_nginx_port: 80
devpi_port: 4040
devpi_data_dir: /var/devpi/data
devpi_user: devpi
devpi_group: devpi
devpi_server_name: 'pypy.*'
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Defaults for role devpi
#
---
- name: devpi | restart devpi
supervisorctl: >
state=restarted
config={{ supervisor_cfg }}
name=devpi-server
- name: devpi | start devpi
supervisorctl: >
state=started
config={{ supervisor_cfg }}
name=devpi-server
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role devpi
#
# Overview:
# Creates a pypi caching server
#
# Dependencies:
# - common
# - nginx
# - supervisor
#
# Example play:
# roles:
# - common
# - role: nginx
# nginx_sites:
# - devpi
# - role: supervisor
# supervisor_servers:
# - devpi
# - devpi
---
- name: devpi | create devpi user
user: >
name={{ devpi_user }}
state=present
- name: devpi | create virtualenv directory
file: >
path={{ devpi_venv_dir }}
state=directory
owner={{ devpi_user }}
group={{ devpi_group }}
notify: devpi | restart devpi
- name: devpi | create the devpi data directory
file: >
path={{ devpi_data_dir }}
state=directory
owner={{ devpi_user }}
group={{ devpi_group }}
- name: devpi | install devpi pip pkgs
pip: >
name={{ item }}
state=present
virtualenv={{ devpi_venv_dir }}
with_items: devpi_pip_pkgs
notify: devpi | restart devpi
- name: supervisor | ensure supervisor is started
service: name=supervisor state=started
- name: devpi | ensure devpi is running
supervisorctl: >
state=started
config={{ supervisor_cfg }}
name=devpi-server
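#
# Once devpi-server is running, pip can be pointed at the cache, for example
# (assuming the default devpi root index and the ports defined in defaults):
#
#   pip install -i http://localhost:4040/root/pypi/+simple/ <package>
#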
DISCERN_NGINX_PORT: 18070
DISCERN_BASIC_AUTH: False
discern_source_repo: https://github.com/edx/discern.git discern_source_repo: https://github.com/edx/discern.git
ease_source_repo: https://github.com/edx/ease.git ease_source_repo: https://github.com/edx/ease.git
ease_dir: $app_base_dir/ease ease_dir: $app_base_dir/ease
...@@ -6,7 +9,7 @@ discern_settings: discern.aws ...@@ -6,7 +9,7 @@ discern_settings: discern.aws
nltk_data_dir: /usr/share/nltk_data nltk_data_dir: /usr/share/nltk_data
ease_branch: master ease_branch: master
discern_branch: dev discern_branch: dev
nginx_listen_port: 80 discern_gunicorn_port: 8070
gunicorn_port: 7999 discern_gunicorn_host: 127.0.0.1
discern_user: discern discern_user: discern
site_name: discern site_name: discern
...@@ -4,6 +4,3 @@ ...@@ -4,6 +4,3 @@
- name: discern | restart celery - name: discern | restart celery
service: name=celery state=restarted service: name=celery state=restarted
- name: discern | restart nginx
service: name=nginx state=restarted
...@@ -109,20 +109,3 @@ ...@@ -109,20 +109,3 @@
#Have this separate from the other three because it doesn't take the noinput flag #Have this separate from the other three because it doesn't take the noinput flag
- name: discern | django update_index for discern - name: discern | django update_index for discern
shell: ${venv_dir}/bin/python {{discern_dir}}/manage.py update_index --settings={{discern_settings}} --pythonpath={{discern_dir}} shell: ${venv_dir}/bin/python {{discern_dir}}/manage.py update_index --settings={{discern_settings}} --pythonpath={{discern_dir}}
- name: discern | create nginx directory and set perms
file: path=/etc/nginx/sites-available owner=root group=edx mode=2775 state=directory
#Install nginx sites available
#remove default link, render template, link template to sites-enabled to activate
- name: discern | Removing default nginx config
file: path=/etc/nginx/sites-enabled/default state=absent
notify: discern | restart nginx
- name: discern | render nginx sites available
template: src=nginx-discern.j2 dest=/etc/nginx/sites-available/{{ site_name }}
notify: discern | restart nginx
- name: discern | Creating nginx config link {{ site_name }}
file: src=/etc/nginx/sites-available/{{ site_name }} dest=/etc/nginx/sites-enabled/{{ site_name }} state=link owner=root group=root
notify: discern | restart nginx
...@@ -11,11 +11,12 @@ respawn limit 3 30 ...@@ -11,11 +11,12 @@ respawn limit 3 30
env PID=/var/run/gunicorn/discern.pid env PID=/var/run/gunicorn/discern.pid
env WORKERS={{ ansible_processor_cores * 2 }} env WORKERS={{ ansible_processor_cores * 2 }}
env PORT={{ gunicorn_port }} env PORT={{ discern_gunicorn_port }}
env ADDRESS={{ discern_gunicorn_host }}
env LANG=en_US.UTF-8 env LANG=en_US.UTF-8
env DJANGO_SETTINGS_MODULE={{discern_settings}} env DJANGO_SETTINGS_MODULE={{discern_settings}}
chdir {{discern_dir}} chdir {{discern_dir}}
setuid {{discern_user}} setuid {{discern_user}}
exec {{venv_dir}}/bin/gunicorn --preload -b 127.0.0.1:$PORT -w $WORKERS --timeout=30 --pythonpath={{discern_dir}} discern.wsgi exec {{venv_dir}}/bin/gunicorn --preload -b $ADDRESS:$PORT -w $WORKERS --timeout=30 --pythonpath={{discern_dir}} discern.wsgi
\ No newline at end of file
...@@ -2,157 +2,248 @@ ...@@ -2,157 +2,248 @@
# when the role is included # when the role is included
--- ---
# These are default values for the env and auth # These are variables that default to a localhost
# configuration files. There should be no # setup and are meant to be overwritten for
# host identifying or sensitive information and # different environments.
# the defaults should be appropriate for running #
# all roles on a single instance # Variables in all caps are environment specific
# Lowercase variables are internal to the role
#
# Defaults specified here should not contain
# any secrets or host identifying information.
# These are custom variables that can be overridden EDXAPP_LMS_BASE: ''
# on the command line to change specific values in the hash EDXAPP_PREVIEW_LMS_BASE: ''
c_lms_base: '' EDXAPP_CMS_BASE: ''
c_preview_lms_base: '' EDXAPP_AWS_ACCESS_KEY_ID: ''
EDXAPP_AWS_SECRET_ACCESS_KEY: ''
EDXAPP_XQUEUE_BASIC_AUTH: [ 'edx', 'edx' ]
EDXAPP_XQUEUE_DJANGO_AUTH:
username: 'lms'
password: 'password'
EDXAPP_MONGO_HOSTS: ['localhost']
EDXAPP_MONGO_PASSWORD: 'password'
EDXAPP_MONGO_PORT: 27017
EDXAPP_MONGO_USER: 'edxapp'
EDXAPP_MONGO_DB_NAME: 'edxapp'
EDXAPP_MYSQL_DB_NAME: 'edxapp'
EDXAPP_MYSQL_USER: 'root'
EDXAPP_MYSQL_PASSWORD: ''
EDXAPP_MYSQL_HOST: 'localhost'
EDXAPP_MYSQL_PORT: '3306'
EDXAPP_EMAIL_BACKEND: 'django.core.mail.backends.smtp.EmailBackend'
EDXAPP_LOG_LEVEL: 'INFO'
EDXAPP_MEMCACHE: [ 'localhost:11211' ]
EDXAPP_COMMENTS_SERVICE_URL: 'http://localhost:4567'
EDXAPP_COMMENTS_SERVICE_KEY: 'password'
EDXAPP_EDXAPP_SECRET_KEY: ''
EDXAPP_PEARSON_TEST_PASSWORD: ''
EDXAPP_OEE_URL: 'http://localhost:18091/'
EDXAPP_OEE_USER: 'lms'
EDXAPP_OEE_PASSWORD: 'password'
EDXAPP_ANALYTICS_API_KEY: ''
EDXAPP_ZENDESK_USER: ''
EDXAPP_ZENDESK_API_KEY: ''
EDXAPP_CELERY_USER: 'celery'
EDXAPP_CELERY_PASSWORD: ''
EDXAPP_MITX_FEATURES:
AUTH_USE_OPENID_PROVIDER: true
CERTIFICATES_ENABLED: true
ENABLE_DISCUSSION_SERVICE: true
ENABLE_INSTRUCTOR_ANALYTICS: true
ENABLE_PEARSON_HACK_TEST: false
SUBDOMAIN_BRANDING: false
SUBDOMAIN_COURSE_LISTINGS: false
PREVIEW_LMS_BASE: $EDXAPP_PREVIEW_LMS_BASE
EDXAPP_BOOK_URL: ''
EDXAPP_SITE_NAME: 'example.com'
EDXAPP_MEDIA_URL: ''
EDXAPP_ANALYTICS_SERVER_URL: ''
EDXAPP_FEEDBACK_SUBMISSION_EMAIL: ''
EDXAPP_CELERY_BROKER_HOSTNAME: ''
EDXAPP_LOGGING_ENV: 'sandbox'
EDXAPP_SYSLOG_SERVER: ''
EDXAPP_RABBIT_HOSTNAME: 'rabbit.{{ENV_NAME}}.vpc.edx.org'
EDXAPP_XML_MAPPINGS: {}
EDXAPP_LMS_NGINX_PORT: 80
EDXAPP_LMS_PREVIEW_NGINX_PORT: 18020
EDXAPP_CMS_NGINX_PORT: 18010
EDXAPP_LMS_BASIC_AUTH: False
EDXAPP_CMS_BASIC_AUTH: False
EDXAPP_LMS_PREVIEW_BASIC_AUTH: False
#-------- Everything below this line is internal to the role ------------
#Use YAML references (& and *) and hash merge <<: to factor out shared settings #Use YAML references (& and *) and hash merge <<: to factor out shared settings
#see http://atechie.net/2009/07/merging-hashes-in-yaml-conf-files/ #see http://atechie.net/2009/07/merging-hashes-in-yaml-conf-files/
edxapp_generic_auth_config: &edxapp_generic_auth edxapp_generic_auth_config: &edxapp_generic_auth
'AWS_ACCESS_KEY_ID': '' AWS_ACCESS_KEY_ID: $EDXAPP_AWS_ACCESS_KEY_ID
'AWS_SECRET_ACCESS_KEY': '' AWS_SECRET_ACCESS_KEY: $EDXAPP_AWS_SECRET_ACCESS_KEY
'SECRET_KEY': '' SECRET_KEY: $EDXAPP_EDXAPP_SECRET_KEY
'XQUEUE_INTERFACE': XQUEUE_INTERFACE:
'basic_auth': [ 'edx', 'edx'] basic_auth: $EDXAPP_XQUEUE_BASIC_AUTH
'django_auth': { 'password': 'password', django_auth: $EDXAPP_XQUEUE_DJANGO_AUTH
'username': 'lms'} url: $EDXAPP_XQUEUE_URL
'url': 'http://localhost:18040' DOC_STORE_CONFIG: &edxapp_generic_default_docstore
'CONTENTSTORE': db: $EDXAPP_MONGO_DB_NAME
'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore' host: $EDXAPP_MONGO_HOSTS
'OPTIONS': password: $EDXAPP_MONGO_PASSWORD
'db': 'edxapp' port: $EDXAPP_MONGO_PORT
'host': [ 'localhost' ] user: $EDXAPP_MONGO_USER
'password': 'password' collection: 'modulestore'
'port': 27017 CONTENTSTORE:
'user': 'mongo' ENGINE: 'xmodule.contentstore.mongo.MongoContentStore'
'MODULESTORE': #
'default': # connection strings are duplicated temporarily for
'ENGINE': 'xmodule.modulestore.mongo.DraftMongoModuleStore' # backward compatibility
'OPTIONS': &generic_modulestore_default_options #
'collection': 'modulestore' OPTIONS:
'db': 'edxapp' db: $EDXAPP_MONGO_DB_NAME
'default_class': 'xmodule.hidden_module.HiddenDescriptor' host: $EDXAPP_MONGO_HOSTS
'fs_root': '/opt/wwc/data' password: $EDXAPP_MONGO_PASSWORD
'host': [ 'localhost' ] port: $EDXAPP_MONGO_PORT
'password': 'password' user: $EDXAPP_MONGO_USER
'port': 27017 DOC_STORE_CONFIG: *edxapp_generic_default_docstore
'render_template': 'mitxmako.shortcuts.render_to_string' MODULESTORE:
'user': 'mongo' default: &edxapp_generic_default_modulestore
'direct': ENGINE: 'xmodule.modulestore.mongo.DraftMongoModuleStore'
'ENGINE': 'xmodule.modulestore.mongo.MongoModuleStore' OPTIONS: &generic_modulestore_default_options
'OPTIONS': *generic_modulestore_default_options collection: 'modulestore'
'DATABASES': db: $EDXAPP_MONGO_DB_NAME
'default': default_class: 'xmodule.hidden_module.HiddenDescriptor'
'ENGINE': 'django.db.backends.mysql' fs_root: '/opt/wwc/data'
'NAME': 'edxapp' host: $EDXAPP_MONGO_HOSTS
'USER': 'root' password: $EDXAPP_MONGO_PASSWORD
'PASSWORD': '' port: $EDXAPP_MONGO_PORT
# Provide the name of a host running mysql. render_template: 'mitxmako.shortcuts.render_to_string'
'HOST': 'localhost' # Needed for the CMS to be able to run update_templates
'PORT': '3306' user: $EDXAPP_MONGO_USER
'PEARSON_TEST_PASSWORD': '' DOC_STORE_CONFIG: *edxapp_generic_default_docstore
'OPEN_ENDED_GRADING_INTERFACE': direct: &edxapp_generic_direct_modulestore
'url': 'http://localhost:18091/' ENGINE: 'xmodule.modulestore.mongo.MongoModuleStore'
'password': 'password' OPTIONS: *generic_modulestore_default_options
'peer_grading': 'peer_grading' DOC_STORE_CONFIG: *edxapp_generic_default_docstore
'staff_grading': 'staff_grading' DATABASES:
'grading_controller': 'grading_controller' default:
'username': 'lms' ENGINE: 'django.db.backends.mysql'
'ANALYTICS_API_KEY': '' NAME: $EDXAPP_MYSQL_DB_NAME
'ZENDESK_USER': '' USER: $EDXAPP_MYSQL_USER
'ZENDESK_API_KEY': '' PASSWORD: $EDXAPP_MYSQL_PASSWORD
'CELERY_BROKER_USER': 'celery' HOST: $EDXAPP_MYSQL_HOST
'CELERY_BROKER_PASSWORD': '' PORT: $EDXAPP_MYSQL_PORT
PEARSON_TEST_PASSWORD: $EDXAPP_PEARSON_TEST_PASSWORD
OPEN_ENDED_GRADING_INTERFACE:
url: $EDXAPP_OEE_URL
password: $EDXAPP_OEE_PASSWORD
peer_grading: 'peer_grading'
staff_grading: 'staff_grading'
grading_controller: 'grading_controller'
username: $EDXAPP_OEE_USER
ANALYTICS_API_KEY: $EDXAPP_ANALYTICS_API_KEY
ZENDESK_USER: $EDXAPP_ZENDESK_USER
ZENDESK_API_KEY: $EDXAPP_ZENDESK_API_KEY
CELERY_BROKER_USER: $EDXAPP_CELERY_USER
CELERY_BROKER_PASSWORD: $EDXAPP_CELERY_PASSWORD
generic_env_config: &edxapp_generic_env generic_env_config: &edxapp_generic_env
'LMS_BASE': "{{ c_lms_base }}" LMS_BASE: $EDXAPP_LMS_BASE
'BOOK_URL': '' CMS_BASE: $EDXAPP_CMS_BASE
'CERT_QUEUE': 'certificates' BOOK_URL: $EDXAPP_BOOK_URL
'LOCAL_LOGLEVEL': 'INFO' CERT_QUEUE: 'certificates'
LOCAL_LOGLEVEL: $EDXAPP_LOG_LEVEL
# default email backed set to local SMTP # default email backed set to local SMTP
'EMAIL_BACKEND': 'django.core.mail.backends.smtp.EmailBackend' EMAIL_BACKEND: $EDXAPP_EMAIL_BACKEND
'MITX_FEATURES': MITX_FEATURES: $EDXAPP_MITX_FEATURES
'AUTH_USE_OPENID_PROVIDER': true WIKI_ENABLED: true
'CERTIFICATES_ENABLED': true SYSLOG_SERVER: $EDXAPP_SYSLOG_SERVER
'ENABLE_DISCUSSION_SERVICE': true SITE_NAME: $EDXAPP_SITE_NAME
'ENABLE_INSTRUCTOR_ANALYTICS': true LOG_DIR: "{{ storage_base_dir }}/logs/edx"
'ENABLE_PEARSON_HACK_TEST': false MEDIA_URL: $EDXAPP_MEDIA_URL
'SUBDOMAIN_BRANDING': false ANALYTICS_SERVER_URL: $EDXAPP_ANALYTICS_SERVER_URL
'SUBDOMAIN_COURSE_LISTINGS': false FEEDBACK_SUBMISSION_EMAIL: $EDXAPP_FEEDBACK_SUBMISSION_EMAIL
'PREVIEW_LMS_BASE': "{{ c_preview_lms_base }}" TIME_ZONE: 'America/New_York'
'WIKI_ENABLED': true CACHES:
'SYSLOG_SERVER': 'syslog.a.m.i4x.org' default: &default_generic_cache
'SITE_NAME': 'example.com' BACKEND: 'django.core.cache.backends.memcached.MemcachedCache'
'LOG_DIR': '/mnt/logs/edx' KEY_FUNCTION: 'util.memcache.safe_key'
'MEDIA_URL': '' KEY_PREFIX: 'sandbox_default'
'ANALYTICS_SERVER_URL': '' LOCATION: $EDXAPP_MEMCACHE
'FEEDBACK_SUBMISSION_EMAIL': '' general:
'TIME_ZONE': 'America/New_York'
'CACHES':
'default': &default_generic_cache
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache'
'KEY_FUNCTION': 'util.memcache.safe_key'
'KEY_PREFIX': 'sandbox_default'
'LOCATION': [ 'localhost:11211' ]
'general':
<<: *default_generic_cache <<: *default_generic_cache
'KEY_PREFIX': 'sandbox_general' KEY_PREFIX: 'sandbox_general'
'mongo_metadata_inheritance': mongo_metadata_inheritance:
<<: *default_generic_cache <<: *default_generic_cache
'KEY_PREFIX': 'integration_mongo_metadata_inheritance' KEY_PREFIX: 'integration_mongo_metadata_inheritance'
'staticfiles': staticfiles:
<<: *default_generic_cache <<: *default_generic_cache
'KEY_PREFIX': 'integration_static_files' KEY_PREFIX: 'integration_static_files'
'celery': celery:
<<: *default_generic_cache <<: *default_generic_cache
'KEY_PREFIX': 'integration_celery' KEY_PREFIX: 'integration_celery'
'CELERY_BROKER_TRANSPORT': 'amqp' CELERY_BROKER_TRANSPORT: 'amqp'
'CELERY_BROKER_HOSTNAME': '' CELERY_BROKER_HOSTNAME: $EDXAPP_RABBIT_HOSTNAME
'COMMENTS_SERVICE_URL': 'http://localhost:4567' COMMENTS_SERVICE_URL: $EDXAPP_COMMENTS_SERVICE_URL
'LOGGING_ENV': 'sandbox' LOGGING_ENV: $EDXAPP_LOGGING_ENV
'SESSION_COOKIE_DOMAIN': !!null SESSION_COOKIE_DOMAIN: !!null
'COMMENTS_SERVICE_KEY': 'password' COMMENTS_SERVICE_KEY: $EDXAPP_COMMENTS_SERVICE_KEY
'SEGMENT_IO_LMS': true SEGMENT_IO_LMS: true
'CODE_JAIL': CODE_JAIL:
'limits': limits:
'VMEM': 0 VMEM: 0
'REALTIME': 3 REALTIME: 3
lms_auth_config: lms_auth_config:
<<: *edxapp_generic_auth <<: *edxapp_generic_auth
'MODULESTORE': MODULESTORE:
'default': default:
'ENGINE': 'xmodule.modulestore.mongo.MongoModuleStore' ENGINE: 'xmodule.modulestore.mixed.MixedModuleStore'
'OPTIONS': *generic_modulestore_default_options OPTIONS:
mappings: $EDXAPP_XML_MAPPINGS
stores:
xml:
ENGINE: 'xmodule.modulestore.xml.XMLModuleStore'
OPTIONS:
data_dir: '/opt/wwc/data'
default_class: 'xmodule.hidden_module.HiddenDescriptor'
default:
OPTIONS:
default_class: 'xmodule.hidden_module.HiddenDescriptor'
host: $EDXAPP_MONGO_HOSTS
db: $EDXAPP_MONGO_DB_NAME
collection: 'modulestore'
render_template: 'mitxmako.shortcuts.render_to_string'
user: $EDXAPP_MONGO_USER
password: $EDXAPP_MONGO_PASSWORD
port: $EDXAPP_MONGO_PORT
fs_root: '/opt/wwc/data'
ENGINE: 'xmodule.modulestore.mongo.MongoModuleStore'
DOC_STORE_CONFIG: *edxapp_generic_default_docstore
lms_env_config: lms_env_config:
<<: *edxapp_generic_env <<: *edxapp_generic_env
lms_xml_auth_config:
<<: *edxapp_generic_auth
'MODULESTORE':
'default':
'ENGINE': 'xmodule.modulestore.xml.XMLModuleStore'
'OPTIONS':
'data_dir': '/opt/wwc/data'
'default_class': 'xmodule.hidden_module.HiddenDescriptor'
lms_xml_env_config:
<<: *edxapp_generic_env
cms_auth_config: cms_auth_config:
<<: *edxapp_generic_auth <<: *edxapp_generic_auth
cms_env_config: cms_env_config:
<<: *edxapp_generic_env <<: *edxapp_generic_env
lms_preview_auth_config: lms_preview_auth_config:
<<: *edxapp_generic_auth <<: *edxapp_generic_auth
'MODULESTORE': MODULESTORE:
'default': default: *edxapp_generic_default_modulestore
'ENGINE': 'xmodule.modulestore.mongo.DraftMongoModuleStore'
'OPTIONS': *generic_modulestore_default_options
lms_preview_env_config: lms_preview_env_config:
<<: *edxapp_generic_env <<: *edxapp_generic_env
...@@ -161,31 +252,20 @@ lms_preview_env_config: ...@@ -161,31 +252,20 @@ lms_preview_env_config:
# install dir for the edx-platform repo # install dir for the edx-platform repo
edx_platform_code_dir: "{{ app_base_dir }}/edx-platform" edx_platform_code_dir: "{{ app_base_dir }}/edx-platform"
# Default nginx listen ports
# These should be overrided if you want
# to serve all content on port 80
lms_xml_nginx_port: 18030 # gunicorn ports/hosts, these shouldn't need to be overridden
lms_nginx_port: 80 edxapp_cms_gunicorn_port: 8010
lms_preview_nginx_port: 18020 edxapp_cms_gunicorn_host: 127.0.0.1
cms_nginx_port: 18010 edxapp_lms_gunicorn_port: 8000
edxapp_lms_gunicorn_host: 127.0.0.1
edxapp_lms_preview_gunicorn_port: 8020
edxapp_lms_preview_gunicorn_host: 127.0.0.1
edxapp_cms_app_port: 8010
edxapp_lms_app_port: 8000
edxapp_lms_xml_app_port: 8030
edxapp_lms_preview_app_port: 8020
edxapp_cms_app_address: 127.0.0.1
edxapp_lms_app_address: 127.0.0.1
edxapp_lms_xml_app_address: 127.0.0.1
edxapp_lms_preview_app_address: 127.0.0.1
# These vars are for creating the application json config # These vars are for creating the application json config
# files. There are two for each service that uses the # files. There are two for each service that uses the
# 'edx-platform' code. Defining them will create the upstart # 'edx-platform' code. Defining them will create the upstart
# job and nginx configuration for the corresponding service. # job. It will also enable the corresponding section in the
# It will also enable the corresponding section in the
# 'edxapp' upstart job. # 'edxapp' upstart job.
service_variants_enabled: service_variants_enabled:
...@@ -199,18 +279,17 @@ edxapp_lms_env: 'lms.envs.aws' ...@@ -199,18 +279,17 @@ edxapp_lms_env: 'lms.envs.aws'
worker_core_mult: worker_core_mult:
lms: 4 lms: 4
lms_preview: 2 lms_preview: 2
lms_xml: 2
cms: 2 cms: 2
#Theming #Theming
#To turn off theming, specify edxapp_theme_name: '' #To turn off theming, specify edxapp_theme_name: ''
#Stanford, for example, uses edxapp_theme_name: 'stanford' #Stanford, for example, uses edxapp_theme_name: 'stanford'
edxapp_theme_name: '' edxapp_theme_name: ''
edxapp_theme_source_repo: 'https://github.com/Stanford-Online/edx-theme.git' edxapp_theme_source_repo: 'https://{{ GIT_MIRROR }}/Stanford-Online/edx-theme.git'
edxapp_theme_version: 'HEAD' edxapp_theme_version: 'HEAD'
# make this the public URL instead of writable # make this the public URL instead of writable
edx_platform_repo: https://github.com/edx/edx-platform.git edx_platform_repo: "https://{{ GIT_MIRROR }}/edx/edx-platform.git"
# `edx_platform_commit` can be anything that git recognizes as a commit # `edx_platform_commit` can be anything that git recognizes as a commit
# reference, including a tag, a branch name, or a commit hash # reference, including a tag, a branch name, or a commit hash
edx_platform_commit: 'release' edx_platform_commit: 'release'
...@@ -229,75 +308,32 @@ sandbox_post_requirements: "{{ edx_platform_code_dir }}/requirements/edx-sandbo ...@@ -229,75 +308,32 @@ sandbox_post_requirements: "{{ edx_platform_code_dir }}/requirements/edx-sandbo
install_sandbox_reqs_into_regular_venv: true install_sandbox_reqs_into_regular_venv: true
lms_debian_pkgs: lms_debian_pkgs:
- apparmor-utils # for compiling the virtualenv
- aspell # (only needed if wheel files aren't available)
- build-essential - build-essential
- curl - s3cmd
- dvipng - pkg-config
- fabric
- g++
- gcc
- gfortran
- ghostscript
- github-cli
- graphviz
- graphviz-dev - graphviz-dev
- gunicorn - graphviz
- inoticoming
- ipython
- libcrypt-ssleay-perl
- libcurl4-openssl-dev
- libdigest-sha-perl
- libfreetype6-dev
- libgeos-dev
- libgraphviz-dev
- libjpeg8-dev
- liblapack-dev
- liblwp-protocol-https-perl
- libmysqlclient-dev - libmysqlclient-dev
- libnet-amazon-ec2-perl # for scipy, do not install
- libpng12-dev # libopenblas-base, it will cause
- libreadline-dev # problems for numpy
- libreadline6-dev - gfortran
- libssl-dev - libatlas3gf-base
- libswitch-perl - liblapack-dev
- libwww-perl - g++
- libxml++2.6-dev
- libxml2-dev - libxml2-dev
- libxml2-utils
- libxslt1-dev - libxslt1-dev
- lynx-cur # apparmor
- maven2 - apparmor-utils
- mongodb-clients # misc
- mysql-client - curl
- ipython
- npm - npm
- ntp - ntp
- openjdk-7-jdk # for shapely
- openjdk-7-jre - libgeos-dev
- pep8
- perl
- pkg-config
- postfix
- pylint
- python-boto
- python-coverage-test-runner
- python-django-nose
- python-jenkins
- python-nose
- python-nosexcover
- python-numpy
- python-pip
- python-scipy
- rake
- reprepro
- rsyslog
- rubygems
- sqlite3
- super
- vagrant
- yui-compressor
- zip
- zlib1g-dev
# Ruby Specific Vars # Ruby Specific Vars
ruby_base: /opt/www ruby_base: /opt/www
......
...@@ -3,7 +3,6 @@ ...@@ -3,7 +3,6 @@
service: name=edxapp state=started service: name=edxapp state=started
tags: tags:
- lms - lms
- lms-xml
- lms-preview - lms-preview
- cms - cms
- deploy - deploy
...@@ -12,7 +11,6 @@ ...@@ -12,7 +11,6 @@
service: name=edxapp state=stopped service: name=edxapp state=stopped
tags: tags:
- lms - lms
- lms-xml
- lms-preview - lms-preview
- cms - cms
- deploy - deploy
...@@ -21,7 +19,6 @@ ...@@ -21,7 +19,6 @@
service: name=edxapp state=restarted service: name=edxapp state=restarted
tags: tags:
- lms - lms
- lms-xml
- lms-preview - lms-preview
- cms - cms
- deploy - deploy
# requires: # requires:
# - group_vars/all # - group_vars/all
# - common/tasks/main.yml # - common/tasks/main.yml
# - nginx/tasks/main.yml
--- ---
- name: create cms application config - name: create cms application config
template: src=cms.env.json.j2 dest=$app_base_dir/cms.env.json mode=640 owner=www-data group=adm template: src=cms.env.json.j2 dest=$app_base_dir/cms.env.json mode=640 owner=www-data group=adm
...@@ -9,6 +8,7 @@ ...@@ -9,6 +8,7 @@
- cms-env - cms-env
- cms - cms
- update - update
- deploy
- name: create cms auth file - name: create cms auth file
template: src=cms.auth.json.j2 dest=$app_base_dir/cms.auth.json mode=640 owner=www-data group=adm template: src=cms.auth.json.j2 dest=$app_base_dir/cms.auth.json mode=640 owner=www-data group=adm
...@@ -16,20 +16,16 @@ ...@@ -16,20 +16,16 @@
- cms-env - cms-env
- cms - cms
- update - update
- deploy
- include: ../../nginx/tasks/nginx_site.yml state=link site_name=cms
when: celery_worker is not defined
- include: ../../nginx/tasks/nginx_site.yml state=link site_name=cms-backend
when: celery_worker is not defined
- name: Create CMS log target directory - name: Create CMS log target directory
file: path={{log_base_dir}}/cms state=directory owner=syslog group=adm mode=2770 file: path={{log_base_dir}}/cms state=directory owner=syslog group=syslog mode=2750
tags: tags:
- cms - cms
- cms-env - cms-env
- logging - logging
- update - update
- deploy
# Creates CMS upstart file # Creates CMS upstart file
- include: upstart.yml basename=cms - include: upstart.yml basename=cms
......
...@@ -4,7 +4,6 @@ ...@@ -4,7 +4,6 @@
when: celery_worker is not defined when: celery_worker is not defined
tags: tags:
- lms - lms
- lms-xml
- lms-preview - lms-preview
- cms - cms
- deploy - deploy
...@@ -15,9 +14,20 @@ ...@@ -15,9 +14,20 @@
tags: tags:
- deploy - deploy
# update json configs for the application
- include: lms.yml
when: "'lms' in service_variants_enabled"
- include: cms.yml
when: "'cms' in service_variants_enabled"
- include: lms-preview.yml
when: "'lms-preview' in service_variants_enabled"
# Do A Checkout # Do A Checkout
- name: edxapp | checkout edx-platform repo into {{edx_platform_code_dir}} - name: edxapp | checkout edx-platform repo into {{edx_platform_code_dir}}
git: dest={{edx_platform_code_dir}} repo={{edx_platform_repo}} version={{edx_platform_commit}} git: dest={{edx_platform_code_dir}} repo={{edx_platform_repo}} version={{edx_platform_commit}}
register: edx_platform_checkout
tags: tags:
- lms - lms
- cms - cms
...@@ -26,6 +36,7 @@ ...@@ -26,6 +36,7 @@
- name: git clean after checking out edx-platform - name: git clean after checking out edx-platform
shell: cd {{edx_platform_code_dir}} && git clean -xdf shell: cd {{edx_platform_code_dir}} && git clean -xdf
when: edx_platform_checkout.changed
tags: tags:
- lms - lms
- cms - cms
...@@ -46,7 +57,6 @@ ...@@ -46,7 +57,6 @@
when: edxapp_theme_name != '' when: edxapp_theme_name != ''
tags: tags:
- cms - cms
- lms-xml
- lms-preview - lms-preview
- lms - lms
- update - update
...@@ -60,6 +70,7 @@ ...@@ -60,6 +70,7 @@
file: path={{edx_platform_code_dir}} state=directory owner=www-data group=www-data recurse=yes file: path={{edx_platform_code_dir}} state=directory owner=www-data group=www-data recurse=yes
# Post Checkout tasks will get run as handlers when the {{ edx_platform_code_dir }} is ready. # Post Checkout tasks will get run as handlers when the {{ edx_platform_code_dir }} is ready.
# Look at the handlers/main.yml in this role for a description of the tasks stated below. # Look at the handlers/main.yml in this role for a description of the tasks stated below.
when: edx_platform_checkout.changed
tags: tags:
- lms - lms
- cms - cms
...@@ -68,14 +79,18 @@ ...@@ -68,14 +79,18 @@
# Ruby plays that need to be run after platform updates. # Ruby plays that need to be run after platform updates.
- name: gem | gem install bundler - name: gem | gem install bundler
shell: RBENV_ROOT={{ rbenv_root }} GEM_HOME={{ gem_home }} {{ rbenv_root }}/shims/gem install bundle chdir={{ edx_platform_code_dir }} shell: >
RBENV_ROOT={{ rbenv_root }} GEM_HOME={{ gem_home }} {{ rbenv_root }}/shims/gem install bundle
chdir={{ edx_platform_code_dir }}
tags: tags:
- ruby - ruby
- deploy - deploy
- install - install
- name: bundle | bundle install - name: bundle | bundle install
shell: RBENV_ROOT={{ rbenv_root }} GEM_HOME={{ gem_home }} {{ gem_home }}/bin/bundle install --binstubs chdir={{ edx_platform_code_dir }} shell: >
RBENV_ROOT={{ rbenv_root }} GEM_HOME={{ gem_home }} {{ gem_home }}/bin/bundle install --binstubs
chdir={{ edx_platform_code_dir }}
tags: tags:
- ruby - ruby
- deploy - deploy
...@@ -91,9 +106,33 @@ ...@@ -91,9 +106,33 @@
# Python plays that need to be run after platform updates. # Python plays that need to be run after platform updates.
# Substitute github mirror in all requirements files
#
- name: Updating requirement files for git mirror
command: |
/bin/sed -i -e 's/github\.com/{{ GIT_MIRROR }}/g' {{ item }}
with_items:
- "{{ pre_requirements_file }}"
- "{{ post_requirements_file }}"
- "{{ repo_requirements_file }}"
- "{{ github_requirements_file }}"
- "{{ local_requirements_file }}"
- "{{ sandbox_base_requirements }}"
- "{{ sandbox_local_requirements }}"
- "{{ sandbox_post_requirements }}"
tags:
- lms
- cms
- install
- deploy
# Install the python pre requirements into {{ venv_dir }} # Install the python pre requirements into {{ venv_dir }}
- name : install python pre-requirements - name : install python pre-requirements
pip: requirements="{{pre_requirements_file}}" virtualenv="{{venv_dir}}" state=present pip: >
requirements="{{pre_requirements_file}}"
virtualenv="{{venv_dir}}"
state=present
extra_args="-i {{ PYPI_MIRROR_URL }}"
tags: tags:
- lms - lms
- cms - cms
...@@ -105,7 +144,7 @@ ...@@ -105,7 +144,7 @@
# Need to use shell rather than pip so that we can maintain the context of our current working directory; some # Need to use shell rather than pip so that we can maintain the context of our current working directory; some
# requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly # requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
# installs everything into that virtual environment. # installs everything into that virtual environment.
shell: cd {{ edx_platform_code_dir }} && {{ venv_dir }}/bin/pip install --exists-action w --use-mirrors -r {{ base_requirements_file }} shell: cd {{ edx_platform_code_dir }} && {{ venv_dir }}/bin/pip install -i {{ PYPI_MIRROR_URL }} --exists-action w --use-mirrors -r {{ base_requirements_file }}
tags: tags:
- lms - lms
- cms - cms
...@@ -114,7 +153,11 @@ ...@@ -114,7 +153,11 @@
# Install the python post requirements into {{ venv_dir }} # Install the python post requirements into {{ venv_dir }}
- name : install python post-requirements - name : install python post-requirements
pip: requirements="{{post_requirements_file}}" virtualenv="{{venv_dir}}" state=present pip: >
requirements="{{post_requirements_file}}"
virtualenv="{{venv_dir}}"
state=present
extra_args="-i {{ PYPI_MIRROR_URL }}"
tags: tags:
- lms - lms
- cms - cms
...@@ -126,7 +169,7 @@ ...@@ -126,7 +169,7 @@
# Need to use shell rather than pip so that we can maintain the context of our current working directory; some # Need to use shell rather than pip so that we can maintain the context of our current working directory; some
# requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly # requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
# installs everything into that virtual environment. # installs everything into that virtual environment.
shell: cd {{ edx_platform_code_dir }} && {{ venv_dir }}/bin/pip install --exists-action w --use-mirrors -r {{ item }} shell: cd {{ edx_platform_code_dir }} && {{ venv_dir }}/bin/pip install -i {{ PYPI_MIRROR_URL }} --exists-action w --use-mirrors -r {{ item }}
with_items: with_items:
- "{{ repo_requirements_file }}" - "{{ repo_requirements_file }}"
- "{{ github_requirements_file }}" - "{{ github_requirements_file }}"
...@@ -143,7 +186,7 @@ ...@@ -143,7 +186,7 @@
# Need to use shell rather than pip so that we can maintain the context of our current working directory; some # Need to use shell rather than pip so that we can maintain the context of our current working directory; some
# requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly # requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
# installs everything into that virtual environment. # installs everything into that virtual environment.
shell: cd {{ edx_platform_code_dir }} && {{ venv_dir }}/bin/pip install --exists-action w --use-mirrors -r {{ item }} shell: cd {{ edx_platform_code_dir }} && {{ venv_dir }}/bin/pip install -i {{ PYPI_MIRROR_URL }} --exists-action w --use-mirrors -r {{ item }}
with_items: with_items:
- "{{ sandbox_base_requirements }}" - "{{ sandbox_base_requirements }}"
- "{{ sandbox_local_requirements }}" - "{{ sandbox_local_requirements }}"
...@@ -158,36 +201,6 @@ ...@@ -158,36 +201,6 @@
- name: changing group ownership to www-data for everything in the venv (workaround) - name: changing group ownership to www-data for everything in the venv (workaround)
shell: chgrp -R www-data {{ venv_dir }} shell: chgrp -R www-data {{ venv_dir }}
# Gather lms assets using rake if possible
- name: gather lms static assets with rake
shell: executable=/bin/bash chdir={{ edx_platform_code_dir }} SERVICE_VARIANT={{ lms_variant }} rake lms:gather_assets:aws
notify:
- restart edxapp
sudo: yes
sudo_user: www-data
when: celery_worker is not defined
environment: "{{ deploy_environment }}"
tags:
- lms
- lms-preview
- lms-xml
- deploy
# Gather cms assets using rake if possible
- name: gather cms static assets with rake
shell: executable=/bin/bash chdir={{ edx_platform_code_dir }} SERVICE_VARIANT={{ cms_variant }} rake cms:gather_assets:aws
notify:
- restart edxapp
sudo: yes
sudo_user: www-data
when: celery_worker is not defined
environment: "{{ deploy_environment }}"
tags:
- cms
- deploy
# https://code.launchpad.net/~wligtenberg/django-openid-auth/mysql_fix/+merge/22726 # https://code.launchpad.net/~wligtenberg/django-openid-auth/mysql_fix/+merge/22726
# This is necessary for when syncdb is run and the django_openid_auth module is installed, # This is necessary for when syncdb is run and the django_openid_auth module is installed,
# not sure if this fix will ever get merged # not sure if this fix will ever get merged
...@@ -198,7 +211,6 @@ ...@@ -198,7 +211,6 @@
tags: tags:
- deploy - deploy
- lms - lms
- lms-xml
- lms-preview - lms-preview
- cms - cms
- syncdb - syncdb
...@@ -209,7 +221,6 @@ ...@@ -209,7 +221,6 @@
tags: tags:
- deploy - deploy
- lms - lms
- lms-xml
- lms-preview - lms-preview
- cms - cms
- syncdb - syncdb
...@@ -220,17 +231,44 @@ ...@@ -220,17 +231,44 @@
tags: tags:
- deploy - deploy
- lms - lms
- lms-xml
- lms-preview - lms-preview
- cms - cms
- syncdb - migrate
# Gather lms assets using rake if possible
- name: gather lms static assets with rake
shell: executable=/bin/bash chdir={{ edx_platform_code_dir }} SERVICE_VARIANT={{ lms_variant }} rake lms:gather_assets:aws
notify:
- restart edxapp
sudo: yes
sudo_user: www-data
when: celery_worker is not defined
environment: "{{ deploy_environment }}"
tags:
- lms
- lms-preview
- deploy
# Gather cms assets using rake if possible
- name: gather cms static assets with rake
shell: executable=/bin/bash chdir={{ edx_platform_code_dir }} SERVICE_VARIANT={{ cms_variant }} rake cms:gather_assets:aws
notify:
- restart edxapp
sudo: yes
sudo_user: www-data
when: celery_worker is not defined
environment: "{{ deploy_environment }}"
tags:
- cms
- deploy
- name: restart edxapp - name: restart edxapp
service: name=edxapp state=restarted service: name=edxapp state=restarted
when: celery_worker is not defined when: celery_worker is not defined
tags: tags:
- lms - lms
- lms-xml
- lms-preview - lms-preview
- cms - cms
- deploy - deploy
......
# requires: # requires:
# - group_vars/all # - group_vars/all
# - common/tasks/main.yml # - common/tasks/main.yml
# - nginx/tasks/main.yml
--- ---
- name: create lms application config - name: create lms application config
template: src=lms-preview.env.json.j2 dest=$app_base_dir/lms-preview.env.json mode=640 owner=www-data group=adm template: src=lms-preview.env.json.j2 dest=$app_base_dir/lms-preview.env.json mode=640 owner=www-data group=adm
tags: tags:
- lms-preview - lms-preview
- lms-preview-env - lms-preview-env
- deploy
- name: create lms auth file - name: create lms auth file
template: src=lms-preview.auth.json.j2 dest=$app_base_dir/lms-preview.auth.json mode=640 owner=www-data group=adm template: src=lms-preview.auth.json.j2 dest=$app_base_dir/lms-preview.auth.json mode=640 owner=www-data group=adm
tags: tags:
- lms-preview - lms-preview
- lms-preview-env - lms-preview-env
- deploy
- name: Create lms-preview log target directory - name: Create lms-preview log target directory
file: path={{log_base_dir}}/lms-preview state=directory owner=syslog group=adm mode=2770 file: path={{log_base_dir}}/lms-preview state=directory owner=syslog group=syslog mode=2750
tags: tags:
- lms-preview - lms-preview
- lms-preview-env - lms-preview-env
- logging - logging
- update - update
- deploy
- include: ../../nginx/tasks/nginx_site.yml state=link site_name=lms-preview
when: celery_worker is not defined
- include: ../../nginx/tasks/nginx_site.yml state=link site_name=lms-preview-backend
when: celery_worker is not defined
# Creates LMS Preview upstart file # Creates LMS Preview upstart file
- include: upstart.yml basename=lms-preview - include: upstart.yml basename=lms-preview
# requires:
# - group_vars/all
# - common/tasks/main.yml
# - nginx/tasks/main.yml
---
- name: create lms-xml application config
template: src=lms-xml.env.json.j2 dest=$app_base_dir/lms-xml.env.json mode=640 owner=www-data group=adm
tags:
- lms-xml-env
- lms-xml
- update
- name: create lms-xml auth file
template: src=lms-xml.auth.json.j2 dest=$app_base_dir/lms-xml.auth.json mode=640 owner=www-data group=adm
tags:
- lms-xml-env
- lms-xml
- update
- name: Create lms-xml log target directory
file: path={{log_base_dir}}/lms-xml state=directory owner=syslog group=adm mode=2770
tags:
- lms-xml
- lms-xml-env
- logging
- update
- include: ../../nginx/tasks/nginx_site.yml state=link site_name=lms-xml
when: celery_worker is not defined
- include: ../../nginx/tasks/nginx_site.yml state=link site_name=lms-xml-backend
when: celery_worker is not defined
# Creates upstart file
- include: upstart.yml basename=lms-xml
when: celery_worker is not defined
- include: upstart.yml basename=edx-worker-lms-xml
when: celery_worker is defined
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
- lms - lms
- lms-env - lms-env
- update - update
- deploy
- name: create lms auth file - name: create lms auth file
template: src=lms.auth.json.j2 dest=$app_base_dir/lms.auth.json mode=640 owner=www-data group=adm template: src=lms.auth.json.j2 dest=$app_base_dir/lms.auth.json mode=640 owner=www-data group=adm
...@@ -12,20 +13,16 @@ ...@@ -12,20 +13,16 @@
- lms - lms
- lms-env - lms-env
- update - update
- deploy
- name: Create lms log target directory - name: Create lms log target directory
file: path={{log_base_dir}}/lms state=directory owner=syslog group=adm mode=2770 file: path={{log_base_dir}}/lms state=directory owner=syslog group=syslog mode=2750
tags: tags:
- lms - lms
- lms-env - lms-env
- logging - logging
- update - update
- deploy
- include: ../../nginx/tasks/nginx_site.yml state=link site_name=lms
when: celery_worker is not defined
- include: ../../nginx/tasks/nginx_site.yml state=link site_name=lms-backend
when: celery_worker is not defined
# Creates LMS upstart file # Creates LMS upstart file
- include: upstart.yml basename=lms - include: upstart.yml basename=lms
......
# requires: # requires:
# - group_vars/all # - group_vars/all
# - common/tasks/main.yml # - common/tasks/main.yml
# - nginx/tasks/main.yml
--- ---
- name: Change permissions on datadir - name: Change permissions on datadir
file: path={{ app_base_dir }}/data state=directory owner=www-data group=www-data file: path={{ app_base_dir }}/data state=directory owner=www-data group=www-data
...@@ -35,16 +34,6 @@ ...@@ -35,16 +34,6 @@
- cms - cms
- install - install
- include: lms.yml
when: "'lms' in service_variants_enabled"
- include: lms-xml.yml
when: "'lms-xml' in service_variants_enabled"
- include: cms.yml
when: "'cms' in service_variants_enabled"
- include: lms-preview.yml
when: "'lms-preview' in service_variants_enabled"
- name: creating edxapp upstart script - name: creating edxapp upstart script
sudo: True sudo: True
template: src=edxapp.conf.j2 dest=/etc/init/edxapp.conf owner=root group=root template: src=edxapp.conf.j2 dest=/etc/init/edxapp.conf owner=root group=root
......
...@@ -6,3 +6,4 @@ ...@@ -6,3 +6,4 @@
- upstart - upstart
- gunicorn - gunicorn
- update - update
- deploy
...@@ -16,8 +16,8 @@ env WORKERS={{ ansible_processor|length * worker_core_mult.cms }} ...@@ -16,8 +16,8 @@ env WORKERS={{ ansible_processor|length * worker_core_mult.cms }}
{% else %} {% else %}
env WORKERS={{ worker_core_mult.cms }} env WORKERS={{ worker_core_mult.cms }}
{% endif %} {% endif %}
env PORT={{edxapp_cms_app_port}} env PORT={{edxapp_cms_gunicorn_port}}
env ADDRESS={{edxapp_cms_app_address}} env ADDRESS={{edxapp_cms_gunicorn_host}}
env LANG=en_US.UTF-8 env LANG=en_US.UTF-8
env DJANGO_SETTINGS_MODULE=cms.envs.aws env DJANGO_SETTINGS_MODULE=cms.envs.aws
env SERVICE_VARIANT="cms" env SERVICE_VARIANT="cms"
......
# celery worker
# Templated and placed by ansible from jinja2 source
# Generic Celery worker Upstart script, parameterized by SERVICE_VARIANT and QUEUE
description "edx celery worker"
stop on stopping edx-workers
respawn
instance edx.${SERVICE_VARIANT}.core.${QUEUE}
#env NEW_RELIC_CONFIG_FILE=/opt/wwc/newrelic.ini
#env NEWRELIC={{venv_dir}}/bin/newrelic-admin
env CONCURRENCY=${CONCURRENCY}
env LOGLEVEL=info
env DJANGO_SETTINGS_MODULE={{worker_django_settings_module}}
env PYTHONPATH={{edx_platform_code_dir}}
env SERVICE_VARIANT=${SERVICE_VARIANT}
setuid www-data
chdir {{edx_platform_code_dir}}
exec {{venv_dir}}/bin/python {{edx_platform_code_dir}}/manage.py lms --service-variant=$SERVICE_VARIANT --settings=$DJANGO_SETTINGS_MODULE celery worker --loglevel=$LOGLEVEL --queues=edx.${SERVICE_VARIANT}.core.${QUEUE} --hostname=edx.${SERVICE_VARIANT}.core.${QUEUE}.`hostname` --concurrency=$CONCURRENCY
...@@ -20,10 +20,4 @@ pre-start script ...@@ -20,10 +20,4 @@ pre-start script
start edx-worker-lms QUEUE=high CONCURRENCY=4 SERVICE_VARIANT=lms start edx-worker-lms QUEUE=high CONCURRENCY=4 SERVICE_VARIANT=lms
{% endif %} {% endif %}
{% if 'lms-xml' in service_variants_enabled %}
start edx-worker-lms-xml QUEUE=low CONCURRENCY=1 SERVICE_VARIANT=lms-xml
start edx-worker-lms-xml QUEUE=default CONCURRENCY=3 SERVICE_VARIANT=lms-xml
start edx-worker-lms-xml QUEUE=high CONCURRENCY=4 SERVICE_VARIANT=lms-xml
{% endif %}
end script end script
...@@ -11,12 +11,6 @@ stop on runlevel [!2345] ...@@ -11,12 +11,6 @@ stop on runlevel [!2345]
## ##
pre-start script pre-start script
{% if 'lms-xml' in service_variants_enabled %}
if [ -e /etc/init/lms-xml.conf ]; then
start wait-for-state WAIT_FOR=lms-xml WAITER=$UPSTART_JOB
fi
{% endif %}
{% if 'lms' in service_variants_enabled %} {% if 'lms' in service_variants_enabled %}
if [ -e /etc/init/lms.conf ]; then if [ -e /etc/init/lms.conf ]; then
start wait-for-state WAIT_FOR=lms WAITER=$UPSTART_JOB start wait-for-state WAIT_FOR=lms WAITER=$UPSTART_JOB
...@@ -51,12 +45,6 @@ end script ...@@ -51,12 +45,6 @@ end script
pre-stop script pre-stop script
{% if 'lms-xml' in service_variants_enabled %}
if [ -e /etc/init/lms-xml.conf ]; then
start wait-for-state WAIT_FOR=lms-xml WAITER=$UPSTART_JOB TARGET_GOAL="stop"
fi
{% endif %}
{% if 'lms' in service_variants_enabled %} {% if 'lms' in service_variants_enabled %}
if [ -e /etc/init/lms.conf ]; then if [ -e /etc/init/lms.conf ]; then
start wait-for-state WAIT_FOR=lms WAITER=$UPSTART_JOB TARGET_GOAL="stop" start wait-for-state WAIT_FOR=lms WAITER=$UPSTART_JOB TARGET_GOAL="stop"
......
...@@ -17,8 +17,8 @@ env WORKERS={{ ansible_processor|length * worker_core_mult.lms_preview }} ...@@ -17,8 +17,8 @@ env WORKERS={{ ansible_processor|length * worker_core_mult.lms_preview }}
{% else %} {% else %}
env WORKERS={{ worker_core_mult.lms_preview }} env WORKERS={{ worker_core_mult.lms_preview }}
{% endif %} {% endif %}
env PORT={{edxapp_lms_preview_app_port}} env PORT={{edxapp_lms_preview_gunicorn_port}}
env ADDRESS={{edxapp_lms_preview_app_address}} env ADDRESS={{edxapp_lms_preview_gunicorn_host}}
env LANG=en_US.UTF-8 env LANG=en_US.UTF-8
env DJANGO_SETTINGS_MODULE=lms.envs.aws env DJANGO_SETTINGS_MODULE=lms.envs.aws
env SERVICE_VARIANT="lms-preview" env SERVICE_VARIANT="lms-preview"
......
# gunicorn
# Templated and placed by ansible from jinja2 source
description "lms-xml gunicorn server"
start on started edxapp
stop on stopped edxapp
respawn
respawn limit 3 30
env PID=/var/tmp/lms-xml.pid
#env NEW_RELIC_CONFIG_FILE={{app_base_dir}}/newrelic.ini
#env NEWRELIC={{venv_dir}}/bin/newrelic-admin
{% if ansible_processor|length > 0 %}
env WORKERS={{ ansible_processor|length * worker_core_mult.lms_xml }}
{% else %}
env WORKERS={{ worker_core_mult.lms_xml }}
{% endif %}
env PORT={{edxapp_lms_xml_app_port}}
env ADDRESS={{edxapp_lms_xml_app_address}}
env LANG=en_US.UTF-8
env DJANGO_SETTINGS_MODULE=lms.envs.aws
env SERVICE_VARIANT="lms-xml"
chdir {{edx_platform_code_dir}}
setuid www-data
exec {{venv_dir}}/bin/gunicorn --preload -b $ADDRESS:$PORT -w $WORKERS --timeout=300 --pythonpath={{edx_platform_code_dir}} lms.wsgi
post-start script
while true
do
if $(curl -s -i localhost:$PORT/heartbeat | egrep -q '200 OK'); then
break;
else
sleep 1;
fi
done
end script
...@@ -14,8 +14,8 @@ env WORKERS={{ ansible_processor|length * worker_core_mult.lms }} ...@@ -14,8 +14,8 @@ env WORKERS={{ ansible_processor|length * worker_core_mult.lms }}
{% else %} {% else %}
env WORKERS={{ worker_core_mult.lms }} env WORKERS={{ worker_core_mult.lms }}
{% endif %} {% endif %}
env PORT={{edxapp_lms_app_port}} env PORT={{edxapp_lms_gunicorn_port}}
env ADDRESS={{edxapp_lms_app_address}} env ADDRESS={{edxapp_lms_gunicorn_host}}
env LANG=en_US.UTF-8 env LANG=en_US.UTF-8
env DJANGO_SETTINGS_MODULE={{ edxapp_lms_env }} env DJANGO_SETTINGS_MODULE={{ edxapp_lms_env }}
env SERVICE_VARIANT="lms" env SERVICE_VARIANT="lms"
......
---
edxlocal_debian_pkgs:
- python-mysqldb
- mysql-server-5.5
- postfix
- python-pycurl
...@@ -2,7 +2,6 @@ ...@@ -2,7 +2,6 @@
# requires: # requires:
# - group_vars/all # - group_vars/all
# - common/tasks/main.yml # - common/tasks/main.yml
# - nginx/tasks/main.yml
# #
# This installs mysql-server-5.5 though # This installs mysql-server-5.5 though
# in production we use mysql-5.1.62. # in production we use mysql-5.1.62.
...@@ -11,14 +10,8 @@ ...@@ -11,14 +10,8 @@
# http://downloads.mysql.com/archives/mysql-5.1/mysql-5.1.62.tar.gz # http://downloads.mysql.com/archives/mysql-5.1/mysql-5.1.62.tar.gz
# #
--- ---
- name: edxlocal | install python-pymongo (req for ansible) - name: edxlocal| install packages needed for single server
pip: name=pymongo apt: pkg={{','.join(edxlocal_debian_pkgs)}} install_recommends=yes state=present
- name: edxlocal | install python-mysqldb (req for ansible)
apt: pkg=python-mysqldb state=present
- name: edxlocal | install mysql server and recommends
apt: pkg=mysql-server-5.5 state=present install_recommends=yes
- name: edxlocal | create a database for edxapp - name: edxlocal | create a database for edxapp
mysql_db: > mysql_db: >
...@@ -38,37 +31,5 @@ ...@@ -38,37 +31,5 @@
state=present state=present
encoding=utf8 encoding=utf8
- name: edxlocal | install mongo server and recommends
apt: pkg=mongodb-server state=present install_recommends=yes
- name: edxlocal | stop mongo service
service: name=mongodb state=stopped
- name: edxlocal | move mongodb to /mnt
command: mv /var/lib/mongodb /mnt/. creates=/mnt/mongodb
- name: edxlocal | create mongodb symlink
file: src=/mnt/mongodb dest=/var/lib/mongodb state=link
- name: edxlocal | start mongo service
service: name=mongodb state=started
- name: edxlocal | wait for mongo server to start
wait_for: port=27017 delay=2
- name: edxlocal | create a mongodb user for edxapp
mongodb_user: >
database=edxapp
name=edxapp
password=password
state=present
- name: edxlocal | create a mongodb user for forums
mongodb_user: >
database=cs_comments_service
name=cs_comments_service
password=password
state=present
- name: edxlocal | install memcached - name: edxlocal | install memcached
apt: pkg=memcached state=present apt: pkg=memcached state=present
--- ---
- name: forum | test that the required service are listening - name: forum | test that the required service are listening
wait_for: port={{ item.port }} host={{ item.host }} timeout=10 wait_for: port={{ item.port }} host={{ item.host }} timeout=30
with_items: "{{ forum_services }}" with_items: "{{ forum_services }}"
tags: tags:
- forum - forum
......
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Defaults for role gh_mirror
#
---
gh_mirror_nginx_port: 80
gh_mirror_server_name: 'git.*'
gh_mirror_data_dir: /var/git/mirrors
gh_mirror_app_dir: /opt/gh_mirror
gh_mirror_user: git-mirror
gh_mirror_group: git-mirror
gh_mirror_orgs:
- edX
- MITx
- eventbrite
- dementrock
- mfogel
- mitocw
- Stanford-Online
gh_mirror_debian_pkgs:
- fcgiwrap
gh_mirror_pip_pkgs:
- pyyaml
- requests
gh_mirror_app_files:
- repos_from_orgs.py
#!/usr/bin/python
# Given a list of repos in a yaml
# file will create or update mirrors
#
# Generates /var/tmp/repos.json from
# a yaml file containing a list of
# github organizations
import yaml
import sys
import requests
import json
import subprocess
import os
import logging
import fcntl
from os.path import dirname, abspath, join
from argparse import ArgumentParser
def check_running(run_type=''):
    pid_file = '{}-{}.pid'.format(
        os.path.basename(__file__), run_type)
fp = open(pid_file, 'w')
try:
fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
# another instance is running
sys.exit(0)
def run_cmd(cmd):
logging.debug('running: {}\n'.format(cmd))
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True)
for line in iter(process.stdout.readline, ""):
logging.debug(line)
def parse_args():
parser = ArgumentParser()
parser.add_argument('-r', '--refresh', action='store_true',
help="Refresh the list of repos", default=False)
parser.add_argument('-d', '--datadir', help="repo directory")
return parser.parse_args()
def refresh_cache():
path = dirname(abspath(__file__))
try:
with open(join(path, 'orgs.yml')) as f:
orgs = yaml.load(f)
except IOError:
print "Unable to read {}/orgs.yml, does it exist?".format(path)
sys.exit(1)
repos = []
for org in orgs:
page = 1
while True:
r = requests.get('https://api.github.com/users/{}/repos?page={}'.format(org, page))
org_data = r.json()
# request pages until we get zero results
if not isinstance(org_data, list) or len(org_data) == 0:
break
for repo_data in org_data:
if 'html_url' in repo_data:
repos.append({'html_url': repo_data['html_url'],
'name': repo_data['name'],
'org': repo_data['owner']['login']})
page += 1
with open('/var/tmp/repos.json', 'wb') as f:
f.write(json.dumps(repos))
def update_repos():
with open('/var/tmp/repos.json') as f:
repos = json.load(f)
for repo in repos:
repo_path = os.path.join(args.datadir, repo['org'], repo['name'] + '.git')
if not os.path.exists(repo_path):
run_cmd('mkdir -p {}'.format(repo_path))
run_cmd('git clone --mirror {} {}'.format(repo['html_url'], repo_path))
run_cmd('cd {} && git update-server-info'.format(repo_path))
else:
run_cmd('cd {} && git fetch --all --tags'.format(repo_path))
run_cmd('cd {} && git update-server-info'.format(repo_path))
if __name__ == '__main__':
args = parse_args()
logging.basicConfig(filename='/var/log/repos-from-orgs.log',
level=logging.DEBUG)
if args.refresh:
check_running('refresh')
refresh_cache()
else:
check_running()
if not args.datadir:
print "Please specificy a repository directory"
sys.exit(1)
if not os.path.exists('/var/tmp/repos.json'):
refresh_cache()
update_repos()
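
For reference, the cache that refresh_cache() drops at /var/tmp/repos.json is a flat JSON list; each entry carries the clone URL plus the repo and org names that update_repos() joins with the data directory to build the mirror path. A minimal sketch of reading the cache back (the data directory below simply mirrors the gh_mirror_data_dir default):

# Minimal sketch: read the cache refresh_cache() writes and print the mirror paths
# update_repos() would create. The data dir is just the role's default value.
import json
import os

DATA_DIR = '/var/git/mirrors'  # matches the gh_mirror_data_dir default

with open('/var/tmp/repos.json') as f:
    repos = json.load(f)

for repo in repos:
    # Each entry looks like: {"html_url": "...", "name": "...", "org": "..."}
    print(os.path.join(DATA_DIR, repo['org'], repo['name'] + '.git'))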
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role gh_mirror
#
# Overview:
# Creates a github read-only mirror server
# Will sync all public repos in gh_mirror_orgs
#
# Dependencies:
# - common
# - nginx
#
# Example play:
# roles:
# - common
# - role: nginx
# nginx_sites:
# - gh_mirror
# - gh_mirror
---
- name: gh_mirror | install pip packages
pip: name={{ item }} state=present
with_items: gh_mirror_pip_pkgs
- name: gh_mirror | install debian packages
apt: >
pkg={{ ",".join(gh_mirror_debian_pkgs) }}
state=present
update_cache=yes
- name: gh_mirror | create gh_mirror user
user: >
name={{ gh_mirror_user }}
state=present
- name: gh_mirror | create the gh_mirror data directory
file: >
path={{ gh_mirror_data_dir }}
state=directory
owner={{ gh_mirror_user }}
group={{ gh_mirror_group }}
- name: gh_mirror | create the gh_mirror app directory
file: >
path={{ gh_mirror_app_dir }}
state=directory
- name: gh_mirror | create org config
template: src=orgs.yml.j2 dest={{ gh_mirror_app_dir }}/orgs.yml
- name: copying sync scripts
copy: src={{ item }} dest={{ gh_mirror_app_dir }}/{{ item }}
with_items: "{{ gh_mirror_app_files }}"
- name: creating cron job to update repos
cron:
name: "update repos from github"
job: "/usr/bin/python {{ gh_mirror_app_dir }}/repos_from_orgs.py -d {{ gh_mirror_data_dir }}"
- name: creating cron to update github repo list
cron:
name: "refresh repo list from github"
job: "/usr/bin/python {{ gh_mirror_app_dir}}/repos_from_orgs.py -r"
minute: 0
# {{ ansible_managed }}
{{ gh_mirror_orgs | to_nice_yaml }}
---
# gh_users
#
# Creates OS accounts for users based on their github credentials.
# Takes a parameter, gh_users, which is a list of github usernames.
#
# roles:
# - role: gh_users
# gh_users:
# - user: github_admin_username
# groups:
# - adm
# - user: another_github_username
# groups: !!null
- fail: msg="gh_users list must be defined for this parameterized role"
when: not gh_users
- name: gh_users | create local user for github user
user:
name={{ item.user }}
groups={{ ",".join(item.groups) }}
shell=/bin/bash
with_items: gh_users
- name: gh_users | create .ssh directory
file:
path=/home/{{ item.user }}/.ssh state=directory mode=0700
owner={{ item.user }} group={{ item.user }}
with_items: gh_users
- name: gh_users | copy github key[s] to .ssh/authorized_keys
get_url:
url=https://github.com/{{ item.user }}.keys
dest=/home/{{ item.user }}/.ssh/authorized_keys mode=0600
owner={{ item.user }} group={{ item.user }}
with_items: gh_users
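
The get_url task works because GitHub serves each user's public SSH keys as plain text at https://github.com/<username>.keys. A hedged Python sketch of the same per-user step follows (the username and destination path are placeholders, and the ownership/permission handling done by the tasks above is omitted):

# Sketch of the per-user step above: fetch a github user's public keys and
# install them as that user's authorized_keys. Username and paths are placeholders.
import requests

def install_github_keys(username):
    resp = requests.get('https://github.com/{0}.keys'.format(username))
    resp.raise_for_status()
    dest = '/home/{0}/.ssh/authorized_keys'.format(username)
    with open(dest, 'w') as f:
        f.write(resp.text)

# install_github_keys('some_github_username')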
---
jenkins_home: /var/lib/jenkins
jenkins_phantomjs_url: https://phantomjs.googlecode.com/files/phantomjs-1.9.1-linux-x86_64.tar.bz2
jenkins_phantomjs_archive: phantomjs-1.9.1-linux-x86_64.tar.bz2
jenkins_phantomjs_folder: phantomjs-1.9.1-linux-x86_64
jenkins_user: "jenkins"
jenkins_user_home: /home/jenkins
jenkins_ruby_version: "1.9.3-p448"
jenkins_git_identity_path: "{{secure_dir}}/files/git-identity"
jenkins_debian_pkgs:
- ruby-bundler
- rubygems
- rbenv
- npm
- libgraphviz-dev
- gfortran
- libopenblas-dev
- liblapack-dev
- libxml2-dev
- libgeos-dev
- python-dev
- libmysqlclient-dev
- build-essential
- pkg-config
- libxslt1-dev
- rake
jenkins_plugins:
- ant.hpi
- backup.hpi
- build-name-setter.hpi
- build-timeout.hpi
- cobertura.hpi
- credentials.hpi
- cvs.hpi
- dashboard-view.hpi
- external-monitor-job.hpi
- ghprb.hpi
- git.hpi
- github.hpi
- git-client.hpi
- github-api.hpi
- github-oauth.hpi
- git-notes.hpi
- htmlpublisher.hpi
- javadoc.hpi
- jobConfigHistory.hpi
- ldap.hpi
- mailer.hpi
- mercurial.hpi
- nested-view.hpi
- next-build-number.hpi
- notification.hpi
- pam-auth.hpi
- parameterized-trigger.hpi
- postbuild-task.hpi
- rbenv.hpi
- ruby-runtime.hpi
- shiningpanda.hpi
- ssh-credentials.hpi
- ssh-slaves.hpi
- subversion.hpi
- thinBackup.hpi
- tmpcleaner.hpi
- token-macro.hpi
- translation.hpi
- violations.hpi
- multiple-scms.hpi
- timestamper.hpi
---
- name: start xvfb
service: name=xvfb state=started
- name: restart Jenkins
service: name=jenkins state=restarted
---
- name: browsers | Install Firefox
apt: pkg=firefox
- name: browsers | Download PhantomJS
get_url: url={{ jenkins_phantomjs_url }}
dest=/var/tmp/{{ jenkins_phantomjs_archive }}
- name: browsers | Untar PhantomJS
command: tar -xjf /var/tmp/{{ jenkins_phantomjs_archive }} -C /var/tmp/
creates=/var/tmp/{{ jenkins_phantomjs_folder }}
- name: browsers | Install PhantomJS
command: mv /var/tmp/{{ jenkins_phantomjs_folder }} /usr/local/bin/phantomjs
creates=/usr/local/bin/phantomjs
- name: browsers | Set PhantomJS permissions
file: path=/usr/local/bin/phantomjs mode=0755 state=directory
- name: browsers | Install chrome dependencies
apt: pkg=libgconf2-4,libxss1,libnss3-1d,libcurl3,xdg-utils
- name: browsers | Install unzip O_o
apt: pkg=unzip
- name: browsers | Install Google Chrome
get_url: url=https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb dest=/var/tmp/google-chrome-stable_current_amd64.deb
- name: browsers | Install Google Chrome 2
command: dpkg -i google-chrome-stable_current_amd64.deb
chdir=/var/tmp
ignore_errors: True
- name: browsers | Install ChromeDriver
get_url: url=https://chromedriver.googlecode.com/files/chromedriver_linux64_2.1.zip dest=/var/tmp/chromedriver_linux64_2.1.zip
- name: browsers | Install ChromeDriver 2
shell: unzip /var/tmp/chromedriver_linux64_2.1.zip
chdir=/var/tmp
- name: browsers | Install ChromeDriver 3
shell: mv /var/tmp/chromedriver /usr/local/bin/chromedriver
- name: browsers | Install Chromedriver 4
file: path=/usr/local/bin/chromedriver mode=0755
---
- name: Make backup directory
sudo_user: jenkins
shell: mkdir {{ jenkins_home }}/backup
- name: Git clone
sudo_user: jenkins
action: git repo=https://github.com/lapentab/jenkinsconfig.git dest={{jenkins_home}}/backup
---
- name: jenkins | Add the jenkins user to the edx group
user: name=jenkins append=yes groups="edx"
- name: jenkins | install jenkins specific system packages
apt: pkg={{','.join(jenkins_debian_pkgs)}} state=present
- name: jenkins | Install gcli
sudo_user: jenkins
shell: /home/jenkins/.rbenv/bin/rbenv exec gem install github_cli
# This is done so that it will not report back to github while testing
- name: jenkins | TEMPORARILY DISABLE gcli
shell: echo "#!/bin/bash\necho gcli" > /usr/local/bin/gcli
- name: jenkins | Install Jenkins
shell: wget -q -O - http://pkg.jenkins-ci.org/debian/jenkins-ci.org.key | sudo apt-key add -
- name: jenkins | Install Jenkins 2
shell: sh -c 'echo deb http://pkg.jenkins-ci.org/debian binary/ > /etc/apt/sources.list.d/jenkins.list'
- name: jenkins | Install Jenkins 3
shell: apt-get update
- name: jenkins | install jenkins 4
apt: pkg=jenkins state=present
- name: jenkins | Make plugins directory
sudo_user: jenkins
shell: mkdir -p {{ jenkins_home }}/plugins
- name: jenkins | Make virtualenv directory
shell: mkdir -p /mnt/virtualenvs
- name: jenkins | Chown virtualenv directory
file: path=/mnt/virtualenvs/ state=directory owner=jenkins recurse=yes
- name: jenkins | Make pip-cache directory
shell: mkdir -p /mnt/pip-cache
- name: jenkins | Chown pip-cache directory
file: path=/mnt/pip-cache/ state=directory owner=jenkins recurse=yes
- name: jenkins | Allow /usr/local/bin to be executable
shell: chmod -R go+x /usr/local/bin
- name: jenkins | Install Jenkins plugins
sudo_user: jenkins
get_url: url=http://updates.jenkins-ci.org/latest/${item}
dest={{ jenkins_home }}/plugins/${item}
with_items: "{{ jenkins_plugins }}"
#- timestamper.hpi
#- maven-plugin.hpi
notify:
- restart Jenkins
- name: jenkins | install ssh key for private git repos
copy: >
src="{{jenkins_git_identity_path}}"
dest="{{jenkins_user_home}}/.ssh/id_rsa"
force=yes
owner="{{jenkins_user}}"
group="{{jenkins_user}}"
mode=600
when: c_install_ssh_key is defined
---
- include: jenkins.yml
- include: xvfb.yml
- include: browsers.yml
- include: python-pkgs.yml
---
- name: python packages | Symbolic link site packages
file: src=/usr/local/lib/python2.7/site-packages dest=/usr/local/lib/python2.7/dist-packages state=link
ignore_errors: yes
- name: python packages | install numpy
shell: pip install numpy==1.6.2
- name: python packages | install scipy
shell: pip install scipy==0.11.0
# Set the display to the virtual frame buffer (Xvfb)
export DISPLAY=:1
jenkins_home: "{{ storage_base_dir }}/jenkins"
jenkins_user: "jenkins"
jenkins_group: "edx"
jenkins_server_name: "jenkins.testeng.edx.org"
jenkins_port: 8080
jenkins_deb_url: "http://pkg.jenkins-ci.org/debian/binary/jenkins_1.530_all.deb"
jenkins_deb: "jenkins_1.530_all.deb"
jenkins_plugins:
- { name: "build-name-setter", version: "1.3" }
- { name: "build-pipeline-plugin", version: "1.4" }
- { name: "build-timeout", version: "1.11" }
- { name: "cobertura", version: "1.9.2" }
- { name: "copyartifact", version: "1.28" }
- { name: "credentials", version: "1.8.3" }
- { name: "dashboard-view", version: "2.9.1" }
- { name: "ec2", version: "1.19" }
- { name: "github", version: "1.8" }
- { name: "github-api", version: "1.44" }
- { name: "github-oauth", version: "0.14" }
- { name: "htmlpublisher", version: "1.2" }
- { name: "javadoc", version: "1.1" }
- { name: "jobConfigHistory", version: "2.4" }
- { name: "jquery", version: "1.7.2-1" }
- { name: "nested-view", version: "1.10" }
- { name: "next-build-number", version: "1.0" }
- { name: "notification", version: "1.5" }
- { name: "pam-auth", version: "1.0" }
- { name: "parameterized-trigger", version: "2.20" }
- { name: "postbuild-task", version: "1.8" }
- { name: "s3", version: "0.5" }
- { name: "ssh-agent", version: "1.3" }
- { name: "ssh-credentials", version: "1.5.1" }
- { name: "ssh-slaves", version: "1.4" }
- { name: "shiningpanda", version: "0.20" }
- { name: "tmpcleaner", version: "1.1" }
- { name: "token-macro", version: "1.8.1" }
- { name: "translation", version: "1.10" }
- { name: "violations", version: "0.7.11" }
- { name: "multiple-scms", version: "0.2" }
- { name: "timestamper", version: "1.5.7" }
jenkins_bundled_plugins:
- "credentials"
- "git"
- "ssh-credentials"
- "ssh-slaves"
jenkins_custom_plugins:
- { repo_name: "git-client-plugin",
repo_url: "https://github.com/edx/git-client-plugin.git",
package: "git-client.hpi",
version: "2f7fc4648fe7239918a7babd0515930d40d0a761" }
- { repo_name: "git-plugin",
repo_url: "https://github.com/edx/git-plugin.git",
package: "git.hpi",
version: "4c2fb3517ca11b04dfc06c714530f885698fcfb7" }
jenkins_debian_pkgs:
- openjdk-7-jdk
- nginx
- git
- maven
- daemon
- python-pycurl
---
- name: jenkins_master | restart Jenkins
service: name=jenkins state=restarted
- name: jenkins_master | start nginx
service: name=nginx state=started
- name: jenkins_master | reload nginx
service: name=nginx state=reloaded
---
- name: jenkins_master | install jenkins specific system packages
apt:
pkg={{','.join(jenkins_debian_pkgs)}}
state=present update_cache=yes
tags:
- jenkins
- name: jenkins_master | Create jenkins group
group: name={{ jenkins_group }} state=present
- name: jenkins_master | Add the jenkins user to the group
user: name={{ jenkins_user }} append=yes groups={{ jenkins_group }}
- name: jenkins_master | Download Jenkins package
get_url: url="{{ jenkins_deb_url }}" dest="/tmp/{{ jenkins_deb }}"
- name: jenkins_master | Install Jenkins package
command: dpkg -i --force-depends "/tmp/{{ jenkins_deb }}"
- name: jenkins_master | Stop Jenkins
service: name=jenkins state=stopped
# Move /var/lib/jenkins to Jenkins home (on the EBS)
- name: jenkins_master | Move /var/lib/jenkins
command: mv /var/lib/jenkins {{ jenkins_home }}
creates={{ jenkins_home }}
- name: jenkins_master | Set owner for Jenkins home
file: path={{ jenkins_home }} recurse=yes state=directory
owner={{ jenkins_user }} group={{ jenkins_group }}
# Symlink /var/lib/jenkins to {{ storage_base_dir }}/jenkins
# since Jenkins will expect its files to be in /var/lib/jenkins
- name: jenkins_master | Symlink /var/lib/jenkins
file: src={{ jenkins_home }} dest=/var/lib/jenkins state=link
owner={{ jenkins_user }} group={{ jenkins_group }}
notify:
- jenkins_master | restart Jenkins
- name: jenkins_master | Make plugins directory
sudo_user: jenkins
shell: mkdir -p {{ jenkins_home }}/plugins
# We first download the plugins to a temp directory and include
# the version in the file name. That way, if we increment
# the version, the plugin will be updated in Jenkins
- name: jenkins_master | Download Jenkins plugins
get_url: url=http://updates.jenkins-ci.org/download/plugins/${item.name}/${item.version}/${item.name}.hpi
dest=/tmp/${item.name}_${item.version}
with_items: "{{ jenkins_plugins }}"
- name: jenkins_master | Install Jenkins plugins
command: cp /tmp/${item.name}_${item.version} {{ jenkins_home }}/plugins/${item.name}.hpi
with_items: "{{ jenkins_plugins }}"
- name: jenkins_master | Set Jenkins plugin permissions
file: path={{ jenkins_home }}/plugins/${item.name}.hpi
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
with_items: "{{ jenkins_plugins }}"
notify:
- jenkins_master | restart Jenkins
# We had to fork some plugins to workaround
# certain issues. If these changes get merged
# upstream, we may be able to use the regular plugin install process.
# Until then, we compile and install the forks ourselves.
- name: jenkins_master | Checkout custom plugin repo
git: repo=${item.repo_url} dest=/tmp/${item.repo_name} version=${item.version}
with_items: "{{ jenkins_custom_plugins }}"
- name: jenkins_master | Compile custom plugins
command: mvn -Dmaven.test.skip=true install chdir=/tmp/${item.repo_name}
with_items: "{{ jenkins_custom_plugins }}"
- name: jenkins_master | Install custom plugins
command: mv /tmp/${item.repo_name}/target/${item.package}
{{ jenkins_home }}/plugins/${item.package}
with_items: "{{ jenkins_custom_plugins }}"
notify:
- jenkins_master | restart Jenkins
- name: jenkins_master | Set custom plugin permissions
file: path={{ jenkins_home }}/plugins/${item.package}
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
with_items: "{{ jenkins_custom_plugins }}"
# Plugins that are bundled with Jenkins are "pinned".
# Jenkins will overwrite updated plugins with its built-in version
# unless we create a ".pinned" file for the plugin.
# See https://issues.jenkins-ci.org/browse/JENKINS-13129
- name: jenkins_master | Create plugin pin files
command: touch {{ jenkins_home }}/plugins/${item}.jpi.pinned
creates={{ jenkins_home }}/plugins/${item}.jpi.pinned
with_items: "{{ jenkins_bundled_plugins }}"
- name: jenkins_master | Set up nginx vhost
template:
src=etc/nginx/sites-available/jenkins.j2
dest=/etc/nginx/sites-available/jenkins
- name: jenkins_master | enable jenkins vhost
file:
src=/etc/nginx/sites-available/jenkins
dest=/etc/nginx/sites-enabled/jenkins
state=link
notify: jenkins_master | start nginx
server {
listen 80;
server_name {{ jenkins_server_name }};
location / {
proxy_pass http://localhost:{{ jenkins_port }};
# Rewrite HTTPS requests from WAN to HTTP requests on LAN
proxy_redirect http:// https://;
# The following settings from https://wiki.jenkins-ci.org/display/JENKINS/Running+Hudson+behind+Nginx
sendfile off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_connect_timeout 150;
proxy_send_timeout 100;
proxy_read_timeout 100;
proxy_buffers 4 32k;
client_max_body_size 8m;
client_body_buffer_size 128k;
}
}
\ No newline at end of file
---
jenkins_workspace: "{{ storage_base_dir }}/jenkins"
jenkins_phantomjs_url: https://phantomjs.googlecode.com/files/phantomjs-1.9.1-linux-x86_64.tar.bz2
jenkins_phantomjs_archive: phantomjs-1.9.1-linux-x86_64.tar.bz2
jenkins_phantomjs_folder: phantomjs-1.9.1-linux-x86_64
jenkins_user: "jenkins"
jenkins_group: "jenkins"
jenkins_user_home: /home/jenkins
# System packages
jenkins_debian_pkgs:
- ack-grep
- build-essential
- git
- gfortran
- graphviz
- lynx-cur
- libgraphviz-dev
- libopenblas-dev
- liblapack-dev
- libxml2-dev
- libgeos-dev
- libmysqlclient-dev
- libxslt1-dev
- mongodb
- npm
- pkg-config
- python2.7
- python-pip
- python2.7-dev
- unzip
- xml-twig-tools
# Chrome and ChromeDriver
chrome_apt_key: "https://dl-ssl.google.com/linux/linux_signing_key.pub"
chrome_apt_repo: "http://dl.google.com/linux/chrome/deb/"
chromedriver_version: 2.3
chromedriver_url: "http://chromedriver.storage.googleapis.com/{{ chromedriver_version }}/chromedriver_linux64.zip"
# Ruby Specific Vars
rbenv_root: "{{ jenkins_user_home }}/.rbenv"
rbenv_repo: "https://github.com/sstephenson/rbenv.git"
ruby_build_repo: "https://github.com/sstephenson/ruby-build.git"
ruby_version: "1.9.3-p374"
# JSCover direct download URL
jscover_url: "http://superb-dca2.dl.sourceforge.net/project/jscover/JSCover-1.0.2.zip"
jscover_version: "1.0.2"
# Mongo config
mongo_dir: "{{ storage_base_dir }}/mongodb"
mongo_log_dir: "{{ storage_base_dir }}/logs/mongodb"
# URL of S3 bucket containing pre-compiled Python packages
python_pkg_url: "https://s3.amazonaws.com/jenkins.python_pkgs"
python_download_dir: "{{ storage_base_dir }}/python_pkgs"
python_virtualenv: "{{ storage_base_dir}}/venv"
#!/usr/bin/env bash
set -e
#####################################################
#
# download_python_pkgs.sh
#
# Download .egg packages from an S3 bucket
#
# Usage:
#
# download_python_pkgs.sh S3_URL SAVE_DIR
#
# where `S3_URL` is the URL of an S3 bucket
# containing .egg files
#
# and `SAVE_DIR` is the directory in which to save
# the .egg files.
#
######################################################
if [ $# -ne 2 ]; then
echo "Usage: $0 S3_URL SAVE_DIR"
exit 1
fi
S3_URL=$1
SAVE_DIR=$2
# Create the save directory if it doesn't already exist
mkdir -p $SAVE_DIR
# Retrieve the list of files in the bucket
echo "Downloading Python packages from S3..."
curl $S3_URL | xml_grep 'Key' --text_only > $SAVE_DIR/python_pkgs.txt
# Download each package listed in the bucket
# If an error occurs, print a message but do not abort
echo "Downloading Python packages..."
while read package; do
curl $S3_URL/$package > $SAVE_DIR/$package || echo "Could not download $package"
done < $SAVE_DIR/python_pkgs.txt
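
The listing step relies on xml_grep to pull the <Key> elements out of the S3 bucket's XML index. A rough Python sketch of that same step, assuming only requests and the standard library (the bucket URL matches the python_pkg_url default and is otherwise a placeholder):

# Sketch of the "list the bucket" step: S3 returns an XML document whose
# <Contents><Key> elements name the objects in the bucket.
import requests
import xml.etree.ElementTree as ET

def list_bucket_keys(bucket_url):
    resp = requests.get(bucket_url)
    resp.raise_for_status()
    root = ET.fromstring(resp.content)
    # Ignore the S3 XML namespace by matching on the tag suffix.
    return [el.text for el in root.iter() if el.tag.endswith('Key')]

# for key in list_bucket_keys('https://s3.amazonaws.com/jenkins.python_pkgs'):
#     print(key)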
#!/usr/bin/env bash
set -e
#####################################################
#
# install_python_pkgs.sh
#
# Use easy_install to install all
# .egg files in a folder into a virtualenv.
#
# Usage:
#
# install_python_pkgs.sh EGG_DIR VENV
#
# where `EGG_DIR` is the directory containing
# the .egg files
#
# and `VENV` is the virtualenv in which to install
# the packages. If the virtualenv does not yet
# exist, it will be created.
#
# If the virtualenv has already been created
# and the packages installed, then the script
# will skip installation.
#
######################################################
if [ $# -ne 2 ]; then
echo "Usage: $0 EGG_DIR VENV"
exit 1
fi
EGG_DIR=$1
VENV=$2
if [ -e $VENV/install_finished ]; then
echo "$VENV already exists; skipping installation..."
else
# Create python egg cache and set correct permissions
PYTHON_EGG_CACHE=$HOME/.python-eggs
mkdir -p $PYTHON_EGG_CACHE
chmod 700 -R $PYTHON_EGG_CACHE
# Create and activate the virtualenv
echo "No virtualenv found; creating it..."
mkdir -p $VENV
virtualenv $VENV
. $VENV/bin/activate
# Install the .egg files into the virtualenv
echo "Installing Python eggs..."
for egg_file in $EGG_DIR/*.egg; do
easy_install $egg_file || true
done
# Create indicator that we finished successfully.
# If we were interrupted (maybe the job was aborted),
# then this file won't be created, so the next
# job will retry the installation (instead of skipping it).
touch $VENV/install_finished
fi
---
- name: jenkins_worker | Install Firefox
apt: pkg=firefox
- name: jenkins_worker | Install dbus-x11 (FF requirement)
apt: pkg=dbus-x11
- name: jenkins_worker | Download PhantomJS
get_url: url={{ jenkins_phantomjs_url }}
dest=/var/tmp/{{ jenkins_phantomjs_archive }}
- name: jenkins_worker | Untar PhantomJS
command: tar -xjf /var/tmp/{{ jenkins_phantomjs_archive }} -C /var/tmp/
creates=/var/tmp/{{ jenkins_phantomjs_folder }}
- name: jenkins_worker | Install PhantomJS
command: mv /var/tmp/{{ jenkins_phantomjs_folder }} /usr/local/bin/phantomjs
creates=/usr/local/bin/phantomjs
- name: jenkins_worker | Set PhantomJS permissions
file: path=/usr/local/bin/phantomjs mode=0755 state=directory
- name: jenkins_worker | Install Chrome dependencies
apt: pkg=libgconf2-4,libxss1,libnss3-1d,libcurl3,xdg-utils
- name: jenkins_worker | Google Chrome apt key
apt_key: url={{ chrome_apt_key }} state=present
- name: jenkins_worker | Google Chrome apt repo
apt_repository: repo='deb {{ chrome_apt_repo }} stable main'
- name: jenkins_worker | Install Google Chrome
apt: pkg=google-chrome-stable state=present update_cache=yes
- name: jenkins_worker | Install ChromeDriver
get_url: url={{ chromedriver_url }}
dest=/var/tmp/chromedriver_{{ chromedriver_version }}.zip
- name: jenkins_worker | Install ChromeDriver 2
shell: unzip /var/tmp/chromedriver_{{ chromedriver_version }}.zip
chdir=/var/tmp
- name: jenkins_worker | Install ChromeDriver 3
shell: mv /var/tmp/chromedriver /usr/local/bin/chromedriver
- name: jenkins_worker | Install Chromedriver 4
file: path=/usr/local/bin/chromedriver mode=0755
---
- name: jenkins_worker | Install Java
apt: pkg=openjdk-7-jre-headless state=present
- name: jenkins_worker | Download JSCover
get_url: url={{ jscover_url }} dest=/var/tmp/jscover.zip
- name: jenkins_worker | Unzip JSCover
shell: unzip /var/tmp/jscover.zip -d /var/tmp/jscover
creates=/var/tmp/jscover
- name: jenkins_worker | Install JSCover JAR
command: cp /var/tmp/jscover/target/dist/JSCover-all.jar /usr/local/bin/JSCover-all-{{ jscover_version }}.jar
creates=/usr/local/bin/JSCover-all-{{ jscover_version }}.jar
- name: jenkins_worker | Set JSCover permissions
file: path="/usr/local/bin/JSCover-all-{{ jscover_version }}.jar" state=file
owner=root group=root mode=0755
---
# jenkins
#
# Provision a Jenkins instance.
#
# Parameters:
# `jenkins_user`: jenkins
# `jenkins_home`: /var/lib/jenkins
# `jenkins_user_home`: /home/jenkins
- include: system.yml
- include: python.yml
- include: ruby.yml
- include: mongo.yml
- include: xvfb.yml
- include: browsers.yml
- include: jscover.yml
---
# Configure Mongo to use {{ storage_base_dir }} so we don't
# run out of disk space
- name: jenkins_worker | Stop mongo service
service: name=mongodb state=stopped
- name: jenkins_worker | Configure Mongo
template: src=mongodb_conf.j2
dest=/etc/mongodb.conf
owner=root
group=root
- name: jenkins_worker | Configure Mongo upstart script
template: src=mongodb_upstart.j2
dest=/etc/init/mongodb.conf
owner=root
group=root
- name: jenkins_worker | Start the mongo service
service: name=mongodb state=started
---
- name: jenkins_worker | Install virtualenv
pip: name=virtualenv state=present
- name: jenkins_worker | Install virtualenv wrapper
pip: name=virtualenvwrapper state=present
- name: jenkins_worker | Install requests
pip: name=requests state=present
# Install bash scripts
- name: jenkins_worker | Install Python packages scripts
copy: src="${item}" dest="/usr/local/bin/${item}"
force=yes
owner=root group=root
mode=755
with_items:
- download_python_pkgs.sh
- install_python_pkgs.sh
# Install scripts requiring a GitHub OAuth token
- fail: msg="jenkins_worker | OAuth token not defined"
when: github_oauth_token is not defined
- name: jenkins_worker | Install Python GitHub PR auth script
template: src="github_pr_auth.py.j2" dest="/usr/local/bin/github_pr_auth.py"
owner=root group=root
mode=755
- name: jenkins_worker | Install Python GitHub post status script
template: src="github_post_status.py.j2" dest="/usr/local/bin/github_post_status.py"
owner=root group=root
mode=755
# Install upstart script to download Python packages from S3
- name: jenkins_worker | Install Python packages upstart script
template: src="python_pkgs.conf.j2" dest="/etc/init/python_pkgs.conf"
---
- name: jenkins_worker | Install rbenv
git: repo={{ rbenv_repo }} dest={{ rbenv_root }}
sudo_user: "{{ jenkins_user }}"
- name: jenkins_worker | Install ruby-build
git: repo={{ ruby_build_repo }} dest={{ rbenv_root }}/plugins/ruby-build
sudo_user: "{{ jenkins_user }}"
- name: jenkins_worker | Install ruby
command: "{{ rbenv_root }}/bin/rbenv install {{ ruby_version }}
creates={{ rbenv_root }}/versions/{{ ruby_version }}"
sudo_user: "{{ jenkins_user }}"
- name: jenkins_worker | Set global ruby
command: "{{ rbenv_root }}/bin/rbenv global {{ ruby_version }}"
sudo_user: "{{ jenkins_user }}"
- name: jenkins_worker | Install bundler
command: "{{ rbenv_root }}/shims/gem install bundler"
sudo_user: "{{ jenkins_user }}"
- name: jenkins_worker | Rbenv rehash
command: "{{ rbenv_root }}/bin/rbenv rehash"
sudo_user: "{{ jenkins_user }}"
---
- name: jenkins_worker | Create jenkins group
group: name={{ jenkins_group }} state=present
- name: jenkins_worker | Add the jenkins user to the group
user: name={{ jenkins_user }} append=yes group={{ jenkins_group }}
# We need the upstart script to create the build directory
# so that (a) it will be run when a new instance is created
# on the current EBS, and (b) it will be run as root.
- name: jenkins_worker | Install upstart script to create build dir
template: src=jenkins_workspace.conf.j2
dest=/etc/init/jenkins_workspace.conf
owner=root group=root
# Because of a bug in the latest release of the EC2 plugin
# we need to use a key generated by Amazon (not imported)
# To satisfy this, we allow users to log in as Jenkins
# using the same keypair the instance was started with.
- name: jenkins_worker | Create .ssh directory
file: path={{ jenkins_user_home }}/.ssh state=directory
owner={{ jenkins_user }} group={{ jenkins_group }}
- name: jenkins_worker | Copy ssh keys for jenkins
command: cp /home/ubuntu/.ssh/authorized_keys /home/{{ jenkins_user }}/.ssh/authorized_keys
- name: jenkins_worker | Set key permissions
file: path={{ jenkins_user_home }}/.ssh/authorized_keys
owner={{ jenkins_user }} group={{ jenkins_group }}
mode=400
# Ensure that we get a current version of Git
# GitHub requires version 1.7.10 or later
# https://help.github.com/articles/https-cloning-errors
- name: jenkins_worker | Add git apt repository
apt_repository: repo='ppa:git-core/ppa'
- name: jenkins_worker | Install system packages
apt: pkg={{','.join(jenkins_debian_pkgs)}}
state=present update_cache=yes
- name: jenkins_worker | Add script to set up environment variables
template: src=jenkins_env.sh.j2 dest=/usr/local/bin/jenkins_env.sh
owner=root group=root mode=0555
--- ---
- name: xvfb | install xvfb - name: jenkins_worker | install xvfb
apt: pkg=xvfb state=present apt: pkg=xvfb state=present
- name: xvfb | configure display - name: jenkins_worker | create xvfb upstart script
template: src=xvfb.sh.j2 dest=/etc/profile.d/xvfb.sh owner=root group=root mode=0755
- name: xvfb | create xvfb upstart script
template: src=xvfb.conf.j2 dest=/etc/init/xvfb.conf owner=root group=root template: src=xvfb.conf.j2 dest=/etc/init/xvfb.conf owner=root group=root
- name: xvfb | start xvfb - name: jenkins_worker | start xvfb
shell: start xvfb shell: start xvfb
ignore_errors: yes ignore_errors: yes
#!/usr/bin/env python
"""
Update the status of a GitHub commit.
"""
import sys
import requests
import json
from textwrap import dedent
# The Ansible script will fill in the GitHub OAuth token.
# That way, we can give the jenkins user on the worker
# execute-only access to this script, ensuring that
# the jenkins user cannot retrieve the token.
GITHUB_OAUTH_TOKEN = "{{ github_oauth_token }}"
USAGE = "Usage: {0} ORG REPO SHA STATUS TARGET_URL DESCRIPTION"
VALID_STATUS_LIST = ['pending', 'success', 'error', 'failure']
def parse_args(arg_list):
"""
Parse the list of arguments, returning a dict.
Prints an error message and exits if the arguments are invalid.
"""
if len(arg_list) != 7:
print USAGE.format(arg_list[0])
exit(1)
# Check that the build status is valid
status = arg_list[4]
if not status in VALID_STATUS_LIST:
print "Invalid status: must be one of {0}".format(", ".join(VALID_STATUS_LIST))
exit(1)
return {
'org': arg_list[1],
'repo': arg_list[2],
'sha': arg_list[3],
'status': arg_list[4],
'target_url': arg_list[5],
'description': arg_list[6]
}
def post_status(org, repo, sha, status, target_url, description):
"""
Post a new status to GitHub.
See http://developer.github.com/v3/repos/statuses/ for details.
Prints an error message and exits if unsuccessful.
"""
url = "https://api.github.com/repos/{0}/{1}/statuses/{2}?access_token={3}".format(
org, repo, sha, GITHUB_OAUTH_TOKEN
)
params = {
'state': status,
'target_url': target_url,
'description': description
}
response = requests.post(url, data=json.dumps(params))
if response.status_code != 201:
print dedent("""
Could not post status:
HTTP response code is {0}
Content: {1}
""").format(response.status_code, response.text).strip()
exit(1)
def main():
"""
Post the status to GitHub.
"""
if not GITHUB_OAUTH_TOKEN:
print "No GitHub Oauth token configured."
exit(1)
arg_dict = parse_args(sys.argv)
post_status(
arg_dict['org'], arg_dict['repo'],
arg_dict['sha'], arg_dict['status'],
arg_dict['target_url'], arg_dict['description']
)
if __name__ == "__main__":
main()
#!/usr/bin/env python
"""
Determine whether we allow a GitHub PR to be
built automatically. Checks a whitelist
of repo owners and compares to the HEAD
repo of the pull request.
Uses an environment variable `GITHUB_OWNER_WHITELIST`
to check whether the owner of the PR repo is whitelisted.
This is a comma-separated list of organizations and
users. For example, a bash script might define:
export GITHUB_OWNER_WHITELIST="edx,a_user,another_user"
to allow PRs from repos owned by "edx", "a_user", and "another_user"
"""
import sys
import os
import requests
from textwrap import dedent
# The Ansible script will fill in the GitHub OAuth token.
# That way, we can give the jenkins user on the worker
# execute-only access to this script, ensuring that
# the jenkins user cannot retrieve the token.
GITHUB_OAUTH_TOKEN = "{{ github_oauth_token }}"
USAGE = "Usage: {0} ORG REPO PULL_REQUEST_NUM"
def parse_args(arg_list):
"""
Parse the list of arguments, returning a dict of the form
{
'org': GITHUB_ORG,
'repo': GITHUB_REPO,
'pr_num': GITHUB_PR_NUM
}
Prints an error message and exits if the arguments are invalid.
"""
if len(arg_list) != 4:
print USAGE.format(arg_list[0])
exit(1)
# Retrieve the PR number and check that it's an integer
try:
pr_num = int(arg_list[3])
    except ValueError:
        print "'{0}' is not a number".format(arg_list[3])
        exit(1)
return {
'org': arg_list[1],
'repo': arg_list[2],
'pr_num': pr_num
}
def pr_repo_owner(org, repo, pr_num):
"""
Return the name of the owner of the repo from the
HEAD of the PR.
"""
# Query GitHub for information about the pull request
url = "https://api.github.com/repos/{0}/{1}/pulls/{2}?access_token={3}".format(
org, repo, pr_num, GITHUB_OAUTH_TOKEN
)
response = requests.get(url)
if response.status_code != 200:
print dedent("""
Could not retrieve info for pull request #{0}.
HTTP status code: {1}
""".format(pr_num, response.status_code)).strip()
exit(1)
# Parse the response as json
try:
pr_data = response.json()
    except (TypeError, ValueError):
print "Could not parse info for pull request #{0}".format(pr_num)
exit(1)
# Retrieve the owner of the repo
try:
return pr_data['head']['repo']['owner']['login']
except KeyError:
print "Could not get repo owner from PR info"
exit(1)
def main():
"""
Exits with code 0 (success) if the PR is from a whitelisted
repo; otherwise, exits with status 1 (failure).
"""
if not GITHUB_OAUTH_TOKEN:
print "No GitHub Oauth token configured."
exit(1)
arg_dict = parse_args(sys.argv)
owner = pr_repo_owner(arg_dict['org'], arg_dict['repo'], arg_dict['pr_num'])
# Check that the owner is whitelisted
whitelist_owners = os.environ.get('GITHUB_OWNER_WHITELIST', '').split(',')
if owner not in whitelist_owners:
print dedent("""
Owner '{0}' is not in the whitelist.
You can update the whitelist by setting the environment variable
`GITHUB_OWNER_WHITELIST` to a comma-separated list of organizations
and users.
""".format(owner)).strip()
exit(1)
else:
print "Owner '{0}' is authorized".format(owner)
exit(0)
if __name__ == "__main__":
main()
# Configure Ruby
export RBENV_ROOT="{{ rbenv_root }}"
export PATH=$RBENV_ROOT/bin/:$PATH
eval "$(rbenv init -)"
# Configure JavaScript coverage
export JSCOVER_JAR=/usr/local/bin/JSCover-all-{{ jscover_version }}.jar
# Set the display to the virtual frame buffer (Xvfb)
export DISPLAY=:1
# Create directory for Jenkins jobs
start on runlevel [2345]
task
script
mkdir -p {{ jenkins_workspace }}
chown {{ jenkins_user }}:{{ jenkins_group }} {{ jenkins_workspace }}
chmod 700 {{ jenkins_workspace }}
end script
# mongodb.conf
dbpath={{ mongo_dir }}
logpath={{ mongo_log_dir }}/mongodb.log
logappend=true
bind_ip = 127.0.0.1
port = 27017
journal=true
# Ubuntu upstart file at /etc/init/mongodb.conf
pre-start script
mkdir -p {{ mongo_dir }}
mkdir -p {{ mongo_log_dir }}
touch {{ mongo_log_dir }}/mongodb.log
chown mongodb:nogroup -R {{ mongo_dir }}
chown mongodb:nogroup -R {{ mongo_log_dir }}
end script
start on runlevel [2345]
stop on runlevel [06]
script
ENABLE_MONGODB="yes"
if [ -f /etc/default/mongodb ]; then . /etc/default/mongodb; fi
if [ "x$ENABLE_MONGODB" = "xyes" ]; then exec start-stop-daemon --start --quiet --chuid mongodb --exec /usr/bin/mongod -- --config /etc/mongodb.conf; fi
end script
# Pre-install Python packages from S3
start on runlevel [2345]
task
script
# Create the directory to hold Python virtualenvs
mkdir -p {{ python_virtualenv }}
# Download .egg files from S3
download_python_pkgs.sh {{ python_pkg_url }} {{ python_download_dir }}
# Give the Jenkins user access
chown {{ jenkins_user }}:{{ jenkins_group }} -R {{ python_download_dir }}
chmod 500 -R {{ python_download_dir }}
chown {{ jenkins_user }}:{{ jenkins_group }} -R {{ python_virtualenv }}
chmod 700 -R {{ python_virtualenv }}
end script
# Launches an ec2 instance and blocks until the instance is up
# adds it to the host group
# Will terminate an instance if one and only one already exists
# with the same name
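# A minimal (hypothetical) sketch of a play that could apply these tasks as a
# role named "launch_ec2"; every value below is illustrative rather than taken
# from this repo:
#
# - name: Launch a sandbox instance
#   hosts: localhost
#   connection: local
#   gather_facts: False
#   roles:
#     - role: launch_ec2
#       region: us-east-1
#       keypair: my-keypair
#       security_group: default
#       instance_type: m1.medium
#       ami: ami-xxxxxxxx
#       instance_tags: '{"environment": "sandbox"}'
#       root_ebs_size: 50
#       dns_zone: example.com
#       dns_name: sandbox
#       name_tag: sandbox-instance
#       terminate_instance: false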
- name: lookup tags for terminating existing instance
local_action:
module: ec2_lookup
region: "{{ region }}"
tags:
Name: "{{ name_tag }}"
register: tag_lookup
when: terminate_instance == true
- debug: msg="Too many results returned, not terminating!"
when: terminate_instance == true and tag_lookup.instance_ids|length > 1
- name: terminating single instance
local_action:
module: ec2
state: 'absent'
region: "{{ region }}"
instance_ids: "{{ tag_lookup.instance_ids }}"
when: terminate_instance == true and tag_lookup.instance_ids|length == 1
- name: launch_ec2 | Launch ec2 instance
local_action:
module: ec2
keypair: "{{ keypair }}"
group: "{{ security_group }}"
instance_type: "{{ instance_type }}"
image: "{{ ami }}"
wait: true
region: "{{ region }}"
instance_tags: "{{instance_tags}}"
root_ebs_size: "{{ root_ebs_size }}"
register: ec2
- name: launch_ec2 | Add DNS name
local_action:
module: route53
overwrite: yes
command: create
zone: "{{ dns_zone }}"
type: CNAME
ttl: 300
record: "{{ dns_name }}.{{ dns_zone }}"
value: "{{ item.public_dns_name }}"
with_items: "{{ ec2.instances }}"
- name: launch_ec2 | Add DNS name studio
local_action:
module: route53
overwrite: yes
command: create
zone: "{{ dns_zone }}"
type: CNAME
ttl: 300
record: "studio.{{ dns_name }}.{{ dns_zone }}"
value: "{{ item.public_dns_name }}"
with_items: "{{ ec2.instances }}"
- name: launch_ec2 | Add DNS name preview
local_action:
module: route53
overwrite: yes
command: create
zone: "{{ dns_zone }}"
type: CNAME
ttl: 300
record: "preview.{{ dns_name }}.{{ dns_zone }}"
value: "{{ item.public_dns_name }}"
with_items: "{{ ec2.instances }}"
- name: launch_ec2 | Add new instance to host group
local_action: >
add_host
hostname={{ item.public_ip }}
groupname=launched
with_items: "{{ ec2.instances }}"
- name: launch_ec2 | Wait for SSH to come up
local_action: >
wait_for
host={{ item.public_dns_name }}
state=started
port=22
delay=60
timeout=320
with_items: "{{ ec2.instances }}"
---
instance_tags: '{"from_ansible": "true"}'
# Launches an ec2 instance and blocks until the instance is up
# adds it to the host group
- name: launch_instance | Launch instance
local_action:
module: ec2
keypair: "{{keypair}}"
group: "{{security_group}}"
instance_type: "{{instance_type}}"
image: "{{image}}"
wait: true
region: "{{region}}"
instance_tags: "{{instance_tags}}"
register: ec2
- name: launch_instance | Add new instance to host group
local_action: add_host hostname=${item.public_ip} groupname=launched
with_items: ${ec2.instances}
- name: launch_instance | Wait for SSH to come up
local_action: wait_for host=${item.public_dns_name} port=22 delay=60 timeout=320 state=started
with_items: ${ec2.instances}
mongo_dbpath: /var/lib/mongodb
mongo_logpath: /var/log/mongodb/mongodb.log
mongo_logappend: true
mongo_version: 2.4.7
mongo_bind_ip: 127.0.0.1
mongo_extra_conf: ''
MONGO_USERS:
- user: cs_comments_service
password: password
database: cs_comments_service
- user: edxapp
password: password
database: edxapp
---
- name: restart mongo
service: name=mongodb state=restarted
--- ---
- name: mongo | install python-pymongo (req for ansible) - name: mongo | install python pymongo for mongo_user ansible module
pip: name=pymongo pip: >
name=pymongo
state=present
version=2.6.3
extra_args="-i {{ PYPI_MIRROR_URL }}"
tags: mongo
- name: mongo | add the mongodb signing key
apt_key: >
id=7F0CEB10
url=http://docs.mongodb.org/10gen-gpg-key.asc
state=present
- name: mongo | add the mongodb repo to the sources list
apt_repository: >
repo='deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen'
state=present
- name: mongo | install mongo server and recommends - name: mongo | install mongo server and recommends
apt: pkg=mongodb-server state=present install_recommends=yes apt: >
pkg=mongodb-10gen={{ mongo_version }}
state=present
install_recommends=yes
update_cache=yes
- name: mongo | stop mongo service - name: mongo | stop mongo service
service: name=mongodb state=stopped service: name=mongodb state=stopped
tags: mongo
- name: mongo | move mongodb to /mnt - name: mongo | move mongodb to {{ storage_base_dir }}
command: mv /var/lib/mongodb /mnt/. creates=/mnt/mongodb command: mv /var/lib/mongodb {{ storage_base_dir}}/. creates={{ storage_base_dir }}/mongodb
tags: mongo
- name: mongo | create mongodb symlink - name: mongo | create mongodb symlink
file: src=/mnt/mongodb dest=/var/lib/mongodb state=link file: src={{ storage_base_dir }}/mongodb dest=/var/lib/mongodb state=link
tags: mongo
- name: mongo | copy configuration template
template: src=mongodb.conf.j2 dest=/etc/mongodb.conf backup=yes
notify: restart mongo
tags: mongo
- name: mongo | start mongo service - name: mongo | start mongo service
service: name=mongodb state=started service: name=mongodb state=started
tags: mongo
- name: mongo | wait for mongo server to start - name: mongo | wait for mongo server to start
wait_for: port=27017 delay=2 wait_for: port=27017 delay=2
tags: mongo
- name: mongo | create a mongodb user - name: mongo | create a mongodb user
mongodb_user: > mongodb_user: >
database=cs_comments_service database={{ item.database }}
name=cs_comments_service name={{ item.user }}
password=password password={{ item.password }}
state=present state=present
with_items: MONGO_USERS
tags: mongo
# Do not edit this file directly, it was generated by ansible
# mongodb.conf
# Where to store the data.
dbpath={{ mongo_dbpath }}
#where to log
logpath={{ mongo_logpath }}
logappend={{ mongo_logappend }}
bind_ip = {{ mongo_bind_ip }}
#port = 27017
# Enable journaling, http://www.mongodb.org/display/DOCS/Journaling
journal=true
# Enables periodic logging of CPU utilization and I/O wait
#cpu = true
# Turn on/off security. Off is currently the default
#noauth = true
#auth = true
# Verbose logging output.
#verbose = true
# Inspect all client data for validity on receipt (useful for
# developing drivers)
#objcheck = true
# Enable db quota management
#quota = true
# Set oplogging level where n is
# 0=off (default)
# 1=W
# 2=R
# 3=both
# 7=W+some reads
#oplog = 0
# Diagnostic/debugging option
#nocursors = true
# Ignore query hints
#nohints = true
# Disable the HTTP interface (Defaults to localhost:27018).
#nohttpinterface = true
# Turns off server-side scripting. This will result in greatly limited
# functionality
#noscripting = true
# Turns off table scans. Any query that would do a table scan fails.
#notablescan = true
# Disable data file preallocation.
#noprealloc = true
# Specify .ns file size for new databases.
# nssize = <size>
# Account token for Mongo monitoring server.
#mms-token = <token>
# Server name for Mongo monitoring server.
#mms-name = <server-name>
# Ping interval for Mongo monitoring server.
#mms-interval = <seconds>
# Replication Options
# in replicated mongo databases, specify here whether this is a slave or master
#slave = true
#source = master.example.com
# Slave only: specify a single database to replicate
#only = master.example.com
# or
#master = true
#source = slave.example.com
# Address of a server to pair with.
#pairwith = <server:port>
# Address of arbiter server.
#arbiter = <server:port>
# Automatically resync if slave data is stale
#autoresync
# Custom size for replication operation log.
#oplogSize = <MB>
# Size limit for in-memory storage of op ids.
#opIdMem = <bytes>
{{ mongo_extra_conf }}
\ No newline at end of file
...@@ -4,6 +4,18 @@ pkgs: ...@@ -4,6 +4,18 @@ pkgs:
nginx: nginx:
state: installed state: installed
nginx_xserver_gunicorn_hosts:
- 127.0.0.1
nginx_xqueue_gunicorn_hosts:
- 127.0.0.1
nginx_ora_gunicorn_hosts:
- 127.0.0.1
nginx_lms_gunicorn_hosts:
- 127.0.0.1
nginx_lms_preview_gunicorn_hosts:
- 127.0.0.1
nginx_cms_gunicorn_hosts:
- 127.0.0.1
nginx_cfg: nginx_cfg:
# - link - turn on # - link - turn on
......
--- ---
- name: nginx | restart nginx - name: nginx | restart nginx
service: name=nginx state=restarted service: name=nginx state=restarted
sudo: True
- name: nginx | reload nginx
service: name=nginx state=reloaded
...@@ -11,16 +11,36 @@ ...@@ -11,16 +11,36 @@
- name: nginx | Server configuration file - name: nginx | Server configuration file
copy: src={{secure_dir}}/files/nginx.conf dest=/etc/nginx/nginx.conf owner=root group=root mode=0644 copy: src={{secure_dir}}/files/nginx.conf dest=/etc/nginx/nginx.conf owner=root group=root mode=0644
when: nginx_conf is defined when: nginx_conf is defined
notify: nginx | restart nginx notify: nginx | reload nginx
tags: tags:
- nginx - nginx
- install - install
# Standard configuration that is common across all roles - name: nginx | Creating common nginx configuration
# Default values for these variables are set in group_vars/all template: src=edx-release.j2 dest=/etc/nginx/sites-available/edx-release owner=root group=root mode=0600
# Note: remove spaces in {{..}}, otherwise you will get a template parsing error. notify: nginx | reload nginx
- include: nginx_site.yml state={{nginx_cfg.sites_enabled.edx_release}} site_name=edx-release tags:
- include: nginx_site.yml state={{nginx_cfg.sites_enabled.basic_auth}} site_name=basic-auth - nginx
- name: nginx | Creating link for common nginx configuration
file: src=/etc/nginx/sites-available/edx-release dest=/etc/nginx/sites-enabled/edx-release state=link owner=root group=root
notify: nginx | reload nginx
tags:
- nginx
- name: nginx | Copying nginx configs for {{ nginx_sites }}
template: src={{ item }}.j2 dest=/etc/nginx/sites-available/{{ item }} owner=root group=root mode=0600
notify: nginx | reload nginx
with_items: nginx_sites
tags:
- nginx
- name: nginx | Creating nginx config links for {{ nginx_sites }}
file: src=/etc/nginx/sites-available/{{ item }} dest=/etc/nginx/sites-enabled/{{ item }} state=link owner=root group=root
notify: nginx | reload nginx
with_items: nginx_sites
tags:
- nginx
- name: nginx | Write out default htpasswd file - name: nginx | Write out default htpasswd file
copy: content={{ nginx_cfg.htpasswd }} dest=/etc/nginx/nginx.htpasswd owner=www-data group=www-data mode=0600 copy: content={{ nginx_cfg.htpasswd }} dest=/etc/nginx/nginx.htpasswd owner=www-data group=www-data mode=0600
...@@ -29,7 +49,7 @@ ...@@ -29,7 +49,7 @@
- update - update
- name: nginx | Create nginx log file location (just in case) - name: nginx | Create nginx log file location (just in case)
file: path={{log_base_dir}}/nginx state=directory owner=syslog group=adm mode=2770 file: path={{log_base_dir}}/nginx state=directory owner=syslog group=syslog mode=2770 recurse=yes
tags: tags:
- nginx - nginx
- logging - logging
...@@ -38,7 +58,7 @@ ...@@ -38,7 +58,7 @@
# removing default link # removing default link
- name: nginx | Removing default nginx config and restart (enabled) - name: nginx | Removing default nginx config and restart (enabled)
file: path=/etc/nginx/sites-enabled/default state=absent file: path=/etc/nginx/sites-enabled/default state=absent
notify: nginx | restart nginx notify: nginx | reload nginx
tags: tags:
- nginx - nginx
- update - update
...@@ -59,13 +79,7 @@ ...@@ -59,13 +79,7 @@
- name: nginx | Removing default nginx config (available) - name: nginx | Removing default nginx config (available)
file: path=/etc/nginx/sites-available/default state=absent file: path=/etc/nginx/sites-available/default state=absent
tags: notify: nginx | reload nginx
- nginx
- update
- name: nginx | Register the fact that nginx has run
command: echo True
register: nginx_role_run
tags: tags:
- nginx - nginx
- update - update
......
# Requires nginx package
---
- name: nginx | Copying nginx config {{ site_name }}
template: src={{ item }} dest=/etc/nginx/sites-available/{{ site_name }} owner=root group=root mode=0600
first_available_file:
- "{{ local_dir }}/nginx/templates/{{ template_subdir }}/{{ site_name }}.j2"
- "{{ local_dir }}/nginx/templates/{{ site_name }}.j2"
# seems like paths in first_available_file must be relative to the playbooks dir
- "roles/nginx/templates/{{ site_name }}.j2"
notify: nginx | restart nginx
when: nginx_role_run is defined
tags:
- nginx
- lms
- cms
- nginx-env
- update
- name: nginx | Creating nginx config link {{ site_name }}
file: src=/etc/nginx/sites-available/{{ site_name }} dest=/etc/nginx/sites-enabled/{{ site_name }} state={{ state }} owner=root group=root
notify: nginx | restart nginx
when: nginx_role_run is defined
tags:
- nginx
- lms
- cms
- nginx-env
- update
server {
listen 80;
location / {
auth_basic "Restricted"; auth_basic "Restricted";
auth_basic_user_file /etc/nginx/nginx.htpasswd; auth_basic_user_file /etc/nginx/nginx.htpasswd;
root {{app_base_dir}}/main_static; root {{ app_base_dir }}/main_static;
index index.html; index index.html;
proxy_set_header X-Forwarded-Proto https; proxy_set_header X-Forwarded-Proto https;
}
}
upstream cms-backend {
# For a TCP configuration:
server 127.0.0.1:8010 fail_timeout=0;
}
upstream cms-backend {
{% for host in nginx_cms_gunicorn_hosts %}
server {{ host }}:{{ edxapp_cms_gunicorn_port }} fail_timeout=0;
{% endfor %}
}
server { server {
# CMS configuration file for nginx, templated by ansible # CMS configuration file for nginx, templated by ansible
listen {{cms_nginx_port}}; listen {{EDXAPP_CMS_NGINX_PORT}};
server_name studio.*; server_name studio.*;
...@@ -26,6 +32,9 @@ server { ...@@ -26,6 +32,9 @@ server {
} }
location / { location / {
{% if EDXAPP_CMS_BASIC_AUTH %}
{% include "basic-auth.j2" %}
{% endif %}
try_files $uri @proxy_to_cms_app; try_files $uri @proxy_to_cms_app;
} }
......
server {
listen {{ devpi_nginx_port }};
server_name {{ devpi_server_name }};
gzip on;
gzip_min_length 2000;
gzip_proxied any;
gzip_types text/html application/json;
location / {
root {{ devpi_data_dir }};
proxy_pass http://localhost:{{ devpi_port }};
proxy_set_header X-outside-url $scheme://$host;
proxy_set_header X-Real-IP $remote_addr;
}
}
server { server {
listen {{nginx_listen_port}}; listen {{ DISCERN_NGINX_PORT }};
server_name localhost; server_name localhost;
set $my_host $http_host; set $my_host $http_host;
...@@ -9,7 +9,7 @@ server { ...@@ -9,7 +9,7 @@ server {
# https://docs.djangoproject.com/en/dev/howto/static-files/#serving-static-files-in-production # https://docs.djangoproject.com/en/dev/howto/static-files/#serving-static-files-in-production
location /static/ { # STATIC_URL location /static/ { # STATIC_URL
alias {{discern_dir}}/staticfiles/; alias {{ discern_dir }}/staticfiles/;
expires 1m; expires 1m;
autoindex on; autoindex on;
} }
...@@ -20,6 +20,9 @@ server { ...@@ -20,6 +20,9 @@ server {
} }
location / { location / {
{% if DISCERN_BASIC_AUTH %}
{% include "basic-auth.j2" %}
{% endif %}
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header X-Forwarded-Port $http_x_forwarded_port; proxy_set_header X-Forwarded-Port $http_x_forwarded_port;
proxy_set_header X-Forwarded-For $http_x_forwarded_for; proxy_set_header X-Forwarded-For $http_x_forwarded_for;
......
server { server {
listen 8099 default_server; listen 8099 default_server;
server_name mitx_release.*;
location = /versions.html { location = /versions.html {
alias {{ nginx_cfg.version_html }}; alias {{ nginx_cfg.version_html }};
} }
......
server {
listen {{ gh_mirror_nginx_port }};
server_name {{ gh_mirror_server_name }};
location ~ (/.*) {
root {{ gh_mirror_data_dir }};
fastcgi_pass unix:/var/run/fcgiwrap.socket;
fastcgi_param SCRIPT_FILENAME /usr/lib/git-core/git-http-backend;
# This won't work if the include is put before
# SCRIPT_FILENAME
include fastcgi_params;
# export all repositories under GIT_PROJECT_ROOT
fastcgi_param GIT_HTTP_EXPORT_ALL "";
fastcgi_param GIT_PROJECT_ROOT {{ gh_mirror_data_dir }};
fastcgi_param PATH_INFO $1;
}
}
upstream lms-backend {
# For a TCP configuration:
server 127.0.0.1:8000 fail_timeout=0;
}
upstream lms-preview-backend {
# For a TCP configuration:
server 127.0.0.1:8020 fail_timeout=0;
}
upstream lms-preview-backend {
{% for host in nginx_lms_preview_gunicorn_hosts %}
server {{ host }}:{{ edxapp_lms_preview_gunicorn_port }} fail_timeout=0;
{% endfor %}
}
server { server {
# LMS-preview configuration file for nginx, templated by ansible # LMS-preview configuration file for nginx, templated by ansible
listen {{lms_preview_nginx_port}}; listen {{EDXAPP_LMS_PREVIEW_NGINX_PORT}};
server_name preview.*; server_name preview.*;
...@@ -23,6 +29,10 @@ server { ...@@ -23,6 +29,10 @@ server {
} }
location / { location / {
{% if EDXAPP_LMS_PREVIEW_BASIC_AUTH %}
{% include "basic-auth.j2" %}
{% endif %}
try_files $uri @proxy_to_lms-preview_app; try_files $uri @proxy_to_lms-preview_app;
} }
......
upstream lms-xml-backend {
# For a TCP configuration:
server 127.0.0.1:8030 fail_timeout=0;
}
server {
# LMS-XML configuration file for nginx, templated by ansible
listen {{lms_xml_nginx_port}};
# CS184 requires uploads of up to 4MB for submitting screenshots.
# CMS requires a larger value for course assets, values provided
# via hiera.
client_max_body_size 4M;
rewrite ^(.*)/favicon.ico$ /static/images/favicon.ico last;
location @proxy_to_lms-preview_app {
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header X-Forwarded-Port $http_x_forwarded_port;
proxy_set_header X-Forwarded-For $http_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_pass http://lms-xml-backend;
}
location @proxy_to_lms-xml_app {
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header X-Forwarded-Port $http_x_forwarded_port;
proxy_set_header X-Forwarded-For $http_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_pass http://lms-xml-backend;
}
location / {
try_files $uri @proxy_to_lms-xml_app;
}
# No basic auth security on the github_service_hook url, so that github can use it for cms
location /github_service_hook {
try_files $uri @proxy_to_lms-xml_app;
}
# No basic auth security on the heartbeat url, so that ELB can use it
location /heartbeat {
try_files $uri @proxy_to_lms-xml_app;
}
# Check security on this
location ~ /static/(?P<file>.*) {
root {{app_base_dir}};
try_files /staticfiles/$file /course_static/$file =404;
# return a 403 for static files that shouldn't be
# in the staticfiles directory
location ~ ^/static/(?:.*)(?:\.xml|\.json|README.TXT) {
return 403;
}
# Set django-pipelined files to maximum cache time
location ~ "/static/(?P<collected>.*\.[0-9a-f]{12}\..*)" {
expires max;
# Without this try_files, files that have been run through
# django-pipeline return 404s
try_files /staticfiles/$collected /course_static/$collected =404;
}
# Expire other static files immediately (there should be very few / none of these)
expires epoch;
}
# Forward to HTTPS if we're an HTTP request...
if ($http_x_forwarded_proto = "http") {
set $do_redirect "true";
}
# Run our actual redirect...
if ($do_redirect = "true") {
rewrite ^ https://$host$request_uri? permanent;
}
# Monitoring support for datadog.
location /nginx_status {
stub_status on;
access_log off;
allow 127.0.0.1/32;
deny all;
}
}
upstream lms-backend {
{% for host in nginx_lms_gunicorn_hosts %}
server {{ host }}:{{ edxapp_lms_gunicorn_port }} fail_timeout=0;
{% endfor %}
}
server { server {
# LMS configuration file for nginx, templated by ansible # LMS configuration file for nginx, templated by ansible
listen {{lms_nginx_port}} default; listen {{EDXAPP_LMS_NGINX_PORT}} default;
access_log {{log_base_dir}}/nginx/access.log; access_log {{log_base_dir}}/nginx/access.log;
error_log {{log_base_dir}}/nginx/error.log error; error_log {{log_base_dir}}/nginx/error.log error;
...@@ -25,6 +31,10 @@ server { ...@@ -25,6 +31,10 @@ server {
} }
location / { location / {
{% if EDXAPP_LMS_BASIC_AUTH %}
{% include "basic-auth.j2" %}
{% endif %}
try_files $uri @proxy_to_lms_app; try_files $uri @proxy_to_lms_app;
} }
......
upstream app_server { upstream app_server {
# For a TCP configuration: {% for host in nginx_ora_gunicorn_hosts %}
server 127.0.0.1:{{ ora_gunicorn_port }} fail_timeout=0; server {{ host }}:{{ ora_gunicorn_port }} fail_timeout=0;
{% endfor %}
} }
server { server {
listen {{ ora_nginx_port}} default_server; listen {{ ORA_NGINX_PORT }} default_server;
location / { location / {
{% if ORA_BASIC_AUTH %}
{% include "basic-auth.j2" %}
{% endif %}
try_files $uri @proxy_to_app; try_files $uri @proxy_to_app;
} }
......
upstream xqueue_app_server { upstream xqueue_app_server {
# For a TCP configuration: {% for host in nginx_xqueue_gunicorn_hosts %}
server 127.0.0.1:{{ xqueue_gunicorn_port }} fail_timeout=0; server {{ host }}:{{ xqueue_gunicorn_port }} fail_timeout=0;
{% endfor %}
} }
server { server {
listen {{ xqueue_nginx_port }} default_server; listen {{ XQUEUE_NGINX_PORT }} default_server;
location / { location / {
{% if XQUEUE_BASIC_AUTH %}
{% include "basic-auth.j2" %}
{% endif %}
try_files $uri @proxy_to_app; try_files $uri @proxy_to_app;
} }
# No basic auth security on the heartbeat url, so that ELB can use it
location /xqueue/status/{
try_files $uri @proxy_to_app;
}
location @proxy_to_app { location @proxy_to_app {
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
......
...@@ -9,14 +9,18 @@ ...@@ -9,14 +9,18 @@
# Please see /usr/share/doc/nginx-doc/examples/ for more detailed examples. # Please see /usr/share/doc/nginx-doc/examples/ for more detailed examples.
## ##
upstream xserver_app_server { upstream xserver_app_server {
# For a TCP configuration: {% for host in nginx_xserver_gunicorn_hosts %}
server 127.0.0.1:{{ xserver_port }} fail_timeout=0; server {{ host }}:{{ xserver_gunicorn_port }} fail_timeout=0;
{% endfor %}
} }
server { server {
listen {{ xserver_nginx_port }} default_server; listen {{ XSERVER_NGINX_PORT }} default_server;
location / { location / {
{% if XSERVER_BASIC_AUTH %}
{% include "basic-auth.j2" %}
{% endif %}
try_files $uri @proxy_to_app; try_files $uri @proxy_to_app;
} }
......
--- ---
notifier_user: "notifier" NOTIFIER_USER: "notifier"
notifier_web_user: "www-user" NOTIFIER_WEB_USER: "www-user"
notifier_home: "/opt/wwc/notifier" NOTIFIER_HOME: "/opt/wwc/notifier"
notifier_venv_dir: "{{ notifier_home }}/virtualenvs/notifier" NOTIFIER_VENV_DIR: "{{ NOTIFIER_HOME }}/virtualenvs/notifier"
notifier_db_dir: "{{ notifier_home }}/db" NOTIFIER_DB_DIR: "{{ NOTIFIER_HOME }}/db"
notifier_source_repo: "git@github.com:edx/notifier.git" NOTIFIER_SOURCE_REPO: "https://github.com/edx/notifier.git"
notifier_code_dir: "{{ notifier_home }}/src" NOTIFIER_CODE_DIR: "{{ NOTIFIER_HOME }}/src"
notifier_version: "master" NOTIFIER_VERSION: "master"
notifier_git_identity_path: "{{ secure_dir }}/files/git-identity" NOTIFIER_GIT_IDENTITY_PATH: "{{ secure_dir }}/files/git-identity"
notifier_requirements_file: "{{ notifier_code_dir }}/requirements.txt" NOTIFIER_REQUIREMENTS_FILE: "{{ NOTIFIER_CODE_DIR }}/requirements.txt"
notifier_log_level: "INFO" NOTIFIER_LOG_LEVEL: "INFO"
notifier_rsyslog_enabled: "yes" NOTIFIER_RSYSLOG_ENABLED: "yes"
notifier_digest_task_interval: "1440" NOTIFIER_DIGEST_TASK_INTERVAL: "1440"
notifier_env: "Development" NOTIFIER_ENV: "Development"
notifier_email_backend: "console" NOTIFIER_EMAIL_BACKEND: "console"
notifier_email_host: "localhost" NOTIFIER_EMAIL_HOST: "localhost"
notifier_email_port: 25 NOTIFIER_EMAIL_PORT: 25
notifier_email_user: "" NOTIFIER_EMAIL_USER: ""
notifier_email_pass: "" NOTIFIER_EMAIL_PASS: ""
notifier_email_use_tls: "False" NOTIFIER_EMAIL_USE_TLS: "False"
notifier_email_domain: "notifications.edx.org" NOTIFIER_EMAIL_DOMAIN: "notifications.edx.org"
notifier_email_rewrite_recipient: "" NOTIFIER_EMAIL_REWRITE_RECIPIENT: ""
notifier_lms_url_base: "http://localhost:8000" NOTIFIER_LMS_URL_BASE: "http://localhost:8000"
notifier_lms_secret_key: "PUT_YOUR_SECRET_KEY_HERE" NOTIFIER_LMS_SECRET_KEY: "PUT_YOUR_SECRET_KEY_HERE"
notifier_comment_service_base: "http://localhost:4567" NOTIFIER_COMMENT_SERVICE_BASE: "http://localhost:4567"
notifier_comment_service_api_key: "PUT_YOUR_API_KEY_HERE" NOTIFIER_COMMENT_SERVICE_API_KEY: "PUT_YOUR_API_KEY_HERE"
notifier_user_service_base: "http://localhost:8000" NOTIFIER_USER_SERVICE_BASE: "http://localhost:8000"
notifier_user_service_api_key: "PUT_YOUR_API_KEY_HERE" NOTIFIER_USER_SERVICE_API_KEY: "PUT_YOUR_API_KEY_HERE"
notifier_user_service_http_auth_user: "guido" NOTIFIER_USER_SERVICE_HTTP_AUTH_USER: "guido"
notifier_user_service_http_auth_pass: "vanrossum" NOTIFIER_USER_SERVICE_HTTP_AUTH_PASS: "vanrossum"
notifier_celery_broker_url: "django://" NOTIFIER_CELERY_BROKER_URL: "django://"
notifier_supervisor_log_dest: "/mnt/logs/supervisor" NOTIFIER_SUPERVISOR_LOG_DEST: "{{ storage_base_dir }}/logs/supervisor"
notifer_requests_ca_bundle: "/etc/ssl/certs/ca-certificates.crt" NOTIFER_REQUESTS_CA_BUNDLE: "/etc/ssl/certs/ca-certificates.crt"
notifier_dd_api_key: "NOT_USED" # data dog NOTIFIER_DD_API_KEY: "NOT_USED" # data dog
notifier_debian_pkgs: notifier_debian_pkgs:
- apparmor-utils - apparmor-utils
...@@ -62,27 +62,27 @@ notifier_debian_pkgs: ...@@ -62,27 +62,27 @@ notifier_debian_pkgs:
# the env variable for the supervisor job definition. # the env variable for the supervisor job definition.
# #
notifier_env_vars: notifier_env_vars:
NOTIFIER_ENV: "{{ notifier_env }}" NOTIFIER_ENV: $NOTIFIER_ENV
NOTIFIER_DB_DIR: "{{ notifier_db_dir }}" NOTIFIER_DB_DIR: $NOTIFIER_DB_DIR
EMAIL_BACKEND: "{{ notifier_email_backend }}" EMAIL_BACKEND: $NOTIFIER_EMAIL_BACKEND
EMAIL_HOST: "{{ notifier_email_host }}" EMAIL_HOST: $NOTIFIER_EMAIL_HOST
EMAIL_PORT: "{{ notifier_email_port }}" EMAIL_PORT: $NOTIFIER_EMAIL_PORT
EMAIL_HOST_USER: "{{ notifier_email_user }}" EMAIL_HOST_USER: $NOTIFIER_EMAIL_USER
EMAIL_HOST_PASSWORD: "{{ notifier_email_pass }}" EMAIL_HOST_PASSWORD: $NOTIFIER_EMAIL_PASS
EMAIL_USE_TLS: "{{ notifier_email_use_tls }}" EMAIL_USE_TLS: $NOTIFIER_EMAIL_USE_TLS
EMAIL_DOMAIN: "{{ notifier_email_domain }}" EMAIL_DOMAIN: $NOTIFIER_EMAIL_DOMAIN
EMAIL_REWRITE_RECIPIENT: "{{ notifier_email_rewrite_recipient }}" EMAIL_REWRITE_RECIPIENT: $NOTIFIER_EMAIL_REWRITE_RECIPIENT
LMS_URL_BASE: "{{ notifier_lms_url_base }}" LMS_URL_BASE: $NOTIFIER_LMS_URL_BASE
SECRET_KEY: "{{ notifier_lms_secret_key }}" SECRET_KEY: $NOTIFIER_LMS_SECRET_KEY
CS_URL_BASE: "{{ notifier_comment_service_base }}" CS_URL_BASE: $NOTIFIER_COMMENT_SERVICE_BASE
CS_API_KEY: "{{ notifier_comment_service_api_key }}" CS_API_KEY: $NOTIFIER_COMMENT_SERVICE_API_KEY
US_URL_BASE: "{{ notifier_user_service_base }}" US_URL_BASE: $NOTIFIER_USER_SERVICE_BASE
US_API_KEY: "{{ notifier_user_service_api_key }}" US_API_KEY: $NOTIFIER_USER_SERVICE_API_KEY
DATADOG_API_KEY: "{{ notifier_dd_api_key }}" DATADOG_API_KEY: $NOTIFIER_DD_API_KEY
LOG_LEVEL: "{{ notifier_log_level }}" LOG_LEVEL: $NOTIFIER_LOG_LEVEL
RSYSLOG_ENABLED: "{{ notifier_rsyslog_enabled }}" RSYSLOG_ENABLED: $NOTIFIER_RSYSLOG_ENABLED
BROKER_URL: "{{ notifier_celery_broker_url }}" BROKER_URL: $NOTIFIER_CELERY_BROKER_URL
REQUESTS_CA_BUNDLE: "{{ notifer_requests_ca_bundle }}" REQUESTS_CA_BUNDLE: $NOTIFER_REQUESTS_CA_BUNDLE
US_HTTP_AUTH_USER: "{{ notifier_user_service_http_auth_user }}" US_HTTP_AUTH_USER: $NOTIFIER_USER_SERVICE_HTTP_AUTH_USER
US_HTTP_AUTH_PASS: "{{ notifier_user_service_http_auth_pass }}" US_HTTP_AUTH_PASS: $NOTIFIER_USER_SERVICE_HTTP_AUTH_PASS
FORUM_DIGEST_TASK_INTERVAL: "{{ notifier_digest_task_interval }}" FORUM_DIGEST_TASK_INTERVAL: $NOTIFIER_DIGEST_TASK_INTERVAL
...@@ -9,8 +9,8 @@ ...@@ -9,8 +9,8 @@
- name: notifier | checkout code - name: notifier | checkout code
git: git:
dest={{ notifier_code_dir }} repo={{ notifier_source_repo }} dest={{ NOTIFIER_CODE_DIR }} repo={{ NOTIFIER_SOURCE_REPO }}
version={{ notifier_version }} version={{ NOTIFIER_VERSION }}
notify: notify:
- notifier | restart notifier - notifier | restart notifier
tags: tags:
...@@ -21,7 +21,7 @@ ...@@ -21,7 +21,7 @@
- name: notifier | source repo group perms - name: notifier | source repo group perms
file: file:
path={{ notifier_code_dir }} mode=2775 state=directory path={{ NOTIFIER_CODE_DIR }} mode=2775 state=directory
tags: tags:
- notifier - notifier
- deploy - deploy
...@@ -30,10 +30,10 @@ ...@@ -30,10 +30,10 @@
- name: notifier | install application requirements - name: notifier | install application requirements
pip: pip:
requirements="{{ notifier_requirements_file }}" requirements="{{ NOTIFIER_REQUIREMENTS_FILE }}"
virtualenv="{{ notifier_venv_dir }}" state=present virtualenv="{{ NOTIFIER_VENV_DIR }}" state=present
sudo: true sudo: true
sudo_user: "{{ notifier_user }}" sudo_user: "{{ NOTIFIER_USER }}"
notify: notify:
- notifier | restart notifier - notifier | restart notifier
tags: tags:
...@@ -44,9 +44,9 @@ ...@@ -44,9 +44,9 @@
- name: notifier | syncdb - name: notifier | syncdb
shell: > shell: >
cd {{ notifier_code_dir }} && {{ notifier_venv_dir }}/bin/python manage.py syncdb cd {{ NOTIFIER_CODE_DIR }} && {{ NOTIFIER_VENV_DIR }}/bin/python manage.py syncdb
sudo: true sudo: true
sudo_user: "{{ notifier_user }}" sudo_user: "{{ NOTIFIER_USER }}"
notify: notify:
- notifier | restart notifier - notifier | restart notifier
tags: tags:
......
...@@ -71,10 +71,10 @@ ...@@ -71,10 +71,10 @@
- update - update
- ubuntu - ubuntu
- name: notifier | create notifier user {{ notifier_user }} - name: notifier | create notifier user {{ NOTIFIER_USER }}
user: user:
name={{ notifier_user }} state=present shell=/bin/bash name={{ NOTIFIER_USER }} state=present shell=/bin/bash
home={{ notifier_home }} createhome=yes home={{ NOTIFIER_HOME }} createhome=yes
tags: tags:
- notifier - notifier
- install - install
...@@ -82,8 +82,8 @@ ...@@ -82,8 +82,8 @@
- name: notifier | setup the notifier env - name: notifier | setup the notifier env
template: template:
src=notifier_env.j2 dest={{ notifier_home }}/notifier_env src=notifier_env.j2 dest={{ NOTIFIER_HOME }}/notifier_env
owner="{{ notifier_user }}" group="{{ notifier_user }}" owner="{{ NOTIFIER_USER }}" group="{{ NOTIFIER_USER }}"
tags: tags:
- notifier - notifier
- install - install
...@@ -92,14 +92,14 @@ ...@@ -92,14 +92,14 @@
- name: notifier | drop a bash_profile - name: notifier | drop a bash_profile
copy: > copy: >
src=../../common/files/bash_profile src=../../common/files/bash_profile
dest={{ notifier_home }}/.bash_profile dest={{ NOTIFIER_HOME }}/.bash_profile
owner={{ notifier_user }} owner={{ NOTIFIER_USER }}
group={{ notifier_user }} group={{ NOTIFIER_USER }}
- name: notifier | ensure .bashrc exists - name: notifier | ensure .bashrc exists
shell: touch {{ notifier_home }}/.bashrc shell: touch {{ NOTIFIER_HOME }}/.bashrc
sudo: true sudo: true
sudo_user: "{{ notifier_user }}" sudo_user: "{{ NOTIFIER_USER }}"
tags: tags:
- notifier - notifier
- install - install
...@@ -107,9 +107,9 @@ ...@@ -107,9 +107,9 @@
- name: notifier | add source of notifier_env to .bashrc - name: notifier | add source of notifier_env to .bashrc
lineinfile: lineinfile:
dest={{ notifier_home }}/.bashrc dest={{ NOTIFIER_HOME }}/.bashrc
regexp='. {{ notifier_home }}/notifier_env' regexp='. {{ NOTIFIER_HOME }}/notifier_env'
line='. {{ notifier_home }}/notifier_env' line='. {{ NOTIFIER_HOME }}/notifier_env'
tags: tags:
- notifier - notifier
- install - install
...@@ -117,9 +117,9 @@ ...@@ -117,9 +117,9 @@
- name: notifier | add source venv to .bashrc - name: notifier | add source venv to .bashrc
lineinfile: lineinfile:
dest={{ notifier_home }}/.bashrc dest={{ NOTIFIER_HOME }}/.bashrc
regexp='. {{ notifier_venv_dir }}/bin/activate' regexp='. {{ NOTIFIER_VENV_DIR }}/bin/activate'
line='. {{ notifier_venv_dir }}/bin/activate' line='. {{ NOTIFIER_VENV_DIR }}/bin/activate'
tags: tags:
- notifier - notifier
- install - install
...@@ -127,7 +127,7 @@ ...@@ -127,7 +127,7 @@
- name: notifier | create notifier DB directory - name: notifier | create notifier DB directory
file: file:
path="{{ notifier_db_dir }}" mode=2775 state=directory path="{{ NOTIFIER_DB_DIR }}" mode=2775 state=directory
tags: tags:
- notifier - notifier
- install - install
...@@ -135,7 +135,7 @@ ...@@ -135,7 +135,7 @@
- name: notifier | create notifier/bin directory - name: notifier | create notifier/bin directory
file: file:
path="{{ notifier_home }}/bin" mode=2775 state=directory path="{{ NOTIFIER_HOME }}/bin" mode=2775 state=directory
tags: tags:
- notifier - notifier
- install - install
...@@ -143,7 +143,7 @@ ...@@ -143,7 +143,7 @@
- name: common | create supervisor log directory - name: common | create supervisor log directory
file: file:
path={{notifier_supervisor_log_dest }} mode=2750 state=directory path={{NOTIFIER_SUPERVISOR_LOG_DEST }} mode=2750 state=directory
tags: tags:
- notifier - notifier
- install - install
......
...@@ -3,11 +3,11 @@ ...@@ -3,11 +3,11 @@
; ;
[program:notifier-scheduler] [program:notifier-scheduler]
command={{ notifier_venv_dir }}/bin/python manage.py scheduler command={{ NOTIFIER_VENV_DIR }}/bin/python manage.py scheduler
process_name=%(program_name)s process_name=%(program_name)s
numprocs=1 numprocs=1
directory={{ notifier_code_dir }} directory={{ NOTIFIER_CODE_DIR }}
umask=022 umask=022
autostart=true autostart=true
autorestart=true autorestart=true
...@@ -18,11 +18,11 @@ stopsignal=TERM ...@@ -18,11 +18,11 @@ stopsignal=TERM
stopwaitsecs=10 stopwaitsecs=10
user=notifier user=notifier
redirect_stderr=false redirect_stderr=false
stdout_logfile={{ notifier_supervisor_log_dest }}/notifier-scheduler-stdout.log stdout_logfile={{ NOTIFIER_SUPERVISOR_LOG_DEST }}/notifier-scheduler-stdout.log
stdout_logfile_maxbytes=1MB stdout_logfile_maxbytes=1MB
stdout_logfile_backups=10 stdout_logfile_backups=10
stdout_capture_maxbytes=1MB stdout_capture_maxbytes=1MB
stderr_logfile={{notifier_supervisor_log_dest }}/notifier-scheduler-stderr.log stderr_logfile={{ NOTIFIER_SUPERVISOR_LOG_DEST }}/notifier-scheduler-stderr.log
stderr_logfile_maxbytes=1MB stderr_logfile_maxbytes=1MB
stderr_logfile_backups=10 stderr_logfile_backups=10
stderr_capture_maxbytes=1MB stderr_capture_maxbytes=1MB
......
...@@ -3,11 +3,11 @@ ...@@ -3,11 +3,11 @@
; ;
[program:notifier-celery-workers] [program:notifier-celery-workers]
command={{ notifier_venv_dir }}/bin/python manage.py celery worker -l {{ notifier_log_level }} command={{ NOTIFIER_VENV_DIR }}/bin/python manage.py celery worker -l {{ NOTIFIER_LOG_LEVEL }}
process_name=%(program_name)s process_name=%(program_name)s
numprocs=1 numprocs=1
directory={{ notifier_code_dir }} directory={{ NOTIFIER_CODE_DIR }}
umask=022 umask=022
autostart=true autostart=true
autorestart=true autorestart=true
...@@ -18,11 +18,11 @@ stopsignal=TERM ...@@ -18,11 +18,11 @@ stopsignal=TERM
stopwaitsecs=10 stopwaitsecs=10
user=notifier user=notifier
redirect_stderr=false redirect_stderr=false
stdout_logfile={{notifier_supervisor_log_dest }}/notifier-celery-workers-stdout.log stdout_logfile={{NOTIFIER_SUPERVISOR_LOG_DEST }}/notifier-celery-workers-stdout.log
stdout_logfile_maxbytes=1MB stdout_logfile_maxbytes=1MB
stdout_logfile_backups=10 stdout_logfile_backups=10
stdout_capture_maxbytes=1MB stdout_capture_maxbytes=1MB
stderr_logfile={{notifier_supervisor_log_dest }}/notifier-celery-workers-stderr.log stderr_logfile={{ NOTIFIER_SUPERVISOR_LOG_DEST }}/notifier-celery-workers-stderr.log
stderr_logfile_maxbytes=1MB stderr_logfile_maxbytes=1MB
stderr_logfile_backups=10 stderr_logfile_backups=10
stderr_capture_maxbytes=1MB stderr_capture_maxbytes=1MB
......
...@@ -3,11 +3,11 @@ ...@@ -3,11 +3,11 @@
; ;
[program:notifier-scheduler] [program:notifier-scheduler]
command={{ notifier_venv_dir }}/bin/python manage.py scheduler command={{ NOTIFIER_VENV_DIR }}/bin/python manage.py scheduler
process_name=%(program_name)s process_name=%(program_name)s
numprocs=1 numprocs=1
directory={{ notifier_code_dir }} directory={{ NOTIFIER_CODE_DIR }}
umask=022 umask=022
autostart=true autostart=true
autorestart=true autorestart=true
...@@ -18,11 +18,11 @@ stopsignal=TERM ...@@ -18,11 +18,11 @@ stopsignal=TERM
stopwaitsecs=10 stopwaitsecs=10
user=notifier user=notifier
redirect_stderr=false redirect_stderr=false
stdout_logfile={{ notifier_supervisor_log_dest }}/notifier-scheduler-stdout.log stdout_logfile={{ NOTIFIER_SUPERVISOR_LOG_DEST }}/notifier-scheduler-stdout.log
stdout_logfile_maxbytes=1MB stdout_logfile_maxbytes=1MB
stdout_logfile_backups=10 stdout_logfile_backups=10
stdout_capture_maxbytes=1MB stdout_capture_maxbytes=1MB
stderr_logfile={{notifier_supervisor_log_dest }}/notifier-scheduler-stderr.log stderr_logfile={{ NOTIFIER_SUPERVISOR_LOG_DEST }}/notifier-scheduler-stderr.log
stderr_logfile_maxbytes=1MB stderr_logfile_maxbytes=1MB
stderr_logfile_backups=10 stderr_logfile_backups=10
stderr_capture_maxbytes=1MB stderr_capture_maxbytes=1MB
......
# vars for the ORA role # vars for the ORA role
--- ---
ORA_NGINX_PORT: 18060
ORA_BASIC_AUTH: False
ora_code_dir: "{{ app_base_dir }}/edx-ora" ora_code_dir: "{{ app_base_dir }}/edx-ora"
# Default nginx listen port # Default nginx listen port
# These should be overridden if you want # These should be overridden if you want
...@@ -9,8 +12,8 @@ ora_user_home: "/opt/edx-ora" ...@@ -9,8 +12,8 @@ ora_user_home: "/opt/edx-ora"
ora_venv_dir: "{{ ora_user_home }}/virtualenvs/{{ ora_user }}" ora_venv_dir: "{{ ora_user_home }}/virtualenvs/{{ ora_user }}"
ease_venv_dir: "{{ ora_venv_dir }}" ease_venv_dir: "{{ ora_venv_dir }}"
ora_gunicorn_workers: 4 ora_gunicorn_workers: 4
ora_nginx_port: 18091 ora_gunicorn_port: 8060
ora_gunicorn_port: 8091 ora_gunicorn_host: 127.0.0.1
# ora_env_config and ora_auth_config # ora_env_config and ora_auth_config
# should be overridden for your # should be overridden for your
......
...@@ -29,6 +29,30 @@ ...@@ -29,6 +29,30 @@
- ora - ora
- deploy - deploy
- name: ora | create ora application config
template: src=ora.env.json.j2 dest={{ora_code_dir}}/../ora.env.json mode=0640 owner={{ ora_user }} group=adm
tags:
- ora
- deploy
- name: ora | create ora auth file
template: src=ora.auth.json.j2 dest={{ora_code_dir}}/../ora.auth.json mode=0640 owner={{ ora_user }} group=adm
tags:
- ora
- deploy
- name: ora | create ora upstart script
template: src=edx-ora.conf.j2 dest=/etc/init/edx-ora.conf mode=0640 owner=root group=adm
tags:
- ora
- deploy
- name: ora | create ora-celery upstart script
template: src=edx-ora-celery.conf.j2 dest=/etc/init/edx-ora-celery.conf mode=0640 owner=root group=adm
tags:
- ora
- deploy
# Do A Checkout # Do A Checkout
- name: ora | git checkout ora repo into $app_base_dir - name: ora | git checkout ora repo into $app_base_dir
git: dest={{ora_code_dir}} repo={{ora_source_repo}} version={{ora_version}} git: dest={{ora_code_dir}} repo={{ora_source_repo}} version={{ora_version}}
......
# requires: # requires:
# - group_vars/all # - group_vars/all
# - common/tasks/main.yml # - common/tasks/main.yml
# - nginx/tasks/main.yml
--- ---
- name: ora | create the ora application user
user: name={{ ora_user }}
tags:
- ora
- name: ora | Create ml_models directory - name: ora | Create ml_models directory
file: path={{ora_code_dir}}/../ml_models state=directory owner={{ ora_user }} group={{ ora_user }} file: path={{ora_code_dir}}/../ml_models state=directory owner={{ ora_user }} group={{ ora_user }}
tags: tags:
...@@ -14,34 +18,11 @@ ...@@ -14,34 +18,11 @@
tags: tags:
- ora - ora
- name: ora | create ora application config
template: src=ora.env.json.j2 dest={{ora_code_dir}}/../ora.env.json mode=0640 owner={{ ora_user }} group=adm
tags:
- ora
- name: ora | create ora auth file
template: src=ora.auth.json.j2 dest={{ora_code_dir}}/../ora.auth.json mode=0640 owner={{ ora_user }} group=adm
tags:
- ora
- name: ora | create ora upstart script
template: src=edx-ora.conf.j2 dest=/etc/init/edx-ora.conf mode=0640 owner=root group=adm
tags:
- ora
- name: ora | create ora-celery upstart script
template: src=edx-ora-celery.conf.j2 dest=/etc/init/edx-ora-celery.conf mode=0640 owner=root group=adm
tags:
- ora
- name: ora | install debian packages that ora needs - name: ora | install debian packages that ora needs
apt: pkg={{item}} state=present apt: pkg={{item}} state=present
with_items: ora_debian_pkgs with_items: ora_debian_pkgs
tags: tags:
- ora - ora
# Install nginx site
- include: ../../nginx/tasks/nginx_site.yml state=link site_name=ora
- include: ease.yml - include: ease.yml
- include: deploy.yml - include: deploy.yml
...@@ -12,6 +12,7 @@ respawn limit 3 30 ...@@ -12,6 +12,7 @@ respawn limit 3 30
env PID=/var/run/gunicorn/edx-ora.pid env PID=/var/run/gunicorn/edx-ora.pid
env WORKERS={{ ora_gunicorn_workers }} env WORKERS={{ ora_gunicorn_workers }}
env PORT={{ ora_gunicorn_port }} env PORT={{ ora_gunicorn_port }}
env ADDRESS={{ ora_gunicorn_host }}
env LANG=en_US.UTF-8 env LANG=en_US.UTF-8
env DJANGO_SETTINGS_MODULE=edx_ora.aws env DJANGO_SETTINGS_MODULE=edx_ora.aws
env SERVICE_VARIANT=ora env SERVICE_VARIANT=ora
...@@ -22,4 +23,4 @@ end script ...@@ -22,4 +23,4 @@ end script
chdir {{ ora_code_dir }} chdir {{ ora_code_dir }}
setuid {{ ora_user }} setuid {{ ora_user }}
exec {{ ora_venv_dir}}/bin/gunicorn --preload -b 127.0.0.1:$PORT -w $WORKERS --timeout=90 --pythonpath={{ ora_code_dir}} edx_ora.wsgi exec {{ ora_venv_dir}}/bin/gunicorn --preload -b $ADDRESS:$PORT -w $WORKERS --timeout=90 --pythonpath={{ ora_code_dir}} edx_ora.wsgi
...@@ -16,14 +16,19 @@ rabbitmq_port: 5672 ...@@ -16,14 +16,19 @@ rabbitmq_port: 5672
rabbitmq_management_port: 15672 rabbitmq_management_port: 15672
rabbitmq_ip: "{{ ansible_default_ipv4.address }}" rabbitmq_ip: "{{ ansible_default_ipv4.address }}"
rabbitmq_auth_config: # Vars meant to be overridden.
erlang_cookie: "CHANGE ME" RABBIT_ERLANG_COOKIE: 'DEFAULT_COOKIE'
admins: RABBIT_USERS:
- name: 'admin' - name: 'admin'
password: 'the example admin password' password: 'the example admin password'
- name: 'edx' - name: 'edx'
password: 'edx' password: 'edx'
# Structure for auth config file.
rabbitmq_auth_config:
erlang_cookie: $RABBIT_ERLANG_COOKIE
admins: $RABBIT_USERS
# If the system is running out of an Amazon Web Services # If the system is running out of an Amazon Web Services
# cloudformation stack, this group name can be used to pull out # cloudformation stack, this group name can be used to pull out
# the name of the stack the rabbit server resides in. # the name of the stack the rabbit server resides in.
......
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Vars for role s3fs
#
s3fs_version: 'master'
s3fs_download_src: 'https://github.com/tongwang/s3fs-c/archive'
s3fs_archive: 'master.zip'
s3fs_unzip_dest: 's3fs-c-master'
s3fs_download_url: '{{ s3fs_download_src }}/{{ s3fs_archive }}'
s3fs_temp_dir: '/var/tmp'
#
# vars are namespace with the module name.
#
s3fs_role_name: s3fs
#
# OS packages
#
s3fs_debian_pkgs:
- make
- g++
- libcurl4-openssl-dev
- libssl-dev
- libxml2-dev
- libfuse-dev
s3fs_redhat_pkgs:
- gcc
- libstdc++-devel
- gcc-c++
- fuse
- fuse-devel
- curl-devel
- libxml2-devel
- openssl-devel
- mailcap
\ No newline at end of file
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
# Tasks for role s3fs
#
# Overview:
#
# Installs a forked version of s3fs from
# https://github.com/tongwang/s3fs-c/archive that supports
# using IAM roles for authentication.
#
# Dependencies:
#
# The role depending upon s3fs is responsible for mounting the
# buckets.
#
# Assuming the following config
#
# my_role_s3fs_mounts:
# - { bucket: "my_bucket", mount_point: "{{ storage_base_dir}}/s3/my_bucket", owner: "root", group: "adm", mode: "0755" }
#
# The role would need to include tasks like the following
#
# - name: my_role | create s3fs mount points
# file:
# path={{ item.mount_point }} owner={{ item.owner }}
# group={{ item.group }} mode={{ item.mode }} state="directory"
# with_items: "{{ my_role_s3fs_mounts }}"
#
# - name: my_role | mount s3 buckets
# mount:
# name={{ item.mount_point }} src={{ item.bucket }} fstype=fuse.s3fs
# opts=use_cache=/tmp,iam_role={{ task_iam_role }},allow_other state=mounted
# with_items: "{{ myrole_s3fs_mounts }}"
#
# Example play:
#
# Required sudo for the installation phase.
#
# - name: Configure instance(s)
# hosts: s3fs_hosts
# sudo: True
# vars_files:
# - "{{ secure_dir }}/vars/common/common.yml"
# - "{{ secure_dir }}/vars/users.yml"
# gather_facts: True
# roles:
# - common
# - s3fs
#
- name: s3fs | install system packages
apt: pkg={{','.join(s3fs_debian_pkgs)}} state=present
tags:
- s3fs
- install
- update
- name: s3fs | fetch package
get_url:
url={{ s3fs_download_url }}
dest={{ s3fs_temp_dir }}
- name: s3fs | unzip package
shell:
/usr/bin/unzip {{ s3fs_archive }}
chdir={{ s3fs_temp_dir }}
creates={{ s3fs_temp_dir }}/{{ s3fs_unzip_dest }}/configure
- name: s3fs | configure
shell:
./configure
chdir={{ s3fs_temp_dir }}/{{ s3fs_unzip_dest }}
creates={{ s3fs_temp_dir }}/{{ s3fs_unzip_dest }}/config.status
- name: s3fs | make
shell:
/usr/bin/make
chdir={{ s3fs_temp_dir }}/{{ s3fs_unzip_dest }}
creates={{ s3fs_temp_dir }}/{{ s3fs_unzip_dest }}/src/s3fs
- name: s3fs | make install
shell:
/usr/bin/make install
chdir={{ s3fs_temp_dir }}/{{ s3fs_unzip_dest }}
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Vars for role splunk
#
#
# vars are namespace with the module name.
#
splunk_role_name: 'splunk'
SPLUNKFORWARDER_SERVER: 'localhost:9997'
SPLUNKFORWARDER_PACKAGE_LOCATION: !!null
SPLUNKFORWARDER_DEB: !!null
SPLUNKFORWARDER_PASSWORD: !!null
SPLUNKFORWARDER_LOG_ITEMS:
- directory: '{{log_base_dir}}'
recursive: true
index: '{{ENV_TYPE}}-{{ENV_NAME}}'
sourcetype: 'edx'
- directory: '/var/log'
recursive: true
index: '{{ENV_TYPE}}-{{ENV_NAME}}'
sourcetype: 'syslog'
- directory: '{{log_base_dir}}/nginx'
recursive: true
index: '{{ENV_TYPE}}-{{ENV_NAME}}'
sourcetype: 'nginx'
#
# OS packages
#
splunk_debian_pkgs:
- gdebi
splunk_redhat_pkgs: []
splunkforwarder_output_dir: '/opt/splunkforwarder/'
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role splunk
#
# Overview:
#
#
# Restart Splunk
- name: splunkforwarder | restart splunkforwarder
service: name=splunk state=restarted
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role splunk
#
# Overview:
#
#
# Dependencies:
#
#
# Example play:
#
#
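# A hypothetical example play; the role name and all values below are
# illustrative, not taken from this repo:
#
# - name: Configure splunk forwarders
#   hosts: splunkforwarder_hosts
#   sudo: True
#   gather_facts: True
#   vars:
#     SPLUNKFORWARDER_PACKAGE_LOCATION: 'http://packages.example.com/splunk/'
#     SPLUNKFORWARDER_DEB: 'splunkforwarder-example-linux-amd64.deb'
#     SPLUNKFORWARDER_PASSWORD: 'example-admin-password'
#     SPLUNKFORWARDER_SERVER: 'splunk.example.com:9997'
#   roles:
#     - common
#     - splunkforwarder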
# Install Splunk Forwarder
- name: splunkforwarder | install splunkforwarder-specific system packages
apt: pkg={{','.join(splunk_debian_pkgs)}} state=present
tags:
- splunk
- install
- update
- name: splunkforwarder | download the splunk deb
get_url: >
dest="/tmp/{{SPLUNKFORWARDER_DEB}}"
url="{{SPLUNKFORWARDER_PACKAGE_LOCATION}}{{SPLUNKFORWARDER_DEB}}"
register: download_deb
- name: splunkforwarder | install splunk forwarder
shell: gdebi -nq /tmp/{{SPLUNKFORWARDER_DEB}}
when: download_deb.changed
# Create splunk user
- name: splunkforwarder | create splunk user
user: name=splunk group=splunk createhome=no state=present append=yes groups=syslog
when: download_deb.changed
# Need to start splunk manually so that it can create various files
# and directories that aren't created till the first run and are needed
# to run some of the below commands.
- name: splunkforwarder | start splunk manually
shell: >
{{splunkforwarder_output_dir}}/bin/splunk start --accept-license --answer-yes --no-prompt
creates={{splunkforwarder_output_dir}}/var/lib/splunk
when: download_deb.changed
register: started_manually
- name: splunkforwarder | stop splunk manually
shell: >
{{splunkforwarder_output_dir}}/bin/splunk stop --accept-license --answer-yes --no-prompt
when: download_deb.changed and started_manually.changed
- name: splunkforwarder | create boot script
shell: >
{{splunkforwarder_output_dir}}/bin/splunk enable boot-start -user splunk --accept-license --answer-yes --no-prompt
creates=/etc/init.d/splunk
register: create_boot_script
when: download_deb.changed
notify: splunkforwarder | restart splunkforwarder
# Update credentials
- name: splunkforwarder | update admin password
shell: "{{splunkforwarder_output_dir}}/bin/splunk edit user admin -password {{SPLUNKFORWARDER_PASSWORD}} -auth admin:changeme --accept-license --answer-yes --no-prompt"
when: download_deb.changed
notify: splunkforwarder | restart splunkforwarder
- name: splunkforwarder | add chkconfig to init script
shell: 'sed -i -e "s/\/bin\/sh/\/bin\/sh\n# chkconfig: 235 98 55/" /etc/init.d/splunk'
when: download_deb.changed and create_boot_script.changed
notify: splunkforwarder | restart splunkforwarder
# Ensure permissions on splunk content
- name: splunkforwarder | ensure splunk folder permissions
file: path={{splunkforwarder_output_dir}} state=directory recurse=yes owner=splunk group=splunk
when: download_deb.changed
notify: splunkforwarder | restart splunkforwarder
# Drop template files.
- name: splunkforwarder | drop input configuration
template:
src=opt/splunkforwarder/etc/system/local/inputs.conf.j2
dest=/opt/splunkforwarder/etc/system/local/inputs.conf
owner=splunk
group=splunk
mode=644
notify: splunkforwarder | restart splunkforwarder
- name: splunkforwarder | create outputs config file
template:
src=opt/splunkforwarder/etc/system/local/outputs.conf.j2
dest=/opt/splunkforwarder/etc/system/local/outputs.conf
owner=splunk
group=splunk
mode=644
notify: splunkforwarder | restart splunkforwarder
[default]
host = {{ansible_hostname}}
{% for loggable in SPLUNKFORWARDER_LOG_ITEMS %}
[monitor://{{loggable.directory}}]
recursive = {{loggable.recursive|default(false)}}
{% if loggable.sourcetype is defined %}
sourcetype = {{loggable.sourcetype}}
{% endif %}
{% if loggable.index is defined %}
index = {{loggable.index}}
{% endif %}
{% endfor %}
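# Illustrative rendering of a single monitor stanza (values are examples
# only), assuming an item such as
# { directory: '/var/log', recursive: true, index: 'stage-edx', sourcetype: 'syslog' }:
#
# [monitor:///var/log]
# recursive = True
# sourcetype = syslog
# index = stage-edx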
[tcpout]
defaultGroup = default_output_server
[tcpout:default_output_server]
server = {{SPLUNKFORWARDER_SERVER}}
[tcpout-server://{{SPLUNKFORWARDER_SERVER}}]
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Defaults for role supervisor
#
---
supervisor_log_dir: /var/log/supervisor
supervisor_cfg: /etc/supervisord.conf
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role supervisor
#
# Overview:
# Parameterized role for supervisord
# Supervisor templates must exist in the
# templates/ dir for each server
#
# Dependencies:
# - common
#
# Example play:
# roles:
# - common
# - role: supervisor
# supervisor_servers:
# - ...
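# A fuller (hypothetical) example; "devpi" is illustrative and assumes a
# devpi.conf.j2 template exists under this role's templates/ directory:
#
# - hosts: devpi_hosts
#   sudo: True
#   roles:
#     - common
#     - role: supervisor
#       supervisor_servers:
#         - devpi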
---
- fail: msg="supervisor_servers is a required parameter for this role"
when: supervisor_servers is not defined
- name: supervisor | install supervisor at the system level
pip: name=supervisor state=present
- name: supervisor | create supervisor directories
file: name={{ item }} state=directory
with_items:
- /etc/supervisor
- /etc/supervisor/conf.d
- "{{ supervisor_log_dir }}"
- name: supervisor | create supervisor upstart job
template: src=supervisor-upstart.conf.j2 dest=/etc/init/supervisor.conf
- name: supervisor | create supervisor master config
template: src=supervisord.conf.j2 dest={{ supervisor_cfg }}
- name: supervisor | create supervisor configs
template: src={{ item }}.conf.j2 dest=/etc/supervisor/conf.d/{{ item }}.conf
with_items: supervisor_servers
- name: supervisor | ensure supervisor is started
service: name=supervisor state=started
[program:devpi-server]
command={{ devpi_venv_dir }}/bin/devpi-server --port {{ devpi_port }} --serverdir {{ devpi_data_dir }}
priority=999
startsecs = 5
redirect_stderr = True
autostart=True
user={{ devpi_user }}
description "supervisord"
start on runlevel [2345]
stop on runlevel [!2345]
respawn
exec /usr/local/bin/supervisord --nodaemon --configuration {{ supervisor_cfg }}
; supervisor config file
[unix_http_server]
file=/var/run/supervisor.sock ; (the path to the socket file)
chmod=0700 ; socket file mode (default 0700)
[supervisord]
logfile=/var/log/supervisor/supervisord.log ; (main log file;default $CWD/supervisord.log)
pidfile=/var/run/supervisord.pid ; (supervisord pidfile;default supervisord.pid)
childlogdir=/var/log/supervisor ; ('AUTO' child log dir, default $TEMP)
; the below section must remain in the config file for RPC
; (supervisorctl/web interface) to work, additional interfaces may be
; added by defining them in separate rpcinterface: sections
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[supervisorctl]
serverurl=unix:///var/run/supervisor.sock ; use a unix:// URL for a unix socket
; The [include] section can just contain the "files" setting. This
; setting can list multiple files (separated by whitespace or
; newlines). It can also contain wildcards. The filenames are
; interpreted as relative to this file. Included files *cannot*
; include files themselves.
[include]
files = /etc/supervisor/conf.d/*.conf
# variables common to the xqueue role, automatically loaded # variables common to the xqueue role, automatically loaded
# when the role is included # when the role is included
--- ---
XQUEUE_NGINX_PORT: 18040
XQUEUE_BASIC_AUTH: False
xqueue_code_dir: "{{ app_base_dir }}/xqueue" xqueue_code_dir: "{{ app_base_dir }}/xqueue"
# Default nginx listen port # Default nginx listen port
# These should be overrided if you want # These should be overrided if you want
# to serve all content on port 80 # to serve all content on port 80
xqueue_code_dir: "{{ app_base_dir }}/xqueue" xqueue_code_dir: "{{ app_base_dir }}/xqueue"
xqueue_nginx_port: 18040
xqueue_gunicorn_port: 8040 xqueue_gunicorn_port: 8040
xqueue_gunicorn_host: 127.0.0.1
xqueue_user: "xqueue" xqueue_user: "xqueue"
xqueue_user_home: "/opt/xqueue" xqueue_user_home: "/opt/xqueue"
xqueue_venv_dir: "{{ xqueue_user_home }}/virtualenvs/{{ xqueue_user }}" xqueue_venv_dir: "{{ xqueue_user_home }}/virtualenvs/{{ xqueue_user }}"
xqueue_env_config: XQUEUE_QUEUES:
'XQUEUES':
# push queue # push queue
'edX-DemoX': 'http://localhost:18050' 'edX-DemoX': 'http://localhost:18050'
# pull queues # pull queues
'test-pull': !!null 'test-pull': !!null
'certificates': !!null 'certificates': !!null
'open-ended': !!null 'open-ended': !!null
'XQUEUE_WORKERS_PER_QUEUE': 12 XQUEUE_LOGGING_ENV: sandbox
'LOGGING_ENV' : 'sandbox' XQUEUE_SYSLOG_SERVER: 'localhost'
'LOG_DIR' : '/logs' XQUEUE_S3_BUCKET : 'sandbox-bucket'
'SYSLOG_SERVER' : 'syslog.a.m.i4x.org' XQUEUE_S3_PATH_PREFIX: 'sandbox-xqueue'
'RABBIT_HOST' : 'localhost' XQUEUE_LOCAL_LOGLEVEL: 'INFO'
'S3_BUCKET_PREFIX' : 'sandbox-bucket' XQUEUE_AWS_ACCESS_KEY_ID : ''
XQUEUE_AWS_SECRET_ACCESS_KEY : ''
XQUEUE_BASIC_AUTH: ['edx', 'edx']
XQUEUE_DJANGO_USER: 'lms'
XQUEUE_DJANGO_PASSWORD: 'password'
XQUEUE_RABBITMQ_USER: 'edx'
XQUEUE_RABBITMQ_PASS: 'edx'
XQUEUE_RABBITMQ_HOSTNAME: 'localhost'
XQUEUE_MYSQL_DB_NAME: 'xqueue'
XQUEUE_MYSQL_USER: 'root'
XQUEUE_MYSQL_PASSWORD: ''
XQUEUE_MYSQL_HOST: 'localhost'
XQUEUE_MYSQL_PORT: '3306'
xqueue_env_config:
XQUEUES: $XQUEUE_QUEUES
XQUEUE_WORKERS_PER_QUEUE: 12
LOGGING_ENV: $XQUEUE_LOGGING_ENV
SYSLOG_SERVER: $XQUEUE_SYSLOG_SERVER
LOG_DIR: "{{ storage_base_dir }}/logs/xqueue"
RABBIT_HOST: $XQUEUE_RABBITMQ_HOSTNAME
S3_BUCKET: $XQUEUE_S3_BUCKET
S3_PATH_PREFIX: $XQUEUE_S3_PATH_PREFIX
LOCAL_LOGLEVEL: $XQUEUE_LOCAL_LOGLEVEL
xqueue_auth_config: xqueue_auth_config:
'AWS_ACCESS_KEY_ID' : '' AWS_ACCESS_KEY_ID: $XQUEUE_AWS_ACCESS_KEY_ID
'AWS_SECRET_ACCESS_KEY' : '' AWS_SECRET_ACCESS_KEY: $XQUEUE_AWS_SECRET_ACCESS_KEY
'REQUESTS_BASIC_AUTH': ['edx', 'edx'] REQUESTS_BASIC_AUTH: $XQUEUE_BASIC_AUTH
'USERS': {'lms': 'password'} USERS: { '{{XQUEUE_DJANGO_USER}}' : $XQUEUE_DJANGO_PASSWORD }
'RABBITMQ_USER': 'edx' DATABASES:
'RABBITMQ_PASS': 'edx' default:
'DATABASES': ENGINE: "django.db.backends.mysql"
'default': { 'ENGINE': 'django.db.backends.mysql', 'NAME': 'xqueue', 'USER': 'root', 'PASSWORD': '', 'HOST': 'localhost', 'PORT': '3306' } NAME: $XQUEUE_MYSQL_DB_NAME
USER: $XQUEUE_MYSQL_USER
PASSWORD: $XQUEUE_MYSQL_PASSWORD
HOST: $XQUEUE_MYSQL_HOST
PORT: $XQUEUE_MYSQL_PORT
RABBITMQ_USER: $XQUEUE_RABBITMQ_USER
RABBITMQ_PASS: $XQUEUE_RABBITMQ_PASS
xqueue_create_db: 'yes'
xqueue_source_repo: https://github.com/edx/xqueue.git
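For orientation, a minimal sketch of roughly what the rendered xqueue.env.json contains once the $XQUEUE_* references above are resolved with these defaults (the LOG_DIR value depends on storage_base_dir and is illustrative only):

import json

env_config = {
    'XQUEUES': {
        'edX-DemoX': 'http://localhost:18050',  # push queue
        'test-pull': None,                      # pull queues resolve to null
        'certificates': None,
        'open-ended': None,
    },
    'XQUEUE_WORKERS_PER_QUEUE': 12,
    'LOGGING_ENV': 'sandbox',
    'SYSLOG_SERVER': 'localhost',
    'LOG_DIR': '/mnt/logs/xqueue',  # "{{ storage_base_dir }}/logs/xqueue"; exact path is an assumption
    'RABBIT_HOST': 'localhost',
    'S3_BUCKET': 'sandbox-bucket',
    'S3_PATH_PREFIX': 'sandbox-xqueue',
    'LOCAL_LOGLEVEL': 'INFO',
}
print json.dumps(env_config, indent=2, sort_keys=True)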
...@@ -50,76 +82,22 @@ xqueue_post_requirements_file: "{{ xqueue_code_dir }}/requirements.txt"
# copied from the LMS role for now since there is a lot
# of overlap
xqueue_debian_pkgs:
# for compiling the virtualenv
# (only needed if wheel files aren't available)
- build-essential
- s3cmd
- pkg-config
- graphviz-dev
- graphviz
- libmysqlclient-dev
# apparmor
- apparmor-utils
# misc
- curl
- ipython
- npm
- ntp
# for shapely
- libgeos-dev
# Needed to be able to create the xqueue mysqldb.
- python-mysqldb
...@@ -11,8 +11,20 @@
- xqueue
- deploy
- name: xqueue | create xqueue application config
template: src=xqueue.env.json.j2 dest={{app_base_dir}}/xqueue.env.json mode=0640 owner={{ xqueue_user }} group=adm
tags:
- xqueue
- deploy
- name: xqueue | create xqueue auth file
template: src=xqueue.auth.json.j2 dest={{app_base_dir}}/xqueue.auth.json mode=0640 owner={{ xqueue_user }} group=adm
tags:
- xqueue
- deploy
# Do A Checkout
- name: xqueue | git checkout xqueue repo into {{app_base_dir}}
git: dest={{xqueue_code_dir}} repo={{xqueue_source_repo}} version={{xqueue_version}}
tags:
- xqueue
......
# requires:
# - group_vars/all
# - common/tasks/main.yml
# - nginx/tasks/main.yml
---
# Check out xqueue repo to {{xqueue_code_dir}}
- name: xqueue | install git and its recommends
...@@ -17,6 +16,34 @@
tags:
- xqueue
- name: xqueue | create {{ xqueue_user_home }}
# workaround for the case where the parent
# directory doesn't exist
file: >
path={{ xqueue_user_home }}
state=directory
- name: xqueue | create xqueue user {{ xqueue_user }}
user: >
name={{ xqueue_user }}
state=present
shell=/bin/bash
home={{ xqueue_user_home }}
createhome=yes
tags:
- xqueue
- update
- name: xqueue | ensure homedir permissions {{ xqueue_user_home }}
# workaround for the case where the parent
# directory doesn't exist
file: >
path={{ xqueue_user_home }}
owner={{ xqueue_user }}
group={{ xqueue_user }}
state=directory
recurse=yes
- name: xqueue | create xqueue db
mysql_db: >
name={{xqueue_auth_config.DATABASES.default.NAME}}
...@@ -27,37 +54,19 @@
encoding=utf8
when: xqueue_create_db is defined and xqueue_create_db|lower == "yes"
- name: xqueue | create xqueue application config
template: src=xqueue.env.json.j2 dest={{app_base_dir}}/xqueue.env.json mode=0640 owner={{ xqueue_user }} group=adm
notify:
- xqueue | restart xqueue
- xqueue | restart xqueue consumer
tags:
- xqueue
- name: xqueue | create xqueue auth file
template: src=xqueue.auth.json.j2 dest={{app_base_dir}}/xqueue.auth.json mode=0640 owner={{ xqueue_user }} group=adm
notify:
- xqueue | restart xqueue
- xqueue | restart xqueue consumer
tags:
- xqueue
- name: xqueue | creating xqueue upstart script
template: src=xqueue.conf.j2 dest=/etc/init/xqueue.conf mode=0640 owner=root group=adm
notify:
- xqueue | restart xqueue
tags:
- xqueue
- deploy
- name: xqueue | create xqueue consumer upstart script
template: src=xqueue_consumer.conf.j2 dest=/etc/init/xqueue_consumer.conf mode=0640 owner=root group=adm
notify:
- xqueue | restart xqueue consumer
tags:
- xqueue
- deploy
# Install nginx site
- include: ../../nginx/tasks/nginx_site.yml state=link site_name=xqueue
- include: deploy.yml
...@@ -13,6 +13,7 @@ env WORKERS={{ ansible_processor|length * 2 }}
env WORKERS=2
{% endif %}
env PORT={{ xqueue_gunicorn_port }}
env ADDRESS={{ xqueue_gunicorn_host }}
env LANG=en_US.UTF-8
env DJANGO_SETTINGS_MODULE=xqueue.aws_settings
env SERVICE_VARIANT="xqueue"
...@@ -21,4 +22,4 @@ env SERVICE_VARIANT="xqueue"
chdir {{ xqueue_code_dir }}
setuid {{ xqueue_user }}
exec {{ xqueue_venv_dir }}/bin/gunicorn --preload -b $ADDRESS:$PORT -w $WORKERS --timeout=300 --pythonpath={{ xqueue_code_dir }} xqueue.wsgi
# Variables for the xserver.
---
XSERVER_NGINX_PORT: 18050
XSERVER_BASIC_AUTH: False
XSERVER_RUN_URL: ''
XSERVER_GRADER_ROOT: ''
XSERVER_LOGGING_ENV: 'sandbox'
XSERVER_SYSLOG_SERVER: ''
# by default do not check out the content
# repo needed on the xserver for grading
# python submissions, TODO: replace with an open
# source repo
XSERVER_GRADER_CHECKOUT: False
xserver_env_config:
RUN_URL: $XSERVER_RUN_URL
GRADER_ROOT: $XSERVER_GRADER_ROOT
LOGGING_ENV: $XSERVER_LOGGING_ENV
LOG_DIR: "{{ storage_base_dir }}/logs/xserver"
SYSLOG_SERVER: $XSERVER_SYSLOG_SERVER
SANDBOX_PYTHON: '/opt/edx_apparmor_sandbox/bin/python'
xserver_git_identity_path: "{{ secure_dir }}/files/git-identity"
xserver_code_dir: "{{ app_base_dir }}/xserver"
xserver_source_repo: "git://github.com/edx/xserver.git"
...@@ -17,8 +37,8 @@ xserver_sandbox_venv_dir: "{{ venv_dir }}_apparmor_sandbox"
xserver_requirements_file: "{{ xserver_code_dir }}/requirements.txt"
xserver_gunicorn_port: 8050
xserver_gunicorn_host: 'localhost'
xserver_debian_pkgs:
- build-essential
......
- name: xserver | restart nginx
service: name=nginx state=restarted
...@@ -32,7 +32,7 @@
git: dest={{xserver_grader_dir}} repo={{xserver_grader_source}} version={{xserver_grader_version}}
environment:
GIT_SSH: /tmp/git_ssh.sh
when: XSERVER_GRADER_CHECKOUT
tags:
- deploy
......
...@@ -53,6 +53,4 @@
- name: xserver | upload ssh script
copy: src=git_ssh.sh dest=/tmp/git_ssh.sh force=yes owner=root group=adm mode=750
- include: nginx.yml
- include: deploy.yml
- name: xserver | add xserver nginx configuration
template: src=simple-proxy.j2 dest=/etc/nginx/sites-available/simple-proxy
notify:
- xserver | restart nginx
- name: xserver | enable xserver nginx configuration
file: src=/etc/nginx/sites-available/simple-proxy dest=/etc/nginx/sites-enabled/simple-proxy state=link
notify:
- xserver | restart nginx
...@@ -13,7 +13,8 @@ env PID=/var/tmp/xserver.pid
env NEW_RELIC_CONFIG_FILE={{ app_base_dir }}/newrelic.ini
env NEWRELIC={{ venv_dir }}/bin/newrelic-admin
env WORKERS={{ ansible_processor|length }}
env PORT={{ xserver_gunicorn_port }}
env ADDRESS={{ xserver_gunicorn_host }}
env LANG=en_US.UTF-8
env DJANGO_SETTINGS_MODULE=xserver_aws_settings
env SERVICE_VARIANT="xserver"
...@@ -22,5 +23,5 @@ env SERVICE_VARIANT="xserver"
chdir {{ xserver_code_dir }}
setuid www-data
exec {{ venv_dir }}/bin/gunicorn --preload -b $ADDRESS:$PORT -w $WORKERS --timeout=30 --pythonpath={{ xserver_code_dir }} pyxserver_wsgi:application
#!/usr/bin/env python
"""
Generate a GitHub OAuth token with a particular
set of permissions.

Usage:
    github_oauth_token.py USERNAME NOTE [SCOPE ...]

Example:
    github_oauth_token.py jenkins_user repo:status public_repo

This will prompt the user for the password.
"""
import sys
import requests
import json
import getpass
from textwrap import dedent

USAGE = "Usage: {0} USERNAME NOTE [SCOPE ...]"


def parse_args(arg_list):
    """
    Return a dict of the command line arguments.
    Prints an error message and exits if the arguments are invalid.
    """
    if len(arg_list) < 4:
        print USAGE.format(arg_list[0])
        exit(1)

    # Prompt for the password
    password = getpass.getpass()

    return {
        'username': arg_list[1],
        'password': password,
        'note': arg_list[2],
        'scopes': arg_list[3:],
    }


def get_oauth_token(username, password, scopes, note):
    """
    Create a GitHub OAuth token with the given scopes.
    If unsuccessful, print an error message and exit.

    Returns a tuple `(token, scopes)`
    """
    params = {'scopes': scopes, 'note': note}

    response = requests.post(
        'https://api.github.com/authorizations',
        data=json.dumps(params),
        auth=(username, password)
    )

    if response.status_code != 201:
        print dedent("""
            Could not create OAuth token.
            HTTP status code: {0}
            Content: {1}
        """.format(response.status_code, response.text)).strip()
        exit(1)

    try:
        token_data = response.json()
        return token_data['token'], token_data['scopes']

    except TypeError:
        print "Could not parse response data."
        exit(1)

    except KeyError:
        print "Could not retrieve data from response."
        exit(1)


def main():
    arg_dict = parse_args(sys.argv)
    token, scopes = get_oauth_token(
        arg_dict['username'], arg_dict['password'],
        arg_dict['scopes'], arg_dict['note']
    )

    print "Token: {0}".format(token)
    print "Scopes: {0}".format(", ".join(scopes))


if __name__ == "__main__":
    main()
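A minimal usage sketch (all values are placeholders): calling the helper above directly instead of via the command line, assuming this file is importable as github_oauth_token and the credentials are valid:

import getpass
from github_oauth_token import get_oauth_token

token, scopes = get_oauth_token(
    username='jenkins_user',             # placeholder account
    password=getpass.getpass(),
    scopes=['repo:status', 'public_repo'],
    note='jenkins-deploy-token',         # placeholder note shown on github.com
)
print "Token: {0}".format(token)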
...@@ -9,8 +9,15 @@
- "group_vars/all"
roles:
- common
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- xserver
- xqueue
- edxlocal
- mongo
- edxapp
- { role: 'rabbitmq', rabbitmq_ip: '127.0.0.1' }
- { role: 'edxapp', celery_worker: True }
......
...@@ -9,6 +9,11 @@
- "group_vars/all"
roles:
- common
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- edxlocal
- mongo
- { role: 'edxapp', EDXAPP_LMS_NGINX_PORT: 18030, EDXAPP_LMS_XML_NGINX_PORT: 80 }
...@@ -9,8 +9,13 @@
- "group_vars/all"
roles:
- common
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- edxlocal
- mongo
- edxapp
- oraclejdk
- elasticsearch
......
...@@ -14,7 +14,6 @@ ipython==0.13.1
jenkinsapi==0.1.11
lxml==3.1beta1
newrelic==1.10.2.38
paramiko==1.9.0
path.py==3.0.1
pingdom==0.2.0
pycrypto==2.6
...@@ -29,3 +28,4 @@ six==1.2.0
-e git+https://github.com/bos/statprof.py.git@a17f7923b102c9039763583be9e377e8422e8f5f#egg=statprof-dev
ujson==1.30
wsgiref==0.1.2
ansible==1.3.2
#!/usr/bin/env bash
# Ansible configuration/deploy wrapper script that
# assumes the following parameters set
# as environment variables
#
# - dns_name - REQUIRED
# - dns_zone
# - edxapp_version
# - forum_version
# - xqueue_version
# - xserver_version
# - ora_version
# - ease_version
# - deploy
# - keypair
export BOTO_CONFIG=/var/lib/jenkins/${aws_account}.boto
if [[ -z $WORKSPACE ]]; then
dir=$(dirname $0)
source "$dir/ascii-convert.sh"
else
source "$WORKSPACE/util/jenkins/ascii-convert.sh"
fi
if [[ -z $dns_name ]]; then
echo "The hostname is required to know what machine to configure"
exit 1
fi
if [[ ! -f $BOTO_CONFIG ]]; then
echo "AWS credentials not found for $aws_account"
exit 1
fi
extra_vars="/var/tmp/extra-vars-$$.yml"
cat << EOF > $extra_vars
---
EDXAPP_PREVIEW_LMS_BASE: preview.${dns_name}.${dns_zone}
EDXAPP_LMS_BASE: ${dns_name}.${dns_zone}
EDXAPP_LMS_PREVIEW_NGINX_PORT: 80
EDXAPP_CMS_NGINX_PORT: 80
XSERVER_GRADER_CHECKOUT: False
c_skip_grader_checkout: True
edx_platform_commit: $edxapp_version
forum_version: $forum_version
xqueue_version: $xqueue_version
xserver_version: $xserver_version
ora_version: $ora_version
ease_version: $ease_version
ansible_ssh_private_key_file: /var/lib/jenkins/${keypair}.pem
EOF
cat $extra_vars
cd playbooks/edx-east
./ec2.py --refresh
ansible-playbook -vvv $playbook -i ./ec2.py -e "@$extra_vars" --user ubuntu --tags deploy
#!/usr/bin/env bash
# Ansible deployment wrapper script that
# assumes the following parameters set
# as environment variables
#
# {edxapp,forum,xqueue,xserver,ora} - true/false
# {edxapp,forum,xqueue,xserver,ora}_version - commit or tag
export BOTO_CONFIG=/var/lib/jenkins/${aws_account}.boto
if [[ -z $WORKSPACE ]]; then
dir=$(dirname $0)
source "$dir/ascii-convert.sh"
else
source "$WORKSPACE/util/jenkins/ascii-convert.sh"
fi
if [[ -z $github_username ]]; then
github_username=$BUILD_USER_ID
fi
if [[ ! -f $BOTO_CONFIG ]]; then
echo "AWS credentials not found for $aws_account"
exit 1
fi
extra_vars="/var/tmp/extra-vars-$$.yml"
if [[ -z $deploy_host ]]; then
deploy_host="${github_username}.m.sandbox.edx.org"
fi
cat << EOF > $extra_vars
---
EDXAPP_PREVIEW_LMS_BASE: preview.${deploy_host}
EDXAPP_LMS_BASE: ${deploy_host}
EDXAPP_LMS_PREVIEW_NGINX_PORT: 80
EDXAPP_CMS_NGINX_PORT: 80
XSERVER_GRADER_CHECKOUT: False
c_skip_grader_checkout: True
edx_platform_commit: $edxapp_version
forum_version: $forum_version
xqueue_version: $xqueue_version
xserver_version: $xserver_version
ora_version: $ora_version
ease_version: $ease_version
ansible_ssh_private_key_file: /var/lib/jenkins/${keypair}.pem
PYPI_MIRROR_URL: 'https://pypi.edx.org/root/pypi/+simple/'
GIT_MIRROR: 'git.edx.org'
migrate_only: '$run_migration'
EOF
cat $extra_vars
echo "Deploying to $deploy_host"
declare -A deploy
deploy[edxapp]=$edxapp
deploy[forum]=$forum
deploy[xqueue]=$xqueue
deploy[xserver]=$xserver
deploy[ora]=$ora
ssh-keygen -f "/var/lib/jenkins/.ssh/known_hosts" -R "$deploy_host"
cd playbooks/edx-east
for i in "${!deploy[@]}"; do
if [[ ${deploy[$i]} == "true" ]]; then
ansible-playbook -vvvv deploy_${i}.yml -i "${deploy_host}," -e "@${extra_vars}" --user ubuntu --tags deploy
fi
done
rm -f "$extra_vars"
#!/usr/bin/env bash
# Ansible provisioning wrapper script that
# assumes the following parameters set
# as environment variables
#
# - github_username
# - server_type
# - instance_type
# - region
# - aws_account
# - keypair
# - ami
# - root_ebs_size
# - security_group
# - dns_zone
# - dns_name
# - environment
# - name_tag
export BOTO_CONFIG=/var/lib/jenkins/${aws_account}.boto
if [[ -z $WORKSPACE ]]; then
dir=$(dirname $0)
source "$dir/ascii-convert.sh"
else
source "$WORKSPACE/util/jenkins/ascii-convert.sh"
fi
if [[ -z $github_username ]]; then
github_username=$BUILD_USER_ID
fi
if [[ ! -f $BOTO_CONFIG ]]; then
echo "AWS credentials not found for $aws_account"
exit 1
fi
extra_vars="/var/tmp/extra-vars-$$.yml"
if [[ -z $dns_name ]]; then
dns_name=$github_username
fi
if [[ -z $name_tag ]]; then
name_tag=${github_username}-${environment}
fi
if [[ -z $ami ]]; then
if [[ $server_type == "full_edx_installation" ]]; then
ami="ami-65db8b0c"
elif [[ $server_type == "ubuntu_12.04" ]]; then
ami="ami-d0f89fb9"
fi
fi
if [[ -z $instance_type ]]; then
if [[ $server_type == "full_edx_installation" ]]; then
instance_type="m1.medium"
elif [[ $server_type == "ubuntu_12.04" ]]; then
instance_type="m1.small"
fi
fi
cat << EOF > $extra_vars
---
EDXAPP_PREVIEW_LMS_BASE: preview.${dns_name}.${dns_zone}
EDXAPP_LMS_BASE: ${dns_name}.${dns_zone}
EDXAPP_LMS_PREVIEW_NGINX_PORT: 80
EDXAPP_CMS_NGINX_PORT: 80
ansible_ssh_private_key_file: /var/lib/jenkins/${keypair}.pem
dns_name: $dns_name
keypair: $keypair
instance_type: $instance_type
security_group: $security_group
ami: $ami
region: $region
instance_tags: '{"environment": "$environment", "github_username": "$github_username", "Name": "$name_tag", "source": "jenkins", "owner": "$BUILD_USER"}'
root_ebs_size: $root_ebs_size
name_tag: $name_tag
PYPI_MIRROR_URL: 'https://pypi.edx.org/root/pypi/+simple/'
GIT_MIRROR: 'git.edx.org'
gh_users:
- user: jarv
groups:
- adm
- user: feanil
groups:
- adm
- user: e0d
groups:
- adm
- user: ${github_username}
groups:
- adm
dns_zone: $dns_zone
EOF
cat $extra_vars
cd playbooks/edx-east
# run the tasks to launch an ec2 instance from AMI
ansible-playbook -vvvv edx_provision.yml -i inventory.ini -e "@${extra_vars}" --user ubuntu
# run tasks to update application config files for the sandbox hostname
if [[ $server_type == "full_edx_installation" ]]; then
ansible-playbook -vvvv edx_continuous_integration.yml -i "${dns_name}.${dns_zone}," -e "@${extra_vars}" --user ubuntu --tags "lms-env,cms-env,lms-preview-env"
fi
rm -f "$extra_vars"
#!/usr/bin/env bash
function ascii_convert {
echo $1 | iconv -f utf8 -t ascii//TRANSLIT//IGNORE
}
# remove non-ascii chars from build user vars
BUILD_USER_LAST_NAME=$(ascii_convert $BUILD_USER_LAST_NAME)
BUILD_USER_FIRST_NAME=$(ascii_convert $BUILD_USER_FIRST_NAME)
BUILD_USER_ID=$(ascii_convert $BUILD_USER_ID)
BUILD_USER=$(ascii_convert $BUILD_USER)
...@@ -13,6 +13,8 @@ Options:
import boto
from docopt import docopt
from vpcutil import vpc_for_stack_name
from vpcutil import stack_name_for_vpc
from collections import defaultdict

VERSION="vpc tools 0.1"
...@@ -29,6 +31,7 @@ JUMPBOX_CONFIG = """
"""
HOST_CONFIG = """
# Instance ID: {instance_id}
Host {name}
ProxyCommand ssh {config_file} -W %h:%p {jump_box}
HostName {ip}
...@@ -47,6 +50,7 @@ def dispatch(args):
def _ssh_config(args):
if args.get("vpc"):
vpc_id = args.get("<vpc_id>")
stack_name = stack_name_for_vpc(vpc_id)
elif args.get("stack-name"):
stack_name = args.get("<stack_name>")
vpc_id = vpc_for_stack_name(stack_name)
...@@ -71,17 +75,23 @@ def _ssh_config(args):
else:
config_file = ""
jump_box = "{stack_name}-jumpbox".format(stack_name=stack_name)
friendly = "{stack_name}-{logical_id}-{instance_number}"
id_type_counter = defaultdict(int)
reservations = vpc.get_all_instances(filters={'vpc-id' : vpc_id})
for reservation in reservations:
for instance in reservation.instances:
if 'group' in instance.tags:
logical_id = instance.tags['group']
else:
logical_id = instance.tags['aws:cloudformation:logical-id']
instance_number = id_type_counter[logical_id]
id_type_counter[logical_id] += 1
if logical_id == "BastionHost" or logical_id == 'bastion':
print JUMPBOX_CONFIG.format(
jump_box=jump_box,
...@@ -90,33 +100,32 @@ def _ssh_config(args):
identity_file=identity_file,
strict_host_check=strict_host_check)
# Print host config even for the bastion box because that is how
# ansible accesses it.
print HOST_CONFIG.format(
name=instance.private_ip_address,
jump_box=jump_box,
ip=instance.private_ip_address,
user=user,
identity_file=identity_file,
config_file=config_file,
strict_host_check=strict_host_check,
instance_id=instance.id)
# duplicating for convenience with ansible
name = friendly.format(stack_name=stack_name,
logical_id=logical_id,
instance_number=instance_number)
print HOST_CONFIG.format(
name=name,
jump_box=jump_box,
ip=instance.private_ip_address,
user=user,
identity_file=identity_file,
config_file=config_file,
strict_host_check=strict_host_check,
instance_id=instance.id)
if __name__ == '__main__':
args = docopt(__doc__, version=VERSION)
......
...@@ -7,3 +7,15 @@ def vpc_for_stack_name(stack_name):
if resource.resource_type == 'AWS::EC2::VPC':
return resource.physical_resource_id
def stack_name_for_vpc(vpc_name):
    cfn_tag_key = 'aws:cloudformation:stack-name'
    vpc = boto.connect_vpc()
    resource = vpc.get_all_vpcs(vpc_ids=[vpc_name])[0]
    if cfn_tag_key in resource.tags:
        return resource.tags[cfn_tag_key]
    else:
        msg = "VPC({}) is not part of a cloudformation stack.".format(vpc_name)
        raise Exception(msg)
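A hypothetical usage sketch (the VPC id is a placeholder and boto credentials are assumed to be configured): recover the CloudFormation stack behind a VPC and derive the same jumpbox alias that _ssh_config() builds in vpc_tools.py above:

from vpcutil import stack_name_for_vpc

stack_name = stack_name_for_vpc('vpc-0123abcd')  # placeholder VPC id
print "{stack_name}-jumpbox".format(stack_name=stack_name)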
MEMORY = 2048
CPU_COUNT = 2
Vagrant.configure("2") do |config|
...@@ -18,7 +18,7 @@ Vagrant.configure("2") do |config|
# point Vagrant at the location of your playbook you want to run
ansible.playbook = "../../playbooks/vagrant-fullstack.yml"
ansible.inventory_path = "../../playbooks/vagrant/inventory.ini"
ansible.extra_vars = { XSERVER_GRADER_CHECKOUT: 'False' }
ansible.verbose = "extra"
end
end
MEMORY = 2048
CPU_COUNT = 2
Vagrant.configure("2") do |config|
......