Commit 944e697a by Han Su Kim

Merge pull request #1495 from edx/rc/kifli

Release...rc/kifli
parents a76cf246 bc0a114a
......@@ -10,3 +10,9 @@ vagrant/*/devstack/edx-platform
vagrant/*/devstack/cs_comments_service
vagrant/*/devstack/ora
vagrant_ansible_inventory_default
### OS X artifacts
*.DS_Store
.AppleDouble
:2e_*
:2e#
......@@ -3,7 +3,7 @@ language: python
python:
- "2.7"
install:
- "sudo apt-get install -y npm python-demjson"
- "sudo apt-get install -y nodejs python-demjson"
- "pip install --allow-all-external -r requirements.txt"
- "pip install --allow-all-external demjson"
script:
......@@ -24,9 +24,17 @@ script:
fi
done
- |
plays="aws bastion certs commoncluster common demo devpi discern edx_ansible edxapp elasticsearch forum ora rabbitmq worker xqueue xserver"
set -e
cd playbooks/edx-east
for play in $plays; do
ansible-playbook -i localhost, --syntax-check ${play}.yml
ROLE_DIRS=$(/bin/ls -d roles/*)
cat <<EOF >travis-test.yml
- name: Play to test all roles
hosts: all
roles:
EOF
for role_dir in $ROLE_DIRS; do
echo " - $(basename $role_dir)" >> travis-test.yml
done
ansible-playbook -i localhost, --syntax-check travis-test.yml
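For illustration, if a checkout contained only the aws and edxapp role directories, the generated travis-test.yml would read:

- name: Play to test all roles
  hosts: all
  roles:
    - aws
    - edxapp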
......@@ -32,3 +32,6 @@ Ray Hooker <ray.hooker@gmail.com>
David Pollack <david@sologourmand.com>
Rodolphe Quiedeville <rodolphe@quiedeville.org>
Matjaz Gregoric <mtyaka@gmail.com>
Ben Patterson <bpatterson@edx.org>
Jason Zhu <fmyzjs@gmail.com>
Rohit Karajgi <rohit.karajgi@gmail.com>
- Role: common
- We now remove the default syslog.d conf file (50-default.conf); this will
    break deployments where that file has been hand-edited.
- Role: edxapp
- Updated the module store settings to match the new settings format.
- Role: analytics-api
- Added a new role for the analytics-api Django app. Currently a private repo
......@@ -18,3 +25,7 @@
- Update `CMS_HOSTNAME` default to allow any hostname that starts with `studio` along with `prod-studio` or `stage-studio`.
- Start a change log to keep track of backwards incompatible changes and deprecations.
- Role: Mongo
- Fixed the case of a variable used in an if block that broke cluster configuration,
    by changing mongo_clustered to MONGO_CLUSTERED.
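Variable names are case-sensitive to Ansible and Jinja, so an if block that tested mongo_clustered never saw the MONGO_CLUSTERED value set in secure vars. A minimal sketch of the corrected pattern (hypothetical template fragment, not the role's actual file):

{% if MONGO_CLUSTERED %}
# cluster-only replica-set settings are emitted here
{% endif %}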
......@@ -2865,15 +2865,13 @@
"Key":"environment",
"Value":{
"Ref":"EnvironmentTag"
},
"PropagateAtLaunch":true
}
},
{
"Key":"deployment",
"Value":{
"Ref":"DeploymentTag"
},
"PropagateAtLaunch":true
}
}
],
"UserData": { "Fn::Base64" : { "Fn::Join" : ["", [
......
{
"AWSTemplateFormatVersion":"2010-09-09",
"Description":"Separate VPC for database clones and replicas.",
"Parameters":{
"EnvironmentTag":{
"Type":"String",
"Description":"A tag value applied to the hosts in the VPC indicating which environment to use during the configuration phase, e.g., stage, prod, sandbox",
"Default":"prod"
},
"DeploymentTag":{
"Type":"String",
"Description":"A tag value applied to the hosts in the VPC indicating which deployment this is, e.g., edx, edge, <university>, <org>",
"Default":"edx"
},
"KeyName":{
"Type":"String",
"Description":"Name of an existing EC2 KeyPair to enable SSH access to the web server",
"Default":"deployment-201407"
},
"ClassB":{
"Default":"1",
"Description":"The second octet of the Class B to be allocated for this VPC. 10.?.xxx.xxx",
"Type":"Number",
"MinValue":"0",
"MaxValue":"255",
"ConstraintDescription":"ClassB value must be between 0 and 255."
}
},
"Mappings":{
"SubnetConfig":{
"VPC": { "CIDR":".0.0/16" },
"Data01": { "CIDR":".50.0/24" },
"Data02": { "CIDR":".51.0/24" }
},
"MapRegionsToAvailZones":{
"us-east-1": { "AZone2":"us-east-1d", "AZone0":"us-east-1b", "AZone1":"us-east-1c" },
"us-west-1": { "AZone0":"us-west-1a", "AZone2":"us-west-1b", "AZone1":"us-west-1c" },
"us-west-2": { "AZone0":"us-west-2a", "AZone1":"us-west-2b", "AZone2":"us-west-2c" },
"eu-west-1": { "AZone0":"eu-west-1a", "AZone1":"eu-west-1b", "AZone2":"eu-west-1c" },
"sa-east-1": { "AZone0":"sa-east-1a", "AZone1":"sa-east-1b", "AZone2":"sa-east-1c" },
"ap-southeast-1": { "AZone0":"ap-southeast-1a", "AZone1":"ap-southeast-1b", "AZone2":"ap-southeast-1c" },
"ap-southeast-2": { "AZone0":"ap-southeast-2a", "AZone1":"ap-southeast-2b", "AZone2":"ap-southeast-2c" },
"ap-northeast-1": { "AZone0":"ap-northeast-1a", "AZone1":"ap-northeast-1b", "AZone2":"ap-northeast-1c" }
}
},
"Resources":{
"EdxVPC":{
"Type":"AWS::EC2::VPC",
"Properties":{
"EnableDnsSupport" : "true",
"EnableDnsHostnames" : "true",
"CidrBlock": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "VPC", "CIDR"]}]]},
"InstanceTenancy":"default"
}
},
"Data01":{
"Type":"AWS::EC2::Subnet",
"Properties":{
"VpcId":{
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Data01",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
"MapRegionsToAvailZones",
{ "Ref":"AWS::Region" },
"AZone0"
]
},
"Tags":[
{
"Key":"Name",
"Value":"Subnet-for-sanitized-dbs"
}
]
}
},
"Data02":{
"Type":"AWS::EC2::Subnet",
"Properties":{
"VpcId":{
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Data02",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
"MapRegionsToAvailZones",
{ "Ref":"AWS::Region" },
"AZone1"
]
},
"Tags":[
{
"Key":"Name",
"Value":"Subnet-for-non-sanitized-clones"
}
]
}
},
"PrivateRouteTable":{
"Type":"AWS::EC2::RouteTable",
"Properties":{
"VpcId":{
"Ref":"EdxVPC"
},
"Tags":[
{
"Key":"Application",
"Value":{
"Ref":"AWS::StackId"
}
},
{
"Key":"Network",
"Value":"Private"
}
]
}
},
"PrivateSubnetRouteTableAssociationData01":{
"Type":"AWS::EC2::SubnetRouteTableAssociation",
"Properties":{
"SubnetId":{
"Ref":"Data01"
},
"RouteTableId":{
"Ref":"PrivateRouteTable"
}
}
},
"PrivateSubnetRouteTableAssociationData02":{
"Type":"AWS::EC2::SubnetRouteTableAssociation",
"Properties":{
"SubnetId":{
"Ref":"Data02"
},
"RouteTableId":{
"Ref":"PrivateRouteTable"
}
}
},
"PrivateNetworkAcl":{
"Type":"AWS::EC2::NetworkAcl",
"Properties":{
"VpcId":{
"Ref":"EdxVPC"
},
"Tags":[
{
"Key":"Application",
"Value":{
"Ref":"AWS::StackId"
}
},
{
"Key":"Network",
"Value":"Private"
}
]
}
},
"InboundPrivateNetworkAclEntry":{
"Type":"AWS::EC2::NetworkAclEntry",
"Properties":{
"NetworkAclId":{
"Ref":"PrivateNetworkAcl"
},
"RuleNumber":"100",
"Protocol":"6",
"RuleAction":"allow",
"Egress":"false",
"CidrBlock":"0.0.0.0/0",
"PortRange":{
"From":"0",
"To":"65535"
}
}
},
"OutBoundPrivateNetworkAclEntry":{
"Type":"AWS::EC2::NetworkAclEntry",
"Properties":{
"NetworkAclId":{
"Ref":"PrivateNetworkAcl"
},
"RuleNumber":"100",
"Protocol":"6",
"RuleAction":"allow",
"Egress":"true",
"CidrBlock":"0.0.0.0/0",
"PortRange":{
"From":"0",
"To":"65535"
}
}
},
"PrivateSubnetNetworkAclAssociationData01":{
"Type":"AWS::EC2::SubnetNetworkAclAssociation",
"Properties":{
"SubnetId":{
"Ref":"Data01"
},
"NetworkAclId":{
"Ref":"PrivateNetworkAcl"
}
}
},
"PrivateSubnetNetworkAclAssociationData02":{
"Type":"AWS::EC2::SubnetNetworkAclAssociation",
"Properties":{
"SubnetId":{
"Ref":"Data02"
},
"NetworkAclId":{
"Ref":"PrivateNetworkAcl"
}
}
},
"EdxDataSecurityGroup":{
"Type":"AWS::EC2::SecurityGroup",
"Properties":{
"GroupDescription":"Open up access to the data subnet",
"VpcId":{
"Ref":"EdxVPC"
},
"SecurityGroupIngress":[
{
"IpProtocol":"tcp",
"FromPort":"3306",
"ToPort":"3306",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":"27017",
"ToPort":"27017",
"CidrIp":"0.0.0.0/0"
}
]
}
},
"EdxDBSubnetGroup":{
"Type":"AWS::RDS::DBSubnetGroup",
"Properties":{
"DBSubnetGroupDescription":"Subnets available for the RDS DB Instance",
"SubnetIds":[
{
"Ref":"Data01"
},
{
"Ref":"Data02"
}
]
}
},
"DBSecurityGroup":{
"Type":"AWS::RDS::DBSecurityGroup",
"Properties":{
"EC2VpcId":{
"Ref":"EdxVPC"
},
"GroupDescription":"Data access"
}
}
}
}
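As a worked example, with the default ClassB of 1 the Fn::Join/Fn::FindInMap pairs above resolve to:

VPC:    10.1.0.0/16
Data01: 10.1.50.0/24
Data02: 10.1.51.0/24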
......@@ -20,6 +20,7 @@ import os
import sys
import time
import json
import socket
try:
import boto.sqs
from boto.exception import NoAuthHandlerFound
......@@ -132,4 +133,12 @@ class CallbackModule(object):
# only keep the last 20 or so lines to avoid payload size errors
if len(payload[msg_type]['stdout_lines']) > 20:
payload[msg_type]['stdout_lines'] = ['(clipping) ... '] + payload[msg_type]['stdout_lines'][-20:]
self.sqs.send_message(self.queue, json.dumps(payload))
while True:
try:
self.sqs.send_message(self.queue, json.dumps(payload))
break
except socket.gaierror as e:
# transient DNS failure; log and retry after a short pause
print 'socket.gaierror, will retry: ' + str(e)
time.sleep(1)
except Exception:
raise
......@@ -225,12 +225,16 @@ class Ec2Inventory(object):
cache_path = config.get('ec2', 'cache_path')
if not os.path.exists(cache_path):
os.makedirs(cache_path)
self.cache_path_cache = cache_path + "/ansible-ec2.cache"
self.cache_path_tags = cache_path + "/ansible-ec2.tags.cache"
self.cache_path_index = cache_path + "/ansible-ec2.index"
self.cache_max_age = config.getint('ec2', 'cache_max_age')
if 'AWS_PROFILE' in os.environ:
aws_profile = "{}-".format(os.environ.get('AWS_PROFILE'))
else:
aws_profile = ""
self.cache_path_cache = cache_path + "/{}ansible-ec2.cache".format(aws_profile)
self.cache_path_tags = cache_path + "/{}ansible-ec2.tags.cache".format(aws_profile)
self.cache_path_index = cache_path + "/{}ansible-ec2.index".format(aws_profile)
self.cache_max_age = config.getint('ec2', 'cache_max_age')
def parse_cli_args(self):
''' Command line argument processing '''
......
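The profile prefix keeps caches for different AWS accounts from colliding. For example, assuming a cache_path of /tmp:

AWS_PROFILE=stage  ->  /tmp/stage-ansible-ec2.cache
(unset)            ->  /tmp/ansible-ec2.cache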
# A simple utility play to add a public key to the authorized key
# file for the ubuntu user.
# You must pass in the entire line that you are adding.
# Example: ansible-playbook add-ubuntu-key.yml -c local -i 127.0.0.1, \
# -e "public_key=deployment-201407" \
# -e owner=jarv -e keyfile=/home/jarv/.ssh/authorized_keys
- hosts: all
vars:
# Number of instances to operate on at a time
serial_count: 1
owner: ubuntu
keyfile: "/home/{{ owner }}/.ssh/authorized_keys"
serial: "{{ serial_count }}"
tasks:
- fail: msg="You must pass in a public_key"
when: public_key is not defined
- fail: msg="public does not exist in secrets"
when: ubuntu_public_keys[public_key] is not defined
- command: mktemp
register: mktemp
- name: Validate the public key before we add it to authorized_keys
copy: >
content="{{ ubuntu_public_keys[public_key] }}"
dest={{ mktemp.stdout }}
# This tests the public key and will not continue if it does not look valid
- command: ssh-keygen -l -f {{ mktemp.stdout }}
- file: >
path={{ mktemp.stdout }}
state=absent
- lineinfile: >
dest={{ keyfile }}
line="{{ ubuntu_public_keys[public_key] }}"
- file: >
path={{ keyfile }}
owner={{ owner }}
mode=0600
- name: Deploy aide IDS
hosts: all
sudo: True
gather_facts: True
roles:
- aide
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
# ansible-playbook -i ec2.py commoncluster.yml --limit tag_Name_stage-edx-commoncluster -e@/path/to/vars/env-deployment.yml -T 30 --list-hosts
- hosts: all
sudo: True
serial: 1
vars:
# By default take instances in and out of the elb(s) they
# are attached to
# To skip elb operations use "-e elb_pre_post=false"
elb_pre_post: true
# Number of instances to operate on at a time
serial_count: 1
serial: "{{ serial_count }}"
pre_tasks:
- action: ec2_facts
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
wait_timeout: 60
sudo: False
when: elb_pre_post
tasks:
- debug: msg="{{ ansible_ec2_local_ipv4 }}"
with_items: list.results
- shell: echo "rabbit@ip-{{ item|replace('.', '-') }}"
when: item != ansible_ec2_local_ipv4
with_items: hostvars.keys()
register: list
- command: rabbitmqctl stop_app
- command: rabbitmqctl join_cluster {{ item.stdout }}
when: item.stdout is defined
with_items: list.results
- command: rabbitmqctl start_app
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
sudo: False
when: elb_pre_post
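The echo task above derives RabbitMQ node names from peer IPs; for a peer at 10.0.47.127 (an address borrowed from the inventory example later in this change) it emits rabbit@ip-10-0-47-127, which the join task then consumes:

rabbitmqctl join_cluster rabbit@ip-10-0-47-127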
......@@ -22,25 +22,24 @@
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
wait_timeout: 60
sudo: False
when: elb_pre_post
roles:
- aws
- role: nginx
nginx_sites:
- xqueue
- role: xqueue
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
- role: nginx
nginx_sites:
- xqueue
- xqueue
- oraclejdk
- elasticsearch
- rabbitmq
- datadog
- splunkforwarder
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
......@@ -51,6 +50,7 @@
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
sudo: False
when: elb_pre_post
......
......@@ -4,91 +4,6 @@
sudo: True
tasks:
- name: Switch the mongo db to use ephemeral
file: >
name=/mnt/mongodb
state=directory
owner=mongodb
group=mongodb
tags: update_mongo_data
- name: update the mongo config to use the new mongo dir
shell: >
sed -i 's#^dbpath=.*#dbpath=/mnt/mongodb#' /etc/mongodb.conf
tags: update_mongo_data
- name: restart mongodb
service: >
name=mongodb
state=restarted
tags: update_mongo_data
- name: grab the most recent backup from s3 for forums
shell: >
/edx/bin/s3cmd ls s3://edx-mongohq/mongohq_backups/ | grep comment | sort | tail -1 | awk '{ print $4 }'
register: s3cmd_out_forum
tags: update_mongo_data
- name: grab the most recent backup from s3 for forums
shell: >
/edx/bin/s3cmd get {{ s3cmd_out_forum.stdout }} --skip-existing
chdir=/mnt
tags: update_mongo_data
when: s3cmd_out_forum.stdout is defined
- name: untar the s3 backup
shell: >
tar zxf {{ s3cmd_out_forum.stdout|basename }}
chdir=/mnt
when: s3cmd_out_forum.stdout is defined
tags: update_mongo_data
- name: grab the most recent backup from s3 for prod-edx
shell: >
/edx/bin/s3cmd ls s3://edx-mongohq/mongohq_backups/ | grep prod-edx | sort | tail -1 | awk '{ print $4 }'
register: s3cmd_out_modulestore
tags: update_mongo_data
- name: grab the most recent backup from s3 for prod-edx
shell: >
/edx/bin/s3cmd get {{ s3cmd_out_modulestore.stdout }} --skip-existing
chdir=/mnt
tags: update_mongo_data
when: s3cmd_out_modulestore.stdout is defined
- name: untar the s3 backup
shell: >
tar zxf {{ s3cmd_out_modulestore.stdout|basename }}
chdir=/mnt
tags: update_mongo_data
when: s3cmd_out_modulestore.stdout is defined
- name: Restore the mongo data for the forums
shell: >
mongorestore --drop -d cs_comments_service /mnt/comments-prod
tags: update_mongo_data
- name: Restore the mongo data for the modulestore
shell: >
mongorestore --drop -d edxapp /mnt/prod-edx
tags: update_mongo_data
# recreate users after the restore
- name: create mongodb users
mongodb_user: >
database={{ item.database }}
name={{ item.user }}
password={{ item.password }}
state=present
with_items:
- user: cs_comments_service
password: password
database: cs_comments_service
- user: edxapp
password: password
database: edxapp
# WARNING - calling lineinfile on a symlink
# will convert the symlink to a file!
# don't use /edx/etc/server-vars.yml here
......@@ -108,6 +23,17 @@
- "EDXAPP_MYSQL_PASSWORD: {{ EDXAPP_MYSQL_PASSWORD }}"
tags: update_edxapp_mysql_host
- name: Update mongo to point to the sandbox mongo clone
lineinfile: >
dest=/edx/app/edx_ansible/server-vars.yml
line="{{ item }}"
with_items:
- "EDXAPP_MONGO_HOSTS: {{ EDXAPP_MONGO_HOSTS }}"
- "EDXAPP_MONGO_DB_NAME: {{ EDXAPP_MONGO_DB_NAME }}"
- "EDXAPP_MONGO_USER: {{ EDXAPP_MONGO_USER }}"
- "EDXAPP_MONGO_PASS: {{ EDXAPP_MONGO_PASS }}"
tags: update_edxapp_mysql_host
- name: call update on edx-platform
shell: >
/edx/bin/update edx-platform master
......
......@@ -30,17 +30,11 @@
ora_db_root_user: 'None'
discern_db_root_user: 'None'
vars_prompt:
# passwords use vars_prompt so they aren't in the
# bash history
- name: "db_root_pass"
prompt: "Password for root mysql user"
private: True
tasks:
- fail: msg="COMMON_ENVIRONMENT and COMMON_DEPLOYMENT need to be defined to use this play"
when: COMMON_ENVIRONMENT is not defined or COMMON_DEPLOYMENT is not defined
- fail: msg="db_root_pass is not defined"
when: db_root_pass is not defined
- name: install python mysqldb module
apt: pkg={{item}} install_recommends=no state=present update_cache=yes
sudo: yes
......
......@@ -4,12 +4,12 @@
gather_facts: False
vars:
keypair: continuous-integration
instance_type: m1.medium
security_group: sandbox
instance_type: t2.medium
security_group: sandbox-vpc
# ubuntu 12.04
ami: ami-d0f89fb9
ami: ami-f478849c
region: us-east-1
zone: us-east-1b
zone: us-east-1c
instance_tags:
environment: sandbox
github_username: temp
......@@ -21,6 +21,7 @@
dns_zone: m.sandbox.edx.org
name_tag: sandbox-temp
elb: false
vpc_subnet_id: subnet-cd867aba
roles:
- role: launch_ec2
keypair: "{{ keypair }}"
......@@ -33,6 +34,8 @@
dns_name: "{{ dns_name }}"
dns_zone: "{{ dns_zone }}"
zone: "{{ zone }}"
vpc_subnet_id: "{{ vpc_subnet_id }}"
assign_public_ip: yes
terminate_instance: true
instance_profile_name: sandbox
......
......@@ -2,6 +2,7 @@
hosts: all
sudo: True
gather_facts: True
vars:
roles:
- aws
- role: nginx
......@@ -10,6 +11,9 @@
- cms
nginx_default_sites:
- lms
nginx_extra_sites: "{{ NGINX_EDXAPP_EXTRA_SITES }}"
nginx_extra_configs: "{{ NGINX_EDXAPP_EXTRA_CONFIGS }}"
nginx_redirects: "{{ NGINX_EDXAPP_CUSTOM_REDIRECTS }}"
- edxapp
- role: datadog
when: COMMON_ENABLE_DATADOG
......@@ -17,3 +21,5 @@
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
- role: minos
when: COMMON_ENABLE_MINOS
......@@ -9,7 +9,7 @@
- name: syncdb and migrate
shell: >
chdir={{ edxapp_code_dir }}
python manage.py {{ item }} migrate --noinput --settings=aws_migrate {{ db_dry_run }}
python manage.py {{ item }} syncdb --migrate --noinput --settings=aws_migrate {{ db_dry_run }}
environment:
DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}"
DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
......
......@@ -22,6 +22,7 @@
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
wait_timeout: 60
sudo: False
when: elb_pre_post
roles:
......@@ -38,6 +39,7 @@
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
sudo: False
when: elb_pre_post
......@@ -15,7 +15,6 @@
- roles/xserver/defaults/main.yml
- roles/forum/defaults/main.yml
roles:
- common
- edxlocal
- mongo
- browsers
......
#!/usr/bin/env python
"""
Build an ansible inventory based on autoscaling group instance lifecycle state.
Outputs JSON to stdout with keys for each state and combination of autoscaling
group and state.
{
"InService": [
"10.0.47.127",
"10.0.46.174"
],
"Terminating:Wait": [
"10.0.48.104"
],
"e-d-CommonClusterServerAsGroup": [
"10.0.47.127",
"10.0.46.174"
],
"e-d-CommonClusterServerAsGroup_InService": [
"10.0.47.127",
"10.0.46.174"
],
"e-d-CommonClusterServerAsGroup_InService": [
"10.0.48.104"
]
}
"""
import argparse
import boto
import json
from collections import defaultdict
class LifecycleInventory():
profile = None
def __init__(self, profile):
self.profile = profile
def get_instance_dict(self):
ec2 = boto.connect_ec2(profile_name=self.profile)
reservations = ec2.get_all_instances()
dict = {}
for instance in [i for r in reservations for i in r.instances]:
dict[instance.id] = instance
return dict
def run(self):
autoscale = boto.connect_autoscale(profile_name=self.profile)
groups = autoscale.get_all_groups()
instances = self.get_instance_dict()
inventory = defaultdict(list)
for group in groups:
for instance in group.instances:
private_ip_address = instances[instance.instance_id].private_ip_address
inventory[group.name].append(private_ip_address)
inventory[group.name + "_" + instance.lifecycle_state].append(private_ip_address)
inventory[instance.lifecycle_state.replace(":","_")].append(private_ip_address)
print json.dumps(inventory, sort_keys=True, indent=2)
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--profile', help='The aws profile to use when connecting.')
parser.add_argument('-l', '--list', help='Ansible passes this, we ignore it.', action='store_true', default=True)
args = parser.parse_args()
LifecycleInventory(args.profile).run()
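Because the state keys double as Ansible host groups, the script can drive plays directly, e.g. (mirroring the retire_host.yml invocation later in this change):

ansible-playbook -i lifecycle_inventory.py retire_host.yml -e@/vars/env.yml --limit Terminating_Wait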
- name: Deploy edxapp
hosts: all
sudo: True
gather_facts: True
vars:
roles:
- common
- minos
- name: Deploy MongoDB
hosts: all
sudo: True
gather_facts: True
roles:
- mongo
- mongo_mms
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
# Example ansible-playbook -i redirect.example.com -e@/path/to/secure/var/file.yml
#
# the secure var file will need to have the following vars defined:
#
# NGINX_ENABLE_SSL
# NGINX_SSL_CERTIFICATE
# NGINX_SSL_KEY
# # for the redirects use $scheme://example.com to match the protocol
#
# secure vars example:
# # Vars for setting up the nginx redirect instance
# NGINX_ENABLE_SSL: True
# NGINX_SSL_CERTIFICATE: '../../../example-secure/ssl/example.com.crt'
# NGINX_SSL_KEY: '../../../example-secure/ssl/example.com.key'
# nginx_redirects:
# - server_name: nginx-redirect.example.edx.org
# redirect: "http://www.example.com"
# - server_name: example.com
# redirect: "http://www.example.com"
# default: true
#
#
#
# - ...
- name: utility play to setup an nginx redirect
hosts: all
sudo: True
gather_facts: True
roles:
- role: nginx
nginx_sites:
- nginx_redirect
......@@ -29,6 +29,8 @@
notify:
- "restart edxapp"
- "restart workers"
tags:
- deploy
- name: syncdb and migrate
shell: >
......@@ -40,6 +42,8 @@
notify:
- "restart edxapp"
- "restart workers"
tags:
- deploy
handlers:
- name: restart edxapp
......
......@@ -24,6 +24,7 @@
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
wait_timeout: 60
sudo: False
when: elb_pre_post
roles:
......@@ -39,6 +40,7 @@
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
sudo: False
when: elb_pre_post
# A simple utility play to remove a public key from the authorized key
# file for the ubuntu user
# You must pass in the entire line that you are removing.
- hosts: all
vars:
# Number of instances to operate on at a time
serial_count: 1
owner: ubuntu
keyfile: "/home/{{ owner }}/.ssh/authorized_keys"
serial: "{{ serial_count }}"
tasks:
- fail: msg="You must pass in a public_key"
when: public_key is not defined
- fail: msg="public does not exist in secrets"
when: ubuntu_public_keys[public_key] is not defined
- command: mktemp
register: mktemp
# This command will fail if this returns zero lines which will prevent
# the last key from being removed
- shell: >
grep -Fv '{{ ubuntu_public_keys[public_key] }}' {{ keyfile }} > {{ mktemp.stdout }}
- shell: >
while read line; do ssh-keygen -lf /dev/stdin <<<$line; done <{{ mktemp.stdout }}
executable=/bin/bash
register: keycheck
- fail: msg="public key check failed!"
when: keycheck.stderr != ""
- command: cp {{ mktemp.stdout }} {{ keyfile }}
- file: >
path={{ keyfile }}
owner={{ owner }}
mode=0600
- file: >
path={{ mktemp.stdout }}
state=absent
- shell: wc -l < {{ keyfile }}
register: line_count
- fail: msg="There should only be one line in ubuntu's authorized_keys"
when: line_count.stdout|int != 1
# ansible-playbook -i ./lifecycle_inventory.py ./retire_host.yml
# -e@/vars/env.yml --limit Terminating_Wait
#
# This is separate because its use of handlers
# leads to various race conditions.
#
- name: Stop all services
hosts: Terminating_Wait
sudo: True
gather_facts: False
vars:
STOP_ALL_EDX_SERVICES_EXTRA_ARGS: "--no-wait"
roles:
- stop_all_edx_services
- name: Server retirement workflow
hosts: Terminating_Wait
sudo: True
gather_facts: False
tasks:
- name: Force a log rotation
command: /usr/sbin/logrotate -f /etc/logrotate.d/{{ item }}
with_items:
- "apport"
- "apt"
- "aptitude"
- "dpkg"
- "hourly"
- "landscape-client"
- "newrelic-sysmond"
- "nginx"
- "nginx-access"
- "nginx-error"
- "ppp"
- "rsyslog"
- "ufw"
- "unattended-upgrades"
- "upstart"
- name: Force a log rotation
command: /usr/sbin/logrotate -f /etc/logrotate.d/hourly/{{ item }}
with_items:
- "tracking.log"
- "edx-services"
- name: Terminate existing s3 log sync
shell: /usr/bin/pkill send-logs-to-s3 || true
- name: Send logs to s3
command: /edx/bin/send-logs-to-s3
- name: Run minos verification
hosts: Terminating_Wait
sudo: True
gather_facts: False
tasks:
- name: Run minos
command: /edx/app/minos/venvs/bin/minos --config /edx/etc/minos/minos.yml --json
- name: Deploy snort IDS
hosts: all
sudo: True
gather_facts: True
roles:
- snort
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
- name: Deploy Splunk
hosts: all
sudo: True
gather_facts: True
vars:
COMMON_APP_DIR: "/edx/app"
common_web_group: "www-data"
ENABLE_DATADOG: True
ENABLE_SPLUNKFORWARDER: True
ENABLE_NEWRELIC: True
roles:
- datadog
- splunkforwarder
- newrelic
......@@ -19,6 +19,7 @@
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
wait_timeout: 60
sudo: False
when: elb_pre_post
tasks:
......@@ -33,6 +34,7 @@
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
sudo: False
when: elb_pre_post
# This is a utility play to setup the db users on the edxapp db
#
# The mysql root user MUST be passed in as an extra var
#
# the environment and deployment must be passed in as COMMON_ENVIRONMENT
# and COMMON_DEPLOYMENT. These two vars should be set in the secret
# var file for the corresponding vpc stack
#
# Example invocation:
#
# Create the databases for edxapp and xqueue:
#
# ansible-playbook -i localhost, create_db_users.yml -e@/path/to/secrets.yml -e "edxapp_db_root_user=root edxapp_db_root_pass=password"
#
- name: Update db users on the edxapp db
hosts: all
gather_facts: False
vars:
edxapp_db_root_user: 'None'
edxapp_db_root_pass: 'None'
tasks:
- fail: msg="COMMON_ENVIRONMENT and COMMON_DEPLOYMENT need to be defined to use this play"
when: COMMON_ENVIRONMENT is not defined or COMMON_DEPLOYMENT is not defined
- name: assign mysql user permissions for read_only user
mysql_user:
name: "{{ COMMON_MYSQL_READ_ONLY_USER }}"
priv: "*.*:SELECT"
password: "{{ COMMON_MYSQL_READ_ONLY_PASS }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
append_privs: yes
host: '%'
when: item.db_user != 'None'
with_items:
- db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ edxapp_db_root_pass }}"
- name: assign mysql user permissions for migrate user
mysql_user:
name: "{{ COMMON_MYSQL_MIGRATE_USER }}"
priv: "{{ item.db_name }}.*:SELECT,INSERT,UPDATE,DELETE,ALTER,CREATE,DROP,INDEX"
password: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
append_privs: yes
host: '%'
when: item.db_user != 'None'
with_items:
- db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ edxapp_db_root_pass }}"
- name: assign mysql user permissions for admin user
mysql_user:
name: "{{ COMMON_MYSQL_ADMIN_USER }}"
priv: "*.*:CREATE USER"
password: "{{ COMMON_MYSQL_ADMIN_PASS }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
append_privs: yes
host: '%'
when: item.db_user != 'None'
with_items:
- db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ edxapp_db_root_pass }}"
- name: assign mysql user permissions for db users
mysql_user:
name: "{{ item.db_user_to_modify }}"
priv: "{{ item.db_name }}.*:SELECT,INSERT,UPDATE,DELETE"
password: "{{ item.db_user_to_modify_pass }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
host: '%'
when: item.db_user != 'None'
with_items:
# These defaults are needed; otherwise ansible will throw
# undefined-variable errors when they are not defined
# in secret vars
- db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user|default('None') }}"
db_pass: "{{ edxapp_db_root_pass|default('None') }}"
db_user_to_modify: "{{ EDXAPP_MYSQL_USER }}"
db_user_to_modify_pass: "{{ EDXAPP_MYSQL_PASSWORD }}"
# The second call to mysql_user needs to have append_privs set to
# yes, otherwise it will overwrite the grants from the previous run.
# This means that both tasks will report changed on every ansible
# run.
- name: assign mysql user permissions for db test user
mysql_user:
append_privs: yes
name: "{{ item.db_user_to_modify }}"
priv: "{{ COMMON_ENVIRONMENT }}_{{ COMMON_DEPLOYMENT }}_test_{{ item.db_name }}.*:ALL"
password: "{{ item.db_user_to_modify_pass }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
host: '%'
when: item.db_user != 'None'
with_items:
# These defaults are needed; otherwise ansible will throw
# undefined-variable errors when they are not defined
# in secret vars
- db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user|default('None') }}"
db_pass: "{{ edxapp_db_root_pass|default('None') }}"
db_user_to_modify: "{{ EDXAPP_MYSQL_USER }}"
db_user_to_modify_pass: "{{ EDXAPP_MYSQL_PASSWORD }}"
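A minimal sketch of the append_privs behavior described in the comments above (hypothetical user and database names, not this play's actual tasks):

- hosts: all
  tasks:
    # first grant: SELECT on db_one
    - mysql_user: name=demo password=demo priv="db_one.*:SELECT" state=present
    # without append_privs=yes this second call would replace the first grant
    - mysql_user: name=demo password=demo priv="db_two.*:ALL" append_privs=yes state=present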
......@@ -12,3 +12,6 @@
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
- role: minos
when: COMMON_ENABLE_MINOS
\ No newline at end of file
......@@ -21,6 +21,7 @@
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
wait_timeout: 60
sudo: False
when: elb_pre_post
roles:
......@@ -45,6 +46,7 @@
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
sudo: False
when: elb_pre_post
......@@ -25,6 +25,7 @@ ALTON_HANDLE: 'alton'
ALTON_REDIS_URL: 'redis://fakeuser:redispassword@localhost:6379'
ALTON_HTTPSERVER_PORT: '8081'
ALTON_WORLD_WEATHER_KEY: !!null
ALTON_AWS_CREDENTIALS: !!null
# Needed if you want to build AMIs from alton.
ALTON_JENKINS_URL: !!null
......@@ -55,6 +56,7 @@ alton_environment:
WILL_HTTPSERVER_PORT: "{{ ALTON_HTTPSERVER_PORT }}"
WORLD_WEATHER_ONLINE_KEY: "{{ ALTON_WORLD_WEATHER_KEY }}"
JENKINS_URL: "{{ ALTON_JENKINS_URL }}"
BOTO_CONFIG: "{{ alton_app_dir }}/.boto"
#
# OS packages
......
- name: configure the boto profiles for alton
template: >
src="boto.j2"
dest="{{ alton_app_dir }}/.boto"
owner="{{ alton_user }}"
group="{{ common_web_user }}"
mode="0640"
notify: restart alton
- name: checkout the code
git: >
dest="{{ alton_code_dir }}" repo="{{ alton_source_repo }}"
version="{{ alton_version }}" accept_hostkey=yes
sudo_user: "{{ alton_user }}"
register: alton_checkout
notify: restart alton
- name: install the requirements
......@@ -55,3 +65,5 @@
state=started
when: not disable_edx_services
- include: tag_ec2.yml tags=deploy
when: COMMON_TAG_EC2_INSTANCE
---
- name: get instance information
action: ec2_facts
tags:
- deploy
- name: tag instance
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:alton" : "{{ alton_source_repo }} {{ alton_checkout.after |truncate(7,True,'')}}"
when: alton_checkout.after is defined
tags:
- deploy
{% for deployment, creds in ALTON_AWS_CREDENTIALS.iteritems() %}
[profile {{deployment}}]
aws_access_key_id = {{ creds.access_id }}
aws_secret_access_key = {{ creds.secret_key }}
{% endfor %}
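Rendered with a single hypothetical deployment entry, the template produces a standard boto profile section (placeholder credentials):

[profile edx]
aws_access_key_id = AKIAEXAMPLEKEYID
aws_secret_access_key = exampleSecretAccessKey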
......@@ -15,7 +15,7 @@ ANALYTICS_API_GIT_IDENTITY: !!null
# depends upon Newrelic being enabled via COMMON_ENABLE_NEWRELIC
# and a key being provided via NEWRELIC_LICENSE_KEY
ANALYTICS_API_NEWRELIC_APPNAME: "your Newrelic appname"
ANALYTICS_API_NEWRELIC_APPNAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-analytics-api"
ANALYTICS_API_PIP_EXTRA_ARGS: "-i {{ COMMON_PYPI_MIRROR_URL }}"
ANALYTICS_API_NGINX_PORT: "18100"
......@@ -58,6 +58,7 @@ ANALYTICS_API_CONFIG:
HOST: 'localhost'
PORT: '3306'
ANALYTICS_API_GUNICORN_WORKERS: "2"
#
# vars are namespaced with the module name.
#
......@@ -79,7 +80,6 @@ analytics_api_code_dir: "{{ analytics_api_app_dir }}/edx-analytics-data-api"
analytics_api_conf_dir: "{{ analytics_api_home }}"
analytics_api_gunicorn_host: "127.0.0.1"
analytics_api_gunicorn_port: "8100"
analytics_api_gunicorn_workers: "8"
analytics_api_gunicorn_timeout: "300"
analytics_api_django_settings: "production"
......
......@@ -21,3 +21,4 @@
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
when: not disable_edx_services
\ No newline at end of file
......@@ -57,7 +57,7 @@
- name: create api users
shell: >
chdir={{ analytics_api_code_dir }}
{{ analytics_api_venv_bin }}/python manage.py set_api_key {{ item.key }} {{ item.value }} --create-user
{{ analytics_api_venv_bin }}/python manage.py set_api_key {{ item.key }} {{ item.value }}
sudo_user: "{{ analytics_api_user }}"
environment: "{{ analytics_api_environment }}"
with_dict: ANALYTICS_API_USERS
......@@ -109,3 +109,6 @@
- name: remove read-only ssh key for the content repo
file: path={{ analytics_api_git_identity_file }} state=absent
- include: tag_ec2.yml tags=deploy
when: COMMON_TAG_EC2_INSTANCE
---
- name: get instance information
action: ec2_facts
- name: tag instance
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:analytics_api" : "{{ analytics_api_source_repo }} {{ analytics_api_code_checkout.after |truncate(7,True,'')}}"
when: analytics_api_code_checkout.after is defined
......@@ -2,17 +2,17 @@
# {{ ansible_managed }}
{% if COMMON_ENABLE_NEWRELIC %}
{% if COMMON_ENABLE_NEWRELIC_APP %}
{% set executable = analytics_api_venv_bin + '/newrelic-admin run-program ' + analytics_api_venv_bin + '/gunicorn' %}
{% else %}
{% set executable = analytics_api_venv_bin + '/gunicorn' %}
{% endif %}
{% if COMMON_ENABLE_NEWRELIC %}
{% if COMMON_ENABLE_NEWRELIC_APP %}
export NEW_RELIC_APP_NAME="{{ ANALYTICS_API_NEWRELIC_APPNAME }}"
export NEW_RELIC_LICENSE_KEY="{{ NEWRELIC_LICENSE_KEY }}"
{% endif -%}
source {{ analytics_api_app_dir }}/analytics_api_env
{{ executable }} --pythonpath={{ analytics_api_code_dir }} -b {{ analytics_api_gunicorn_host }}:{{ analytics_api_gunicorn_port }} -w {{ analytics_api_gunicorn_workers }} --timeout={{ analytics_api_gunicorn_timeout }} analyticsdataserver.wsgi:application
{{ executable }} --pythonpath={{ analytics_api_code_dir }} -b {{ analytics_api_gunicorn_host }}:{{ analytics_api_gunicorn_port }} -w {{ ANALYTICS_API_GUNICORN_WORKERS }} --timeout={{ analytics_api_gunicorn_timeout }} analyticsdataserver.wsgi:application
......@@ -18,7 +18,7 @@ AS_DB_ANALYTICS_HOST: 'localhost'
AS_SERVER_PORT: '9000'
AS_ENV_LANG: 'en_US.UTF-8'
AS_LOG_LEVEL: 'INFO'
AS_WORKERS: '4'
AS_WORKERS: '2'
# add public keys to enable the automator user
# for running manage.py commands
......
......@@ -18,7 +18,7 @@ ANALYTICS_DB_ANALYTICS_HOST: 'localhost'
ANALYTICS_SERVER_PORT: '9000'
ANALYTICS_ENV_LANG: 'en_US.UTF-8'
ANALYTICS_LOG_LEVEL: 'INFO'
ANALYTICS_WORKERS: '4'
ANALYTICS_WORKERS: '2'
DATABASES:
default: &databases_default
......
......@@ -27,12 +27,12 @@ AWS_S3_LOGS_FROM_EMAIL: dummy@example.com
# You should be overriding the environment and deployment vars
# Order of precedence is left to right for exclude and include options
AWS_S3_LOG_PATHS:
- bucket: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-app-logs"
- bucket: "edx-{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}/logs/tracking"
path: "{{ COMMON_LOG_DIR }}/tracking/*"
- bucket: "edx-{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}/logs/application"
path: "{{ COMMON_LOG_DIR }}/!(*tracking*)"
- bucket: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-app-logs"
- bucket: "edx-{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}/logs/system"
path: "/var/log/*"
- bucket: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-tracking-logs"
path: "{{ COMMON_LOG_DIR }}/*tracking*"
#
# vars are namespaced with the module name.
......@@ -45,7 +45,8 @@ aws_s3_logfile: "{{ aws_log_dir }}/s3-log-sync.log"
aws_log_dir: "{{ COMMON_LOG_DIR }}/aws"
aws_region: "us-east-1"
# default path to the aws binary
aws_cmd: "{{ COMMON_BIN_DIR }}/s3cmd"
s3cmd_cmd: "{{ COMMON_BIN_DIR }}/s3cmd"
aws_cmd: "/usr/local/bin/aws"
#
# OS packages
#
......@@ -55,8 +56,8 @@ aws_debian_pkgs:
aws_pip_pkgs:
- https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz
- awscli
- boto==2.29.1
- awscli==1.4.2
- boto==2.32.0
aws_redhat_pkgs: []
aws_s3cmd_version: s3cmd-1.5.0-beta1
......
......@@ -84,18 +84,19 @@
dest={{ COMMON_BIN_DIR }}/{{ aws_s3_sync_script|basename }}
when: AWS_S3_LOGS
- name: run s3 log sync script on shutdown
file: >
state=link
src={{ COMMON_BIN_DIR }}/send-logs-to-s3
path=/etc/rc0.d/S00send-logs-to-s3
- name: run s3 log sync script on supervisor shutdown
template: >
src=etc/init/sync-on-stop.conf.j2
dest=/etc/init/sync-on-stop.conf
owner=root group=root mode=0644
when: AWS_S3_LOGS
# cron job runs the aws s3 sync script
# this job will log its output to /var/log/aws
- name: cronjob for s3 log sync
cron: >
name="cronjob for s3 log sync"
user=root
minute=0
job={{ aws_s3_sync_script }}
cron:
name: "cronjob for s3 log sync"
user: root
minute: 0
job: "{{ aws_s3_sync_script }} > /dev/null 2>&1"
when: AWS_S3_LOGS
---
aws_profile: !!null
s3_bucket: 'edx-prod-edx'
bucket_path: 'test'
voters:
- BellwetherVoter:
config:
- ProccessQuienscenceVoter:
config:
process_name: 'gunicorn'
- TrackingLogVoter:
config:
aws_profile: !!null
s3_bucket: 'edx-prod-edx'
bucket_path: 'test'
local_directory: '{{ COMMON_LOG_DIR }}'
start on stopped supervisor
description "sync s3 logs on supervisor shutdown"
script
/bin/bash {{ aws_s3_sync_script }}
end script
......@@ -13,6 +13,10 @@ fi
exec > >(tee "{{ aws_s3_logfile }}")
exec 2>&1
# s3cmd sync requires a valid home
# directory
export HOME=/
shopt -s extglob
usage() {
......@@ -90,5 +94,5 @@ region=${availability_zone:0:${{lb}}#availability_zone{{rb}} - 1}
s3_path="${2}/$sec_grp/"
{% for item in AWS_S3_LOG_PATHS -%}
$noop {{ aws_cmd }} sync {{ item['path'] }} "s3://{{ item['bucket'] }}/$sec_grp/${instance_id}-${ip}/"
$noop {{ s3cmd_cmd }} sync {{ item['path'] }} "s3://{{ item['bucket'] }}/$sec_grp/${instance_id}-${ip}/"
{% endfor %}
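For the first AWS_S3_LOG_PATHS entry defined earlier in this change — assuming COMMON_ENVIRONMENT=prod, COMMON_DEPLOYMENT=edx, the stock /edx/var/log log dir, and $noop empty — the loop renders roughly:

/edx/bin/s3cmd sync /edx/var/log/tracking/* "s3://edx-prod-edx/logs/tracking/$sec_grp/${instance_id}-${ip}/"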
# browsermob-proxy
browsermob_proxy_version: '2.0-beta-9'
browsermob_proxy_url: 'https://s3-us-west-1.amazonaws.com/lightbody-bmp/browsermob-proxy-{{ browsermob_proxy_version }}-bin.zip'
#!/bin/sh
/etc/browsermob-proxy/bin/browsermob-proxy
# Install browsermob-proxy, which is used for page performance testing with bok-choy
---
- name: get zip file
get_url: >
url={{ browsermob_proxy_url }}
dest=/var/tmp/browsermob-proxy-{{ browsermob_proxy_version }}.zip
register: download_browsermob_proxy
- name: unzip into /var/tmp/
shell: >
unzip /var/tmp/browsermob-proxy-{{ browsermob_proxy_version }}.zip
chdir=/var/tmp
when: download_browsermob_proxy.changed
- name: move to /etc/browsermob-proxy/
shell: >
mv /var/tmp/browsermob-proxy-{{ browsermob_proxy_version }} /etc/browsermob-proxy
when: download_browsermob_proxy.changed
- name: change permissions of main script
file: >
path=/etc/browsermob-proxy/bin/browsermob-proxy
mode=0755
when: download_browsermob_proxy.changed
- name: add wrapper script /usr/local/bin/browsermob-proxy
copy: >
src=browsermob-proxy
dest=/usr/local/bin/browsermob-proxy
when: download_browsermob_proxy.changed
- name: change permissions of wrapper script
file: >
path=/usr/local/bin/browsermob-proxy
mode=0755
when: download_browsermob_proxy.changed
......@@ -15,8 +15,8 @@ CERTS_QUEUE_URL: "http://localhost:18040"
CERTS_BUCKET: ""
# basic auth credentials for connecting
# to the xqueue server
CERTS_XQUEUE_AUTH_USER: "edx"
CERTS_XQUEUE_AUTH_PASS: "edx"
CERTS_XQUEUE_AUTH_USER: "{{ COMMON_HTPASSWD_USER }}"
CERTS_XQUEUE_AUTH_PASS: "{{ COMMON_HTPASSWD_PASS }}"
# credentials for connecting to the xqueue server
CERTS_QUEUE_USER: "lms"
CERTS_QUEUE_PASS: "password"
......
......@@ -52,6 +52,7 @@
sudo_user: "{{ certs_user }}"
environment:
GIT_SSH: "{{ certs_git_ssh }}"
register: certs_checkout
notify: restart certs
- name: remove read-only ssh key for the certs repo
......@@ -96,4 +97,7 @@
- python
- pip
- include: tag_ec2.yml tags=deploy
when: COMMON_TAG_EC2_INSTANCE
- set_fact: certs_installed=true
---
- name: get instance information
action: ec2_facts
- name: tag instance
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:certs" : "{{ CERT_REPO }} {{ certs_checkout.after|truncate(7,True,'') }}"
when: certs_checkout.after is defined
......@@ -53,8 +53,14 @@ COMMON_MYSQL_MIGRATE_PASS: 'password'
COMMON_MONGO_READ_ONLY_USER: 'read_only'
COMMON_MONGO_READ_ONLY_PASS: !!null
COMMON_ENABLE_DATADOG: False
COMMON_ENABLE_NGINXTRA: False
COMMON_ENABLE_SPLUNKFORWARDER: False
COMMON_ENABLE_NEWRELIC: False
# enables app reporting, you must enable newrelic
# as well
COMMON_ENABLE_NEWRELIC_APP: False
COMMON_ENABLE_MINOS: False
COMMON_TAG_EC2_INSTANCE: False
common_debian_pkgs:
- ntp
- ack-grep
......@@ -103,3 +109,4 @@ common_debian_variants:
common_redhat_variants:
- CentOS
- Red Hat Enterprise Linux
- Amazon
......@@ -60,6 +60,12 @@
owner=root group=root mode=644
notify: restart rsyslogd
- name: Remove the default rsyslog configuration
file:
path=/etc/rsyslog.d/50-default.conf
state=absent
notify: restart rsyslogd
# This is in common to keep all log rotation config
# in the same role
- name: Create hourly subdirectory in logrotate.d
......
*******************************************************************
* *
* _ __ __ *
* _ _| |\ \/ / *
* / -_) _` | > < *
* \___\__,_|/_/\_\ *
......
......@@ -32,7 +32,7 @@ $template tracking,"%syslogtag%%msg%\n"
$template DynaFile,"{{ COMMON_LOG_DIR }}/%syslogtag:R,ERE,1,BLANK:\[service_variant=([a-zA-Z_-]*)\].*--end%/edx.log"
local0.* -?DynaFile
local1.* {{ COMMON_LOG_DIR }}/tracking.log;tracking
local1.* {{ COMMON_LOG_DIR }}/tracking/tracking.log;tracking
#cron.* /var/log/cron.log
#daemon.* -/var/log/daemon.log
kern.* -/var/log/kern.log
......
{{ COMMON_LOG_DIR }}/tracking.log {
{{ COMMON_LOG_DIR }}/tracking/tracking.log {
compress
create
dateext
......
......@@ -50,6 +50,7 @@ repos_to_cmd["edx-ora"]="$edx_ansible_cmd ora.yml -e 'ora_version=$2'"
repos_to_cmd["configuration"]="$edx_ansible_cmd edx_ansible.yml -e 'configuration_version=$2'"
repos_to_cmd["read-only-certificate-code"]="$edx_ansible_cmd certs.yml -e 'certs_version=$2'"
repos_to_cmd["edx-analytics-data-api"]="$edx_ansible_cmd analyticsapi.yml -e 'ANALYTICS_API_VERSION=$2'"
repos_to_cmd["edx-ora2"]="$edx_ansible_cmd ora2.yml -e 'ora2_version=$2'"
if [[ -z $1 || -z $2 ]]; then
......
......@@ -29,12 +29,14 @@
# Do A Checkout
- name: checkout edx-platform repo into {{edxapp_code_dir}}
git: >
dest={{edxapp_code_dir}} repo={{edx_platform_repo}} version={{edx_platform_version}}
dest={{edxapp_code_dir}}
repo={{edx_platform_repo}}
version={{edx_platform_version}}
accept_hostkey=yes
register: chkout
sudo_user: "{{ edxapp_user }}"
environment:
GIT_SSH: "{{ edxapp_git_ssh }}"
register: edxapp_platform_checkout
notify:
- "restart edxapp"
- "restart edxapp_workers"
......@@ -48,12 +50,15 @@
- name: checkout theme
git: >
dest={{ edxapp_app_dir }}/themes/{{edxapp_theme_name}} repo={{edxapp_theme_source_repo}} version={{edxapp_theme_version}}
dest={{ edxapp_app_dir }}/themes/{{edxapp_theme_name}}
repo={{edxapp_theme_source_repo}}
version={{edxapp_theme_version}}
accept_hostkey=yes
when: edxapp_theme_name != ''
sudo_user: "{{ edxapp_user }}"
environment:
GIT_SSH: "{{ edxapp_git_ssh }}"
register: edxapp_theme_checkout
notify:
- "restart edxapp"
- "restart edxapp_workers"
......@@ -106,16 +111,26 @@
- "restart edxapp_workers"
# Set the npm registry
# This needs to be done as root since npm is weird about
# chown - https://github.com/npm/npm/issues/3565
- name: Set the npm registry
shell:
npm config set registry '{{ COMMON_NPM_MIRROR_URL }}'
creates="{{ edxapp_app_dir }}/.npmrc"
sudo_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
# Set the npm registry permissions
- name: Set the npm registry permissions
file:
path="{{ edxapp_app_dir }}/.npmrc"
owner=edxapp group=edxapp
notify:
- "restart edxapp"
- "restart edxapp_workers"
# Node play that need to be run after platform updates.
- name: Install edx-platform npm dependencies
shell: npm install chdir={{ edxapp_code_dir }}
......@@ -183,6 +198,25 @@
- "restart edxapp_workers"
when: not inst.stat.exists or new.stat.md5 != inst.stat.md5
# Install the python custom requirements into {{ edxapp_venv_dir }}
- stat: path="{{ custom_requirements_file }}"
register: custom_requirements
sudo_user: "{{ edxapp_user }}"
- name: install python custom-requirements
pip: >
requirements="{{ custom_requirements_file }}"
virtualenv="{{ edxapp_venv_dir }}"
state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
when: custom_requirements.stat.exists and new.stat.md5 != inst.stat.md5
# Install the final python modules into {{ edxapp_venv_dir }}
- name: install python post-post requirements
# Need to use shell rather than pip so that we can maintain the context of our current working directory; some
......@@ -248,33 +282,6 @@
- "restart edxapp"
- "restart edxapp_workers"
# The next few tasks install xml courses.
# Install the xml courses from an s3 bucket
- name: get s3 one time url
s3: >
bucket="{{ EDXAPP_XML_S3_BUCKET }}"
object="{{ EDXAPP_XML_S3_KEY }}"
mode="geturl"
expiration=300
when: not EDXAPP_XML_FROM_GIT and EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
register: s3_one_time_url
- name: download from one time url
get_url: url="{{ s3_one_time_url.url }}" dest="/tmp/{{ EDXAPP_XML_S3_KEY|basename }}"
when: not EDXAPP_XML_FROM_GIT and EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
register: download_xml_s3
- name: unzip the data to the data dir
shell: >
tar xzf /tmp/{{ EDXAPP_XML_S3_KEY|basename }}
chdir="{{ edxapp_data_dir }}"
when: download_xml_s3.changed
- include: xml.yml
tags: deploy
when: EDXAPP_XML_FROM_GIT
# The next few tasks set up the python code sandbox
# need to disable this profile, otherwise the pip inside the sandbox venv has no permissions
......@@ -355,6 +362,44 @@
- "restart edxapp"
- "restart edxapp_workers"
# The next few tasks install xml courses.
# Install the xml courses from an s3 bucket
- name: get s3 one time url
s3: >
bucket="{{ EDXAPP_XML_S3_BUCKET }}"
object="{{ EDXAPP_XML_S3_KEY }}"
mode="geturl"
expiration=30
when: not EDXAPP_XML_FROM_GIT and EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
register: s3_one_time_url
- name: download from one time url
get_url:
url="{{ s3_one_time_url.url }}"
dest="{{ edxapp_data_dir }}/{{ EDXAPP_XML_S3_KEY|basename }}"
mode=0600
when: not EDXAPP_XML_FROM_GIT and EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
register: download_xml_s3
- name: unzip the data to the data dir
shell: >
tar xzf {{ edxapp_data_dir }}/{{ EDXAPP_XML_S3_KEY|basename }}
chdir="{{ edxapp_data_dir }}"
when: download_xml_s3.changed
# This currently has to be done because
# the course CoffeeScript is compiled on the fly
# by the application after startup.
# See VPC-117 and VPC-122
- name: make the course data web user writable
file:
path="{{ edxapp_course_data_dir }}"
state=directory
recurse=yes
owner="{{ common_web_user }}"
group="{{ edxapp_user }}"
# creates the supervisor jobs for the
# service variants configured, runs
# gather_assets and db migrations
......@@ -363,6 +408,10 @@
- service_variant_config
- deploy
- include: xml.yml
tags: deploy
when: EDXAPP_XML_FROM_GIT
# call supervisorctl update. this reloads
# the supervisorctl config and restarts
# the services if any of the configurations
......@@ -417,4 +466,7 @@
file: path={{ edxapp_git_identity }} state=absent
when: EDXAPP_USE_GIT_IDENTITY
- include: tag_ec2.yml tags=deploy
when: COMMON_TAG_EC2_INSTANCE
- set_fact: edxapp_installed=true
......@@ -27,6 +27,19 @@
- "{{ edxapp_staticfile_dir }}"
- "{{ edxapp_course_static_dir }}"
# This is a symlink that has to exist because
# we currently can't override the DATA_DIR var
# in edx-platform. TODO: This can be removed once
# VPC-122 is closed
- name: make the course data dir
file:
src="{{ edxapp_course_data_dir }}"
dest="{{ edxapp_legacy_course_data_dir }}"
state=link
owner="{{ edxapp_user }}"
group="{{ common_web_group }}"
- name: create edxapp log dir
file: >
path="{{ edxapp_log_dir }}" state=directory
......@@ -47,8 +60,12 @@
- "{{ edxapp_course_data_dir }}"
- "{{ edxapp_upload_dir }}"
# adding chris-lea nodejs repo
- name: add ppas for current versions of nodejs
apt_repository: repo="{{ edxapp_chrislea_ppa }}"
- name: install system packages on which LMS and CMS rely
apt: pkg={{','.join(edxapp_debian_pkgs)}} state=present
apt: pkg={{','.join(edxapp_debian_pkgs)}} state=present update_cache=yes
notify:
- "restart edxapp"
- "restart edxapp_workers"
......
......@@ -79,12 +79,13 @@
- name: syncdb and migrate
shell: >
chdir={{ edxapp_code_dir }}
{{ edxapp_venv_bin}}/python manage.py lms syncdb --migrate --noinput --settings=aws_migrate
when: fake_migrations is not defined and migrate_db is defined and migrate_db|lower == "yes" and COMMON_MYSQL_MIGRATE_PASS
{{ edxapp_venv_bin}}/python manage.py {{ item }} syncdb --migrate --noinput --settings=aws_migrate
when: fake_migrations is not defined and migrate_db is defined and migrate_db|lower == "yes" and COMMON_MYSQL_MIGRATE_PASS and item != "lms-preview"
environment:
DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}"
DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
sudo_user: "{{ edxapp_user }}"
with_items: service_variants_enabled
notify:
- "restart edxapp"
- "restart edxapp_workers"
......
---
- name: get instance information
action: ec2_facts
- name: tag instance with edx_platform version
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:edx_platform" : "{{ edx_platform_repo }} {{ edxapp_platform_checkout.after|truncate(7,True,'') }}"
when: edxapp_platform_checkout.after is defined
- name: tag instance with edxapp theme version
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:edxapp_theme" : "{{ edxapp_theme_source_repo }} {{ edxapp_theme_checkout.after|truncate(7,True,'') }}"
when: edxapp_theme_checkout.after is defined
......@@ -20,9 +20,11 @@
shell: >
executable=/bin/bash
if [[ -d {{ edxapp_course_data_dir }}/{{ item.repo_name }}/static ]]; then
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }}/static {{ edxapp_course_static_dir }}/{{ item.repo_name }}
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }}/static {{ edxapp_course_static_dir }}/{{ item.repo_name }}
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }}/static {{ edxapp_course_static_dir }}/{{ item.course }}
else
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }} {{ edxapp_course_static_dir }}/{{ item.repo_name }}
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }} {{ edxapp_course_static_dir }}/{{ item.repo_name }}
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }} {{ edxapp_course_static_dir }}/{{ item.course }}
fi
with_items: EDXAPP_XML_COURSES
when: item.disposition == "on disk" or item.disposition == "no static import"
......@@ -56,13 +58,8 @@
with_items: EDXAPP_XML_COURSES
when: item.disposition == "import"
- name: delete .git repos
file: path="{{ edxapp_course_data_dir }}/{{ item.repo_name }}/.git" state=absent
with_items: EDXAPP_XML_COURSES
when: item.disposition == "on disk" or item.disposition == "no static import"
- name: create an archive of course data and course static dirs
shell: tar czf /tmp/static_course_content.tar.gz -C {{ edxapp_data_dir }} {{ edxapp_course_data_dir|basename }} {{ edxapp_course_static_dir|basename }}
shell: tar czf /tmp/static_course_content.tar.gz -C {{ edxapp_data_dir }} --exclude ".git" {{ edxapp_course_data_dir|basename }} {{ edxapp_course_static_dir|basename }}
- name: upload archive to s3
s3: >
......
{% do cms_auth_config.update(EDXAPP_AUTH_EXTRA) %}
{% for key, value in cms_auth_config.iteritems() %}
{% if value == 'None' %}
{% do cms_auth_config.update({key: None }) %}
{% endif %}
{% endfor %}
{{ cms_auth_config | to_nice_json }}
[program:cms]
{% if COMMON_ENABLE_NEWRELIC %}
{% set executable = edxapp_venv_dir + '/bin/newrelic-admin run-program ' + edxapp_venv_dir + '/bin/gunicorn' %}
{% else %}
{% set executable = edxapp_venv_dir + '/bin/gunicorn' %}
{% endif %}
{% if COMMON_ENABLE_NEWRELIC_APP -%}
{% set executable = edxapp_venv_dir + '/bin/newrelic-admin run-program ' + edxapp_venv_dir + '/bin/gunicorn' -%}
{% else -%}
{% set executable = edxapp_venv_dir + '/bin/gunicorn' -%}
{% endif -%}
{% if EDXAPP_CMS_MAX_REQ -%}
{% set max_req = '--max-requests ' + EDXAPP_CMS_MAX_REQ|string -%}
{% else -%}
{% set max_req = '' -%}
{% endif -%}
{% if ansible_processor|length > 0 %}
command={{ executable }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
{% else %}
command={{ executable }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
{% endif %}
{% if EDXAPP_WORKERS -%}
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ EDXAPP_WORKERS.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
{% else -%}
{# This is for backwards compatibility, set workers explicitly using EDXAPP_WORKERS #}
{% if ansible_processor|length > 0 -%}
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
{% else -%}
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
{% endif -%}
{% endif -%}
user={{ common_web_user }}
directory={{ edxapp_code_dir }}
environment={% if COMMON_ENABLE_NEWRELIC %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_CMS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}PORT={{edxapp_cms_gunicorn_port}},ADDRESS={{edxapp_cms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_cms_env }},SERVICE_VARIANT="cms"
environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_CMS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}PORT={{edxapp_cms_gunicorn_port}},ADDRESS={{edxapp_cms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_cms_env }},SERVICE_VARIANT="cms"
stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log
killasgroup=true
......
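For example (illustrative values): with EDXAPP_WORKERS undefined, a 4-processor host and a worker_core_mult.cms of 2 falls back to -w 8, while defining the dict pins the counts regardless of core count:

EDXAPP_WORKERS:
  cms: 4
  lms: 8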
{% do cms_env_config.update(EDXAPP_ENV_EXTRA) %}
{% if EDXAPP_UPDATE_STATIC_FILES_KEY %}
{%- do cms_env_config['CACHES']['staticfiles'].update({'KEY_PREFIX': edxapp_dynamic_cache_key}) %}
{% endif %}
{% for key, value in cms_env_config.iteritems() %}
{% if value == 'None' %}
{% do cms_env_config.update({key: None }) %}
{% endif %}
{% endfor %}
{{ cms_env_config | to_nice_json }}
{% do lms_auth_config.update(EDXAPP_AUTH_EXTRA) %}
{% for key, value in lms_auth_config.iteritems() %}
{% if value == 'None' %}
{% do lms_auth_config.update({key: None }) %}
{% endif %}
{% endfor %}
{{ lms_auth_config | to_nice_json }}
[program:lms]
{% if COMMON_ENABLE_NEWRELIC %}
{% set executable = edxapp_venv_dir + '/bin/newrelic-admin run-program ' + edxapp_venv_dir + '/bin/gunicorn' %}
{% else %}
{% set executable = edxapp_venv_dir + '/bin/gunicorn' %}
{% endif %}
{% if COMMON_ENABLE_NEWRELIC_APP -%}
{% set executable = edxapp_venv_dir + '/bin/newrelic-admin run-program ' + edxapp_venv_dir + '/bin/gunicorn' -%}
{% else -%}
{% set executable = edxapp_venv_dir + '/bin/gunicorn' -%}
{% endif -%}
{% if EDXAPP_LMS_MAX_REQ -%}
{% set max_req = '--max-requests ' + EDXAPP_LMS_MAX_REQ|string -%}
{% else -%}
{% set max_req = '' -%}
{% endif -%}
{% if ansible_processor|length > 0 %}
command={{ executable }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% else %}
command={{ executable }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% if EDXAPP_WORKERS -%}
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ EDXAPP_WORKERS.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% else -%}
{# This is for backwards compatibility; set workers explicitly using EDXAPP_WORKERS #}
{% if ansible_processor|length > 0 -%}
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% else -%}
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% endif %}
{% endif %}
user={{ common_web_user }}
directory={{ edxapp_code_dir }}
environment={% if COMMON_ENABLE_NEWRELIC %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_LMS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%} PORT={{edxapp_lms_gunicorn_port}},ADDRESS={{edxapp_lms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_lms_env }},SERVICE_VARIANT="lms"
environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_LMS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%} PORT={{edxapp_lms_gunicorn_port}},ADDRESS={{edxapp_lms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_lms_env }},SERVICE_VARIANT="lms",PATH="{{ edxapp_deploy_path }}"
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
killasgroup=true
......
{% do lms_env_config.update(EDXAPP_ENV_EXTRA) %}
{% if EDXAPP_UPDATE_STATIC_FILES_KEY %}
{%- do lms_env_config['CACHES']['staticfiles'].update({'KEY_PREFIX': edxapp_dynamic_cache_key}) %}
{% endif %}
{% for key, value in lms_env_config.iteritems() %}
{% if value == 'None' %}
{% do lms_env_config.update({key: None }) %}
{% endif %}
{% endfor %}
{{ lms_env_config | to_nice_json }}
......@@ -7,9 +7,10 @@ directory={{ edxapp_code_dir }}
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
command={{ edxapp_venv_bin}}/python {{ edxapp_code_dir }}/manage.py {{ w.service_variant }} --settings=aws celery worker --loglevel=info --queues=edx.{{ w.service_variant }}.core.{{ w.queue }} --hostname=edx.{{ w.service_variant }}.core.{{ w.queue }}.{{ ansible_hostname }} --concurrency={{ w.concurrency }}
command={{ edxapp_venv_bin}}/python {{ edxapp_code_dir }}/manage.py {{ w.service_variant }} --settings=aws celery worker --loglevel=info --queues=edx.{{ w.service_variant }}.core.{{ w.queue }} --hostname=edx.{{ w.service_variant }}.core.{{ w.queue }}.%%h --concurrency={{ w.concurrency }}
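{# %%h renders as a literal %h after supervisord's %-escaping; celery then
   expands %h to the worker's hostname at runtime, so the rendered config
   stays correct if the host is renamed after templating. #}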
killasgroup=true
stopasgroup=true
stopwaitsecs=432000
{% endfor %}
......
......@@ -16,6 +16,11 @@ path.logs: {{elasticsearch_log_dir}}
#
bootstrap.mlockall: true
# Disable dynamic scripting as it is insecure and we don't use it
# See: http://bouk.co/blog/elasticsearch-rce/
# CVE: CVE-2014-3120
script.disable_dynamic: true
# Unicast discovery allows to explicitly control which nodes will be used
# to discover the cluster. It can be used when multicast is not present,
# or to restrict the cluster communication-wise.
......@@ -37,4 +42,4 @@ bootstrap.mlockall: true
discovery.zen.ping.unicast.hosts: ['{{hosts|join("\',\'") }}']
{% endif -%}
\ No newline at end of file
{% endif -%}
......@@ -30,7 +30,7 @@ FORUM_ELASTICSEARCH_URL: "http://{{ FORUM_ELASTICSEARCH_HOST }}:{{ FORUM_ELASTIC
# This needs to be a string, set to 'false' to disable
FORUM_NEW_RELIC_ENABLE: 'true'
FORUM_NEW_RELIC_LICENSE_KEY: "new-relic-license-key"
FORUM_NEW_RELIC_APP_NAME: "forum-newrelic-app"
FORUM_NEW_RELIC_APP_NAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-forum"
FORUM_WORKER_PROCESSES: "4"
FORUM_LISTEN_HOST: "0.0.0.0"
......
......@@ -34,6 +34,7 @@
dest={{ forum_code_dir }} repo={{ forum_source_repo }} version={{ forum_version }}
accept_hostkey=yes
sudo_user: "{{ forum_user }}"
register: forum_checkout
notify: restart the forum service
# TODO: This is done as the common_web_user
......@@ -66,4 +67,7 @@
- include: test.yml tags=deploy
- include: tag_ec2.yml tags=deploy
when: COMMON_TAG_EC2_INSTANCE
- set_fact: forum_installed=true
---
- name: get instance information
action: ec2_facts
- name: tag instance
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:forum" : "{{ forum_source_repo }} {{ forum_checkout.after|truncate(7,True,'') }}"
when: forum_checkout.after is defined
......@@ -27,15 +27,19 @@ JENKINS_ADMIN_AWS_CREDENTIALS: !!null
jenkins_admin_role_name: jenkins_admin
# repo for nodejs
jenkins_chrislea_ppa: "ppa:chris-lea/node.js"
#
# OS packages
#
jenkins_admin_debian_repos:
- "deb http://cosmos.cites.illinois.edu/pub/ubuntu/ precise-backports main universe"
jenkins_admin_debian_pkgs:
# These are copied from the edxapp
# role so that we can create virtualenvs
# on the jenkins server for edxapp
- npm
# for compiling the virtualenv
# (only needed if wheel files aren't available)
- build-essential
......@@ -58,7 +62,7 @@ jenkins_admin_debian_pkgs:
# misc
- curl
- ipython
- npm
- nodejs
- ntp
# for shapely
- libgeos-dev
......@@ -73,6 +77,8 @@ jenkins_admin_debian_pkgs:
- ruby1.9.1
# for check-migrations
- mysql-client
# for aws cli scripting
- jq
jenkins_admin_gem_pkgs:
# for generating status.edx.org
......@@ -85,55 +91,69 @@ jenkins_admin_plugins:
- { name: "rebuild", version: "1.21" }
- { name: "build-user-vars-plugin", version: "1.1" }
- { name: "build-token-root", version: "1.1" }
- { name: "matrix-auth", version: "1.0.2" }
- { name: "mailer", version: "1.5" }
- { name: "external-monitor-job", version: "1.1" }
- { name: "ldap", version: "1.2" }
- { name: "pam-auth", version: "1.0" }
- { name: "matrix-auth", version: "1.2" }
- { name: "matrix-project", version: "1.3" }
- { name: "mailer", version: "1.9" }
- { name: "ldap", version: "1.10.2" }
- { name: "pam-auth", version: "1.1" }
- { name: "ant", version: "1.2" }
- { name: "build-user-vars-plugin", version: "1.1" }
- { name: "credentials", version: "1.8.3" }
- { name: "ssh-credentials", version: "1.5.1" }
- { name: "ssh-agent", version: "1.3" }
- { name: "token-macro", version: "1.8.1" }
- { name: "parameterized-trigger", version: "2.20" }
- { name: "build-user-vars-plugin", version: "1.3" }
- { name: "credentials", version: "1.15" }
- { name: "ssh-credentials", version: "1.7.1" }
- { name: "ssh-agent", version: "1.4.1" }
- { name: "token-macro", version: "1.10" }
- { name: "parameterized-trigger", version: "2.25" }
- { name: "multiple-scms", version: "0.3" }
- { name: "git", version: "1.5.0" }
- { name: "git", version: "2.2.2" }
- { name: "thinBackup", version: "1.7.4" }
- { name: "maven-plugin", version: "2.0" }
- { name: "build-token-root", version: "1.0" }
- { name: "maven-plugin", version: "2.5" }
- { name: "copy-project-link", version: "1.2" }
- { name: "scriptler", version: "2.6.1" }
- { name: "rebuild", version: "1.20" }
- { name: "ssh-slaves", version: "1.4" }
- { name: "translation", version: "1.10" }
- { name: "rebuild", version: "1.21" }
- { name: "ssh-slaves", version: "1.6" }
- { name: "translation", version: "1.11" }
- { name: "dynamicparameter", version: "0.2.0" }
- { name: "hipchat", version: "0.1.5" }
- { name: "throttle-concurrents", version: "1.8.2" }
- { name: "hipchat", version: "0.1.6" }
- { name: "throttle-concurrents", version: "1.8.3" }
- { name: "mask-passwords", version: "2.7.2" }
- { name: "jquery", version: "1.7.2-1" }
- { name: "dashboard-view", version: "2.9.1" }
- { name: "build-pipeline-plugin", version: "1.4" }
- { name: "dashboard-view", version: "2.9.4" }
- { name: "build-pipeline-plugin", version: "1.4.3" }
- { name: "s3", version: "0.5" }
- { name: "tmpcleaner", version: "1.1" }
- { name: "jobConfigHistory", version: "2.4" }
- { name: "build-timeout", version: "1.11" }
- { name: "next-build-number", version: "1.0" }
- { name: "nested-view", version: "1.10" }
- { name: "timestamper", version: "1.5.7" }
- { name: "github-api", version: "1.44" }
- { name: "jobConfigHistory", version: "2.8" }
- { name: "build-timeout", version: "1.14" }
- { name: "next-build-number", version: "1.1" }
- { name: "nested-view", version: "1.14" }
- { name: "timestamper", version: "1.5.14" }
- { name: "github-api", version: "1.55" }
- { name: "postbuild-task", version: "1.8" }
- { name: "cobertura", version: "1.9.2" }
- { name: "notification", version: "1.5" }
- { name: "violations", version: "0.7.11" }
- { name: "copy-to-slave", version: "1.4.3" }
- { name: "github", version: "1.8" }
- { name: "copyartifact", version: "1.28" }
- { name: "shiningpanda", version: "0.20" }
- { name: "htmlpublisher", version: "1.2" }
- { name: "github-oauth", version: "0.14" }
- { name: "github", version: "1.9.1" }
- { name: "copyartifact", version: "1.31" }
- { name: "shiningpanda", version: "0.21" }
- { name: "htmlpublisher", version: "1.3" }
- { name: "github-oauth", version: "0.19" }
- { name: "build-name-setter", version: "1.3" }
- { name: "ec2", version: "1.19" }
- { name: "ec2", version: "1.23" }
- { name: "jenkins-flowdock-plugin", version: "1.1.3" }
- { name: "simple-parameterized-builds-report", version: "1.3" }
jenkins_admin_jobs:
- 'backup-jenkins'
# Supervisor related settings
jenkins_supervisor_user: "{{ jenkins_user }}"
jenkins_supervisor_app_dir: "{{ jenkins_home }}/supervisor"
jenkins_supervisor_cfg_dir: "{{ jenkins_supervisor_app_dir }}/conf.d"
jenkins_supervisor_available_dir: "{{ jenkins_supervisor_app_dir }}/available.d"
jenkins_supervisor_data_dir: "{{ jenkins_home }}/supervisor/data"
jenkins_supervisor_cfg: "{{ jenkins_supervisor_app_dir }}/supervisord.conf"
jenkins_supervisor_log_dir: "{{ COMMON_LOG_DIR }}/supervisor/jenkins"
jenkins_supervisor_venv_dir: "{{ jenkins_home }}/venvs/supervisor"
jenkins_supervisor_venv_bin: "{{ jenkins_supervisor_venv_dir }}/bin"
jenkins_supervisor_ctl: "{{ jenkins_supervisor_venv_bin }}/supervisorctl"
jenkins_supervisor_service_user: "{{ jenkins_user }}"
jenkins_admin_scripts_dir: "{{ jenkins_home }}/scripts"
#!/bin/bash -x
# This script will monitor two NATs and route to a backup nat
# if the primary fails.
set -e
# Health Check variables
Num_Pings=3
Ping_Timeout=2
Wait_Between_Pings=2
Wait_for_Instance_Stop=60
Wait_for_Instance_Start=300
ID_UPDATE_INTERVAL=150
send_message() {
message_file=/var/tmp/message-$$.json
message_string=$1
if [ -z "$message_string" ]; then
message_string="Unknown error for $VPC_NAME NAT monitor"
fi
message_body=$2
cat << EOF > $message_file
{"Subject":{"Data":"$message_string"},"Body":{"Text":{"Data": "$message_body"}}}
EOF
echo `date` "-- $message_body"
BASE_PROFILE=$AWS_DEFAULT_PROFILE
export AWS_DEFAULT_PROFILE=$AWS_MAIL_PROFILE
aws ses send-email --from $NAT_MONITOR_FROM_EMAIL --to $NAT_MONITOR_TO_EMAIL --message file://$message_file
export AWS_DEFAULT_PROFILE=$BASE_PROFILE
}
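# Illustrative usage (both arguments are free-form strings; the instance id
# below is a placeholder):
#   send_message "Primary NAT failed ping" "NAT i-0123456 unreachable"
# Called with no arguments, as in the trap below, it substitutes a generic
# "Unknown error" subject.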
trap send_message ERR SIGHUP SIGINT SIGTERM
# Determine the NAT instance private IP so we can ping the other NAT instance, take over
# its route, and reboot it. Requires EC2 DescribeInstances, ReplaceRoute, and Start/RebootInstances
# permissions. The following example EC2 Roles policy will authorize these commands:
# {
# "Statement": [
# {
# "Action": [
# "ec2:DescribeInstances",
# "ec2:CreateRoute",
# "ec2:ReplaceRoute",
# "ec2:StartInstances",
# "ec2:StopInstances"
# ],
# "Effect": "Allow",
# "Resource": "*"
# }
# ]
# }
COUNTER=0
echo `date` "-- Running NAT monitor"
while [ . ]; do  # infinite loop: "." is a non-empty string, so the test is always true
# Re-check the IDs and IPs periodically
# This is useful in case the primary nat changes by some
# other means than this script.
if [ $COUNTER -eq 0 ]; then
# NAT instance variables
PRIMARY_NAT_ID=`aws ec2 describe-route-tables --filters Name=tag:aws:cloudformation:stack-name,Values=$VPC_NAME Name=tag:aws:cloudformation:logical-id,Values=PrivateRouteTable | jq '.RouteTables[].Routes[].InstanceId|strings' -r`
BACKUP_NAT_ID=`aws ec2 describe-instances --filters Name=tag:aws:cloudformation:stack-name,Values=$VPC_NAME Name=tag:aws:cloudformation:logical-id,Values=NATDevice,BackupNATDevice | jq '.Reservations[].Instances[].InstanceId' -r | grep -v $PRIMARY_NAT_ID`
NAT_RT_ID=`aws ec2 describe-route-tables --filters Name=tag:aws:cloudformation:stack-name,Values=$VPC_NAME Name=tag:aws:cloudformation:logical-id,Values=PrivateRouteTable | jq '.RouteTables[].RouteTableId' -r`
# Get the primary NAT instance's IP
PRIMARY_NAT_IP=`aws ec2 describe-instances --instance-ids $PRIMARY_NAT_ID | jq -r ".Reservations[].Instances[].PrivateIpAddress"`
BACKUP_NAT_IP=`aws ec2 describe-instances --instance-ids $BACKUP_NAT_ID | jq -r ".Reservations[].Instances[].PrivateIpAddress"`
let "COUNTER += 1"
let "COUNTER %= $ID_UPDATE_INTERVAL"
fi
# Check the health of both instances.
primary_pingresult=`ping -c $Num_Pings -W $Ping_Timeout $PRIMARY_NAT_IP | grep time= | wc -l`
if [ "$primary_pingresult" == "0" ]; then
backup_pingresult=`ping -c $Num_Pings -W $Ping_Timeout $BACKUP_NAT_IP | grep time= | wc -l`
if [ "$backup_pingresult" == "0" ]; then
send_message "Error monitoring NATs for $VPC_NAME." "ERROR -- Both NATs($PRIMARY_NAT_ID and $BACKUP_NAT_ID) were unreachable."
else #Backup nat is healthy.
send_message "Primary $VPC_NAME NAT failed ping" "-- NAT($PRIMARY_NAT_ID) heartbeat failed, consider using $BACKUP_NAT_ID for $NAT_RT_ID default route
Command for re-routing:
aws ec2 replace-route --route-table-id $NAT_RT_ID --destination-cidr-block 0.0.0.0/0 --instance-id $BACKUP_NAT_ID"
fi
else
echo `date` "-- PRIMARY NAT ($PRIMARY_NAT_ID $PRIMARY_NAT_IP) reports healthy to pings"
sleep $Wait_Between_Pings
fi
done
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role jenkins_admin
#
# Overview:
#
# Have to use shell here because supervisorctl doesn't support
# process groups.
- name: restart nat monitor
shell: "{{ jenkins_supervisor_ctl }} -c {{ jenkins_supervisor_cfg }} restart nat_monitor:*"
when: not disable_edx_services
......@@ -20,4 +20,16 @@
# }
dependencies:
- common
- jenkins_master
- aws
- role: jenkins_master
jenkins_plugins: $jenkins_admin_plugins
- role: supervisor
supervisor_app_dir: "{{ jenkins_supervisor_app_dir }}"
supervisor_data_dir: "{{ jenkins_supervisor_data_dir }}"
supervisor_log_dir: "{{ jenkins_supervisor_log_dir }}"
supervisor_venv_dir: "{{ jenkins_supervisor_venv_dir }}"
supervisor_service_user: "{{ jenkins_supervisor_user }}"
supervisor_available_dir: "{{ jenkins_supervisor_available_dir }}"
supervisor_cfg_dir: "{{ jenkins_supervisor_cfg_dir }}"
supervisor_service: "supervisor.jenkins"
supervisor_http_bind_port: '9003'
......@@ -33,24 +33,13 @@
- fail: msg="JENKINS_ADMIN_S3_PROFILE.secret_key is not defined."
when: JENKINS_ADMIN_S3_PROFILE.secret_key is not defined
# We first download the plugins to a temp directory and include
# the version in the file name. That way, if we increment
# the version, the plugin will be updated in Jenkins
- name: download Jenkins plugins
get_url: url=http://updates.jenkins-ci.org/download/plugins/{{ item.name }}/{{ item.version }}/{{ item.name }}.hpi
dest=/tmp/{{ item.name }}_{{ item.version }}
with_items: jenkins_admin_plugins
- name: install Jenkins plugins
command: cp /tmp/{{ item.name }}_{{ item.version }} {{ jenkins_home }}/plugins/{{ item.name }}.hpi
with_items: jenkins_admin_plugins
- name: set Jenkins plugin permissions
file: path={{ jenkins_home }}/plugins/{{ item.name }}.hpi
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
with_items: jenkins_admin_plugins
notify:
- restart Jenkins
- name: add admin specific apt repositories
apt_repository: repo="{{ item }}" state=present update_cache=yes
with_items: jenkins_admin_debian_repos
- name: create the scripts directory
file: path={{ jenkins_admin_scripts_dir }} state=directory
owner={{ jenkins_user }} group={{ jenkins_group }} mode=755
- name: configure s3 plugin
template: >
......@@ -67,6 +56,24 @@
owner="{{ jenkins_user }}"
group="{{ jenkins_group }}"
mode="0600"
tags:
- aws-config
- name: create the .aws directory
file: path={{ jenkins_home }}/.aws state=directory
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
tags:
- aws-config
- name: configure the awscli profiles for jenkins
template: >
src="./{{ jenkins_home }}/aws_config.j2"
dest="{{ jenkins_home }}/.aws/config"
owner="{{ jenkins_user }}"
group="{{ jenkins_group }}"
mode="0600"
tags:
- aws-config
- name: create the ssh directory
file: >
......@@ -108,9 +115,12 @@
mode=0644
with_items: jenkins_admin_jobs
# adding chris-lea nodejs repo
- name: add ppas for current versions of nodejs
apt_repository: repo="{{ jenkins_chrislea_ppa }}"
- name: install system packages for edxapp virtualenvs
apt: pkg={{ item }} state=present
with_items: jenkins_admin_debian_pkgs
apt: pkg={{','.join(jenkins_admin_debian_pkgs)}} state=present update_cache=yes
# This is necessary so that ansible can run with
# sudo set to True (as the jenkins user) on jenkins
......@@ -127,3 +137,7 @@
version={{ item.version }}
user_install=no
with_items: jenkins_admin_gem_pkgs
- include: nat_monitor.yml
tags:
- nat-monitor
---
# Nat monitors should be defined as a list of dictionaries
# e.g.
# NAT_MONITORS:
# - vpc_name: 'loadtest-edx'
# region: 'us-east-1'
# deployment: 'edx'
#
# To receive e-mails, SES should be set up with the
# aws account that is defined by the JENKINS_ADMIN_MAIL_PROFILE
# and the from address must be verified
# JENKINS_ADMIN_MAIL_PROFILE: 'aws_account_name'
# JENKINS_ADMIN_FROM_EMAIL: 'admin@example.com'
# JENKINS_ADMIN_TO_EMAIL: 'alert@example.com'
- fail: msg="NAT_MONITORS is not defined."
when: NAT_MONITORS is not defined
- name: upload the monitor script
copy:
dest="{{ jenkins_admin_scripts_dir }}/nat-monitor.sh"
src="nat-monitor.sh"
owner="{{ jenkins_user }}"
group="{{ jenkins_group }}"
mode="755"
sudo_user: "{{ jenkins_user }}"
- name: create a supervisor config
template:
src="nat-monitor.conf.j2" dest="{{ jenkins_supervisor_available_dir }}/nat-monitor.conf"
owner="{{ jenkins_user }}"
group="{{ jenkins_group }}"
sudo_user: "{{ jenkins_user }}"
notify: restart nat monitor
- name: enable the supervisor config
file:
src="{{ jenkins_supervisor_available_dir }}/nat-monitor.conf"
dest="{{ jenkins_supervisor_cfg_dir }}/nat-monitor.conf"
state=link
force=yes
mode=0644
sudo_user: "{{ jenkins_user }}"
when: not disable_edx_services
notify: restart nat monitor
- name: update supervisor configuration
shell: "{{ jenkins_supervisor_ctl }} -c {{ jenkins_supervisor_cfg }} update"
register: supervisor_update
changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != ""
when: not disable_edx_services
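# supervisorctl update prints a line per program group it adds, removes, or
# restarts and prints nothing when the config is unchanged, hence the
# non-empty-stdout test in changed_when above.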
# Have to use shell here because supervisorctl doesn't support
# process groups.
- name: ensure nat monitor is started
shell: "{{ jenkins_supervisor_ctl }} -c {{ jenkins_supervisor_cfg }} start nat_monitor:*"
when: not disable_edx_services
{% for deployment, creds in JENKINS_ADMIN_AWS_CREDENTIALS.iteritems() %}
[profile {{deployment}}]
aws_access_key_id = {{ creds.access_id }}
aws_secret_access_key = {{ creds.secret_key }}
{% endfor %}
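{# Illustrative shape of the variable consumed above (deployment names key
   the awscli profiles; the values shown are placeholders):
   JENKINS_ADMIN_AWS_CREDENTIALS:
     edx:
       access_id: AKIAXXXXXXXXXXXXXXXX
       secret_key: xxxxxxxxxxxxxxxxxxxx
#}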
......@@ -27,7 +27,6 @@ mkdir -p $BUILD_ID/jobs
# Copy global configuration files into the workspace
cp $JENKINS_HOME/*.xml $BUILD_ID/
# Copy keys and secrets into the workspace
cp $JENKINS_HOME/identity.key $BUILD_ID/
cp $JENKINS_HOME/secret.key $BUILD_ID/
cp $JENKINS_HOME/secret.key.not-so-secret $BUILD_ID/
cp -r $JENKINS_HOME/secrets $BUILD_ID/
......
{% for m in NAT_MONITORS %}
[program:nat_monitor_{{ m.vpc_name|replace('-','_') }}]
environment=VPC_NAME="{{ m.vpc_name }}",AWS_DEFAULT_REGION="{{ m.region }}",AWS_DEFAULT_PROFILE="{{ m.deployment }}",AWS_MAIL_PROFILE="{{ JENKINS_ADMIN_MAIL_PROFILE }}",NAT_MONITOR_FROM_EMAIL="{{ JENKINS_ADMIN_FROM_EMAIL }}",NAT_MONITOR_TO_EMAIL="{{ JENKINS_ADMIN_TO_EMAIL }}"
user={{ jenkins_supervisor_service_user }}
directory={{ jenkins_admin_scripts_dir }}
stdout_logfile={{ jenkins_supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ jenkins_supervisor_log_dir }}/%(program_name)s-stderr.log
command={{ jenkins_admin_scripts_dir }}/nat-monitor.sh
killasgroup=true
stopasgroup=true
{% endfor %}
[group:nat_monitor]
programs={%- for m in NAT_MONITORS %}nat_monitor_{{ m.vpc_name|replace('-','_') }}{%- if not loop.last %},{%- endif %}{%- endfor %}
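{# Example rendering, assuming NAT_MONITORS entries with vpc_name
   'loadtest-edx' and 'stage-edx':
   programs=nat_monitor_loadtest_edx,nat_monitor_stage_edx #}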
......@@ -4,7 +4,7 @@ jenkins_group: "edx"
jenkins_server_name: "jenkins.testeng.edx.org"
jenkins_port: 8080
jenkins_version: 1.538
jenkins_version: 1.574
jenkins_deb_url: "http://pkg.jenkins-ci.org/debian/binary/jenkins_{{ jenkins_version }}_all.deb"
jenkins_deb: "jenkins_{{ jenkins_version }}_all.deb"
......@@ -17,7 +17,7 @@ jenkins_plugins:
- { name: "copy-to-slave", version: "1.4.3" }
- { name: "credentials", version: "1.8.3" }
- { name: "dashboard-view", version: "2.9.1" }
- { name: "ec2", version: "1.19" }
- { name: "ec2", version: "1.23" }
- { name: "github", version: "1.8" }
- { name: "github-api", version: "1.44" }
- { name: "github-oauth", version: "0.14" }
......@@ -28,10 +28,12 @@ jenkins_plugins:
- { name: "mailer", version: "1.5" }
- { name: "nested-view", version: "1.10" }
- { name: "next-build-number", version: "1.0" }
- { name: "node-iterator-api", version: "1.5" }
- { name: "notification", version: "1.5" }
- { name: "pam-auth", version: "1.0" }
- { name: "parameterized-trigger", version: "2.20" }
- { name: "postbuild-task", version: "1.8" }
- { name: "PrioritySorter", version: "2.8" }
- { name: "sauce-ondemand", version: "1.61" }
- { name: "s3", version: "0.5" }
- { name: "ssh-agent", version: "1.3" }
......@@ -45,6 +47,7 @@ jenkins_plugins:
- { name: "multiple-scms", version: "0.2" }
- { name: "timestamper", version: "1.5.7" }
- { name: "thinBackup", version: "1.7.4"}
- { name: "xunit", version: "1.89"}
jenkins_bundled_plugins:
- "credentials"
......
......@@ -23,13 +23,17 @@
# Should be resolved in the next release, but until then we need to do this
# https://issues.jenkins-ci.org/browse/JENKINS-20407
- name: workaround for JENKINS-20407
command: "mkdir -p /var/run/jenkins"
file:
path="/var/run/jenkins"
state=directory
owner="{{ jenkins_user }}"
group="{{ jenkins_group }}"
- name: download Jenkins package
get_url: url="{{ jenkins_deb_url }}" dest="/tmp/{{ jenkins_deb }}"
- name: install Jenkins package
command: dpkg -i --force-depends "/tmp/{{ jenkins_deb }}"
shell: dpkg -i --force-depends "/tmp/{{ jenkins_deb }}"
- name: stop Jenkins
service: name=jenkins state=stopped
......@@ -57,8 +61,11 @@
shell: usermod -d {{jenkins_home}} {{jenkins_user}}
- name: make plugins directory
sudo_user: jenkins
shell: mkdir -p {{ jenkins_home }}/plugins
file:
path="{{ jenkins_home }}/plugins"
state=directory
owner="{{ jenkins_user }}"
group="{{ jenkins_group }}"
# We first download the plugins to a temp directory and include
# the version in the file name. That way, if we increment
......@@ -67,15 +74,18 @@
get_url: url=http://updates.jenkins-ci.org/download/plugins/{{ item.name }}/{{ item.version }}/{{ item.name }}.hpi
dest=/tmp/{{ item.name }}_{{ item.version }}
with_items: jenkins_plugins
register: jenkins_plugin_downloads
- name: install Jenkins plugins
command: cp /tmp/{{ item.name }}_{{ item.version }} {{ jenkins_home }}/plugins/{{ item.name }}.hpi
with_items: jenkins_plugins
command: cp {{ item.dest }} {{ jenkins_home }}/plugins/{{ item.item.name }}.hpi
with_items: jenkins_plugin_downloads.results
when: item.changed
- name: set Jenkins plugin permissions
file: path={{ jenkins_home }}/plugins/{{ item.name }}.hpi
file: path={{ jenkins_home }}/plugins/{{ item.item.name }}.hpi
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
with_items: jenkins_plugins
with_items: jenkins_plugin_downloads.results
when: item.changed
notify:
- restart Jenkins
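# Looping over the registered download results rather than the raw plugin
# list gives each follow-on task the original spec as item.item and the
# fetch status as item.changed, so plugins are copied and re-permissioned
# only when a new file was actually downloaded.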
......@@ -88,23 +98,26 @@
repo={{ item.repo_url }} dest=/tmp/{{ item.repo_name }} version={{ item.version }}
accept_hostkey=yes
with_items: jenkins_custom_plugins
register: jenkins_custom_plugins_checkout
- name: compile custom plugins
command: mvn -Dmaven.test.skip=true install chdir=/tmp/{{ item.repo_name }}
with_items: jenkins_custom_plugins
command: mvn -Dmaven.test.skip=true install chdir=/tmp/{{ item.item.repo_name }}
with_items: jenkins_custom_plugins_checkout.results
when: item.changed
- name: install custom plugins
command: mv /tmp/{{ item.repo_name }}/target/{{ item.package }}
{{ jenkins_home }}/plugins/{{ item.package }}
with_items: jenkins_custom_plugins
command: mv /tmp/{{ item.item.repo_name }}/target/{{ item.item.package }}
{{ jenkins_home }}/plugins/{{ item.item.package }}
with_items: jenkins_custom_plugins_checkout.results
when: item.changed
notify:
- restart Jenkins
- name: set custom plugin permissions
file: path={{ jenkins_home }}/plugins/{{ item.package }}
file: path={{ jenkins_home }}/plugins/{{ item.item.package }}
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
with_items: jenkins_custom_plugins
with_items: jenkins_custom_plugins_checkout.results
when: item.changed
# Plugins that are bundled with Jenkins are "pinned".
# Jenkins will overwrite updated plugins with its built-in version
......
......@@ -3,6 +3,9 @@ jenkins_user: "jenkins"
jenkins_group: "jenkins"
jenkins_home: /home/jenkins
# repo for nodejs
jenkins_chrislea_ppa: "ppa:chris-lea/node.js"
# System packages
jenkins_debian_pkgs:
- build-essential
......@@ -15,7 +18,7 @@ jenkins_debian_pkgs:
- libxml2-dev
- libgeos-dev
- libxslt1-dev
- npm
- nodejs
- pkg-config
- gettext
......@@ -27,103 +30,5 @@ jenkins_ruby_version: "1.9.3-p374"
jscover_url: "http://files.edx.org/testeng/JSCover-1.0.2.zip"
jscover_version: "1.0.2"
# Python
jenkins_venv: "{{ jenkins_home }}/wheel_venv"
jenkins_pip: "{{ jenkins_venv }}/bin/pip"
jenkins_wheel_dir: "{{ jenkins_home }}/wheelhouse"
jenkins_wheels:
- { pkg: "numpy==1.6.2", wheel: "numpy-1.6.2-cp27-none-linux_x86_64.whl" }
- { pkg: "django-celery==3.0.17", wheel: "django_celery-3.0.17-py27-none-any.whl" }
- { pkg: "beautifulsoup4==4.1.3", wheel: "beautifulsoup4-4.1.3-py27-none-any.whl"}
- { pkg: "beautifulsoup==3.2.1", wheel: "BeautifulSoup-3.2.1-py27-none-any.whl" }
- { pkg: "bleach==1.4", wheel: "bleach-1.4-py27-none-any.whl" }
- { pkg: "html5lib==0.999", wheel: "html5lib-0.999-py27-none-any.whl" }
- { pkg: "boto==2.13.3", wheel: "boto-2.13.3-py27-none-any.whl" }
- { pkg: "celery==3.0.19", wheel: "celery-3.0.19-py27-none-any.whl" }
- { pkg: "dealer==0.2.3", wheel: "dealer-0.2.3-py27-none-any.whl" }
- { pkg: "django-countries==1.5", wheel: "django_countries-1.5-py27-none-any.whl" }
- { pkg: "django-filter==0.6.0", wheel: "django_filter-0.6-py27-none-any.whl" }
- { pkg: "django-followit==0.0.3", wheel: "django_followit-0.0.3-py27-none-any.whl" }
- { pkg: "django-kombu==0.9.4", wheel: "kombu-2.5.16-py27-none-any.whl" }
- { pkg: "django-mako==0.1.5pre", wheel: "django_mako-0.1.5pre-py27-none-any.whl" }
- { pkg: "django-model-utils==1.4.0", wheel: "django_model_utils-1.4.0-py27-none-any.whl" }
- { pkg: "django-masquerade==0.1.6", wheel: "django_masquerade-0.1.6-py27-none-any.whl" }
- { pkg: "django-mptt==0.5.5", wheel: "django_mptt-0.5.5-py27-none-any.whl" }
- { pkg: "django-openid-auth==0.4", wheel: "python_openid-2.2.5-py27-none-any.whl" }
- { pkg: "django-robots==0.9.1", wheel: "django_robots-0.9.1-py27-none-any.whl" }
- { pkg: "django-sekizai==0.6.1", wheel: "django_sekizai-0.6.1-py27-none-any.whl" }
- { pkg: "django-ses==0.4.1", wheel: "django_ses-0.4.1-py27-none-any.whl" }
- { pkg: "django-storages==1.1.5", wheel: "django_storages-1.1.5-py27-none-any.whl" }
- { pkg: "django-method-override==0.1.0", wheel: "django_method_override-0.1.0-py27-none-any.whl" }
- { pkg: "djangorestframework==2.3.5", wheel: "djangorestframework-2.3.5-py27-none-any.whl" }
- { pkg: "django==1.4.8", wheel: "Django-1.4.8-py27-none-any.whl" }
- { pkg: "feedparser==5.1.3", wheel: "feedparser-5.1.3-py27-none-any.whl" }
- { pkg: "fs==0.4.0", wheel: "fs-0.4.0-py27-none-any.whl" }
- { pkg: "GitPython==0.3.2.RC1", wheel: "GitPython-0.3.2.RC1-py27-none-any.whl" }
- { pkg: "glob2==0.3", wheel: "glob2-0.3-py27-none-any.whl" }
- { pkg: "gunicorn==0.17.4", wheel: "gunicorn-0.17.4-py27-none-any.whl" }
- { pkg: "lazy==1.1", wheel: "lazy-1.1-py27-none-any.whl" }
- { pkg: "lxml==3.0.1", wheel: "lxml-3.0.1-cp27-none-linux_x86_64.whl" }
- { pkg: "mako==0.9.1", wheel: "Mako-0.9.1-py2.py3-none-any.whl" }
- { pkg: "Markdown==2.2.1", wheel: "Markdown-2.2.1-py27-none-any.whl" }
- { pkg: "mongoengine==0.7.10", wheel: "mongoengine-0.7.10-py27-none-any.whl" }
- { pkg: "networkx==1.7", wheel: "networkx-1.7-py27-none-any.whl" }
- { pkg: "nltk==2.0.4", wheel: "nltk-2.0.4-py27-none-any.whl" }
- { pkg: "oauthlib==0.5.1", wheel: "oauthlib-0.5.1-py27-none-any.whl" }
- { pkg: "paramiko==1.9.0", wheel: "paramiko-1.9.0-py27-none-any.whl" }
- { pkg: "path.py==3.0.1", wheel: "path.py-3.0.1-py27-none-any.whl" }
- { pkg: "Pillow==1.7.8", wheel: "Pillow-1.7.8-cp27-none-linux_x86_64.whl" }
- { pkg: "polib==1.0.3", wheel: "polib-1.0.3-py27-none-any.whl" }
- { pkg: "pycrypto>=2.6", wheel: "pycrypto-2.6.1-cp27-none-linux_x86_64.whl" }
- { pkg: "pygments==1.6", wheel: "Pygments-1.6-py27-none-any.whl" }
- { pkg: "pygraphviz==1.1", wheel: "pygraphviz-1.1-cp27-none-linux_x86_64.whl" }
- { pkg: "pymongo==2.4.1", wheel: "pymongo-2.4.1-cp27-none-linux_x86_64.whl" }
- { pkg: "pyparsing==1.5.6", wheel: "pyparsing-1.5.6-py27-none-any.whl" }
- { pkg: "python-memcached==1.48", wheel: "python_memcached-1.48-py27-none-any.whl" }
- { pkg: "python-openid==2.2.5", wheel: "python_openid-2.2.5-py27-none-any.whl" }
- { pkg: "python-dateutil==2.1", wheel: "python_dateutil-2.1-py27-none-any.whl" }
- { pkg: "python-social-auth==0.1.21", wheel: "python_social_auth-0.1.21-py27-none-any.whl" }
- { pkg: "pytz==2012h", wheel: "pytz-2012h-py27-none-any.whl" }
- { pkg: "pysrt==0.4.7", wheel: "pysrt-0.4.7-py27-none-any.whl" }
- { pkg: "PyYAML==3.10", wheel: "PyYAML-3.10-cp27-none-linux_x86_64.whl" }
- { pkg: "requests==1.2.3", wheel: "requests-1.2.3-py27-none-any.whl" }
- { pkg: "scipy==0.11.0", wheel: "scipy-0.11.0-cp27-none-linux_x86_64.whl" }
- { pkg: "Shapely==1.2.16", wheel: "Shapely-1.2.16-cp27-none-linux_x86_64.whl" }
- { pkg: "singledispatch==3.4.0.2", wheel: "singledispatch-3.4.0.2-py27-none-any.whl" }
- { pkg: "sorl-thumbnail==11.12", wheel: "sorl_thumbnail-11.12-py27-none-any.whl" }
- { pkg: "South==0.7.6", wheel: "South-0.7.6-py27-none-any.whl" }
- { pkg: "sympy==0.7.1", wheel: "sympy-0.7.1-py27-none-any.whl" }
- { pkg: "xmltodict==0.4.1", wheel: "xmltodict-0.4.1-py27-none-any.whl" }
- { pkg: "django-ratelimit-backend==0.6", wheel: "django_ratelimit_backend-0.6-py27-none-any.whl" }
- { pkg: "unicodecsv==0.9.4", wheel: "unicodecsv-0.9.4-py27-none-any.whl" }
- { pkg: "ipython==0.13.1", wheel: "ipython-0.13.1-py27-none-any.whl" }
- { pkg: "watchdog==0.6.0", wheel: "watchdog-0.6.0-py27-none-any.whl" }
- { pkg: "dogapi==1.2.1", wheel: "dogapi-1.2.1-py27-none-any.whl" }
- { pkg: "newrelic==2.4.0.4", wheel: "newrelic-2.4.0.4-cp27-none-linux_x86_64.whl" }
- { pkg: "sphinx==1.1.3", wheel: "Sphinx-1.1.3-py27-none-any.whl" }
- { pkg: "sphinx_rtd_theme==0.1.5", wheel: "sphinx_rtd_theme-0.1.5-py27-none-any.whl" }
- { pkg: "Babel==1.3", wheel: "Babel-1.3-py27-none-any.whl" }
- { pkg: "transifex-client==0.10", wheel: "transifex_client-0.10-py27-none-any.whl" }
- { pkg: "django_debug_toolbar", wheel: "django_debug_toolbar-0.11.0-py2.py3-none-any.whl" }
- { pkg: "django-debug-toolbar-mongo", wheel: "django_debug_toolbar_mongo-0.1.10-py27-none-any.whl" }
- { pkg: "chrono==1.0.2", wheel: "chrono-1.0.2-py2.py3-none-any.whl" }
- { pkg: "coverage==3.6", wheel: "coverage-3.6-cp27-none-linux_x86_64.whl" }
- { pkg: "ddt==0.7.1", wheel: "ddt-0.7.1-py27-none-any.whl" }
- { pkg: "django-crum==0.5", wheel: "django_crum-0.5-py27-none-any.whl" }
- { pkg: "django_nose==1.1", wheel: "django_nose-1.1-py27-none-any.whl" }
- { pkg: "factory_boy==2.1.2", wheel: "factory_boy-2.1.2-py27-none-any.whl" }
- { pkg: "freezegun==0.1.11", wheel: "freezegun-0.1.11-py27-none-any.whl" }
- { pkg: "mock==1.0.1", wheel: "mock-1.0.1-py27-none-any.whl" }
- { pkg: "nosexcover==1.0.7", wheel: "nosexcover-1.0.7-py27-none-any.whl" }
- { pkg: "pep8==1.4.5", wheel: "pep8-1.4.5-py27-none-any.whl" }
- { pkg: "pylint==0.28", wheel: "pylint-0.28.0-py27-none-any.whl" }
- { pkg: "python-subunit==0.0.16", wheel: "python_subunit-0.0.16-py27-none-any.whl" }
- { pkg: "rednose==0.3", wheel: "rednose-0.3-py27-none-any.whl" }
- { pkg: "selenium==2.39.0", wheel: "selenium-2.39.0-py27-none-any.whl" }
- { pkg: "splinter==0.5.4", wheel: "splinter-0.5.4-py27-none-any.whl" }
- { pkg: "testtools==0.9.34", wheel: "testtools-0.9.34-py27-none-any.whl" }
- { pkg: "Paver==1.2.1", wheel: "Paver-1.2.1-py27-none-any.whl" }
- { pkg: "psutil==1.2.1", wheel: "psutil-1.2.1-cp27-none-linux_x86_64.whl" }
- { pkg: "lazy==1.1", wheel: "lazy-1.1-py27-none-any.whl" }
- { pkg: "path.py==3.0.1", wheel: "path.py-3.0.1-py27-none-any.whl" }
- { pkg: "MySQL-python==1.2.5", wheel: "MySQL_python-1.2.5-cp27-none-linux_x86_64.whl" }
# packer direct download URL
packer_url: "https://dl.bintray.com/mitchellh/packer/0.6.1_linux_amd64.zip"
---
dependencies:
- common
- role: rbenv
rbenv_user: "{{ jenkins_user }}"
rbenv_dir: "{{ jenkins_home }}"
......
......@@ -8,7 +8,9 @@
# `jenkins_home`: /var/lib/jenkins
# `jenkins_user_home`: /home/jenkins
- include: packer.yml
- include: system.yml
- include: python.yml
- include: ruby.yml
- include: jscover.yml
- include: test.yml
---
- name: Download packer
get_url: url={{ packer_url }} dest=/var/tmp/packer.zip
- name: Unzip packer
unarchive: src=/var/tmp/packer.zip dest=/usr/local/bin copy=no
......@@ -17,45 +17,31 @@
owner=root group=root
mode=755
# Create wheelhouse to enable fast virtualenv creation
- name: Create wheel virtualenv
command: /usr/local/bin/virtualenv {{ jenkins_venv }} creates={{ jenkins_venv }}
# Create a virtualenv for edx-platform by installing the requirements
# and packaging the virtualenv.
# A shallow clone is created off of master. The depth setting
# refers to the --depth option of git clone. A value of 1
# will truncate all history prior to the last revision.
- name: Create shallow clone of edx-platform
git: >
repo=https://github.com/edx/edx-platform.git
dest={{ jenkins_home }}/shallow-clone
version=master
depth=1
sudo_user: "{{ jenkins_user }}"
- name: Install wheel
pip: name=wheel virtualenv={{ jenkins_venv }} virtualenv_command=/usr/local/bin/virtualenv
sudo_user: "{{ jenkins_user }}"
- name: Create wheelhouse dir
file:
path={{ jenkins_wheel_dir }} state=directory
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
# (need to install each one in the venv to satisfy dependencies)
- name: Create wheel archives
shell:
"{{ jenkins_pip }} wheel --wheel-dir={{ jenkins_wheel_dir }} \"${item.pkg}\" &&
{{ jenkins_pip }} install --use-wheel --no-index --find-links={{ jenkins_wheel_dir }} \"${item.pkg}\"
creates={{ jenkins_wheel_dir }}/${item.wheel}"
sudo_user: "{{ jenkins_user }}"
with_items: jenkins_wheels
- name: Add wheel_venv.sh script
template:
src=wheel_venv.sh.j2 dest={{ jenkins_home }}/wheel_venv.sh
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
# Run the wheel_venv.sh script for the first time
# This was previously done in the Jenkins global
# configuration as part of the AMI Init script.
# Moving here so that we can archive a clean snapshot
# of the virtualenv with only the defined packages
# from jenkins_wheels.
- name: Run the wheel_venv.sh script
command: >
./wheel_venv.sh edx-venv
chdir={{ jenkins_home }}
creates={{ jenkins_home }}/edx-venv
- name: Install edx-platform requirements
pip: >
requirements={{ jenkins_home }}/shallow-clone/requirements/edx/{{ item }}
extra_args="--exists-action w"
virtualenv={{ jenkins_home }}/edx-venv
virtualenv_command=virtualenv-2.7
with_items:
- pre.txt
- github.txt
- base.txt
- post.txt
- paver.txt
sudo_user: "{{ jenkins_user }}"
# Archive the current state of the virtualenv
......@@ -66,5 +52,9 @@
command: >
tar -cpzf edx-venv_clean.tar.gz edx-venv
chdir={{ jenkins_home }}
creates={{ jenkins_home }}/edx-venv_clean.tar.gz
sudo_user: "{{ jenkins_user }}"
# Remove the shallow-clone directory now that we are done with it
- name: Remove shallow-clone
file: path={{ jenkins_home }}/shallow-clone state=absent
......@@ -26,6 +26,10 @@
owner={{ jenkins_user }} group={{ jenkins_group }} mode=400
ignore_errors: yes
# adding chris-lea nodejs repo
- name: add ppas for current versions of nodejs
apt_repository: repo="{{ jenkins_chrislea_ppa }}"
- name: Install system packages
apt: pkg={{','.join(jenkins_debian_pkgs)}}
state=present update_cache=yes
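# Joining the list into one comma-separated pkg argument installs all the
# packages in a single apt transaction instead of one apt run per item.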
......@@ -40,3 +44,16 @@
- name: Add github.com to known_hosts if it does not exist
shell: >
ssh-keygen -f {{ jenkins_home }}/.ssh/known_hosts -H -F github.com | grep -q found || ssh-keyscan -H github.com > {{ jenkins_home }}/.ssh/known_hosts
# Edit the /etc/hosts file so that the Preview button will work in Studio
- name: add preview.localhost to /etc/hosts
shell: sed -i -r 's/^127.0.0.1\s+.*$/127.0.0.1 localhost preview.localhost/' /etc/hosts
sudo: yes
# Npm registry must be pre-loaded or else setting it
# with the Jenkins user will fail
# See https://github.com/npm/npm/issues/3565
- name: Set npm registry
template:
src=.npmrc.j2 dest={{ jenkins_home }}/.npmrc
owner={{ jenkins_user }} group={{ jenkins_group }} mode=0664
---
# Tests for this role
# Set up #
# To get a baseline comparison for timestamp comparisons
# create a testfile and register its stat info
- name: Create test file
file: path=testfile state=touch
- name: Stat test file
stat: path=testfile
register: testfile
# Tests #
- name: Verify java cmd is using v 1.7
shell: java -version
register: java_version
- assert:
that:
- "'1.7.0' in java_version.stderr"
# The role is run with a github oauth token passed in
# as github_oauth_token var value.
# This test confirms that the key being used will work
- name: ensure github token works
shell:
"github_post_status.py edx edx-platform
dddac0b5dddf00c0950daf324e603e4935994954 success
https://jenkins.testeng.edx.org/ \"Tests Passed\""
# Run the github_pr_auth script to confirm it reports
# an expected error when there is nothing in the whitelist
- name: ensure github_pr_auth fails as expected
shell:
"github_pr_auth.py edx edx-platform 2498"
ignore_errors: True
register: pr_auth_result
- assert:
that:
- "'You can update the whitelist by' in '{{ pr_auth_result.stdout_lines[1] }}'"
# Run the github_pr_auth script with a value in the whitelist
# to ensure a passing run
- name: ensure github_pr_auth succeeds as expected
shell:
"export GITHUB_OWNER_WHITELIST=edx &&
github_pr_auth.py edx edx-platform 2498"
# Verify the virtualenv tar is newly-built
- name: Get info on virtualenv tar
stat: path={{ jenkins_home }}/edx-venv_clean.tar.gz
register: edxvenv
- assert:
that:
# Assert that it was modified within the last hour
- "{{ testfile.stat.mtime }} - {{ edxvenv.stat.mtime }} < 3600"
# Tear Down #
- name: Remove test file
file: path=testfile state=absent
registry={{ COMMON_NPM_MIRROR_URL }}
#! /usr/bin/env bash
if [ $# -ne 1 ]; then
echo "Usage: $0 VENV_DIR"
exit 1
fi
# Create and activate the new virtualenv
VENV=$1
mkdir -p $VENV
/usr/local/bin/virtualenv $VENV
. $VENV/bin/activate
# Install each available wheel archive
ls {{ jenkins_wheel_dir }} | cut -d- -f1 | while read line ; do
pip install --use-wheel --no-index --find-links={{ jenkins_wheel_dir }} $line ;
done
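# Note: wheel filenames follow pkg-version-pytag-abitag-platform.whl
# (e.g. lazy-1.1-py27-none-any.whl), so `cut -d- -f1` recovers the package
# name that pip then resolves from the local wheelhouse.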
......@@ -41,6 +41,8 @@
group: "{{ security_group }}"
instance_type: "{{ instance_type }}"
image: "{{ ami }}"
vpc_subnet_id: "{{ vpc_subnet_id }}"
assign_public_ip: yes
wait: true
region: "{{ region }}"
instance_tags: "{{instance_tags}}"
......