Commit 944e697a by Han Su Kim

Merge pull request #1495 from edx/rc/kifli

Release...rc/kifli
parents a76cf246 bc0a114a
......@@ -10,3 +10,9 @@ vagrant/*/devstack/edx-platform
vagrant/*/devstack/cs_comments_service
vagrant/*/devstack/ora
vagrant_ansible_inventory_default
### OS X artifacts
*.DS_Store
.AppleDouble
._*
.#
......@@ -3,7 +3,7 @@ language: python
python:
- "2.7"
install:
- "sudo apt-get install -y npm python-demjson"
- "sudo apt-get install -y nodejs python-demjson"
- "pip install --allow-all-external -r requirements.txt"
- "pip install --allow-all-external demjson"
script:
......@@ -24,9 +24,17 @@ script:
fi
done
- |
plays="aws bastion certs commoncluster common demo devpi discern edx_ansible edxapp elasticsearch forum ora rabbitmq worker xqueue xserver"
set -e
cd playbooks/edx-east
for play in $plays; do
ansible-playbook -i localhost, --syntax-check ${play}.yml
ROLE_DIRS=$(/bin/ls -d roles/*)
cat <<EOF >travis-test.yml
- name: Play to test all roles
hosts: all
roles:
EOF
for role_dir in $ROLE_DIRS; do
echo " - $(basename $role_dir)" >> travis-test.yml
done
ansible-playbook -i localhost, --syntax-check travis-test.yml
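For reference, a rough Python equivalent of what the loop above writes out (a sketch only; it assumes it runs from playbooks/edx-east, where a roles/ directory exists):

import os

# Build the same "play to test all roles" playbook the shell loop produces,
# one role entry per directory under roles/.
roles = sorted(os.listdir("roles"))
lines = ["- name: Play to test all roles", "  hosts: all", "  roles:"]
lines += ["    - {0}".format(role) for role in roles]
with open("travis-test.yml", "w") as out:
    out.write("\n".join(lines) + "\n")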
......@@ -32,3 +32,6 @@ Ray Hooker <ray.hooker@gmail.com>
David Pollack <david@sologourmand.com>
Rodolphe Quiedeville <rodolphe@quiedeville.org>
Matjaz Gregoric <mtyaka@gmail.com>
Ben Patterson <bpatterson@edx.org>
Jason Zhu <fmyzjs@gmail.com>
Rohit Karajgi <rohit.karajgi@gmail.com>
- Role: common
- We now remove the default syslog.d conf file (50-default.conf); this will
break hosts where that file has been hand-edited.
- Role: edxapp
- Updated the module store settings to match the new settings format.
- Role: analytics-api
- Added a new role for the analytics-api Django app. Currently a private repo.
......@@ -18,3 +25,7 @@
- Update `CMS_HOSTNAME` default to allow any hostname that starts with `studio` along with `prod-studio` or `stage-studio`.
- Start a change log to keep track of backwards incompatible changes and deprecations.
- Role: Mongo
- Fixed the case of a variable used in an if block that broke cluster
configuration, by renaming mongo_clustered to MONGO_CLUSTERED.
......@@ -2865,15 +2865,13 @@
"Key":"environment",
"Value":{
"Ref":"EnvironmentTag"
},
"PropagateAtLaunch":true
}
},
{
"Key":"deployment",
"Value":{
"Ref":"DeploymentTag"
},
"PropagateAtLaunch":true
}
}
],
"UserData": { "Fn::Base64" : { "Fn::Join" : ["", [
......
{
"AWSTemplateFormatVersion":"2010-09-09",
"Description":"Separate VPC for database clones and replicas.",
"Parameters":{
"EnvironmentTag":{
"Type":"String",
"Description":"A tag value applied to the hosts in the VPC indicating which environment to use during the configuration phase, e.g., stage, prod, sandbox",
"Default":"prod"
},
"DeploymentTag":{
"Type":"String",
"Description":"A tag value applied to the hosts in the VPC indicating which deployment this is, e.g., edx, edge, <university>, <org>",
"Default":"edx"
},
"KeyName":{
"Type":"String",
"Description":"Name of an existing EC2 KeyPair to enable SSH access to the web server",
"Default":"deployment-201407"
},
"ClassB":{
"Default":"1",
"Description":"The second octet of the Class B to be allocated for this VPC. 10.?.xxx.xxx",
"Type":"Number",
"MinValue":"0",
"MaxValue":"255",
"ConstraintDescription":"ClassB value must be between 0 and 255."
}
},
"Mappings":{
"SubnetConfig":{
"VPC": { "CIDR":".0.0/16" },
"Data01": { "CIDR":".50.0/24" },
"Data02": { "CIDR":".51.0/24" }
},
"MapRegionsToAvailZones":{
"us-east-1": { "AZone2":"us-east-1d", "AZone0":"us-east-1b", "AZone1":"us-east-1c" },
"us-west-1": { "AZone0":"us-west-1a", "AZone2":"us-west-1b", "AZone1":"us-west-1c" },
"us-west-2": { "AZone0":"us-west-2a", "AZone1":"us-west-2b", "AZone2":"us-west-2c" },
"eu-west-1": { "AZone0":"eu-west-1a", "AZone1":"eu-west-1b", "AZone2":"eu-west-1c" },
"sa-east-1": { "AZone0":"sa-east-1a", "AZone1":"sa-east-1b", "AZone2":"sa-east-1c" },
"ap-southeast-1": { "AZone0":"ap-southeast-1a", "AZone1":"ap-southeast-1b", "AZone2":"ap-southeast-1c" },
"ap-southeast-2": { "AZone0":"ap-southeast-2a", "AZone1":"ap-southeast-2b", "AZone2":"ap-southeast-2c" },
"ap-northeast-1": { "AZone0":"ap-northeast-1a", "AZone1":"ap-northeast-1b", "AZone2":"ap-northeast-1c" }
}
},
"Resources":{
"EdxVPC":{
"Type":"AWS::EC2::VPC",
"Properties":{
"EnableDnsSupport" : "true",
"EnableDnsHostnames" : "true",
"CidrBlock": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "VPC", "CIDR"]}]]},
"InstanceTenancy":"default"
}
},
"Data01":{
"Type":"AWS::EC2::Subnet",
"Properties":{
"VpcId":{
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Data01",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
"MapRegionsToAvailZones",
{ "Ref":"AWS::Region" },
"AZone0"
]
},
"Tags":[
{
"Key":"Name",
"Value":"Subnet-for-sanitized-dbs"
}
]
}
},
"Data02":{
"Type":"AWS::EC2::Subnet",
"Properties":{
"VpcId":{
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Data02",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
"MapRegionsToAvailZones",
{ "Ref":"AWS::Region" },
"AZone1"
]
},
"Tags":[
{
"Key":"Name",
"Value":"Subnet-for-non-sanitized-clones"
}
]
}
},
"PrivateRouteTable":{
"Type":"AWS::EC2::RouteTable",
"Properties":{
"VpcId":{
"Ref":"EdxVPC"
},
"Tags":[
{
"Key":"Application",
"Value":{
"Ref":"AWS::StackId"
}
},
{
"Key":"Network",
"Value":"Private"
}
]
}
},
"PrivateSubnetRouteTableAssociationData01":{
"Type":"AWS::EC2::SubnetRouteTableAssociation",
"Properties":{
"SubnetId":{
"Ref":"Data01"
},
"RouteTableId":{
"Ref":"PrivateRouteTable"
}
}
},
"PrivateSubnetRouteTableAssociationData02":{
"Type":"AWS::EC2::SubnetRouteTableAssociation",
"Properties":{
"SubnetId":{
"Ref":"Data02"
},
"RouteTableId":{
"Ref":"PrivateRouteTable"
}
}
},
"PrivateNetworkAcl":{
"Type":"AWS::EC2::NetworkAcl",
"Properties":{
"VpcId":{
"Ref":"EdxVPC"
},
"Tags":[
{
"Key":"Application",
"Value":{
"Ref":"AWS::StackId"
}
},
{
"Key":"Network",
"Value":"Private"
}
]
}
},
"InboundPrivateNetworkAclEntry":{
"Type":"AWS::EC2::NetworkAclEntry",
"Properties":{
"NetworkAclId":{
"Ref":"PrivateNetworkAcl"
},
"RuleNumber":"100",
"Protocol":"6",
"RuleAction":"allow",
"Egress":"false",
"CidrBlock":"0.0.0.0/0",
"PortRange":{
"From":"0",
"To":"65535"
}
}
},
"OutBoundPrivateNetworkAclEntry":{
"Type":"AWS::EC2::NetworkAclEntry",
"Properties":{
"NetworkAclId":{
"Ref":"PrivateNetworkAcl"
},
"RuleNumber":"100",
"Protocol":"6",
"RuleAction":"allow",
"Egress":"true",
"CidrBlock":"0.0.0.0/0",
"PortRange":{
"From":"0",
"To":"65535"
}
}
},
"PrivateSubnetNetworkAclAssociationData01":{
"Type":"AWS::EC2::SubnetNetworkAclAssociation",
"Properties":{
"SubnetId":{
"Ref":"Data01"
},
"NetworkAclId":{
"Ref":"PrivateNetworkAcl"
}
}
},
"PrivateSubnetNetworkAclAssociationData02":{
"Type":"AWS::EC2::SubnetNetworkAclAssociation",
"Properties":{
"SubnetId":{
"Ref":"Data02"
},
"NetworkAclId":{
"Ref":"PrivateNetworkAcl"
}
}
},
"EdxDataSecurityGroup":{
"Type":"AWS::EC2::SecurityGroup",
"Properties":{
"GroupDescription":"Open up access to the data subnet",
"VpcId":{
"Ref":"EdxVPC"
},
"SecurityGroupIngress":[
{
"IpProtocol":"tcp",
"FromPort":"3306",
"ToPort":"3306",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":"27017",
"ToPort":"27017",
"CidrIp":"0.0.0.0/0"
}
]
}
},
"EdxDBSubnetGroup":{
"Type":"AWS::RDS::DBSubnetGroup",
"Properties":{
"DBSubnetGroupDescription":"Subnets available for the RDS DB Instance",
"SubnetIds":[
{
"Ref":"Data01"
},
{
"Ref":"Data02"
}
]
}
},
"DBSecurityGroup":{
"Type":"AWS::RDS::DBSecurityGroup",
"Properties":{
"EC2VpcId":{
"Ref":"EdxVPC"
},
"GroupDescription":"Data access"
}
}
}
}
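As a footnote to the template above: each Fn::Join builds a CIDR by concatenating "10.", the ClassB parameter, and a suffix from SubnetConfig. A small Python sketch (ClassB of 1 is an assumed example value):

# Sketch of the Fn::Join CIDR assembly used in the template above.
subnet_config = {"VPC": ".0.0/16", "Data01": ".50.0/24", "Data02": ".51.0/24"}
class_b = 1  # the ClassB parameter; 1 is assumed here

for name, suffix in sorted(subnet_config.items()):
    print("{0}: 10.{1}{2}".format(name, class_b, suffix))
# Data01: 10.1.50.0/24
# Data02: 10.1.51.0/24
# VPC: 10.1.0.0/16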
......@@ -20,6 +20,7 @@ import os
import sys
import time
import json
import socket
try:
import boto.sqs
from boto.exception import NoAuthHandlerFound
......@@ -132,4 +133,12 @@ class CallbackModule(object):
# only keep the last 20 or so lines to avoid payload size errors
if len(payload[msg_type]['stdout_lines']) > 20:
payload[msg_type]['stdout_lines'] = ['(clipping) ... '] + payload[msg_type]['stdout_lines'][-20:]
while True:
try:
self.sqs.send_message(self.queue, json.dumps(payload))
break
except socket.gaierror as e:
print 'socket.gaierror, will retry: ' + str(e)
time.sleep(1)
except Exception:
raise
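The loop above retries forever on DNS failures; a bounded variant with backoff (a sketch only, not what this callback ships) could replace it:

# Sketch: bounded retry with exponential backoff. The attempt cap and the
# 2 ** attempt delay are assumptions, not part of the original module.
for attempt in range(5):
    try:
        self.sqs.send_message(self.queue, json.dumps(payload))
        break
    except socket.gaierror as e:
        print('socket.gaierror, will retry: ' + str(e))
        time.sleep(2 ** attempt)
else:
    raise Exception('giving up on SQS send after 5 attempts')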
......@@ -225,12 +225,16 @@ class Ec2Inventory(object):
cache_path = config.get('ec2', 'cache_path')
if not os.path.exists(cache_path):
os.makedirs(cache_path)
self.cache_path_cache = cache_path + "/ansible-ec2.cache"
self.cache_path_tags = cache_path + "/ansible-ec2.tags.cache"
self.cache_path_index = cache_path + "/ansible-ec2.index"
self.cache_max_age = config.getint('ec2', 'cache_max_age')
if 'AWS_PROFILE' in os.environ:
aws_profile = "{}-".format(os.environ.get('AWS_PROFILE'))
else:
aws_profile = ""
self.cache_path_cache = cache_path + "/{}ansible-ec2.cache".format(aws_profile)
self.cache_path_tags = cache_path + "/{}ansible-ec2.tags.cache".format(aws_profile)
self.cache_path_index = cache_path + "/{}ansible-ec2.index".format(aws_profile)
self.cache_max_age = config.getint('ec2', 'cache_max_age')
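The effect of the change above is one cache file set per AWS profile, so inventories for different accounts no longer clobber each other. A small sketch of the resulting path (profile name and cache_path are assumed values):

import os

os.environ["AWS_PROFILE"] = "edx"   # assumed profile name
cache_path = "/tmp/ansible-ec2"     # assumed cache_path from ec2.ini
if 'AWS_PROFILE' in os.environ:
    aws_profile = "{0}-".format(os.environ.get('AWS_PROFILE'))
else:
    aws_profile = ""
print(cache_path + "/{0}ansible-ec2.cache".format(aws_profile))
# /tmp/ansible-ec2/edx-ansible-ec2.cache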
def parse_cli_args(self):
''' Command line argument processing '''
......
# A simple utility play to add a public key to the authorized key
# file for the ubuntu user.
# You must pass in the entire line that you are adding.
# Example: ansible-playbook add-ubuntu-key.yml -c local -i 127.0.0.1, \
# -e "public_key=deployment-201407" \
# -e owner=jarv -e keyfile=/home/jarv/.ssh/authorized_keys
- hosts: all
vars:
# Number of instances to operate on at a time
serial_count: 1
owner: ubuntu
keyfile: "/home/{{ owner }}/.ssh/authorized_keys"
serial: "{{ serial_count }}"
tasks:
- fail: msg="You must pass in a public_key"
when: public_key is not defined
- fail: msg="public_key does not exist in secrets"
when: ubuntu_public_keys[public_key] is not defined
- command: mktemp
register: mktemp
- name: Validate the public key before we add it to authorized_keys
copy: >
content="{{ ubuntu_public_keys[public_key] }}"
dest={{ mktemp.stdout }}
# This tests the public key and will not continue if it does not look valid
- command: ssh-keygen -l -f {{ mktemp.stdout }}
- file: >
path={{ mktemp.stdout }}
state=absent
- lineinfile: >
dest={{ keyfile }}
line="{{ ubuntu_public_keys[public_key] }}"
- file: >
path={{ keyfile }}
owner={{ owner }}
mode=0600
- name: Deploy aide IDS
hosts: all
sudo: True
gather_facts: True
roles:
- aide
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
# ansible-playbook -i ec2.py commoncluster.yml --limit tag_Name_stage-edx-commoncluster -e@/path/to/vars/env-deployment.yml -T 30 --list-hosts
- hosts: all
sudo: True
serial: 1
vars:
# By default take instances in and out of the elb(s) they
# are attached to.
# To skip elb operations use "-e elb_pre_post=false"
elb_pre_post: true
# Number of instances to operate on at a time
serial_count: 1
serial: "{{ serial_count }}"
pre_tasks:
- action: ec2_facts
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
wait_timeout: 60
sudo: False
when: elb_pre_post
tasks:
- debug: msg="{{ ansible_ec2_local_ipv4 }}"
with_items: list.results
- shell: echo "rabbit@ip-{{ item|replace('.', '-') }}"
when: item != ansible_ec2_local_ipv4
with_items: hostvars.keys()
register: list
- command: rabbitmqctl stop_app
- command: rabbitmqctl join_cluster {{ item.stdout }}
when: item.stdout is defined
with_items: list.results
- command: rabbitmqctl start_app
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
sudo: False
when: elb_pre_post
......@@ -22,25 +22,24 @@
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
wait_timeout: 60
sudo: False
when: elb_pre_post
roles:
- aws
- role: nginx
nginx_sites:
- xqueue
- role: xqueue
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
- role: nginx
nginx_sites:
- xqueue
- xqueue
- oraclejdk
- elasticsearch
- rabbitmq
- datadog
- splunkforwarder
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
......@@ -51,6 +50,7 @@
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
sudo: False
when: elb_pre_post
......
......@@ -4,91 +4,6 @@
sudo: True
tasks:
- name: Switch the mongo db to use ephemeral
file: >
name=/mnt/mongodb
state=directory
owner=mongodb
group=mongodb
tags: update_mongo_data
- name: update the mongo config to use the new mongo dir
shell: >
sed -i 's#^dbpath=.*#dbpath=/mnt/mongodb#' /etc/mongodb.conf
tags: update_mongo_data
- name: restart mongodb
service: >
name=mongodb
state=restarted
tags: update_mongo_data
- name: grab the most recent backup from s3 for forums
shell: >
/edx/bin/s3cmd ls s3://edx-mongohq/mongohq_backups/ | grep comment | sort | tail -1 | awk '{ print $4 }'
register: s3cmd_out_forum
tags: update_mongo_data
- name: grab the most recent backup from s3 for forums
shell: >
/edx/bin/s3cmd get {{ s3cmd_out_forum.stdout }} --skip-existing
chdir=/mnt
tags: update_mongo_data
when: s3cmd_out_forum.stdout is defined
- name: untar the s3 backup
shell: >
tar zxf {{ s3cmd_out_forum.stdout|basename }}
chdir=/mnt
when: s3cmd_out_forum.stdout is defined
tags: update_mongo_data
- name: grab the most recent backup from s3 for prod-edx
shell: >
/edx/bin/s3cmd ls s3://edx-mongohq/mongohq_backups/ | grep prod-edx | sort | tail -1 | awk '{ print $4 }'
register: s3cmd_out_modulestore
tags: update_mongo_data
- name: grab the most recent backup from s3 for prod-edx
shell: >
/edx/bin/s3cmd get {{ s3cmd_out_modulestore.stdout }} --skip-existing
chdir=/mnt
tags: update_mongo_data
when: s3cmd_out_modulestore.stdout is defined
- name: untar the s3 backup
shell: >
tar zxf {{ s3cmd_out_modulestore.stdout|basename }}
chdir=/mnt
tags: update_mongo_data
when: s3cmd_out_modulestore.stdout is defined
- name: Restore the mongo data for the forums
shell: >
mongorestore --drop -d cs_comments_service /mnt/comments-prod
tags: update_mongo_data
- name: Restore the mongo data for the modulestore
shell: >
mongorestore --drop -d edxapp /mnt/prod-edx
tags: update_mongo_data
# recreate users after the restore
- name: create mongodb users
mongodb_user: >
database={{ item.database }}
name={{ item.user }}
password={{ item.password }}
state=present
with_items:
- user: cs_comments_service
password: password
database: cs_comments_service
- user: edxapp
password: password
database: edxapp
# WARNING - calling lineinfile on a symlink
# will convert the symlink to a file!
# don't use /edx/etc/server-vars.yml here
......@@ -108,6 +23,17 @@
- "EDXAPP_MYSQL_PASSWORD: {{ EDXAPP_MYSQL_PASSWORD }}"
tags: update_edxapp_mysql_host
- name: Update mongo to point to the sandbox mongo clone
lineinfile: >
dest=/edx/app/edx_ansible/server-vars.yml
line="{{ item }}"
with_items:
- "EDXAPP_MONGO_HOSTS: {{ EDXAPP_MONGO_HOSTS }}"
- "EDXAPP_MONGO_DB_NAME: {{ EDXAPP_MONGO_DB_NAME }}"
- "EDXAPP_MONGO_USER: {{ EDXAPP_MONGO_USER }}"
- "EDXAPP_MONGO_PASS: {{ EDXAPP_MONGO_PASS }}"
tags: update_edxapp_mysql_host
- name: call update on edx-platform
shell: >
/edx/bin/update edx-platform master
......
......@@ -30,17 +30,11 @@
ora_db_root_user: 'None'
discern_db_root_user: 'None'
vars_prompt:
# passwords use vars_prompt so they aren't in the
# bash history
- name: "db_root_pass"
prompt: "Password for root mysql user"
private: True
tasks:
- fail: msg="COMMON_ENVIRONMENT and COMMON_DEPLOYMENT need to be defined to use this play"
when: COMMON_ENVIRONMENT is not defined or COMMON_DEPLOYMENT is not defined
- fail: msg="db_root_pass is not defined"
when: db_root_pass is not defined
- name: install python mysqldb module
apt: pkg={{item}} install_recommends=no state=present update_cache=yes
sudo: yes
......
......@@ -4,12 +4,12 @@
gather_facts: False
vars:
keypair: continuous-integration
instance_type: m1.medium
security_group: sandbox
instance_type: t2.medium
security_group: sandbox-vpc
# ubuntu 12.04
ami: ami-d0f89fb9
ami: ami-f478849c
region: us-east-1
zone: us-east-1b
zone: us-east-1c
instance_tags:
environment: sandbox
github_username: temp
......@@ -21,6 +21,7 @@
dns_zone: m.sandbox.edx.org
name_tag: sandbox-temp
elb: false
vpc_subnet_id: subnet-cd867aba
roles:
- role: launch_ec2
keypair: "{{ keypair }}"
......@@ -33,6 +34,8 @@
dns_name: "{{ dns_name }}"
dns_zone: "{{ dns_zone }}"
zone: "{{ zone }}"
vpc_subnet_id: "{{ vpc_subnet_id }}"
assign_public_ip: yes
terminate_instance: true
instance_profile_name: sandbox
......
......@@ -2,6 +2,7 @@
hosts: all
sudo: True
gather_facts: True
vars:
roles:
- aws
- role: nginx
......@@ -10,6 +11,9 @@
- cms
nginx_default_sites:
- lms
nginx_extra_sites: "{{ NGINX_EDXAPP_EXTRA_SITES }}"
nginx_extra_configs: "{{ NGINX_EDXAPP_EXTRA_CONFIGS }}"
nginx_redirects: "{{ NGINX_EDXAPP_CUSTOM_REDIRECTS }}"
- edxapp
- role: datadog
when: COMMON_ENABLE_DATADOG
......@@ -17,3 +21,5 @@
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
- role: minos
when: COMMON_ENABLE_MINOS
......@@ -9,7 +9,7 @@
- name: syncdb and migrate
shell: >
chdir={{ edxapp_code_dir }}
python manage.py {{ item }} migrate --noinput --settings=aws_migrate {{ db_dry_run }}
python manage.py {{ item }} syncdb --migrate --noinput --settings=aws_migrate {{ db_dry_run }}
environment:
DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}"
DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
......
......@@ -22,6 +22,7 @@
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
wait_timeout: 60
sudo: False
when: elb_pre_post
roles:
......@@ -38,6 +39,7 @@
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
sudo: False
when: elb_pre_post
......@@ -15,7 +15,6 @@
- roles/xserver/defaults/main.yml
- roles/forum/defaults/main.yml
roles:
- common
- edxlocal
- mongo
- browsers
......
#!/usr/bin/env python
"""
Build an ansible inventory based on autoscaling group instance lifecycle state.
Outputs JSON to stdout with keys for each state and combination of autoscaling
group and state.
{
"InService": [
"10.0.47.127",
"10.0.46.174"
],
"Terminating_Wait": [
"10.0.48.104"
],
"e-d-CommonClusterServerAsGroup": [
"10.0.47.127",
"10.0.46.174"
],
"e-d-CommonClusterServerAsGroup_InService": [
"10.0.47.127",
"10.0.46.174"
],
"e-d-CommonClusterServerAsGroup_Terminating:Wait": [
"10.0.48.104"
]
}
"""
import argparse
import boto
import json
from collections import defaultdict
class LifecycleInventory():
profile = None
def __init__(self, profile):
self.profile = profile
def get_instance_dict(self):
ec2 = boto.connect_ec2(profile_name=self.profile)
reservations = ec2.get_all_instances()
instances = {}
for instance in [i for r in reservations for i in r.instances]:
instances[instance.id] = instance
return instances
def run(self):
autoscale = boto.connect_autoscale(profile_name=self.profile)
groups = autoscale.get_all_groups()
instances = self.get_instance_dict()
inventory = defaultdict(list)
for group in groups:
for instance in group.instances:
private_ip_address = instances[instance.instance_id].private_ip_address
inventory[group.name].append(private_ip_address)
inventory[group.name + "_" + instance.lifecycle_state].append(private_ip_address)
inventory[instance.lifecycle_state.replace(":","_")].append(private_ip_address)
print json.dumps(inventory, sort_keys=True, indent=2)
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--profile', help='The aws profile to use when connecting.')
parser.add_argument('-l', '--list', help='Ansible passes this, we ignore it.', action='store_true', default=True)
args = parser.parse_args()
LifecycleInventory(args.profile).run()
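For illustration, the three inventory keys run() emits for a single instance (group and state values assumed):

group_name = "e-d-CommonClusterServerAsGroup"  # assumed autoscaling group name
lifecycle_state = "Terminating:Wait"
keys = [
    group_name,                          # plain group membership
    group_name + "_" + lifecycle_state,  # group + state combination
    lifecycle_state.replace(":", "_"),   # bare state with the colon made safe
]
print(keys)
# ['e-d-CommonClusterServerAsGroup',
#  'e-d-CommonClusterServerAsGroup_Terminating:Wait', 'Terminating_Wait']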
- name: Deploy edxapp
hosts: all
sudo: True
gather_facts: True
vars:
roles:
- common
- minos
- name: Deploy MongoDB
hosts: all
sudo: True
gather_facts: True
roles:
- mongo
- mongo_mms
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
# Example ansible-playbook -i redirect.example.com -e@/path/to/secure/var/file.yml
#
# the secure var file will need to have the following vars defined:
#
# NGINX_ENABLE_SSL
# NGINX_SSL_CERTIFICATE
# NGINX_SSL_KEY
# # for the redirects use $scheme://example.com to match the protocol
#
# secure vars example:
# # Vars for setting up the nginx redirect instance
# NGINX_ENABLE_SSL: True
# NGINX_SSL_CERTIFICATE: '../../../example-secure/ssl/example.com.crt'
# NGINX_SSL_KEY: '../../../example-secure/ssl/example.com.key'
# nginx_redirects:
# - server_name: nginx-redirect.example.edx.org
# redirect: "http://www.example.com"
# - server_name: example.com
# redirect: "http://www.example.com"
# default: true
#
#
#
# - ...
- name: utility play to setup an nginx redirect
hosts: all
sudo: True
gather_facts: True
roles:
- role: nginx
nginx_sites:
- nginx_redirect
......@@ -29,6 +29,8 @@
notify:
- "restart edxapp"
- "restart workers"
tags:
- deploy
- name: syncdb and migrate
shell: >
......@@ -40,6 +42,8 @@
notify:
- "restart edxapp"
- "restart workers"
tags:
- deploy
handlers:
- name: restart edxapp
......
......@@ -24,6 +24,7 @@
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
wait_timeout: 60
sudo: False
when: elb_pre_post
roles:
......@@ -39,6 +40,7 @@
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
sudo: False
when: elb_pre_post
# A simple utility play to remove a public key from the authorized key
# file for the ubuntu user.
# You must pass in the entire line that you are removing.
- hosts: all
vars:
# Number of instances to operate on at a time
serial_count: 1
owner: ubuntu
keyfile: "/home/{{ owner }}/.ssh/authorized_keys"
serial: "{{ serial_count }}"
tasks:
- fail: msg="You must pass in a public_key"
when: public_key is not defined
- fail: msg="public_key does not exist in secrets"
when: ubuntu_public_keys[public_key] is not defined
- command: mktemp
register: mktemp
# This command will fail if grep returns zero lines, which prevents
# the last key from being removed
- shell: >
grep -Fv '{{ ubuntu_public_keys[public_key] }}' {{ keyfile }} > {{ mktemp.stdout }}
- shell: >
while read line; do ssh-keygen -lf /dev/stdin <<<$line; done <{{ mktemp.stdout }}
executable=/bin/bash
register: keycheck
- fail: msg="public key check failed!"
when: keycheck.stderr != ""
- command: cp {{ mktemp.stdout }} {{ keyfile }}
- file: >
path={{ keyfile }}
owner={{ owner }}
mode=0600
- file: >
path={{ mktemp.stdout }}
state=absent
- shell: wc -l < {{ keyfile }}
register: line_count
- fail: msg="There should only be one line in ubuntu's authorized_keys"
when: line_count.stdout|int != 1
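A rough Python equivalent of the filter-and-validate steps above (a sketch only; it assumes ssh-keygen is on PATH and mirrors grep -Fv's fixed-string substring match):

import subprocess

def remaining_keys(keyfile, key_line):
    # grep -Fv: keep every line that does not contain the key being removed
    with open(keyfile) as f:
        return [line for line in f if key_line not in line]

def looks_valid(line):
    # ssh-keygen -lf exits non-zero for lines that are not public keys
    proc = subprocess.Popen(["ssh-keygen", "-lf", "/dev/stdin"],
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    proc.communicate(line.encode())
    return proc.returncode == 0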
# ansible-playbook -i ./lifecycle_inventory.py ./retire_host.yml
# -e@/vars/env.yml --limit Terminating_Wait
#
# This is separate because its use of handlers
# leads to various race conditions.
#
- name: Stop all services
hosts: Terminating_Wait
sudo: True
gather_facts: False
vars:
STOP_ALL_EDX_SERVICES_EXTRA_ARGS: "--no-wait"
roles:
- stop_all_edx_services
- name: Server retirement workflow
hosts: Terminating_Wait
sudo: True
gather_facts: False
tasks:
- name: Force a log rotation
command: /usr/sbin/logrotate -f /etc/logrotate.d/{{ item }}
with_items:
- "apport"
- "apt"
- "aptitude"
- "dpkg"
- "hourly"
- "landscape-client"
- "newrelic-sysmond"
- "nginx"
- "nginx-access"
- "nginx-error"
- "ppp"
- "rsyslog"
- "ufw"
- "unattended-upgrades"
- "upstart"
- name: Force a log rotation
command: /usr/sbin/logrotate -f /etc/logrotate.d/hourly/{{ item }}
with_items:
- "tracking.log"
- "edx-services"
- name: Terminate existing s3 log sync
shell: /usr/bin/pkill send-logs-to-s3 || true
- name: Send logs to s3
command: /edx/bin/send-logs-to-s3
- name: Run minos verification
hosts: Terminating_Wait
sudo: True
gather_facts: False
tasks:
- name: Run minos
command: /edx/app/minos/venvs/bin/minos --config /edx/etc/minos/minos.yml --json
- name: Deploy snort IDS
hosts: all
sudo: True
gather_facts: True
roles:
- snort
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
- name: Deploy Splunk
hosts: all
sudo: True
gather_facts: True
vars:
COMMON_APP_DIR: "/edx/app"
common_web_group: "www-data"
ENABLE_DATADOG: True
ENABLE_SPLUNKFORWARDER: True
ENABLE_NEWRELIC: True
roles:
- datadog
- splunkforwarder
- newrelic
......@@ -19,6 +19,7 @@
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
wait_timeout: 60
sudo: False
when: elb_pre_post
tasks:
......@@ -33,6 +34,7 @@
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
sudo: False
when: elb_pre_post
# This is a utility play to setup the db users on the edxapp db
#
# The mysql root user MUST be passed in as an extra var
#
# the environment and deployment must be passed in as COMMON_ENVIRONMENT
# and COMMON_DEPLOYMENT. These two vars should be set in the secret
# var file for the corresponding vpc stack
#
# Example invocation:
#
# Create the databases for edxapp and xqueue:
#
# ansible-playbook -i localhost, create_db_users.yml -e@/path/to/secrets.yml -e "edxapp_db_root_user=root edxapp_db_root_pass=password"
#
- name: Update db users on the edxapp db
hosts: all
gather_facts: False
vars:
edxapp_db_root_user: 'None'
edxapp_db_root_pass: 'None'
tasks:
- fail: msg="COMMON_ENVIRONMENT and COMMON_DEPLOYMENT need to be defined to use this play"
when: COMMON_ENVIRONMENT is not defined or COMMON_DEPLOYMENT is not defined
- name: assign mysql user permissions for read_only user
mysql_user:
name: "{{ COMMON_MYSQL_READ_ONLY_USER }}"
priv: "*.*:SELECT"
password: "{{ COMMON_MYSQL_READ_ONLY_PASS }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
append_privs: yes
host: '%'
when: item.db_user != 'None'
with_items:
- db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ edxapp_db_root_pass }}"
- name: assign mysql user permissions for migrate user
mysql_user:
name: "{{ COMMON_MYSQL_MIGRATE_USER }}"
priv: "{{ item.db_name }}.*:SELECT,INSERT,UPDATE,DELETE,ALTER,CREATE,DROP,INDEX"
password: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
append_privs: yes
host: '%'
when: item.db_user != 'None'
with_items:
- db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ edxapp_db_root_pass }}"
- name: assign mysql user permissions for admin user
mysql_user:
name: "{{ COMMON_MYSQL_ADMIN_USER }}"
priv: "*.*:CREATE USER"
password: "{{ COMMON_MYSQL_ADMIN_PASS }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
append_privs: yes
host: '%'
when: item.db_user != 'None'
with_items:
- db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ edxapp_db_root_pass }}"
- name: assign mysql user permissions for db users
mysql_user:
name: "{{ item.db_user_to_modify }}"
priv: "{{ item.db_name }}.*:SELECT,INSERT,UPDATE,DELETE"
password: "{{ item.db_user_to_modify_pass }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
host: '%'
when: item.db_user != 'None'
with_items:
# These defaults are needed; otherwise ansible will throw
# variable-undefined errors when they are not defined
# in secret vars
- db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user|default('None') }}"
db_pass: "{{ edxapp_db_root_pass|default('None') }}"
db_user_to_modify: "{{ EDXAPP_MYSQL_USER }}"
db_user_to_modify_pass: "{{ EDXAPP_MYSQL_PASSWORD }}"
# The second call to mysql_user needs to have append_privs set to
# yes, otherwise it will overwrite the grants from the previous run.
# This means that both tasks will report changed on every ansible
# run.
- name: assign mysql user permissions for db test user
mysql_user:
append_privs: yes
name: "{{ item.db_user_to_modify }}"
priv: "{{ COMMON_ENVIRONMENT }}_{{ COMMON_DEPLOYMENT }}_test_{{ item.db_name }}.*:ALL"
password: "{{ item.db_user_to_modify_pass }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
host: '%'
when: item.db_user != 'None'
with_items:
# These defaults are needed; otherwise ansible will throw
# variable-undefined errors when they are not defined
# in secret vars
- db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user|default('None') }}"
db_pass: "{{ edxapp_db_root_pass|default('None') }}"
db_user_to_modify: "{{ EDXAPP_MYSQL_USER }}"
db_user_to_modify_pass: "{{ EDXAPP_MYSQL_PASSWORD }}"
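For reference, the priv strings above use the mysql_user module's "db.table:PRIV1,PRIV2" format. A sketch of roughly the SQL one such grant maps to (names assumed; this is not the module's internals):

priv = "edxapp.*:SELECT,INSERT,UPDATE,DELETE"  # assumed db name
target, grants = priv.split(":")
print("GRANT {0} ON {1} TO 'migrate'@'%'".format(grants, target))
# GRANT SELECT,INSERT,UPDATE,DELETE ON edxapp.* TO 'migrate'@'%'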
......@@ -12,3 +12,6 @@
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
- role: minos
when: COMMON_ENABLE_MINOS
\ No newline at end of file
......@@ -21,6 +21,7 @@
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
wait_timeout: 60
sudo: False
when: elb_pre_post
roles:
......@@ -45,6 +46,7 @@
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
sudo: False
when: elb_pre_post
......@@ -25,6 +25,7 @@ ALTON_HANDLE: 'alton'
ALTON_REDIS_URL: 'redis://fakeuser:redispassword@localhost:6379'
ALTON_HTTPSERVER_PORT: '8081'
ALTON_WORLD_WEATHER_KEY: !!null
ALTON_AWS_CREDENTIALS: !!null
# Needed if you want to build AMIs from alton.
ALTON_JENKINS_URL: !!null
......@@ -55,6 +56,7 @@ alton_environment:
WILL_HTTPSERVER_PORT: "{{ ALTON_HTTPSERVER_PORT }}"
WORLD_WEATHER_ONLINE_KEY: "{{ ALTON_WORLD_WEATHER_KEY }}"
JENKINS_URL: "{{ ALTON_JENKINS_URL }}"
BOTO_CONFIG: "{{ alton_app_dir }}/.boto"
#
# OS packages
......
- name: configure the boto profiles for alton
template: >
src="boto.j2"
dest="{{ alton_app_dir }}/.boto"
owner="{{ alton_user }}"
group="{{ common_web_user }}"
mode="0640"
notify: restart alton
- name: checkout the code
git: >
dest="{{ alton_code_dir }}" repo="{{ alton_source_repo }}"
version="{{ alton_version }}" accept_hostkey=yes
sudo_user: "{{ alton_user }}"
register: alton_checkout
notify: restart alton
- name: install the requirements
......@@ -55,3 +65,5 @@
state=started
when: not disable_edx_services
- include: tag_ec2.yml tags=deploy
when: COMMON_TAG_EC2_INSTANCE
---
- name: get instance information
action: ec2_facts
tags:
- deploy
- name: tag instance
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:alton" : "{{ alton_source_repo }} {{ alton_checkout.after |truncate(7,True,'')}}"
when: alton_checkout.after is defined
tags:
- deploy
{% for deployment, creds in ALTON_AWS_CREDENTIALS.iteritems() %}
[profile {{deployment}}]
aws_access_key_id = {{ creds.access_id }}
aws_secret_access_key = {{ creds.secret_key }}
{% endfor %}
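A minimal sketch of rendering the template above, assuming ALTON_AWS_CREDENTIALS maps deployment names to access_id/secret_key pairs (items() stands in for the template's Python 2 iteritems()):

from jinja2 import Template

template = Template(
    "{% for deployment, creds in creds_map.items() %}"
    "[profile {{ deployment }}]\n"
    "aws_access_key_id = {{ creds.access_id }}\n"
    "aws_secret_access_key = {{ creds.secret_key }}\n"
    "{% endfor %}"
)
print(template.render(creds_map={
    "edx": {"access_id": "AKIAEXAMPLE", "secret_key": "not-a-real-secret"},
}))
# [profile edx]
# aws_access_key_id = AKIAEXAMPLE
# aws_secret_access_key = not-a-real-secret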
......@@ -15,7 +15,7 @@ ANALYTICS_API_GIT_IDENTITY: !!null
# depends upon Newrelic being enabled via COMMON_ENABLE_NEWRELIC
# and a key being provided via NEWRELIC_LICENSE_KEY
ANALYTICS_API_NEWRELIC_APPNAME: "your Newrelic appname"
ANALYTICS_API_NEWRELIC_APPNAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-analytics-api"
ANALYTICS_API_PIP_EXTRA_ARGS: "-i {{ COMMON_PYPI_MIRROR_URL }}"
ANALYTICS_API_NGINX_PORT: "18100"
......@@ -58,6 +58,7 @@ ANALYTICS_API_CONFIG:
HOST: 'localhost'
PORT: '3306'
ANALYTICS_API_GUNICORN_WORKERS: "2"
#
# vars are namespaced with the module name.
#
......@@ -79,7 +80,6 @@ analytics_api_code_dir: "{{ analytics_api_app_dir }}/edx-analytics-data-api"
analytics_api_conf_dir: "{{ analytics_api_home }}"
analytics_api_gunicorn_host: "127.0.0.1"
analytics_api_gunicorn_port: "8100"
analytics_api_gunicorn_workers: "8"
analytics_api_gunicorn_timeout: "300"
analytics_api_django_settings: "production"
......
......@@ -21,3 +21,4 @@
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
when: not disable_edx_services
\ No newline at end of file
......@@ -57,7 +57,7 @@
- name: create api users
shell: >
chdir={{ analytics_api_code_dir }}
{{ analytics_api_venv_bin }}/python manage.py set_api_key {{ item.key }} {{ item.value }} --create-user
{{ analytics_api_venv_bin }}/python manage.py set_api_key {{ item.key }} {{ item.value }}
sudo_user: "{{ analytics_api_user }}"
environment: "{{ analytics_api_environment }}"
with_dict: ANALYTICS_API_USERS
......@@ -109,3 +109,6 @@
- name: remove read-only ssh key for the content repo
file: path={{ analytics_api_git_identity_file }} state=absent
- include: tag_ec2.yml tags=deploy
when: COMMON_TAG_EC2_INSTANCE
---
- name: get instance information
action: ec2_facts
- name: tag instance
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:analytics_api" : "{{ analytics_api_source_repo }} {{ analytics_api_code_checkout.after |truncate(7,True,'')}}"
when: analytics_api_code_checkout.after is defined
......@@ -2,17 +2,17 @@
# {{ ansible_managed }}
{% if COMMON_ENABLE_NEWRELIC %}
{% if COMMON_ENABLE_NEWRELIC_APP %}
{% set executable = analytics_api_venv_bin + '/newrelic-admin run-program ' + analytics_api_venv_bin + '/gunicorn' %}
{% else %}
{% set executable = analytics_api_venv_bin + '/gunicorn' %}
{% endif %}
{% if COMMON_ENABLE_NEWRELIC %}
{% if COMMON_ENABLE_NEWRELIC_APP %}
export NEW_RELIC_APP_NAME="{{ ANALYTICS_API_NEWRELIC_APPNAME }}"
export NEW_RELIC_LICENSE_KEY="{{ NEWRELIC_LICENSE_KEY }}"
{% endif -%}
source {{ analytics_api_app_dir }}/analytics_api_env
{{ executable }} --pythonpath={{ analytics_api_code_dir }} -b {{ analytics_api_gunicorn_host }}:{{ analytics_api_gunicorn_port }} -w {{ analytics_api_gunicorn_workers }} --timeout={{ analytics_api_gunicorn_timeout }} analyticsdataserver.wsgi:application
{{ executable }} --pythonpath={{ analytics_api_code_dir }} -b {{ analytics_api_gunicorn_host }}:{{ analytics_api_gunicorn_port }} -w {{ ANALYTICS_API_GUNICORN_WORKERS }} --timeout={{ analytics_api_gunicorn_timeout }} analyticsdataserver.wsgi:application
......@@ -18,7 +18,7 @@ AS_DB_ANALYTICS_HOST: 'localhost'
AS_SERVER_PORT: '9000'
AS_ENV_LANG: 'en_US.UTF-8'
AS_LOG_LEVEL: 'INFO'
AS_WORKERS: '4'
AS_WORKERS: '2'
# add public keys to enable the automator user
# for running manage.py commands
......
......@@ -18,7 +18,7 @@ ANALYTICS_DB_ANALYTICS_HOST: 'localhost'
ANALYTICS_SERVER_PORT: '9000'
ANALYTICS_ENV_LANG: 'en_US.UTF-8'
ANALYTICS_LOG_LEVEL: 'INFO'
ANALYTICS_WORKERS: '4'
ANALYTICS_WORKERS: '2'
DATABASES:
default: &databases_default
......
......@@ -27,12 +27,12 @@ AWS_S3_LOGS_FROM_EMAIL: dummy@example.com
# You should be overriding the environment and deployment vars
# Order of precedence is left to right for exclude and include options
AWS_S3_LOG_PATHS:
- bucket: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-app-logs"
- bucket: "edx-{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}/logs/tracking"
path: "{{ COMMON_LOG_DIR }}/tracking/*"
- bucket: "edx-{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}/logs/application"
path: "{{ COMMON_LOG_DIR }}/!(*tracking*)"
- bucket: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-app-logs"
- bucket: "edx-{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}/logs/system"
path: "/var/log/*"
- bucket: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-tracking-logs"
path: "{{ COMMON_LOG_DIR }}/*tracking*"
#
# vars are namespaced with the module name.
......@@ -45,7 +45,8 @@ aws_s3_logfile: "{{ aws_log_dir }}/s3-log-sync.log"
aws_log_dir: "{{ COMMON_LOG_DIR }}/aws"
aws_region: "us-east-1"
# default path to the aws binary
aws_cmd: "{{ COMMON_BIN_DIR }}/s3cmd"
s3cmd_cmd: "{{ COMMON_BIN_DIR }}/s3cmd"
aws_cmd: "/usr/local/bin/aws"
#
# OS packages
#
......@@ -55,8 +56,8 @@ aws_debian_pkgs:
aws_pip_pkgs:
- https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz
- awscli
- boto==2.29.1
- awscli==1.4.2
- boto==2.32.0
aws_redhat_pkgs: []
aws_s3cmd_version: s3cmd-1.5.0-beta1
......
......@@ -84,18 +84,19 @@
dest={{ COMMON_BIN_DIR }}/{{ aws_s3_sync_script|basename }}
when: AWS_S3_LOGS
- name: run s3 log sync script on shutdown
file: >
state=link
src={{ COMMON_BIN_DIR }}/send-logs-to-s3
path=/etc/rc0.d/S00send-logs-to-s3
- name: run s3 log sync script on supervisor shutdown
template: >
src=etc/init/sync-on-stop.conf.j2
dest=/etc/init/sync-on-stop.conf
owner=root group=root mode=0644
when: AWS_S3_LOGS
# cron job runs the aws s3 sync script
# this job will log its output to /var/log/aws
- name: cronjob for s3 log sync
cron: >
name="cronjob for s3 log sync"
user=root
minute=0
job={{ aws_s3_sync_script }}
cron:
name: "cronjob for s3 log sync"
user: root
minute: 0
job: "{{ aws_s3_sync_script }} > /dev/null 2>&1"
when: AWS_S3_LOGS
---
aws_profile: !!null
s3_bucket: 'edx-prod-edx'
bucket_path: 'test'
voters:
- BellwetherVoter:
config:
- ProccessQuienscenceVoter:
config:
process_name: 'gunicorn'
- TrackingLogVoter:
config:
aws_profile: !!null
s3_bucket: 'edx-prod-edx'
bucket_path: 'test'
local_directory: '{{ COMMON_LOG_DIR }}'
start on stopped supervisor
description "sync s3 logs on supervisor shutdown"
script
/bin/bash {{ aws_s3_sync_script }}
end script
......@@ -13,6 +13,10 @@ fi
exec > >(tee "{{ aws_s3_logfile }}")
exec 2>&1
# s3cmd sync requires a valid home
# directory
export HOME=/
shopt -s extglob
usage() {
......@@ -90,5 +94,5 @@ region=${availability_zone:0:${{lb}}#availability_zone{{rb}} - 1}
s3_path="${2}/$sec_grp/"
{% for item in AWS_S3_LOG_PATHS -%}
$noop {{ aws_cmd }} sync {{ item['path'] }} "s3://{{ item['bucket'] }}/$sec_grp/${instance_id}-${ip}/"
$noop {{ s3cmd_cmd }} sync {{ item['path'] }} "s3://{{ item['bucket'] }}/$sec_grp/${instance_id}-${ip}/"
{% endfor %}
# browsermob-proxy
browsermob_proxy_version: '2.0-beta-9'
browsermob_proxy_url: 'https://s3-us-west-1.amazonaws.com/lightbody-bmp/browsermob-proxy-{{ browsermob_proxy_version }}-bin.zip'
#!/bin/sh
/etc/browsermob-proxy/bin/browsermob-proxy
# Install browsermob-proxy, which is used for page performance testing with bok-choy
---
- name: get zip file
get_url: >
url={{ browsermob_proxy_url }}
dest=/var/tmp/browsermob-proxy-{{ browsermob_proxy_version }}.zip
register: download_browsermob_proxy
- name: unzip into /var/tmp/
shell: >
unzip /var/tmp/browsermob-proxy-{{ browsermob_proxy_version }}.zip
chdir=/var/tmp
when: download_browsermob_proxy.changed
- name: move to /etc/browsermob-proxy/
shell: >
mv /var/tmp/browsermob-proxy-{{ browsermob_proxy_version }} /etc/browsermob-proxy
when: download_browsermob_proxy.changed
- name: change permissions of main script
file: >
path=/etc/browsermob-proxy/bin/browsermob-proxy
mode=0755
when: download_browsermob_proxy.changed
- name: add wrapper script /usr/local/bin/browsermob-proxy
copy: >
src=browsermob-proxy
dest=/usr/local/bin/browsermob-proxy
when: download_browsermob_proxy.changed
- name: change permissions of wrapper script
file: >
path=/usr/local/bin/browsermob-proxy
mode=0755
when: download_browsermob_proxy.changed
......@@ -15,8 +15,8 @@ CERTS_QUEUE_URL: "http://localhost:18040"
CERTS_BUCKET: ""
# basic auth credentials for connecting
# to the xqueue server
CERTS_XQUEUE_AUTH_USER: "edx"
CERTS_XQUEUE_AUTH_PASS: "edx"
CERTS_XQUEUE_AUTH_USER: "{{ COMMON_HTPASSWD_USER }}"
CERTS_XQUEUE_AUTH_PASS: "{{ COMMON_HTPASSWD_PASS }}"
# credentials for connecting to the xqueue server
CERTS_QUEUE_USER: "lms"
CERTS_QUEUE_PASS: "password"
......
......@@ -52,6 +52,7 @@
sudo_user: "{{ certs_user }}"
environment:
GIT_SSH: "{{ certs_git_ssh }}"
register: certs_checkout
notify: restart certs
- name: remove read-only ssh key for the certs repo
......@@ -96,4 +97,7 @@
- python
- pip
- include: tag_ec2.yml tags=deploy
when: COMMON_TAG_EC2_INSTANCE
- set_fact: certs_installed=true
---
- name: get instance information
action: ec2_facts
- name: tag instance
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:certs" : "{{ CERT_REPO }} {{ certs_checkout.after|truncate(7,True,'') }}"
when: certs_checkout.after is defined
......@@ -53,8 +53,14 @@ COMMON_MYSQL_MIGRATE_PASS: 'password'
COMMON_MONGO_READ_ONLY_USER: 'read_only'
COMMON_MONGO_READ_ONLY_PASS: !!null
COMMON_ENABLE_DATADOG: False
COMMON_ENABLE_NGINXTRA: False
COMMON_ENABLE_SPLUNKFORWARDER: False
COMMON_ENABLE_NEWRELIC: False
# enables app reporting; you must enable newrelic
# as well
COMMON_ENABLE_NEWRELIC_APP: False
COMMON_ENABLE_MINOS: False
COMMON_TAG_EC2_INSTANCE: False
common_debian_pkgs:
- ntp
- ack-grep
......@@ -103,3 +109,4 @@ common_debian_variants:
common_redhat_variants:
- CentOS
- Red Hat Enterprise Linux
- Amazon
......@@ -60,6 +60,12 @@
owner=root group=root mode=644
notify: restart rsyslogd
- name: Remove the default rsyslog configuration
file:
path=/etc/rsyslog.d/50-default.conf
state=absent
notify: restart rsyslogd
# This is in common to keep all logrotation config
# in the same role
- name: Create hourly subdirectory in logrotate.d
......
*******************************************************************
* *
* _ __ __ *
* _ _| |\ \/ / *
* / -_) _` | > < *
* \___\__,_|/_/\_\ *
......
......@@ -32,7 +32,7 @@ $template tracking,"%syslogtag%%msg%\n"
$template DynaFile,"{{ COMMON_LOG_DIR }}/%syslogtag:R,ERE,1,BLANK:\[service_variant=([a-zA-Z_-]*)\].*--end%/edx.log"
local0.* -?DynaFile
local1.* {{ COMMON_LOG_DIR }}/tracking.log;tracking
local1.* {{ COMMON_LOG_DIR }}/tracking/tracking.log;tracking
#cron.* /var/log/cron.log
#daemon.* -/var/log/daemon.log
kern.* -/var/log/kern.log
......
{{ COMMON_LOG_DIR }}/tracking.log {
{{ COMMON_LOG_DIR }}/tracking/tracking.log {
compress
create
dateext
......
......@@ -50,6 +50,7 @@ repos_to_cmd["edx-ora"]="$edx_ansible_cmd ora.yml -e 'ora_version=$2'"
repos_to_cmd["configuration"]="$edx_ansible_cmd edx_ansible.yml -e 'configuration_version=$2'"
repos_to_cmd["read-only-certificate-code"]="$edx_ansible_cmd certs.yml -e 'certs_version=$2'"
repos_to_cmd["edx-analytics-data-api"]="$edx_ansible_cmd analyticsapi.yml -e 'ANALYTICS_API_VERSION=$2'"
repos_to_cmd["edx-ora2"]="$edx_ansible_cmd ora2.yml -e 'ora2_version=$2'"
if [[ -z $1 || -z $2 ]]; then
......
......@@ -11,12 +11,24 @@
#
# Defaults specified here should not contain
# any secrets or host identifying information.
EDXAPP_LMS_BASE: ''
EDXAPP_PREVIEW_LMS_BASE: ''
EDXAPP_CMS_BASE: ''
EDXAPP_AWS_ACCESS_KEY_ID: ''
EDXAPP_AWS_SECRET_ACCESS_KEY: ''
#
# Variables set to "None" will be converted to None
# when the edxapp config is written to disk.
EDXAPP_LMS_BASE: ""
EDXAPP_PREVIEW_LMS_BASE: ""
EDXAPP_CMS_BASE: ""
# Set this to the maximum number
# of requests for gunicorn for the lms and cms
# gunicorn --max-requests <num>
EDXAPP_LMS_MAX_REQ: !!null
EDXAPP_CMS_MAX_REQ: !!null
# 'None' will be written out as null in
# the configuration on disk
EDXAPP_AWS_ACCESS_KEY_ID: "None"
EDXAPP_AWS_SECRET_ACCESS_KEY: "None"
EDXAPP_XQUEUE_BASIC_AUTH: [ "{{ COMMON_HTPASSWD_USER }}", "{{ COMMON_HTPASSWD_PASS }}" ]
EDXAPP_XQUEUE_DJANGO_AUTH:
username: 'lms'
......@@ -45,6 +57,11 @@ EDXAPP_MYSQL_HOST: 'localhost'
EDXAPP_MYSQL_PORT: '3306'
EDXAPP_EMAIL_BACKEND: 'django.core.mail.backends.smtp.EmailBackend'
EDXAPP_EMAIL_HOST: 'localhost'
EDXAPP_EMAIL_PORT: 25
EDXAPP_EMAIL_USE_TLS: False
EDXAPP_EMAIL_HOST_USER: ''
EDXAPP_EMAIL_HOST_PASSWORD: ''
EDXAPP_LOG_LEVEL: 'INFO'
......@@ -52,27 +69,39 @@ EDXAPP_MEMCACHE: [ 'localhost:11211' ]
EDXAPP_COMMENTS_SERVICE_URL: 'http://localhost:18080'
EDXAPP_COMMENTS_SERVICE_KEY: 'password'
EDXAPP_EDXAPP_SECRET_KEY: ''
EDXAPP_EDXAPP_SECRET_KEY: ""
EDXAPP_OEE_URL: 'http://localhost:18060/'
EDXAPP_OEE_USER: 'lms'
EDXAPP_OEE_PASSWORD: 'password'
EDXAPP_ANALYTICS_API_KEY: ''
EDXAPP_ZENDESK_USER: ''
EDXAPP_ZENDESK_API_KEY: ''
EDXAPP_ANALYTICS_API_KEY: ""
EDXAPP_PAYMENT_SUPPORT_EMAIL: "billing@example.com"
EDXAPP_ZENDESK_USER: ""
EDXAPP_ZENDESK_URL: ""
EDXAPP_ZENDESK_API_KEY: ""
EDXAPP_CELERY_USER: 'celery'
EDXAPP_CELERY_PASSWORD: 'celery'
EDXAPP_CELERY_BROKER_VHOST: ""
EDXAPP_VIDEO_CDN_URLS:
EXAMPLE_COUNTRY_CODE: "http://example.com/edx/video?s3_url="
EDXAPP_PLATFORM_NAME: 'Your Platform Name Here'
EDXAPP_CAS_SERVER_URL: ''
EDXAPP_CAS_EXTRA_LOGIN_PARAMS: ''
EDXAPP_CAS_ATTRIBUTE_CALLBACK: ''
EDXAPP_CAS_ATTRIBUTE_PACKAGE: ''
EDXAPP_CAS_SERVER_URL: ""
EDXAPP_CAS_EXTRA_LOGIN_PARAMS: ""
EDXAPP_CAS_ATTRIBUTE_CALLBACK: ""
EDXAPP_CAS_ATTRIBUTE_PACKAGE: ""
# Enable an end-point that creates a user and logs them in
# Used for performance testing
EDXAPP_ENABLE_AUTO_AUTH: false
# Settings for enabling and configuring third party authorization
EDXAPP_ENABLE_THIRD_PARTY_AUTH: false
EDXAPP_THIRD_PARTY_AUTH: "None"
EDXAPP_MODULESTORE_MAPPINGS:
'preview\.': 'draft-preferred'
EDXAPP_FEATURES:
AUTH_USE_OPENID_PROVIDER: true
......@@ -85,21 +114,22 @@ EDXAPP_FEATURES:
ENABLE_S3_GRADE_DOWNLOADS: true
USE_CUSTOM_THEME: $edxapp_use_custom_theme
AUTOMATIC_AUTH_FOR_TESTING: $EDXAPP_ENABLE_AUTO_AUTH
ENABLE_THIRD_PARTY_AUTH: $EDXAPP_ENABLE_THIRD_PARTY_AUTH
EDXAPP_BOOK_URL: ''
EDXAPP_BOOK_URL: ""
# This needs to be set to localhost
# if xqueue is run on the same server
# as the lms (it's sent in the request)
EDXAPP_SITE_NAME: 'localhost'
EDXAPP_LMS_SITE_NAME: "{{ EDXAPP_SITE_NAME }}"
EDXAPP_CMS_SITE_NAME: 'localhost'
EDXAPP_MEDIA_URL: ''
EDXAPP_ANALYTICS_SERVER_URL: ''
EDXAPP_FEEDBACK_SUBMISSION_EMAIL: ''
EDXAPP_CELERY_BROKER_HOSTNAME: ''
EDXAPP_MEDIA_URL: ""
EDXAPP_ANALYTICS_SERVER_URL: ""
EDXAPP_FEEDBACK_SUBMISSION_EMAIL: ""
EDXAPP_CELERY_BROKER_HOSTNAME: ""
EDXAPP_LOGGING_ENV: 'sandbox'
EDXAPP_SYSLOG_SERVER: ''
EDXAPP_SYSLOG_SERVER: ""
EDXAPP_RABBIT_HOSTNAME: 'localhost'
EDXAPP_XML_MAPPINGS: {}
......@@ -141,13 +171,19 @@ EDXAPP_GRADE_BUCKET: 'edx-grades'
EDXAPP_GRADE_ROOT_PATH: '/tmp/edx-s3/grades'
# Credit card processor
# These are the same defaults set in common.py
EDXAPP_CC_PROCESSOR_NAME: "CyberSource"
EDXAPP_CC_PROCESSOR:
CyberSource:
SHARED_SECRET: ''
MERCHANT_ID: ''
SERIAL_NUMBER: ''
SHARED_SECRET: ""
MERCHANT_ID: ""
SERIAL_NUMBER: ""
ORDERPAGE_VERSION: '7'
PURCHASE_ENDPOINT: ''
PURCHASE_ENDPOINT: ""
CyberSource2:
PURCHASE_ENDPOINT: ""
SECRET_KEY: ""
ACCESS_KEY: ""
PROFILE_ID: ""
# does not affect verified students
EDXAPP_PAID_COURSE_REGISTRATION_CURRENCY: ['usd', '$']
......@@ -192,10 +228,16 @@ EDXAPP_UPDATE_STATIC_FILES_KEY: false
EDXAPP_INSTALL_PRIVATE_REQUIREMENTS: false
EDXAPP_GOOGLE_ANALYTICS_ACCOUNT: "UA-DUMMY"
EDXAPP_GOOGLE_ANALYTICS_ACCOUNT: "None"
EDXAPP_PEARSON_TEST_PASSWORD: ""
EDXAPP_SEGMENT_IO_LMS: false
EDXAPP_SEGMENT_IO_LMS_KEY: ""
EDXAPP_OPTIMIZELY_PROJECT_ID: "None"
# For the CMS
EDXAPP_SEGMENT_IO_KEY: ""
EDXAPP_SEGMENT_IO: false
EDXAPP_EDX_API_KEY: ""
# This is the default set in common.py
EDXAPP_VERIFY_STUDENT:
......@@ -206,18 +248,46 @@ EDXAPP_BULK_EMAIL_EMAILS_PER_TASK: 500
# If using microsites this should point to the microsite repo
EDXAPP_MICROSITE_ROOT_DIR: "{{ edxapp_app_dir }}/edx-microsite"
# this dictionary defines what microsites are configured
EDXAPP_MICROSITE_CONFIGRATION: {}
EDXAPP_MICROSITE_CONFIGURATION: {}
# Instructor code that will not be run in the code sandbox
EDXAPP_COURSES_WITH_UNSAFE_CODE: []
EDXAPP_SESSION_COOKIE_DOMAIN: ""
EDXAPP_SESSION_COOKIE_NAME: "sessionid"
# XML Course related flags
EDXAPP_XML_FROM_GIT: false
EDXAPP_XML_S3_BUCKET: !!null
EDXAPP_XML_S3_KEY: !!null
EDXAPP_NEWRELIC_LMS_APPNAME: "edX-LMS"
EDXAPP_NEWRELIC_CMS_APPNAME: "edX-CMS"
EDXAPP_NEWRELIC_LMS_APPNAME: "{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}-edxapp-lms"
EDXAPP_NEWRELIC_CMS_APPNAME: "{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}-edxapp-cms"
EDXAPP_AWS_STORAGE_BUCKET_NAME: 'edxuploads'
EDXAPP_ORA2_FILE_PREFIX: '{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}/ora2'
EDXAPP_FILE_UPLOAD_STORAGE_BUCKET_NAME: '{{ EDXAPP_AWS_STORAGE_BUCKET_NAME }}'
EDXAPP_FILE_UPLOAD_STORAGE_PREFIX: 'submissions_attachments'
EDXAPP_CODE_JAIL_LIMITS:
# Limit the memory of the jailed process to something high but not
# infinite (128MiB in bytes)
VMEM: 134217728
# Time in seconds that the jailed process has to run.
REALTIME: 1
# Needs to be non-zero so that jailed code can use it as its temp directory (1 MiB in bytes).
FSIZE: 1048576
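# A quick arithmetic check of the byte values above (Python expressions):
#   128 * 1024 * 1024 == 134217728   # VMEM, 128 MiB
#   1024 * 1024 == 1048576           # FSIZE, 1 MiB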
EDXAPP_VIRTUAL_UNIVERSITIES: []
EDXAPP_SUBDOMAIN_BRANDING: {}
# Set the number of workers explicitly for lms and cms
# Should be set to
# EDXAPP_WORKERS:
# lms: <num workers>
# cms: <num workers>
EDXAPP_WORKERS: !!null
EDXAPP_ANALYTICS_DATA_TOKEN: ""
EDXAPP_ANALYTICS_DATA_URL: ""
#-------- Everything below this line is internal to the role ------------
# Use YAML references (& and *) and hash merge <<: to factor out shared settings
......@@ -235,8 +305,9 @@ edxapp_rbenv_shims: "{{ edxapp_rbenv_root }}/shims"
edxapp_rbenv_bin: "{{ edxapp_rbenv_root }}/bin"
edxapp_gem_root: "{{ edxapp_rbenv_dir }}/.gem"
edxapp_gem_bin: "{{ edxapp_gem_root }}/bin"
edxapp_node_bin: "{{ edxapp_code_dir }}/node_modules/.bin"
edxapp_user: edxapp
edxapp_deploy_path: "{{ edxapp_venv_bin }}:{{ edxapp_code_dir }}/bin:{{ edxapp_rbenv_bin }}:{{ edxapp_rbenv_shims }}:{{ edxapp_gem_bin }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
edxapp_deploy_path: "{{ edxapp_venv_bin }}:{{ edxapp_code_dir }}/bin:{{ edxapp_rbenv_bin }}:{{ edxapp_rbenv_shims }}:{{ edxapp_gem_bin }}:{{ edxapp_node_bin }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
edxapp_staticfile_dir: "{{ edxapp_data_dir }}/staticfiles"
edxapp_course_static_dir: "{{ edxapp_data_dir }}/course_static"
edxapp_course_data_dir: "{{ edxapp_data_dir }}/data"
......@@ -244,6 +315,10 @@ edxapp_upload_dir: "{{ edxapp_data_dir }}/uploads"
edxapp_theme_dir: "{{ edxapp_data_dir }}/themes"
edxapp_git_identity: "{{ edxapp_app_dir }}/edxapp-git-identity"
edxapp_git_ssh: "/tmp/edxapp_git_ssh.sh"
# TODO: This can be removed once VPC-122 is resolved
edxapp_legacy_course_data_dir: "{{ edxapp_app_dir }}/data"
edxapp_workers:
- queue: low
service_variant: cms
......@@ -312,6 +387,7 @@ edxapp_environment:
edxapp_generic_auth_config: &edxapp_generic_auth
ANALYTICS_DATA_TOKEN: $EDXAPP_ANALYTICS_DATA_TOKEN
AWS_ACCESS_KEY_ID: $EDXAPP_AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY: $EDXAPP_AWS_SECRET_ACCESS_KEY
SECRET_KEY: $EDXAPP_EDXAPP_SECRET_KEY
......@@ -341,24 +417,33 @@ edxapp_generic_auth_config: &edxapp_generic_auth
ADDITIONAL_OPTIONS: $EDXAPP_CONTENTSTORE_ADDITIONAL_OPTS
DOC_STORE_CONFIG: *edxapp_generic_default_docstore
MODULESTORE:
default: &edxapp_generic_default_modulestore
default:
ENGINE: 'xmodule.modulestore.mixed.MixedModuleStore'
OPTIONS:
mappings: $EDXAPP_XML_MAPPINGS
stores:
- &edxapp_generic_draft_modulestore
NAME: 'draft'
ENGINE: 'xmodule.modulestore.mongo.DraftMongoModuleStore'
OPTIONS: &generic_modulestore_default_options
collection: 'modulestore'
db: $EDXAPP_MONGO_DB_NAME
DOC_STORE_CONFIG: *edxapp_generic_default_docstore
OPTIONS:
default_class: 'xmodule.hidden_module.HiddenDescriptor'
fs_root: $edxapp_course_data_dir
host: $EDXAPP_MONGO_HOSTS
password: $EDXAPP_MONGO_PASSWORD
port: $EDXAPP_MONGO_PORT
render_template: 'edxmako.shortcuts.render_to_string'
# Needed for the CMS to be able to run update_templates
user: $EDXAPP_MONGO_USER
DOC_STORE_CONFIG: *edxapp_generic_default_docstore
direct: &edxapp_generic_direct_modulestore
ENGINE: 'xmodule.modulestore.mongo.MongoModuleStore'
OPTIONS: *generic_modulestore_default_options
- &edxapp_generic_xml_modulestore
NAME: 'xml'
ENGINE: 'xmodule.modulestore.xml.XMLModuleStore'
OPTIONS:
data_dir: $edxapp_course_data_dir
default_class: 'xmodule.hidden_module.HiddenDescriptor'
- &edxapp_generic_split_modulestore
NAME: 'split'
ENGINE: 'xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore'
DOC_STORE_CONFIG: *edxapp_generic_default_docstore
OPTIONS:
default_class: 'xmodule.hidden_module.HiddenDescriptor'
fs_root: $edxapp_course_data_dir
render_template: 'edxmako.shortcuts.render_to_string'
DATABASES:
read_replica:
ENGINE: 'django.db.backends.mysql'
......@@ -382,17 +467,25 @@ edxapp_generic_auth_config: &edxapp_generic_auth
grading_controller: 'grading_controller'
username: $EDXAPP_OEE_USER
ANALYTICS_API_KEY: $EDXAPP_ANALYTICS_API_KEY
EMAIL_HOST_USER: $EDXAPP_EMAIL_HOST_USER
EMAIL_HOST_PASSWORD: $EDXAPP_EMAIL_HOST_PASSWORD
ZENDESK_USER: $EDXAPP_ZENDESK_USER
ZENDESK_API_KEY: $EDXAPP_ZENDESK_API_KEY
CELERY_BROKER_USER: $EDXAPP_CELERY_USER
CELERY_BROKER_PASSWORD: $EDXAPP_CELERY_PASSWORD
GOOGLE_ANALYTICS_ACCOUNT: $EDXAPP_GOOGLE_ANALYTICS_ACCOUNT
THIRD_PARTY_AUTH: $EDXAPP_THIRD_PARTY_AUTH
AWS_STORAGE_BUCKET_NAME: "{{ EDXAPP_AWS_STORAGE_BUCKET_NAME }}"
generic_env_config: &edxapp_generic_env
ANALYTICS_DATA_URL: $EDXAPP_ANALYTICS_DATA_URL
CELERY_BROKER_VHOST: $EDXAPP_CELERY_BROKER_VHOST
PAYMENT_SUPPORT_EMAIL: $EDXAPP_PAYMENT_SUPPORT_EMAIL
ZENDESK_URL: $EDXAPP_ZENDESK_URL
COURSES_WITH_UNSAFE_CODE: $EDXAPP_COURSES_WITH_UNSAFE_CODE
BULK_EMAIL_EMAILS_PER_TASK: $EDXAPP_BULK_EMAIL_EMAILS_PER_TASK
MICROSITE_ROOT_DIR: $EDXAPP_MICROSITE_ROOT_DIR
MICROSITE_CONFIGURATION: $EDXAPP_MICROSITE_CONFIGRATION
MICROSITE_CONFIGURATION: $EDXAPP_MICROSITE_CONFIGURATION
GRADES_DOWNLOAD:
STORAGE_TYPE: $EDXAPP_GRADE_STORAGE_TYPE
BUCKET: $EDXAPP_GRADE_BUCKET
......@@ -407,6 +500,9 @@ generic_env_config: &edxapp_generic_env
LOCAL_LOGLEVEL: $EDXAPP_LOG_LEVEL
# default email backed set to local SMTP
EMAIL_BACKEND: $EDXAPP_EMAIL_BACKEND
EMAIL_HOST: $EDXAPP_EMAIL_HOST
EMAIL_PORT: $EDXAPP_EMAIL_PORT
EMAIL_USE_TLS: $EDXAPP_EMAIL_USE_TLS
FEATURES: $EDXAPP_FEATURES
WIKI_ENABLED: true
SYSLOG_SERVER: $EDXAPP_SYSLOG_SERVER
......@@ -424,29 +520,31 @@ generic_env_config: &edxapp_generic_env
default: &default_generic_cache
BACKEND: 'django.core.cache.backends.memcached.MemcachedCache'
KEY_FUNCTION: 'util.memcache.safe_key'
KEY_PREFIX: 'sandbox_default'
KEY_PREFIX: 'default'
LOCATION: $EDXAPP_MEMCACHE
general:
<<: *default_generic_cache
KEY_PREFIX: 'sandbox_general'
KEY_PREFIX: 'general'
mongo_metadata_inheritance:
<<: *default_generic_cache
KEY_PREFIX: 'integration_mongo_metadata_inheritance'
KEY_PREFIX: 'mongo_metadata_inheritance'
TIMEOUT: 300
staticfiles:
<<: *default_generic_cache
KEY_PREFIX: 'integration_static_files'
KEY_PREFIX: "{{ ansible_hostname|default('staticfiles') }}_general"
celery:
<<: *default_generic_cache
KEY_PREFIX: 'integration_celery'
KEY_PREFIX: 'celery'
TIMEOUT: "7200"
CELERY_BROKER_TRANSPORT: 'amqp'
CELERY_BROKER_HOSTNAME: $EDXAPP_RABBIT_HOSTNAME
COMMENTS_SERVICE_URL: $EDXAPP_COMMENTS_SERVICE_URL
LOGGING_ENV: $EDXAPP_LOGGING_ENV
SESSION_COOKIE_DOMAIN: $EDXAPP_SESSION_COOKIE_DOMAIN
SESSION_COOKIE_NAME: $EDXAPP_SESSION_COOKIE_NAME
COMMENTS_SERVICE_KEY: $EDXAPP_COMMENTS_SERVICE_KEY
SEGMENT_IO_LMS: true
SEGMENT_IO_LMS: $EDXAPP_SEGMENT_IO_LMS
SEGMENT_IO: $EDXAPP_SEGMENT_IO
THEME_NAME: $edxapp_theme_name
TECH_SUPPORT_EMAIL: $EDXAPP_TECH_SUPPORT_EMAIL
CONTACT_EMAIL: $EDXAPP_CONTACT_EMAIL
......@@ -458,68 +556,54 @@ generic_env_config: &edxapp_generic_env
CAS_SERVER_URL: $EDXAPP_CAS_SERVER_URL
CAS_EXTRA_LOGIN_PARAMS: $EDXAPP_CAS_EXTRA_LOGIN_PARAMS
CAS_ATTRIBUTE_CALLBACK: $EDXAPP_CAS_ATTRIBUTE_CALLBACK
HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS:
'preview\.': 'draft-preferred'
HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS: "{{ EDXAPP_MODULESTORE_MAPPINGS }}"
UNIVERSITY_EMAIL: $EDXAPP_UNIVERSITY_EMAIL
PRESS_EMAIL: $EDXAPP_PRESS_EMAIL
PLATFORM_TWITTER_ACCOUNT: $EDXAPP_PLATFORM_TWITTER_ACCOUNT
PLATFORM_FACEBOOK_ACCOUNT: $EDXAPP_PLATFORM_FACEBOOK_ACCOUNT
ORA2_FILE_PREFIX: $EDXAPP_ORA2_FILE_PREFIX
FILE_UPLOAD_STORAGE_BUCKET_NAME: $EDXAPP_FILE_UPLOAD_STORAGE_BUCKET_NAME
FILE_UPLOAD_STORAGE_PREFIX: $EDXAPP_FILE_UPLOAD_STORAGE_PREFIX
VIRTUAL_UNIVERSITIES: $EDXAPP_VIRTUAL_UNIVERSITIES
SUBDOMAIN_BRANDING: $EDXAPP_SUBDOMAIN_BRANDING
lms_auth_config:
<<: *edxapp_generic_auth
PEARSON_TEST_PASSWORD: $EDXAPP_PEARSON_TEST_PASSWORD
SEGMENT_IO_LMS_KEY: $EDXAPP_SEGMENT_IO_LMS_KEY
OPTIMIZELY_PROJECT_ID: $EDXAPP_OPTIMIZELY_PROJECT_ID
EDX_API_KEY: $EDXAPP_EDX_API_KEY
VERIFY_STUDENT: $EDXAPP_VERIFY_STUDENT
GOOGLE_ANALYTICS_LINKEDIN: $EDXAPP_GOOGLE_ANALYTICS_LINKEDIN
CC_PROCESSOR_NAME: $EDXAPP_CC_PROCESSOR_NAME
CC_PROCESSOR: $EDXAPP_CC_PROCESSOR
MODULESTORE:
default: &lms_default_modulestore
ENGINE: 'xmodule.modulestore.mixed.MixedModuleStore'
OPTIONS:
mappings: $EDXAPP_XML_MAPPINGS
stores:
xml:
ENGINE: 'xmodule.modulestore.xml.XMLModuleStore'
OPTIONS:
data_dir: $edxapp_course_data_dir
default_class: 'xmodule.hidden_module.HiddenDescriptor'
default:
OPTIONS:
default_class: 'xmodule.hidden_module.HiddenDescriptor'
host: $EDXAPP_MONGO_HOSTS
db: $EDXAPP_MONGO_DB_NAME
collection: 'modulestore'
render_template: 'edxmako.shortcuts.render_to_string'
user: $EDXAPP_MONGO_USER
password: $EDXAPP_MONGO_PASSWORD
port: $EDXAPP_MONGO_PORT
fs_root: $edxapp_course_data_dir
ENGINE: 'xmodule.modulestore.mongo.MongoModuleStore'
DOC_STORE_CONFIG: *edxapp_generic_default_docstore
draft:
<<: *edxapp_generic_default_modulestore
ENGINE: 'xmodule.modulestore.mongo.DraftMongoModuleStore'
lms_env_config:
<<: *edxapp_generic_env
PAID_COURSE_REGISTRATION_CURRENCY: $EDXAPP_PAID_COURSE_REGISTRATION_CURRENCY
SITE_NAME: $EDXAPP_LMS_SITE_NAME
VIDEO_CDN_URL: $EDXAPP_VIDEO_CDN_URLS
CODE_JAIL:
# From https://github.com/edx/codejail/blob/master/codejail/django_integration.py#L24, '' is treated the same as None
python_bin: '{% if EDXAPP_PYTHON_SANDBOX %}{{ edxapp_sandbox_venv_dir }}/bin/python{% endif %}'
limits:
# Limit the memory of the jailed process to something high but not
# infinite (128MiB in bytes)
VMEM: 134217728
# Time in seconds that the jailed process has to run.
REALTIME: 1
# Needs to be non-zero so that jailed code can use it as its temp directory. (1 MiB in bytes)
FSIZE: 1048576
limits: $EDXAPP_CODE_JAIL_LIMITS
user: '{{ edxapp_sandbox_user }}'
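# A sketch of the shape EDXAPP_CODE_JAIL_LIMITS is expected to take, assuming
# the same keys as the literal limits block it replaces:
# EDXAPP_CODE_JAIL_LIMITS:
#   VMEM: 134217728   # bytes (128 MiB)
#   REALTIME: 1       # seconds
#   FSIZE: 1048576    # bytes (1 MiB)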
cms_auth_config:
<<: *edxapp_generic_auth
SEGMENT_IO_KEY: $EDXAPP_SEGMENT_IO_KEY
MODULESTORE:
default:
ENGINE: 'xmodule.modulestore.mixed.MixedModuleStore'
OPTIONS:
# See commented section below. LMS-11258
# mappings: $EDXAPP_XML_MAPPINGS
mappings: {}
stores:
- *edxapp_generic_draft_modulestore
# Commented for now so that it can be tested first: LMS-11258
# - *edxapp_generic_xml_modulestore
- *edxapp_generic_split_modulestore
cms_env_config:
<<: *edxapp_generic_env
SITE_NAME: $EDXAPP_CMS_SITE_NAME
......@@ -581,8 +665,9 @@ sandbox_base_requirements: "{{ edxapp_code_dir }}/requirements/edx-sandbox/base
sandbox_local_requirements: "{{ edxapp_code_dir }}/requirements/edx-sandbox/local.txt"
sandbox_post_requirements: "{{ edxapp_code_dir }}/requirements/edx-sandbox/post.txt"
edxapp_chrislea_ppa: "ppa:chris-lea/node.js"
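# npm is dropped from the package list below, presumably because the nodejs
# package from the chris-lea PPA above bundles npm.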
edxapp_debian_pkgs:
- npm
# for compiling the virtualenv
# (only needed if wheel files aren't available)
- build-essential
......@@ -595,7 +680,6 @@ edxapp_debian_pkgs:
# libopenblas-base, it will cause
# problems for numpy
- gfortran
- libatlas3gf-base
- liblapack-dev
- g++
- libxml2-dev
......@@ -605,7 +689,7 @@ edxapp_debian_pkgs:
# misc
- curl
- ipython
- npm
- nodejs
- ntp
# for shapely
- libgeos-dev
......
......@@ -29,12 +29,14 @@
# Do A Checkout
- name: checkout edx-platform repo into {{edxapp_code_dir}}
git: >
dest={{edxapp_code_dir}} repo={{edx_platform_repo}} version={{edx_platform_version}}
dest={{edxapp_code_dir}}
repo={{edx_platform_repo}}
version={{edx_platform_version}}
accept_hostkey=yes
register: chkout
sudo_user: "{{ edxapp_user }}"
environment:
GIT_SSH: "{{ edxapp_git_ssh }}"
register: edxapp_platform_checkout
notify:
- "restart edxapp"
- "restart edxapp_workers"
......@@ -48,12 +50,15 @@
- name: checkout theme
git: >
dest={{ edxapp_app_dir }}/themes/{{edxapp_theme_name}} repo={{edxapp_theme_source_repo}} version={{edxapp_theme_version}}
dest={{ edxapp_app_dir }}/themes/{{edxapp_theme_name}}
repo={{edxapp_theme_source_repo}}
version={{edxapp_theme_version}}
accept_hostkey=yes
when: edxapp_theme_name != ''
sudo_user: "{{ edxapp_user }}"
environment:
GIT_SSH: "{{ edxapp_git_ssh }}"
register: edxapp_theme_checkout
notify:
- "restart edxapp"
- "restart edxapp_workers"
......@@ -106,16 +111,26 @@
- "restart edxapp_workers"
# Set the npm registry
# This needs to be done as root since npm is weird about
# chown - https://github.com/npm/npm/issues/3565
- name: Set the npm registry
shell:
npm config set registry '{{ COMMON_NPM_MIRROR_URL }}'
creates="{{ edxapp_app_dir }}/.npmrc"
sudo_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
# Set the npm registry permissions
- name: Set the npm registry permissions
file:
path="{{ edxapp_app_dir }}/.npmrc"
owner=edxapp group=edxapp
notify:
- "restart edxapp"
- "restart edxapp_workers"
# Node tasks that need to run after platform updates.
- name: Install edx-platform npm dependencies
shell: npm install chdir={{ edxapp_code_dir }}
......@@ -183,6 +198,25 @@
- "restart edxapp_workers"
when: not inst.stat.exists or new.stat.md5 != inst.stat.md5
# Install the python custom requirements into {{ edxapp_venv_dir }}
- stat: path="{{ custom_requirements_file }}"
register: custom_requirements
sudo_user: "{{ edxapp_user }}"
- name: install python custom-requirements
pip: >
requirements="{{ custom_requirements_file }}"
virtualenv="{{ edxapp_venv_dir }}"
state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
when: custom_requirements.stat.exists and new.stat.md5 != inst.stat.md5
# Install the final python modules into {{ edxapp_venv_dir }}
- name: install python post-post requirements
# Need to use shell rather than pip so that we can maintain the context of our current working directory; some
......@@ -248,33 +282,6 @@
- "restart edxapp"
- "restart edxapp_workers"
# The next few tasks install xml courses.
# Install the xml courses from an s3 bucket
- name: get s3 one time url
s3: >
bucket="{{ EDXAPP_XML_S3_BUCKET }}"
object="{{ EDXAPP_XML_S3_KEY }}"
mode="geturl"
expiration=300
when: not EDXAPP_XML_FROM_GIT and EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
register: s3_one_time_url
- name: download from one time url
get_url: url="{{ s3_one_time_url.url }}" dest="/tmp/{{ EDXAPP_XML_S3_KEY|basename }}"
when: not EDXAPP_XML_FROM_GIT and EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
register: download_xml_s3
- name: unzip the data to the data dir
shell: >
tar xzf /tmp/{{ EDXAPP_XML_S3_KEY|basename }}
chdir="{{ edxapp_data_dir }}"
when: download_xml_s3.changed
- include: xml.yml
tags: deploy
when: EDXAPP_XML_FROM_GIT
# The next few tasks set up the python code sandbox
# need to disable this profile, otherwise the pip inside the sandbox venv has no permissions
......@@ -355,6 +362,44 @@
- "restart edxapp"
- "restart edxapp_workers"
# The next few tasks install xml courses.
# Install the xml courses from an s3 bucket
- name: get s3 one time url
s3: >
bucket="{{ EDXAPP_XML_S3_BUCKET }}"
object="{{ EDXAPP_XML_S3_KEY }}"
mode="geturl"
expiration=30
when: not EDXAPP_XML_FROM_GIT and EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
register: s3_one_time_url
- name: download from one time url
get_url:
url="{{ s3_one_time_url.url }}"
dest="{{ edxapp_data_dir }}/{{ EDXAPP_XML_S3_KEY|basename }}"
mode=0600
when: not EDXAPP_XML_FROM_GIT and EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
register: download_xml_s3
- name: unzip the data to the data dir
shell: >
tar xzf {{ edxapp_data_dir }}/{{ EDXAPP_XML_S3_KEY|basename }}
chdir="{{ edxapp_data_dir }}"
when: download_xml_s3.changed
# This currently has to be done because
# the course coffeescript is compiled on the fly
# by the application after startup.
# See VPC-117 and VPC-122
- name: make the course data web user writable
file:
path="{{ edxapp_course_data_dir }}"
state=directory
recurse=yes
owner="{{ common_web_user }}"
group="{{ edxapp_user }}"
# creates the supervisor jobs for the
# service variants configured, runs
# gather_assets and db migrations
......@@ -363,6 +408,10 @@
- service_variant_config
- deploy
- include: xml.yml
tags: deploy
when: EDXAPP_XML_FROM_GIT
# call supervisorctl update. this reloads
# the supervisorctl config and restarts
# the services if any of the configurations
......@@ -417,4 +466,7 @@
file: path={{ edxapp_git_identity }} state=absent
when: EDXAPP_USE_GIT_IDENTITY
- include: tag_ec2.yml tags=deploy
when: COMMON_TAG_EC2_INSTANCE
- set_fact: edxapp_installed=true
......@@ -27,6 +27,19 @@
- "{{ edxapp_staticfile_dir }}"
- "{{ edxapp_course_static_dir }}"
# This is a symlink that has to exist because
# we currently can't override the DATA_DIR var
# in edx-platform. TODO: This can be removed once
# VPC-122 is closed
- name: make the course data dir
file:
src="{{ edxapp_course_data_dir }}"
dest="{{ edxapp_legacy_course_data_dir }}"
state=link
owner="{{ edxapp_user }}"
group="{{ common_web_group }}"
- name: create edxapp log dir
file: >
path="{{ edxapp_log_dir }}" state=directory
......@@ -47,8 +60,12 @@
- "{{ edxapp_course_data_dir }}"
- "{{ edxapp_upload_dir }}"
# adding chris-lea nodejs repo
- name: add ppas for current versions of nodejs
apt_repository: repo="{{ edxapp_chrislea_ppa }}"
- name: install system packages on which LMS and CMS rely
apt: pkg={{','.join(edxapp_debian_pkgs)}} state=present
apt: pkg={{','.join(edxapp_debian_pkgs)}} state=present update_cache=yes
notify:
- "restart edxapp"
- "restart edxapp_workers"
......
......@@ -79,12 +79,13 @@
- name: syncdb and migrate
shell: >
chdir={{ edxapp_code_dir }}
{{ edxapp_venv_bin}}/python manage.py lms syncdb --migrate --noinput --settings=aws_migrate
when: fake_migrations is not defined and migrate_db is defined and migrate_db|lower == "yes" and COMMON_MYSQL_MIGRATE_PASS
{{ edxapp_venv_bin}}/python manage.py {{ item }} syncdb --migrate --noinput --settings=aws_migrate
when: fake_migrations is not defined and migrate_db is defined and migrate_db|lower == "yes" and COMMON_MYSQL_MIGRATE_PASS and item != "lms-preview"
environment:
DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}"
DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
sudo_user: "{{ edxapp_user }}"
with_items: service_variants_enabled
notify:
- "restart edxapp"
- "restart edxapp_workers"
......
---
- name: get instance information
action: ec2_facts
- name: tag instance with edx_platform version
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:edx_platform" : "{{ edx_platform_repo }} {{ edxapp_platform_checkout.after|truncate(7,True,'') }}"
when: edxapp_platform_checkout.after is defined
- name: tag instance with edxapp theme version
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:edxapp_theme" : "{{ edxapp_theme_source_repo }} {{ edxapp_theme_checkout.after|truncate(7,True,'') }}"
when: edxapp_theme_checkout.after is defined
......@@ -20,9 +20,11 @@
shell: >
executable=/bin/bash
if [[ -d {{ edxapp_course_data_dir }}/{{ item.repo_name }}/static ]]; then
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }}/static {{ edxapp_course_static_dir }}/{{ item.repo_name }}
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }}/static {{ edxapp_course_static_dir }}/{{ item.repo_name}}
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }}/static {{ edxapp_course_static_dir }}/{{ item.course}}
else
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }} {{ edxapp_course_static_dir }}/{{ item.repo_name }}
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }} {{ edxapp_course_static_dir }}/{{ item.repo_name}}
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }} {{ edxapp_course_static_dir }}/{{ item.course}}
fi
with_items: EDXAPP_XML_COURSES
when: item.disposition == "on disk" or item.disposition == "no static import"
......@@ -56,13 +58,8 @@
with_items: EDXAPP_XML_COURSES
when: item.disposition == "import"
- name: delete .git repos
file: path="{{ edxapp_course_data_dir }}/{{ item.repo_name }}/.git" state=absent
with_items: EDXAPP_XML_COURSES
when: item.disposition == "on disk" or item.disposition == "no static import"
- name: create an archive of course data and course static dirs
shell: tar czf /tmp/static_course_content.tar.gz -C {{ edxapp_data_dir }} {{ edxapp_course_data_dir|basename }} {{ edxapp_course_static_dir|basename }}
shell: tar czf /tmp/static_course_content.tar.gz -C {{ edxapp_data_dir }} --exclude ".git" {{ edxapp_course_data_dir|basename }} {{ edxapp_course_static_dir|basename }}
- name: upload archive to s3
s3: >
......
{% do cms_auth_config.update(EDXAPP_AUTH_EXTRA) %}
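{# Values set to the literal string 'None' are converted to a real None below
   so that they serialize as null in the rendered JSON. #}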
{% for key, value in cms_auth_config.iteritems() %}
{% if value == 'None' %}
{% do cms_auth_config.update({key: None }) %}
{% endif %}
{% endfor %}
{{ cms_auth_config | to_nice_json }}
[program:cms]
{% if COMMON_ENABLE_NEWRELIC %}
{% set executable = edxapp_venv_dir + '/bin/newrelic-admin run-program ' + edxapp_venv_dir + '/bin/gunicorn' %}
{% else %}
{% set executable = edxapp_venv_dir + '/bin/gunicorn' %}
{% endif %}
{% if COMMON_ENABLE_NEWRELIC_APP -%}
{% set executable = edxapp_venv_dir + '/bin/newrelic-admin run-program ' + edxapp_venv_dir + '/bin/gunicorn' -%}
{% else -%}
{% set executable = edxapp_venv_dir + '/bin/gunicorn' -%}
{% endif -%}
{% if EDXAPP_CMS_MAX_REQ -%}
{% set max_req = '--max-requests ' + EDXAPP_CMS_MAX_REQ|string -%}
{% else -%}
{% set max_req = '' -%}
{% endif -%}
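{# For illustration only: with EDXAPP_CMS_MAX_REQ set to e.g. 1000, max_req
   renders as '--max-requests 1000' in the gunicorn commands below. #}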
{% if ansible_processor|length > 0 %}
command={{ executable }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
{% else %}
command={{ executable }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
{% endif %}
{% if EDXAPP_WORKERS -%}
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ EDXAPP_WORKERS.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
{% else -%}
{# This is for backwards compatibility; set workers explicitly using EDXAPP_WORKERS #}
{% if ansible_processor|length > 0 -%}
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
{% else -%}
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
{% endif -%}
{% endif -%}
user={{ common_web_user }}
directory={{ edxapp_code_dir }}
environment={% if COMMON_ENABLE_NEWRELIC %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_CMS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}PORT={{edxapp_cms_gunicorn_port}},ADDRESS={{edxapp_cms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_cms_env }},SERVICE_VARIANT="cms"
environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_CMS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}PORT={{edxapp_cms_gunicorn_port}},ADDRESS={{edxapp_cms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_cms_env }},SERVICE_VARIANT="cms"
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
killasgroup=true
......
{% do cms_env_config.update(EDXAPP_ENV_EXTRA) %}
{% if EDXAPP_UPDATE_STATIC_FILES_KEY %}
{%- do cms_env_config['CACHES']['staticfiles'].update({'KEY_PREFIX': edxapp_dynamic_cache_key}) %}
{% endif %}
{% for key, value in cms_env_config.iteritems() %}
{% if value == 'None' %}
{% do cms_env_config.update({key: None }) %}
{% endif %}
{% endfor %}
{{ cms_env_config | to_nice_json }}
{% do lms_auth_config.update(EDXAPP_AUTH_EXTRA) %}
{% for key, value in lms_auth_config.iteritems() %}
{% if value == 'None' %}
{% do lms_auth_config.update({key: None }) %}
{% endif %}
{% endfor %}
{{ lms_auth_config | to_nice_json }}
[program:lms]
{% if COMMON_ENABLE_NEWRELIC %}
{% set executable = edxapp_venv_dir + '/bin/newrelic-admin run-program ' + edxapp_venv_dir + '/bin/gunicorn' %}
{% else %}
{% set executable = edxapp_venv_dir + '/bin/gunicorn' %}
{% endif %}
{% if COMMON_ENABLE_NEWRELIC_APP -%}
{% set executable = edxapp_venv_dir + '/bin/newrelic-admin run-program ' + edxapp_venv_dir + '/bin/gunicorn' -%}
{% else -%}
{% set executable = edxapp_venv_dir + '/bin/gunicorn' -%}
{% endif -%}
{% if EDXAPP_LMS_MAX_REQ -%}
{% set max_req = '--max-requests ' + EDXAPP_LMS_MAX_REQ|string -%}
{% else -%}
{% set max_req = '' -%}
{% endif -%}
{% if ansible_processor|length > 0 %}
command={{ executable }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% else %}
command={{ executable }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% if EDXAPP_WORKERS -%}
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ EDXAPP_WORKERS.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% else -%}
{# This is for backwards compatibility; set workers explicitly using EDXAPP_WORKERS #}
{% if ansible_processor|length > 0 -%}
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% else -%}
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% endif %}
{% endif %}
user={{ common_web_user }}
directory={{ edxapp_code_dir }}
environment={% if COMMON_ENABLE_NEWRELIC %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_LMS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%} PORT={{edxapp_lms_gunicorn_port}},ADDRESS={{edxapp_lms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_lms_env }},SERVICE_VARIANT="lms"
environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_LMS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%} PORT={{edxapp_lms_gunicorn_port}},ADDRESS={{edxapp_lms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_lms_env }},SERVICE_VARIANT="lms",PATH="{{ edxapp_deploy_path }}"
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
killasgroup=true
......
{% do lms_env_config.update(EDXAPP_ENV_EXTRA) %}
{% if EDXAPP_UPDATE_STATIC_FILES_KEY %}
{%- do lms_env_config['CACHES']['staticfiles'].update({'KEY_PREFIX': edxapp_dynamic_cache_key}) %}
{% endif %}
{% for key, value in lms_env_config.iteritems() %}
{% if value == 'None' %}
{% do lms_env_config.update({key: None }) %}
{% endif %}
{% endfor %}
{{ lms_env_config | to_nice_json }}
......@@ -7,9 +7,10 @@ directory={{ edxapp_code_dir }}
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
command={{ edxapp_venv_bin}}/python {{ edxapp_code_dir }}/manage.py {{ w.service_variant }} --settings=aws celery worker --loglevel=info --queues=edx.{{ w.service_variant }}.core.{{ w.queue }} --hostname=edx.{{ w.service_variant }}.core.{{ w.queue }}.{{ ansible_hostname }} --concurrency={{ w.concurrency }}
command={{ edxapp_venv_bin}}/python {{ edxapp_code_dir }}/manage.py {{ w.service_variant }} --settings=aws celery worker --loglevel=info --queues=edx.{{ w.service_variant }}.core.{{ w.queue }} --hostname=edx.{{ w.service_variant }}.core.{{ w.queue }}.%%h --concurrency={{ w.concurrency }}
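{# supervisord collapses the %%-escape to a literal %, so celery receives %h
   and expands it to the worker's hostname, replacing the hard-coded
   ansible_hostname above. #}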
killasgroup=true
stopasgroup=true
stopwaitsecs=432000
{% endfor %}
......
......@@ -16,6 +16,11 @@ path.logs: {{elasticsearch_log_dir}}
#
bootstrap.mlockall: true
# Disable dynamic scripting as it is insecure and we don't use it
# See: http://bouk.co/blog/elasticsearch-rce/
# CVE: CVE-2014-3120
script.disable_dynamic: true
# Unicast discovery allows to explicitly control which nodes will be used
# to discover the cluster. It can be used when multicast is not present,
# or to restrict the cluster communication-wise.
......
......@@ -30,7 +30,7 @@ FORUM_ELASTICSEARCH_URL: "http://{{ FORUM_ELASTICSEARCH_HOST }}:{{ FORUM_ELASTIC
# This needs to be a string, set to 'false' to disable
FORUM_NEW_RELIC_ENABLE: 'true'
FORUM_NEW_RELIC_LICENSE_KEY: "new-relic-license-key"
FORUM_NEW_RELIC_APP_NAME: "forum-newrelic-app"
FORUM_NEW_RELIC_APP_NAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-forum"
FORUM_WORKER_PROCESSES: "4"
FORUM_LISTEN_HOST: "0.0.0.0"
......
......@@ -34,6 +34,7 @@
dest={{ forum_code_dir }} repo={{ forum_source_repo }} version={{ forum_version }}
accept_hostkey=yes
sudo_user: "{{ forum_user }}"
register: forum_checkout
notify: restart the forum service
# TODO: This is done as the common_web_user
......@@ -66,4 +67,7 @@
- include: test.yml tags=deploy
- include: tag_ec2.yml tags=deploy
when: COMMON_TAG_EC2_INSTANCE
- set_fact: forum_installed=true
---
- name: get instance information
action: ec2_facts
- name: tag instance
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:forum" : "{{ forum_source_repo }} {{ forum_checkout.after|truncate(7,True,'') }}"
when: forum_checkout.after is defined
......@@ -27,15 +27,19 @@ JENKINS_ADMIN_AWS_CREDENTIALS: !!null
jenkins_admin_role_name: jenkins_admin
# repo for nodejs
jenkins_chrislea_ppa: "ppa:chris-lea/node.js"
#
# OS packages
#
jenkins_admin_debian_repos:
- "deb http://cosmos.cites.illinois.edu/pub/ubuntu/ precise-backports main universe"
jenkins_admin_debian_pkgs:
# These are copied from the edxapp
# role so that we can create virtualenvs
# on the jenkins server for edxapp
- npm
# for compiling the virtualenv
# (only needed if wheel files aren't available)
- build-essential
......@@ -58,7 +62,7 @@ jenkins_admin_debian_pkgs:
# misc
- curl
- ipython
- npm
- nodejs
- ntp
# for shapely
- libgeos-dev
......@@ -73,6 +77,8 @@ jenkins_admin_debian_pkgs:
- ruby1.9.1
# for check-migrations
- mysql-client
# for aws cli scripting
- jq
jenkins_admin_gem_pkgs:
# for generating status.edx.org
......@@ -85,55 +91,69 @@ jenkins_admin_plugins:
- { name: "rebuild", version: "1.21" }
- { name: "build-user-vars-plugin", version: "1.1" }
- { name: "build-token-root", version: "1.1" }
- { name: "matrix-auth", version: "1.0.2" }
- { name: "mailer", version: "1.5" }
- { name: "external-monitor-job", version: "1.1" }
- { name: "ldap", version: "1.2" }
- { name: "pam-auth", version: "1.0" }
- { name: "matrix-auth", version: "1.2" }
- { name: "matrix-project", version: "1.3" }
- { name: "mailer", version: "1.9" }
- { name: "ldap", version: "1.10.2" }
- { name: "pam-auth", version: "1.1" }
- { name: "ant", version: "1.2" }
- { name: "build-user-vars-plugin", version: "1.1" }
- { name: "credentials", version: "1.8.3" }
- { name: "ssh-credentials", version: "1.5.1" }
- { name: "ssh-agent", version: "1.3" }
- { name: "token-macro", version: "1.8.1" }
- { name: "parameterized-trigger", version: "2.20" }
- { name: "build-user-vars-plugin", version: "1.3" }
- { name: "credentials", version: "1.15" }
- { name: "ssh-credentials", version: "1.7.1" }
- { name: "ssh-agent", version: "1.4.1" }
- { name: "token-macro", version: "1.10" }
- { name: "parameterized-trigger", version: "2.25" }
- { name: "multiple-scms", version: "0.3" }
- { name: "git", version: "1.5.0" }
- { name: "git", version: "2.2.2" }
- { name: "thinBackup", version: "1.7.4" }
- { name: "maven-plugin", version: "2.0" }
- { name: "build-token-root", version: "1.0" }
- { name: "maven-plugin", version: "2.5" }
- { name: "copy-project-link", version: "1.2" }
- { name: "scriptler", version: "2.6.1" }
- { name: "rebuild", version: "1.20" }
- { name: "ssh-slaves", version: "1.4" }
- { name: "translation", version: "1.10" }
- { name: "rebuild", version: "1.21" }
- { name: "ssh-slaves", version: "1.6" }
- { name: "translation", version: "1.11" }
- { name: "dynamicparameter", version: "0.2.0" }
- { name: "hipchat", version: "0.1.5" }
- { name: "throttle-concurrents", version: "1.8.2" }
- { name: "hipchat", version: "0.1.6" }
- { name: "throttle-concurrents", version: "1.8.3" }
- { name: "mask-passwords", version: "2.7.2" }
- { name: "jquery", version: "1.7.2-1" }
- { name: "dashboard-view", version: "2.9.1" }
- { name: "build-pipeline-plugin", version: "1.4" }
- { name: "dashboard-view", version: "2.9.4" }
- { name: "build-pipeline-plugin", version: "1.4.3" }
- { name: "s3", version: "0.5" }
- { name: "tmpcleaner", version: "1.1" }
- { name: "jobConfigHistory", version: "2.4" }
- { name: "build-timeout", version: "1.11" }
- { name: "next-build-number", version: "1.0" }
- { name: "nested-view", version: "1.10" }
- { name: "timestamper", version: "1.5.7" }
- { name: "github-api", version: "1.44" }
- { name: "jobConfigHistory", version: "2.8" }
- { name: "build-timeout", version: "1.14" }
- { name: "next-build-number", version: "1.1" }
- { name: "nested-view", version: "1.14" }
- { name: "timestamper", version: "1.5.14" }
- { name: "github-api", version: "1.55" }
- { name: "postbuild-task", version: "1.8" }
- { name: "cobertura", version: "1.9.2" }
- { name: "notification", version: "1.5" }
- { name: "violations", version: "0.7.11" }
- { name: "copy-to-slave", version: "1.4.3" }
- { name: "github", version: "1.8" }
- { name: "copyartifact", version: "1.28" }
- { name: "shiningpanda", version: "0.20" }
- { name: "htmlpublisher", version: "1.2" }
- { name: "github-oauth", version: "0.14" }
- { name: "github", version: "1.9.1" }
- { name: "copyartifact", version: "1.31" }
- { name: "shiningpanda", version: "0.21" }
- { name: "htmlpublisher", version: "1.3" }
- { name: "github-oauth", version: "0.19" }
- { name: "build-name-setter", version: "1.3" }
- { name: "ec2", version: "1.19" }
- { name: "ec2", version: "1.23" }
- { name: "jenkins-flowdock-plugin", version: "1.1.3" }
- { name: "simple-parameterized-builds-report", version: "1.3" }
jenkins_admin_jobs:
- 'backup-jenkins'
# Supervisor related settings
jenkins_supervisor_user: "{{ jenkins_user }}"
jenkins_supervisor_app_dir: "{{ jenkins_home }}/supervisor"
jenkins_supervisor_cfg_dir: "{{ jenkins_supervisor_app_dir }}/conf.d"
jenkins_supervisor_available_dir: "{{ jenkins_supervisor_app_dir }}/available.d"
jenkins_supervisor_data_dir: "{{ jenkins_home }}/supervisor/data"
jenkins_supervisor_cfg: "{{ jenkins_supervisor_app_dir }}/supervisord.conf"
jenkins_supervisor_log_dir: "{{ COMMON_LOG_DIR }}/supervisor/jenkins"
jenkins_supervisor_venv_dir: "{{ jenkins_home }}/venvs/supervisor"
jenkins_supervisor_venv_bin: "{{ jenkins_supervisor_venv_dir }}/bin"
jenkins_supervisor_ctl: "{{ jenkins_supervisor_venv_bin }}/supervisorctl"
jenkins_supervisor_service_user: "{{ jenkins_user }}"
jenkins_admin_scripts_dir: "{{ jenkins_home }}/scripts"
#!/bin/bash -x
# This script will monitor two NATs and route to a backup nat
# if the primary fails.
set -e
# Health Check variables
Num_Pings=3
Ping_Timeout=2
Wait_Between_Pings=2
Wait_for_Instance_Stop=60
Wait_for_Instance_Start=300
ID_UPDATE_INTERVAL=150
send_message() {
message_file=/var/tmp/message-$$.json
message_string=$1
if [ -z "$message_string" ]; then
message_string="Unknown error for $VPC_NAME NAT monitor"
fi
message_body=$2
cat << EOF > $message_file
{"Subject":{"Data":"$message_string"},"Body":{"Text":{"Data": "$message_body"}}}
EOF
echo `date` "-- $message_body"
BASE_PROFILE=$AWS_DEFAULT_PROFILE
export AWS_DEFAULT_PROFILE=$AWS_MAIL_PROFILE
aws ses send-email --from $NAT_MONITOR_FROM_EMAIL --to $NAT_MONITOR_TO_EMAIL --message file://$message_file
export AWS_DEFAULT_PROFILE=$BASE_PROFILE
}
trap send_message ERR SIGHUP SIGINT SIGTERM
# Determine the NAT instance private IP so we can ping the other NAT instance, take over
# its route, and reboot it. Requires EC2 DescribeInstances, ReplaceRoute, and Start/RebootInstances
# permissions. The following example EC2 Roles policy will authorize these commands:
# {
# "Statement": [
# {
# "Action": [
# "ec2:DescribeInstances",
# "ec2:CreateRoute",
# "ec2:ReplaceRoute",
# "ec2:StartInstances",
# "ec2:StopInstances"
# ],
# "Effect": "Allow",
# "Resource": "*"
# }
# ]
# }
COUNTER=0
echo `date` "-- Running NAT monitor"
while true; do
# Re-check the IDs and IPs periodically.
# This is useful in case the primary NAT changes by some
# means other than this script.
if [ $COUNTER -eq 0 ]; then
# NAT instance variables
PRIMARY_NAT_ID=`aws ec2 describe-route-tables --filters Name=tag:aws:cloudformation:stack-name,Values=$VPC_NAME Name=tag:aws:cloudformation:logical-id,Values=PrivateRouteTable | jq '.RouteTables[].Routes[].InstanceId|strings' -r`
BACKUP_NAT_ID=`aws ec2 describe-instances --filters Name=tag:aws:cloudformation:stack-name,Values=$VPC_NAME Name=tag:aws:cloudformation:logical-id,Values=NATDevice,BackupNATDevice | jq '.Reservations[].Instances[].InstanceId' -r | grep -v $PRIMARY_NAT_ID`
NAT_RT_ID=`aws ec2 describe-route-tables --filters Name=tag:aws:cloudformation:stack-name,Values=$VPC_NAME Name=tag:aws:cloudformation:logical-id,Values=PrivateRouteTable | jq '.RouteTables[].RouteTableId' -r`
# Get the primary NAT instance's IP
PRIMARY_NAT_IP=`aws ec2 describe-instances --instance-ids $PRIMARY_NAT_ID | jq -r ".Reservations[].Instances[].PrivateIpAddress"`
BACKUP_NAT_IP=`aws ec2 describe-instances --instance-ids $BACKUP_NAT_ID | jq -r ".Reservations[].Instances[].PrivateIpAddress"`
fi
# Increment outside the if block so the IDs are actually refreshed every
# $ID_UPDATE_INTERVAL iterations instead of being fetched only once.
let "COUNTER += 1"
let "COUNTER %= $ID_UPDATE_INTERVAL"
# Check the health of both instances.
primary_pingresult=`ping -c $Num_Pings -W $Ping_Timeout $PRIMARY_NAT_IP | grep time= | wc -l`
if [ "$primary_pingresult" == "0" ]; then
backup_pingresult=`ping -c $Num_Pings -W $Ping_Timeout $BACKUP_NAT_IP | grep time= | wc -l`
if [ "$backup_pingresult" == "0" ]; then
send_message "Error monitoring NATs for $VPC_NAME." "ERROR -- Both NATs($PRIMARY_NAT_ID and $BACKUP_NAT_ID) were unreachable."
else # Backup NAT is healthy.
send_message "Primary $VPC_NAME NAT failed ping" "-- NAT($PRIMARY_NAT_ID) heartbeat failed, consider using $BACKUP_NAT_ID for $NAT_RT_ID default route
Command for re-routing:
aws ec2 replace-route --route-table-id $NAT_RT_ID --destination-cidr-block 0.0.0.0/0 --instance-id $BACKUP_NAT_ID"
fi
else
echo `date` "-- PRIMARY NAT ($PRIMARY_NAT_ID $PRIMARY_NAT_IP) reports healthy to pings"
sleep $Wait_Between_Pings
fi
done
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role jenkins_admin
#
# Overview:
#
# Have to use shell here because supervisorctl doesn't support
# process groups.
- name: restart nat monitor
shell: "{{ jenkins_supervisor_ctl }} -c {{ jenkins_supervisor_cfg }} restart nat_monitor:*"
when: not disable_edx_services
......@@ -20,4 +20,16 @@
# }
dependencies:
- common
- jenkins_master
- aws
- role: jenkins_master
jenkins_plugins: $jenkins_admin_plugins
- role: supervisor
supervisor_app_dir: "{{ jenkins_supervisor_app_dir }}"
supervisor_data_dir: "{{ jenkins_supervisor_data_dir }}"
supervisor_log_dir: "{{ jenkins_supervisor_log_dir }}"
supervisor_venv_dir: "{{ jenkins_supervisor_venv_dir }}"
supervisor_service_user: "{{ jenkins_supervisor_user }}"
supervisor_available_dir: "{{ jenkins_supervisor_available_dir }}"
supervisor_cfg_dir: "{{ jenkins_supervisor_cfg_dir }}"
supervisor_service: "supervisor.jenkins"
supervisor_http_bind_port: '9003'
......@@ -33,24 +33,13 @@
- fail: msg="JENKINS_ADMIN_S3_PROFILE.secret_key is not defined."
when: JENKINS_ADMIN_S3_PROFILE.secret_key is not defined
# We first download the plugins to a temp directory and include
# the version in the file name. That way, if we increment
# the version, the plugin will be updated in Jenkins
- name: download Jenkins plugins
get_url: url=http://updates.jenkins-ci.org/download/plugins/{{ item.name }}/{{ item.version }}/{{ item.name }}.hpi
dest=/tmp/{{ item.name }}_{{ item.version }}
with_items: jenkins_admin_plugins
- name: install Jenkins plugins
command: cp /tmp/{{ item.name }}_{{ item.version }} {{ jenkins_home }}/plugins/{{ item.name }}.hpi
with_items: jenkins_admin_plugins
- name: set Jenkins plugin permissions
file: path={{ jenkins_home }}/plugins/{{ item.name }}.hpi
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
with_items: jenkins_admin_plugins
notify:
- restart Jenkins
- name: add admin specific apt repositories
apt_repository: repo="{{ item }}" state=present update_cache=yes
with_items: jenkins_admin_debian_repos
- name: create the scripts directory
file: path={{ jenkins_admin_scripts_dir }} state=directory
owner={{ jenkins_user }} group={{ jenkins_group }} mode=755
- name: configure s3 plugin
template: >
......@@ -67,6 +56,24 @@
owner="{{ jenkins_user }}"
group="{{ jenkins_group }}"
mode="0600"
tags:
- aws-config
- name: create the .aws directory
file: path={{ jenkins_home }}/.aws state=directory
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
tags:
- aws-config
- name: configure the awscli profiles for jenkins
template: >
src="./{{ jenkins_home }}/aws_config.j2"
dest="{{ jenkins_home }}/.aws/config"
owner="{{ jenkins_user }}"
group="{{ jenkins_group }}"
mode="0600"
tags:
- aws-config
- name: create the ssh directory
file: >
......@@ -108,9 +115,12 @@
mode=0644
with_items: jenkins_admin_jobs
# adding chris-lea nodejs repo
- name: add ppas for current versions of nodejs
apt_repository: repo="{{ jenkins_chrislea_ppa }}"
- name: install system packages for edxapp virtualenvs
apt: pkg={{ item }} state=present
with_items: jenkins_admin_debian_pkgs
apt: pkg={{','.join(jenkins_admin_debian_pkgs)}} state=present update_cache=yes
# This is necessary so that ansible can run with
# sudo set to True (as the jenkins user) on jenkins
......@@ -127,3 +137,7 @@
version={{ item.version }}
user_install=no
with_items: jenkins_admin_gem_pkgs
- include: nat_monitor.yml
tags:
- nat-monitor
---
# NAT monitors should be defined as a list of dictionaries
# e.g.
# NAT_MONITORS:
# - vpc_name: 'loadtest-edx'
# region: 'us-east-1'
# deployment: 'edx'
#
# To receive e-mails, SES should be set up with the
# AWS account that is defined by the JENKINS_ADMIN_MAIL_PROFILE
# and the from address should be verified
# JENKINS_ADMIN_MAIL_PROFILE: 'aws_account_name'
# JENKINS_ADMIN_FROM_EMAIL: 'admin@example.com'
# JENKINS_ADMIN_TO_EMAIL: 'alert@example.com'
- fail: msg="NAT_MONITORS is not defined."
when: NAT_MONITORS is not defined
- name: upload the monitor script
copy:
dest="{{ jenkins_admin_scripts_dir }}/nat-monitor.sh"
src="nat-monitor.sh"
owner="{{ jenkins_user }}"
group="{{ jenkins_group }}"
mode="755"
sudo_user: "{{ jenkins_user }}"
- name: create a supervisor config
template:
src="nat-monitor.conf.j2" dest="{{ jenkins_supervisor_available_dir }}/nat-monitor.conf"
owner="{{ jenkins_user }}"
group="{{ jenkins_group }}"
sudo_user: "{{ jenkins_user }}"
notify: restart nat monitor
- name: enable the supervisor config
file:
src="{{ jenkins_supervisor_available_dir }}/nat-monitor.conf"
dest="{{ jenkins_supervisor_cfg_dir }}/nat-monitor.conf"
state=link
force=yes
mode=0644
sudo_user: "{{ jenkins_user }}"
when: not disable_edx_services
notify: restart nat monitor
- name: update supervisor configuration
shell: "{{ jenkins_supervisor_ctl }} -c {{ jenkins_supervisor_cfg }} update"
register: supervisor_update
changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != ""
when: not disable_edx_services
# Have to use shell here because supervisorctl doesn't support
# process groups.
- name: ensure nat monitor is started
shell: "{{ jenkins_supervisor_ctl }} -c {{ jenkins_supervisor_cfg }} start nat_monitor:*"
when: not disable_edx_services
{% for deployment, creds in JENKINS_ADMIN_AWS_CREDENTIALS.iteritems() %}
[profile {{deployment}}]
aws_access_key_id = {{ creds.access_id }}
aws_secret_access_key = {{ creds.secret_key }}
{% endfor %}
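{# A minimal sketch of the variable shape this template expects; the
   deployment name and values are placeholders, not real credentials:
   JENKINS_ADMIN_AWS_CREDENTIALS:
     edx:
       access_id: AKIAEXAMPLE
       secret_key: EXAMPLEKEY
#}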
......@@ -27,7 +27,6 @@ mkdir -p $BUILD_ID/jobs
# Copy global configuration files into the workspace
cp $JENKINS_HOME/*.xml $BUILD_ID/
# Copy keys and secrets into the workspace
cp $JENKINS_HOME/identity.key $BUILD_ID/
cp $JENKINS_HOME/secret.key $BUILD_ID/
cp $JENKINS_HOME/secret.key.not-so-secret $BUILD_ID/
cp -r $JENKINS_HOME/secrets $BUILD_ID/
......
{% for m in NAT_MONITORS %}
[program:nat_monitor_{{ m.vpc_name|replace('-','_') }}]
environment=VPC_NAME="{{ m.vpc_name }}",AWS_DEFAULT_REGION="{{ m.region }}",AWS_DEFAULT_PROFILE="{{ m.deployment }}",AWS_MAIL_PROFILE="{{ JENKINS_ADMIN_MAIL_PROFILE }}",NAT_MONITOR_FROM_EMAIL="{{ JENKINS_ADMIN_FROM_EMAIL }}",NAT_MONITOR_TO_EMAIL="{{ JENKINS_ADMIN_TO_EMAIL }}"
user={{ jenkins_supervisor_service_user }}
directory={{ jenkins_admin_scripts_dir }}
stdout_logfile={{ jenkins_supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ jenkins_supervisor_log_dir }}/%(program_name)s-stderr.log
command={{ jenkins_admin_scripts_dir }}/nat-monitor.sh
killasgroup=true
stopasgroup=true
{% endfor %}
[group:nat_monitor]
programs={%- for m in NAT_MONITORS %}nat_monitor_{{ m.vpc_name|replace('-','_') }}{%- if not loop.last %},{%- endif %}{%- endfor %}
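{# For the NAT_MONITORS example documented in the role (vpc_name
   'loadtest-edx'), this renders a [program:nat_monitor_loadtest_edx] section
   and programs=nat_monitor_loadtest_edx, since dashes become underscores. #}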
......@@ -4,7 +4,7 @@ jenkins_group: "edx"
jenkins_server_name: "jenkins.testeng.edx.org"
jenkins_port: 8080
jenkins_version: 1.538
jenkins_version: 1.574
jenkins_deb_url: "http://pkg.jenkins-ci.org/debian/binary/jenkins_{{ jenkins_version }}_all.deb"
jenkins_deb: "jenkins_{{ jenkins_version }}_all.deb"
......@@ -17,7 +17,7 @@ jenkins_plugins:
- { name: "copy-to-slave", version: "1.4.3" }
- { name: "credentials", version: "1.8.3" }
- { name: "dashboard-view", version: "2.9.1" }
- { name: "ec2", version: "1.19" }
- { name: "ec2", version: "1.23" }
- { name: "github", version: "1.8" }
- { name: "github-api", version: "1.44" }
- { name: "github-oauth", version: "0.14" }
......@@ -28,10 +28,12 @@ jenkins_plugins:
- { name: "mailer", version: "1.5" }
- { name: "nested-view", version: "1.10" }
- { name: "next-build-number", version: "1.0" }
- { name: "node-iterator-api", version: "1.5" }
- { name: "notification", version: "1.5" }
- { name: "pam-auth", version: "1.0" }
- { name: "parameterized-trigger", version: "2.20" }
- { name: "postbuild-task", version: "1.8" }
- { name: "PrioritySorter", version: "2.8" }
- { name: "sauce-ondemand", version: "1.61" }
- { name: "s3", version: "0.5" }
- { name: "ssh-agent", version: "1.3" }
......@@ -45,6 +47,7 @@ jenkins_plugins:
- { name: "multiple-scms", version: "0.2" }
- { name: "timestamper", version: "1.5.7" }
- { name: "thinBackup", version: "1.7.4"}
- { name: "xunit", version: "1.89"}
jenkins_bundled_plugins:
- "credentials"
......
......@@ -23,13 +23,17 @@
# Should be resolved in the next release, but until then we need to do this
# https://issues.jenkins-ci.org/browse/JENKINS-20407
- name: workaround for JENKINS-20407
command: "mkdir -p /var/run/jenkins"
file:
path="/var/run/jenkins"
state=directory
owner="{{ jenkins_user }}"
group="{{ jenkins_group }}"
- name: download Jenkins package
get_url: url="{{ jenkins_deb_url }}" dest="/tmp/{{ jenkins_deb }}"
- name: install Jenkins package
command: dpkg -i --force-depends "/tmp/{{ jenkins_deb }}"
shell: dpkg -i --force-depends "/tmp/{{ jenkins_deb }}"
- name: stop Jenkins
service: name=jenkins state=stopped
......@@ -57,8 +61,11 @@
shell: usermod -d {{jenkins_home}} {{jenkins_user}}
- name: make plugins directory
sudo_user: jenkins
shell: mkdir -p {{ jenkins_home }}/plugins
file:
path="{{ jenkins_home }}/plugins"
state=directory
owner="{{ jenkins_user }}"
group="{{ jenkins_group }}"
# We first download the plugins to a temp directory and include
# the version in the file name. That way, if we increment
......@@ -67,15 +74,18 @@
get_url: url=http://updates.jenkins-ci.org/download/plugins/{{ item.name }}/{{ item.version }}/{{ item.name }}.hpi
dest=/tmp/{{ item.name }}_{{ item.version }}
with_items: jenkins_plugins
register: jenkins_plugin_downloads
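# Registering a task that loops over with_items stores one entry per item in
# jenkins_plugin_downloads.results; each entry exposes the original loop
# element as item.item, which the install and permissions tasks below use.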
- name: install Jenkins plugins
command: cp /tmp/{{ item.name }}_{{ item.version }} {{ jenkins_home }}/plugins/{{ item.name }}.hpi
with_items: jenkins_plugins
command: cp {{ item.dest }} {{ jenkins_home }}/plugins/{{ item.item.name }}.hpi
with_items: jenkins_plugin_downloads.results
when: item.changed
- name: set Jenkins plugin permissions
file: path={{ jenkins_home }}/plugins/{{ item.name }}.hpi
file: path={{ jenkins_home }}/plugins/{{ item.item.name }}.hpi
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
with_items: jenkins_plugins
with_items: jenkins_plugin_downloads.results
when: item.changed
notify:
- restart Jenkins
......@@ -88,23 +98,26 @@
repo={{ item.repo_url }} dest=/tmp/{{ item.repo_name }} version={{ item.version }}
accept_hostkey=yes
with_items: jenkins_custom_plugins
register: jenkins_custom_plugins_checkout
- name: compile custom plugins
command: mvn -Dmaven.test.skip=true install chdir=/tmp/{{ item.repo_name }}
with_items: jenkins_custom_plugins
command: mvn -Dmaven.test.skip=true install chdir=/tmp/{{ item.item.repo_name }}
with_items: jenkins_custom_plugins_checkout.results
when: item.changed
- name: install custom plugins
command: mv /tmp/{{ item.repo_name }}/target/{{ item.package }}
{{ jenkins_home }}/plugins/{{ item.package }}
with_items: jenkins_custom_plugins
command: mv /tmp/{{ item.item.repo_name }}/target/{{ item.item.package }}
{{ jenkins_home }}/plugins/{{ item.item.package }}
with_items: jenkins_custom_plugins_checkout.results
when: item.changed
notify:
- restart Jenkins
- name: set custom plugin permissions
file: path={{ jenkins_home }}/plugins/{{ item.package }}
file: path={{ jenkins_home }}/plugins/{{ item.item.package }}
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
with_items: jenkins_custom_plugins
with_items: jenkins_custom_plugins_checkout.results
when: item.changed
# Plugins that are bundled with Jenkins are "pinned".
# Jenkins will overwrite updated plugins with its built-in version
......
......@@ -3,6 +3,9 @@ jenkins_user: "jenkins"
jenkins_group: "jenkins"
jenkins_home: /home/jenkins
# repo for nodejs
jenkins_chrislea_ppa: "ppa:chris-lea/node.js"
# System packages
jenkins_debian_pkgs:
- build-essential
......@@ -15,7 +18,7 @@ jenkins_debian_pkgs:
- libxml2-dev
- libgeos-dev
- libxslt1-dev
- npm
- nodejs
- pkg-config
- gettext
......@@ -27,103 +30,5 @@ jenkins_ruby_version: "1.9.3-p374"
jscover_url: "http://files.edx.org/testeng/JSCover-1.0.2.zip"
jscover_version: "1.0.2"
# Python
jenkins_venv: "{{ jenkins_home }}/wheel_venv"
jenkins_pip: "{{ jenkins_venv }}/bin/pip"
jenkins_wheel_dir: "{{ jenkins_home }}/wheelhouse"
jenkins_wheels:
- { pkg: "numpy==1.6.2", wheel: "numpy-1.6.2-cp27-none-linux_x86_64.whl" }
- { pkg: "django-celery==3.0.17", wheel: "django_celery-3.0.17-py27-none-any.whl" }
- { pkg: "beautifulsoup4==4.1.3", wheel: "beautifulsoup4-4.1.3-py27-none-any.whl"}
- { pkg: "beautifulsoup==3.2.1", wheel: "BeautifulSoup-3.2.1-py27-none-any.whl" }
- { pkg: "bleach==1.4", wheel: "bleach-1.4-py27-none-any.whl" }
- { pkg: "html5lib==0.999", wheel: "html5lib-0.999-py27-none-any.whl" }
- { pkg: "boto==2.13.3", wheel: "boto-2.13.3-py27-none-any.whl" }
- { pkg: "celery==3.0.19", wheel: "celery-3.0.19-py27-none-any.whl" }
- { pkg: "dealer==0.2.3", wheel: "dealer-0.2.3-py27-none-any.whl" }
- { pkg: "django-countries==1.5", wheel: "django_countries-1.5-py27-none-any.whl" }
- { pkg: "django-filter==0.6.0", wheel: "django_filter-0.6-py27-none-any.whl" }
- { pkg: "django-followit==0.0.3", wheel: "django_followit-0.0.3-py27-none-any.whl" }
- { pkg: "django-kombu==0.9.4", wheel: "kombu-2.5.16-py27-none-any.whl" }
- { pkg: "django-mako==0.1.5pre", wheel: "django_mako-0.1.5pre-py27-none-any.whl" }
- { pkg: "django-model-utils==1.4.0", wheel: "django_model_utils-1.4.0-py27-none-any.whl" }
- { pkg: "django-masquerade==0.1.6", wheel: "django_masquerade-0.1.6-py27-none-any.whl" }
- { pkg: "django-mptt==0.5.5", wheel: "django_mptt-0.5.5-py27-none-any.whl" }
- { pkg: "django-openid-auth==0.4", wheel: "python_openid-2.2.5-py27-none-any.whl" }
- { pkg: "django-robots==0.9.1", wheel: "django_robots-0.9.1-py27-none-any.whl" }
- { pkg: "django-sekizai==0.6.1", wheel: "django_sekizai-0.6.1-py27-none-any.whl" }
- { pkg: "django-ses==0.4.1", wheel: "django_ses-0.4.1-py27-none-any.whl" }
- { pkg: "django-storages==1.1.5", wheel: "django_storages-1.1.5-py27-none-any.whl" }
- { pkg: "django-method-override==0.1.0", wheel: "django_method_override-0.1.0-py27-none-any.whl" }
- { pkg: "djangorestframework==2.3.5", wheel: "djangorestframework-2.3.5-py27-none-any.whl" }
- { pkg: "django==1.4.8", wheel: "Django-1.4.8-py27-none-any.whl" }
- { pkg: "feedparser==5.1.3", wheel: "feedparser-5.1.3-py27-none-any.whl" }
- { pkg: "fs==0.4.0", wheel: "fs-0.4.0-py27-none-any.whl" }
- { pkg: "GitPython==0.3.2.RC1", wheel: "GitPython-0.3.2.RC1-py27-none-any.whl" }
- { pkg: "glob2==0.3", wheel: "glob2-0.3-py27-none-any.whl" }
- { pkg: "gunicorn==0.17.4", wheel: "gunicorn-0.17.4-py27-none-any.whl" }
- { pkg: "lazy==1.1", wheel: "lazy-1.1-py27-none-any.whl" }
- { pkg: "lxml==3.0.1", wheel: "lxml-3.0.1-cp27-none-linux_x86_64.whl" }
- { pkg: "mako==0.9.1", wheel: "Mako-0.9.1-py2.py3-none-any.whl" }
- { pkg: "Markdown==2.2.1", wheel: "Markdown-2.2.1-py27-none-any.whl" }
- { pkg: "mongoengine==0.7.10", wheel: "mongoengine-0.7.10-py27-none-any.whl" }
- { pkg: "networkx==1.7", wheel: "networkx-1.7-py27-none-any.whl" }
- { pkg: "nltk==2.0.4", wheel: "nltk-2.0.4-py27-none-any.whl" }
- { pkg: "oauthlib==0.5.1", wheel: "oauthlib-0.5.1-py27-none-any.whl" }
- { pkg: "paramiko==1.9.0", wheel: "paramiko-1.9.0-py27-none-any.whl" }
- { pkg: "path.py==3.0.1", wheel: "path.py-3.0.1-py27-none-any.whl" }
- { pkg: "Pillow==1.7.8", wheel: "Pillow-1.7.8-cp27-none-linux_x86_64.whl" }
- { pkg: "polib==1.0.3", wheel: "polib-1.0.3-py27-none-any.whl" }
- { pkg: "pycrypto>=2.6", wheel: "pycrypto-2.6.1-cp27-none-linux_x86_64.whl" }
- { pkg: "pygments==1.6", wheel: "Pygments-1.6-py27-none-any.whl" }
- { pkg: "pygraphviz==1.1", wheel: "pygraphviz-1.1-cp27-none-linux_x86_64.whl" }
- { pkg: "pymongo==2.4.1", wheel: "pymongo-2.4.1-cp27-none-linux_x86_64.whl" }
- { pkg: "pyparsing==1.5.6", wheel: "pyparsing-1.5.6-py27-none-any.whl" }
- { pkg: "python-memcached==1.48", wheel: "python_memcached-1.48-py27-none-any.whl" }
- { pkg: "python-openid==2.2.5", wheel: "python_openid-2.2.5-py27-none-any.whl" }
- { pkg: "python-dateutil==2.1", wheel: "python_dateutil-2.1-py27-none-any.whl" }
- { pkg: "python-social-auth==0.1.21", wheel: "python_social_auth-0.1.21-py27-none-any.whl" }
- { pkg: "pytz==2012h", wheel: "pytz-2012h-py27-none-any.whl" }
- { pkg: "pysrt==0.4.7", wheel: "pysrt-0.4.7-py27-none-any.whl" }
- { pkg: "PyYAML==3.10", wheel: "PyYAML-3.10-cp27-none-linux_x86_64.whl" }
- { pkg: "requests==1.2.3", wheel: "requests-1.2.3-py27-none-any.whl" }
- { pkg: "scipy==0.11.0", wheel: "scipy-0.11.0-cp27-none-linux_x86_64.whl" }
- { pkg: "Shapely==1.2.16", wheel: "Shapely-1.2.16-cp27-none-linux_x86_64.whl" }
- { pkg: "singledispatch==3.4.0.2", wheel: "singledispatch-3.4.0.2-py27-none-any.whl" }
- { pkg: "sorl-thumbnail==11.12", wheel: "sorl_thumbnail-11.12-py27-none-any.whl" }
- { pkg: "South==0.7.6", wheel: "South-0.7.6-py27-none-any.whl" }
- { pkg: "sympy==0.7.1", wheel: "sympy-0.7.1-py27-none-any.whl" }
- { pkg: "xmltodict==0.4.1", wheel: "xmltodict-0.4.1-py27-none-any.whl" }
- { pkg: "django-ratelimit-backend==0.6", wheel: "django_ratelimit_backend-0.6-py27-none-any.whl" }
- { pkg: "unicodecsv==0.9.4", wheel: "unicodecsv-0.9.4-py27-none-any.whl" }
- { pkg: "ipython==0.13.1", wheel: "ipython-0.13.1-py27-none-any.whl" }
- { pkg: "watchdog==0.6.0", wheel: "watchdog-0.6.0-py27-none-any.whl" }
- { pkg: "dogapi==1.2.1", wheel: "dogapi-1.2.1-py27-none-any.whl" }
- { pkg: "newrelic==2.4.0.4", wheel: "newrelic-2.4.0.4-cp27-none-linux_x86_64.whl" }
- { pkg: "sphinx==1.1.3", wheel: "Sphinx-1.1.3-py27-none-any.whl" }
- { pkg: "sphinx_rtd_theme==0.1.5", wheel: "sphinx_rtd_theme-0.1.5-py27-none-any.whl" }
- { pkg: "Babel==1.3", wheel: "Babel-1.3-py27-none-any.whl" }
- { pkg: "transifex-client==0.10", wheel: "transifex_client-0.10-py27-none-any.whl" }
- { pkg: "django_debug_toolbar", wheel: "django_debug_toolbar-0.11.0-py2.py3-none-any.whl" }
- { pkg: "django-debug-toolbar-mongo", wheel: "django_debug_toolbar_mongo-0.1.10-py27-none-any.whl" }
- { pkg: "chrono==1.0.2", wheel: "chrono-1.0.2-py2.py3-none-any.whl" }
- { pkg: "coverage==3.6", wheel: "coverage-3.6-cp27-none-linux_x86_64.whl" }
- { pkg: "ddt==0.7.1", wheel: "ddt-0.7.1-py27-none-any.whl" }
- { pkg: "django-crum==0.5", wheel: "django_crum-0.5-py27-none-any.whl" }
- { pkg: "django_nose==1.1", wheel: "django_nose-1.1-py27-none-any.whl" }
- { pkg: "factory_boy==2.1.2", wheel: "factory_boy-2.1.2-py27-none-any.whl" }
- { pkg: "freezegun==0.1.11", wheel: "freezegun-0.1.11-py27-none-any.whl" }
- { pkg: "mock==1.0.1", wheel: "mock-1.0.1-py27-none-any.whl" }
- { pkg: "nosexcover==1.0.7", wheel: "nosexcover-1.0.7-py27-none-any.whl" }
- { pkg: "pep8==1.4.5", wheel: "pep8-1.4.5-py27-none-any.whl" }
- { pkg: "pylint==0.28", wheel: "pylint-0.28.0-py27-none-any.whl" }
- { pkg: "python-subunit==0.0.16", wheel: "python_subunit-0.0.16-py27-none-any.whl" }
- { pkg: "rednose==0.3", wheel: "rednose-0.3-py27-none-any.whl" }
- { pkg: "selenium==2.39.0", wheel: "selenium-2.39.0-py27-none-any.whl" }
- { pkg: "splinter==0.5.4", wheel: "splinter-0.5.4-py27-none-any.whl" }
- { pkg: "testtools==0.9.34", wheel: "testtools-0.9.34-py27-none-any.whl" }
- { pkg: "Paver==1.2.1", wheel: "Paver-1.2.1-py27-none-any.whl" }
- { pkg: "psutil==1.2.1", wheel: "psutil-1.2.1-cp27-none-linux_x86_64.whl" }
- { pkg: "lazy==1.1", wheel: "lazy-1.1-py27-none-any.whl" }
- { pkg: "path.py==3.0.1", wheel: "path.py-3.0.1-py27-none-any.whl" }
- { pkg: "MySQL-python==1.2.5", wheel: "MySQL_python-1.2.5-cp27-none-linux_x86_64.whl" }
# packer direct download URL
packer_url: "https://dl.bintray.com/mitchellh/packer/0.6.1_linux_amd64.zip"
---
dependencies:
- common
- role: rbenv
rbenv_user: "{{ jenkins_user }}"
rbenv_dir: "{{ jenkins_home }}"
......
......@@ -8,7 +8,9 @@
# `jenkins_home`: /var/lib/jenkins
# `jenkins_user_home`: /home/jenkins
- include: packer.yml
- include: system.yml
- include: python.yml
- include: ruby.yml
- include: jscover.yml
- include: test.yml
---
- name: Download packer
get_url: url={{ packer_url }} dest=/var/tmp/packer.zip
- name: Unzip packer
unarchive: src=/var/tmp/packer.zip dest=/usr/local/bin copy=no
......@@ -17,45 +17,31 @@
owner=root group=root
mode=755
# Create wheelhouse to enable fast virtualenv creation
- name: Create wheel virtualenv
command: /usr/local/bin/virtualenv {{ jenkins_venv }} creates={{ jenkins_venv }}
# Create a virtualenv for edx-platform by installing the requirements
# and packaging the virtualenv.
# A shallow clone is created off of master. The depth setting
# refers to the --depth option of git clone. A value of 1
# will truncate all history prior to the last revision.
- name: Create shallow clone of edx-platform
git: >
repo=https://github.com/edx/edx-platform.git
dest={{ jenkins_home }}/shallow-clone
version=master
depth=1
sudo_user: "{{ jenkins_user }}"
- name: Install wheel
pip: name=wheel virtualenv={{ jenkins_venv }} virtualenv_command=/usr/local/bin/virtualenv
sudo_user: "{{ jenkins_user }}"
- name: Create wheelhouse dir
file:
path={{ jenkins_wheel_dir }} state=directory
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
# (need to install each one in the venv to satisfy dependencies)
- name: Create wheel archives
shell:
"{{ jenkins_pip }} wheel --wheel-dir={{ jenkins_wheel_dir }} \"${item.pkg}\" &&
{{ jenkins_pip }} install --use-wheel --no-index --find-links={{ jenkins_wheel_dir }} \"${item.pkg}\"
creates={{ jenkins_wheel_dir }}/${item.wheel}"
sudo_user: "{{ jenkins_user }}"
with_items: jenkins_wheels
- name: Add wheel_venv.sh script
template:
src=wheel_venv.sh.j2 dest={{ jenkins_home }}/wheel_venv.sh
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
# Run the wheel_venv.sh script for the first time
# This was previously done in the Jenkins global
# configuration as part of the AMI Init script.
# Moving here so that we can archive a clean snapshot
# of the virtualenv with only the defined packages
# from jenkins_wheels.
- name: Run the wheel_venv.sh script
command: >
./wheel_venv.sh edx-venv
chdir={{ jenkins_home }}
creates={{ jenkins_home }}/edx-venv
- name: Install edx-platform requirements
pip: >
requirements={{ jenkins_home }}/shallow-clone/requirements/edx/{{ item }}
extra_args="--exists-action w"
virtualenv={{ jenkins_home }}/edx-venv
virtualenv_command=virtualenv-2.7
with_items:
- pre.txt
- github.txt
- base.txt
- post.txt
- paver.txt
sudo_user: "{{ jenkins_user }}"
# Archive the current state of the virtualenv
......@@ -66,5 +52,9 @@
command: >
tar -cpzf edx-venv_clean.tar.gz edx-venv
chdir={{ jenkins_home }}
creates={{ jenkins_home }}/edx-venv_clean.tar.gz
sudo_user: "{{ jenkins_user }}"
# Remove the shallow-clone directory now that we are
# done with it
- name: Remove shallow-clone
file: path={{ jenkins_home }}/shallow-clone state=absent
......@@ -26,6 +26,10 @@
owner={{ jenkins_user }} group={{ jenkins_group }} mode=400
ignore_errors: yes
# adding chris-lea nodejs repo
- name: add ppas for current versions of nodejs
apt_repository: repo="{{ jenkins_chrislea_ppa }}"
- name: Install system packages
apt: pkg={{','.join(jenkins_debian_pkgs)}}
state=present update_cache=yes
......@@ -40,3 +44,16 @@
- name: Add github.com to known_hosts if it does not exist
shell: >
ssh-keygen -f {{ jenkins_home }}/.ssh/known_hosts -H -F github.com | grep -q found || ssh-keyscan -H github.com > {{ jenkins_home }}/.ssh/known_hosts
# Edit the /etc/hosts file so that the Preview button will work in Studio
- name: add preview.localhost to /etc/hosts
shell: sed -i -r 's/^127.0.0.1\s+.*$/127.0.0.1 localhost preview.localhost/' /etc/hosts
sudo: yes
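# For illustration: a default '127.0.0.1 localhost' line becomes
# '127.0.0.1 localhost preview.localhost' after the substitution above.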
# Npm registry must be pre-loaded or else setting it
# with the Jenkins user will fail
# See https://github.com/npm/npm/issues/3565
- name: Set npm registry
template:
src=.npmrc.j2 dest={{ jenkins_home }}/.npmrc
owner={{ jenkins_user }} group={{ jenkins_group }} mode=0664
---
# Tests for this role
# Set up #
# To get a baseline for timestamp comparisons,
# create a test file and register its stat info
- name: Create test file
file: path=testfile state=touch
- name: Stat test file
stat: path=testfile
register: testfile
# Tests #
- name: Verify java cmd is using v 1.7
shell: java -version
register: java_version
- assert:
that:
- "'1.7.0' in java_version.stderr"
# The role is run with a github oauth token passed in
# as the github_oauth_token var value.
# This test confirms that the token being used will work
- name: ensure github token works
shell:
"github_post_status.py edx edx-platform
dddac0b5dddf00c0950daf324e603e4935994954 success
https://jenkins.testeng.edx.org/ \"Tests Passed\""
# Run the github_pr_auth script to confirm it reports
# an expected error when there is nothing in the whitelist
- name: ensure github_pr_auth fails as expected
shell:
"github_pr_auth.py edx edx-platform 2498"
ignore_errors: True
register: pr_auth_result
- assert:
that:
- "'You can update the whitelist by' in '{{ pr_auth_result.stdout_lines[1] }}'"
# Run the github_pr_auth script with a value in the whitelist
# to ensure a passing run
- name: ensure github_pr_auth passes with a whitelist entry
shell:
"export GITHUB_OWNER_WHITELIST=edx &&
github_pr_auth.py edx edx-platform 2498"
# Verify the virtualenv tar is newly-built
- name: Get info on virtualenv tar
stat: path={{ jenkins_home }}/edx-venv_clean.tar.gz
register: edxvenv
- assert:
that:
# Assert that it was modified within the last hour
- "{{ testfile.stat.mtime }} - {{ edxvenv.stat.mtime }} < 3600"
# Tear Down #
- name: Remove test file
file: path=testfile state=absent
registry={{ COMMON_NPM_MIRROR_URL }}
#! /usr/bin/env bash
if [ $# -ne 1 ]; then
echo "Usage: $0 VENV_DIR"
exit 1
fi
# Create and activate the new virtualenv
VENV=$1
mkdir -p $VENV
/usr/local/bin/virtualenv $VENV
. $VENV/bin/activate
# Install each available wheel archive
ls {{ jenkins_wheel_dir }} | cut -d- -f1 | while read line ; do
pip install --use-wheel --no-index --find-links={{ jenkins_wheel_dir }} $line ;
done
......@@ -41,6 +41,8 @@
group: "{{ security_group }}"
instance_type: "{{ instance_type }}"
image: "{{ ami }}"
vpc_subnet_id: "{{ vpc_subnet_id }}"
assign_public_ip: yes
wait: true
region: "{{ region }}"
instance_tags: "{{instance_tags}}"
......
......@@ -27,6 +27,7 @@
src=app_bashrc.j2 dest={{ item.home }}/.bashrc
owner={{ item.user }} mode=755
with_items: localdev_accounts
ignore_errors: yes
# Default to the correct git config
# No more accidentally force pushing to master! :)
......@@ -35,6 +36,7 @@
src=gitconfig dest={{ item.home }}/.gitconfig
owner={{ item.user }} mode=700
with_items: localdev_accounts
ignore_errors: yes
# Configure X11 for application users
- name: preserve DISPLAY for sudo
......@@ -53,3 +55,16 @@
regexp=". {{ localdev_home }}/share_x11"
line=". {{ localdev_home }}/share_x11"
state=present
# Create scripts to add paver autocomplete
- name: add paver autocomplete
template:
src=paver_autocomplete dest={{ item.home }}/.paver_autocomplete
owner={{ item.user }} mode=755
with_items: localdev_accounts
ignore_errors: yes
# Edit the /etc/hosts file so that the Preview button will work in Studio
- name: add preview.localhost to /etc/hosts
shell: sed -i -r 's/^127.0.0.1\s+.*$/127.0.0.1 localhost preview.localhost/' /etc/hosts
sudo: yes
......@@ -15,3 +15,5 @@ else
fi
cd "{{ item.home }}/{{ item.repo }}"
source "{{ item.home }}/.paver_autocomplete"
# Courtesy of Gregory Nicholas
_paver()
{
local cur
COMPREPLY=()
# Variable to hold the current word
cur="${COMP_WORDS[COMP_CWORD]}"
# Build a list of the available tasks from: `paver --help --quiet`
local cmds=$(paver -hq | awk '/^ ([a-zA-Z][a-zA-Z0-9_]+)/ {print $1}')
# Generate possible matches and store them in the
# array variable COMPREPLY
COMPREPLY=($(compgen -W "${cmds}" $cur))
}
# Assign the auto-completion function for our command.
complete -F _paver paver
\ No newline at end of file
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role minos
#
MINOS_GIT_IDENTITY: !!null
#
# vars are namespaced with the module name.
#
minos_role_name: minos
minos_service_name: "{{ minos_role_name }}"
minos_data_dir: "{{ COMMON_DATA_DIR }}/minos"
minos_app_dir: "{{ COMMON_APP_DIR }}/minos"
minos_venv_dir: "{{ minos_app_dir }}/venvs/"
minos_log_dir: "{{ COMMON_LOG_DIR }}/minos"
minos_cfg_file: "{{ COMMON_CFG_DIR }}/minos/minos.yml"
minos_voter_cfg: "{{ COMMON_CFG_DIR }}/minos/conf.d/"
minos_git_ssh: "/tmp/git.sh"
minos_git_identity: "{{ minos_app_dir }}/minos-git-identity"
minos_edx_server_tools_repo: "git@github.com/edx-ops/edx-minos.git"
minos_edx_server_tools_version: "release"
minos_requirement: "git+ssh://{{ minos_edx_server_tools_repo }}@{{ minos_edx_server_tools_version }}#egg=edx-minos"
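# For illustration, with the defaults above minos_requirement renders as
#   git+ssh://git@github.com/edx-ops/edx-minos.git@release#egg=edx-minos
# so pip clones the repo over ssh at the "release" ref.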
#
# OS packages
#
minos_debian_pkgs: []
minos_redhat_pkgs: []
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role minos
#
# Overview:
#
#
- name: notify me
debug: msg="stub handler"
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Role includes for role minos
#
# Example:
#
# dependencies:
# - {
# role: my_role
# my_role_var0: "foo"
# my_role_var1: "bar"
# }
dependencies:
- role: edx_service
edx_role_name: "{{ minos_role_name }}"
edx_service_name: "{{ minos_service_name }}"
\ No newline at end of file
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role minos
#
# Overview:
#
# Install the (currently private) minos application,
# which determines whether or not it is safe to retire
# a server.
#
# Dependencies:
#
# Relies on the common role.
#
# Example play:
#
# - name: Deploy minos
# hosts: all
# sudo: True
# gather_facts: True
# vars:
# COMMON_ENABLE_MINOS: True
# roles:
# - common
# - minos
#
- name: gather ec2 facts
action: ec2_facts
- name: create minos config directory
file: >
path={{ minos_voter_cfg }}
state=directory
owner=root
group=root
mode=0755
- name: create minos config
template: >
dest={{ minos_cfg_file }}
src=edx/etc/minos/minos.yml.j2
mode=0755 owner=root group=root
- name: create minos voters configs
template: >
dest={{ minos_voter_cfg }}/{{ item }}.yml
src=edx/etc/minos/conf.d/{{ item }}.yml.j2
mode=0755 owner=root group=root
with_items:
- "BellwetherVoter"
- "ProccessQuienscenceVoterCelery"
- "ProccessQuienscenceVoterGunicorn"
- "TrackingLogVoter"
# Optional auth for git
- name: create ssh script for git (not authenticated)
template: >
src=tmp/git-identity.sh.j2 dest={{ minos_git_ssh }}
mode=750
when:
- MINOS_GIT_IDENTITY is not defined
- name: create ssh script for git (authenticated)
template: >
src=tmp/git-identity.sh.j2 dest={{ minos_git_ssh }}
mode=750
when:
- MINOS_GIT_IDENTITY is defined
- name: install read-only ssh key
copy: >
content="{{ COMMON_GIT_IDENTITY }}" dest="{{ minos_git_identity }}"
force=yes mode=0600
- name: install python custom-requirements
pip: >
name="{{ item }}"
virtualenv="{{ minos_venv_dir }}"
state=present
extra_args="--exists-action w"
environment:
GIT_SSH: "{{ minos_git_ssh }}"
with_items:
- "{{ minos_requirement }}"
BellwetherVoter:
config:
\ No newline at end of file
ProccessQuiescenceVoter:
config:
process_name: 'celery'
\ No newline at end of file
ProccessQuiescenceVoter:
config:
process_name: 'gunicorn'
\ No newline at end of file
TrackingLogVoter:
config:
aws_profile: !!null
local_directory: '{{ COMMON_LOG_DIR }}/tracking'
s3_bucket: 'edx-{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}'
bucket_path_prefix: 'logs/tracking'
---
aws_profile: !!null
s3_bucket: 'edx-{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}'
bucket_path: 'lifecycle/minos'
voter_conf_d: '{{ minos_voter_cfg }}'
#!/bin/sh
exec /usr/bin/ssh -o StrictHostKeyChecking=no {% if COMMON_GIT_IDENTITY %}-i {{ minos_git_identity }}{% endif %} "$@"
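# Illustrative usage: git invokes this wrapper in place of plain ssh when
# GIT_SSH points at it, e.g. (hypothetical clone)
#   GIT_SSH=/tmp/git.sh git clone git@github.com:edx-ops/edx-minos.git
# The -i identity flag is only added when COMMON_GIT_IDENTITY is set.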
......@@ -20,7 +20,7 @@
apt: >
pkg=mongodb-10gen={{ mongo_version }}
state=present install_recommends=yes
update_cache=yes
force=yes update_cache=yes
- name: create mongo dirs
file: >
......@@ -41,7 +41,7 @@
- name: copy mongodb key file
copy: >
src={{ secure_dir }}/files/mongo_key
content="{{ MONGO_CLUSTER_KEY }}"
dest={{ mongo_key_file }}
mode=0600
owner=mongodb
......
......@@ -12,7 +12,7 @@ logappend={{ mongo_logappend }}
{# Bind to all IPs (default) if in clustered mode,
otherwise only to the specified local IP.
#}
{% if mongo_clustered is not defined %}
{% if MONGO_CLUSTERED is not defined %}
bind_ip = {{ MONGO_BIND_IP }}
{% endif %}
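{# Illustrative renderings (hypothetical IP): with MONGO_CLUSTERED unset and
MONGO_BIND_IP set to 10.0.0.5 this emits "bind_ip = 10.0.0.5"; when
MONGO_CLUSTERED is set, the line is omitted and mongod binds to all
interfaces. #}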
......
......@@ -13,6 +13,11 @@
# this cruft can be removed in ansible 1.6, which can have apt install local deb files
- name: install required packages
apt: name={{ item }} state=present
with_items:
- gdebi
- name: download mongo mms agent
get_url: >
url="{{ mms_agent_url }}"
......
......@@ -16,9 +16,9 @@
#
newrelic_role_name: newrelic
NEWRELIC_REPO: 'deb http://apt.newrelic.com/debian/ newrelic non-free'
NEWRELIC_KEY_ID: '548C16BF'
NEWRELIC_KEY_URL: 'https://download.newrelic.com/{{ NEWRELIC_KEY_ID }}.gpg'
NEWRELIC_DEBIAN_REPO: 'deb http://apt.newrelic.com/debian/ newrelic non-free'
NEWRELIC_DEBIAN_KEY_ID: '548C16BF'
NEWRELIC_DEBIAN_KEY_URL: 'https://download.newrelic.com/{{ NEWRELIC_DEBIAN_KEY_ID }}.gpg'
NEWRELIC_LICENSE_KEY: "SPECIFY_KEY_HERE"
#
......@@ -28,4 +28,5 @@ NEWRELIC_LICENSE_KEY: "SPECIFY_KEY_HERE"
newrelic_debian_pkgs:
- newrelic-sysmond
newrelic_redhat_pkgs: []
newrelic_redhat_pkgs:
- newrelic-sysmond
......@@ -27,18 +27,33 @@
- name: add apt key
apt_key: >
id="{{ NEWRELIC_KEY_ID }}" url="{{ NEWRELIC_KEY_URL }}"
id="{{ NEWRELIC_DEBIAN_KEY_ID }}" url="{{ NEWRELIC_DEBIAN_KEY_URL }}"
state=present
when: ansible_distribution == 'Ubuntu'
- name: Configure the New Relic Servers yum repository
shell: >
rpm -Uvh https://yum.newrelic.com/pub/newrelic/el5/x86_64/newrelic-repo-5-3.noarch.rpm
creates=/etc/yum.repos.d/newrelic.repo
when: ansible_distribution == 'Amazon'
- name: install apt repository
apt_repository: repo="{{ NEWRELIC_REPO }}" update_cache=yes
apt_repository: repo="{{ NEWRELIC_DEBIAN_REPO }}" update_cache=yes
when: ansible_distribution == 'Ubuntu'
- name: install newrelic agent
- name: install newrelic agent (apt)
apt: pkg="newrelic-sysmond"
when: ansible_distribution == 'Ubuntu'
- name: Install newrelic related system packages.
- name: Install newrelic related system packages for Ubuntu
apt: pkg={{ item }} install_recommends=yes state=present
with_items: newrelic_debian_pkgs
when: ansible_distribution == 'Ubuntu'
- name: Install newrelic related system packages for Amazon
yum: pkg={{ item }} state=present
with_items: newrelic_redhat_pkgs
when: ansible_distribution == 'Amazon'
- name: configure the agent with the license key
shell: >
......
# Variables for nginx role
---
# These are parameters to the role
# and should be overridden
nginx_sites: []
nginx_redirects: {}
nginx_extra_sites: []
nginx_extra_configs: []
NGINX_EDXAPP_EXTRA_SITES: []
NGINX_EDXAPP_EXTRA_CONFIGS: []
NGINX_EDXAPP_CUSTOM_REDIRECTS: {}
NGINX_ENABLE_SSL: False
# Set these to real paths on your
# filesystem, otherwise nginx will
......
......@@ -12,6 +12,7 @@
- "{{ nginx_app_dir }}"
- "{{ nginx_sites_available_dir }}"
- "{{ nginx_sites_enabled_dir }}"
- "{{ nginx_conf_dir }}"
notify: restart nginx
- name: create nginx data dirs
......@@ -72,6 +73,46 @@
notify: reload nginx
with_items: nginx_sites
- name: Copying nginx extra configs
template: >
src={{ item }}
dest={{ nginx_sites_available_dir }}/{{ item|basename|replace(".j2", "") }}
owner=root group={{ common_web_user }} mode=0640
notify: reload nginx
with_items: nginx_extra_sites
- name: Creating links for nginx extra configs
file: >
src={{ nginx_sites_available_dir }}/{{ item|basename|replace(".j2", "") }}
dest={{ nginx_sites_enabled_dir }}/{{ item|basename|replace(".j2", "") }}
state=link owner=root group=root
notify: reload nginx
with_items: nginx_extra_sites
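# For illustration, with a hypothetical entry
#   nginx_extra_sites: [ "path/to/my-site.conf.j2" ]
# the basename/replace filter chain above yields "my-site.conf": the
# rendered template lands in sites-available and is symlinked into
# sites-enabled.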
- name: Copying custom nginx config
template: >
src={{ item }}
dest={{ nginx_conf_dir }}/{{ item|basename|replace(".j2", "") }}
owner=root group={{ common_web_user }} mode=0640
notify: reload nginx
with_items: nginx_extra_configs
- name: Copying nginx redirect configs for {{ nginx_redirects }}
template: >
src={{ nginx_template_dir }}/nginx_redirect.j2
dest={{ nginx_sites_available_dir }}/{{ item.key }}
owner=root group={{ common_web_user }} mode=0640
notify: reload nginx
with_dict: nginx_redirects
- name: Creating nginx redirect links for {{ nginx_redirects }}
file: >
src={{ nginx_sites_available_dir }}/{{ item.key }}
dest={{ nginx_sites_enabled_dir }}/{{ item.key }}
state=link owner=root group=root
notify: reload nginx
with_dict: nginx_redirects
- name: Write out htpasswd file
htpasswd: >
name={{ COMMON_HTPASSWD_USER }}
......
......@@ -2,6 +2,8 @@
satisfy any;
allow 127.0.0.1;
allow 192.168.0.0/16;
allow 172.16.0.0/12;
deny all;
auth_basic "Restricted";
......
......@@ -72,36 +72,7 @@ server {
}
{% include "robots.j2" %}
# Check security on this
location ~ ^/static/(?P<file>.*) {
root {{ edxapp_data_dir }};
try_files /staticfiles/$file /course_static/$file =404;
# return a 403 for static files that shouldn't be
# in the staticfiles directory
location ~ ^/static/(?:.*)(?:\.xml|\.json|README.TXT) {
return 403;
}
# http://www.red-team-design.com/firefox-doesnt-allow-cross-domain-fonts-by-default
location ~ "/static/(?P<collected>.*\.[0-9a-f]{12}\.(eot|otf|ttf|woff))" {
expires max;
add_header Access-Control-Allow-Origin *;
try_files /staticfiles/$collected /course_static/$collected =404;
}
# Set django-pipelined files to maximum cache time
location ~ "/static/(?P<collected>.*\.[0-9a-f]{12}\..*)" {
expires max;
# Without this try_files, files that have been run through
# django-pipeline return 404s
try_files /staticfiles/$collected /course_static/$collected =404;
}
# Expire other static files immediately (there should be very few / none of these)
expires epoch;
}
{% include "static-files.j2" %}
# Forward to HTTPS if we're an HTTP request...
if ($http_x_forwarded_proto = "http") {
......
......@@ -69,36 +69,7 @@ server {
}
{% include "robots.j2" %}
# Check security on this
location ~ ^/static/(?P<file>.*) {
root {{ edxapp_data_dir }};
try_files /staticfiles/$file /course_static/$file =404;
# return a 403 for static files that shouldn't be
# in the staticfiles directory
location ~ ^/static/(?:.*)(?:\.xml|\.json|README.TXT) {
return 403;
}
# http://www.red-team-design.com/firefox-doesnt-allow-cross-domain-fonts-by-default
location ~ "/static/(?P<collected>.*\.[0-9a-f]{12}\.(eot|otf|ttf|woff))" {
expires max;
add_header Access-Control-Allow-Origin *;
try_files /staticfiles/$collected /course_static/$collected =404;
}
# Set django-pipelined files to maximum cache time
location ~ "/static/(?P<collected>.*\.[0-9a-f]{12}\..*)" {
expires max;
# Without this try_files, files that have been run through
# django-pipeline return 404s
try_files /staticfiles/$collected /course_static/$collected =404;
}
# Expire other static files immediately (there should be very few / none of these)
expires epoch;
}
{% include "static-files.j2" %}
# Forward to HTTPS if we're an HTTP request...
if ($http_x_forwarded_proto = "http") {
......
{% for item in nginx_redirects -%}
{%- if "default" in item -%}
{%- if "default" in item.value -%}
{%- set default_site = "default" -%}
{%- else -%}
{%- set default_site = "" -%}
{%- endif -%}
server {
listen 80 {{ default_site }};
listen 443 {{ default_site }} ssl;
listen {{ EDXAPP_LMS_NGINX_PORT }} {{ default_site }};
{% if "ssl" in item.value and item.value['ssl'] == true -%}
listen {{ EDXAPP_LMS_SSL_NGINX_PORT }} {{ default_site }} ssl;
ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }};
ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }};
{% endif -%}
server_name {{ item['server_name'] }};
return 301 {{ item['redirect'] }}$request_uri;
}
{% endfor %}
server_name {% for server in item.value['server_names'] %}
{{ server }}{% endfor -%};
return 301 {{ item.value['redirect_destination'] }}$request_uri;
}
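# Illustrative rendering of one redirect (hypothetical dict value with
# server_names: [www.example.com], redirect_destination: https://example.org,
# ssl: true, default: true):
#
#   server {
#     listen <LMS port> default;
#     listen <SSL port> default ssl;
#     server_name www.example.com;
#     return 301 https://example.org$request_uri;
#   }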
location ~ ^/static/(?P<file>.*) {
root {{ edxapp_data_dir }};
try_files /staticfiles/$file /course_static/$file =404;
# return a 403 for static files that shouldn't be
# in the staticfiles directory
location ~ ^/static/(?:.*)(?:\.xml|\.json|README.TXT) {
return 403;
}
# http://www.red-team-design.com/firefox-doesnt-allow-cross-domain-fonts-by-default
location ~ "/static/(?P<collected>.*\.[0-9a-f]{12}\.(eot|otf|ttf|woff))" {
expires max;
add_header Access-Control-Allow-Origin *;
try_files /staticfiles/$collected /course_static/$collected =404;
}
# Set django-pipelined files to maximum cache time
location ~ "/static/(?P<collected>.*\.[0-9a-f]{12}\..*)" {
expires max;
# Without this try_files, files that have been run through
# django-pipeline return 404s
try_files /staticfiles/$collected /course_static/$collected =404;
}
# Set django-pipelined files for studio to maximum cache time
location ~ "/static/(?P<collected>[0-9a-f]{7}/.*)" {
expires max;
# Without this try_files, files that have been run through
# django-pipeline return 404s
try_files /staticfiles/$collected /course_static/$collected =404;
}
# Expire other static files immediately (there should be very few / none of these)
expires epoch;
}
......@@ -57,8 +57,8 @@ ORA_USERS:
ORA_XQUEUE_URL: "http://localhost:18040"
ORA_XQUEUE_DJANGO_USER: "lms"
ORA_XQUEUE_DJANGO_PASSWORD: "password"
ORA_XQUEUE_BASIC_AUTH_USER: "edx"
ORA_XQUEUE_BASIC_AUTH_PASSWORD: "edx"
ORA_XQUEUE_BASIC_AUTH_USER: "{{ COMMON_HTPASSWD_USER }}"
ORA_XQUEUE_BASIC_AUTH_PASSWORD: "{{ COMMON_HTPASSWD_PASS }}"
ORA_DJANGO_USER: "lms"
ORA_DJANGO_PASSWORD: "password"
......@@ -76,7 +76,7 @@ ORA_AWS_SECRET_ACCESS_KEY: ''
# Default nginx listen port
# These should be overridden if you want
# to serve all content on port 80
ora_gunicorn_workers: 4
ora_gunicorn_workers: 2
ora_gunicorn_port: 8060
ora_gunicorn_host: 127.0.0.1
......
......@@ -12,44 +12,46 @@
# - common
# - oraclejdk
- name: check for Oracle Java version {{ oraclejdk_base }}
command: test -d /usr/lib/jvm/{{ oraclejdk_base }}
ignore_errors: true
register: oraclejdk_present
- name: download Oracle Java
shell: >
curl -b gpw_e24=http%3A%2F%2Fwww.oracle.com -b oraclelicense=accept-securebackup-cookie -O -L {{ oraclejdk_url }}
executable=/bin/bash
chdir=/var/tmp
creates=/var/tmp/{{ oraclejdk_file }}
when: oraclejdk_present|failed
- name: install Oracle Java
- name: create jvm dir
file: >
path=/usr/lib/jvm
state=directory
owner=root
group=root
- name: untar Oracle Java
shell: >
mkdir -p /usr/lib/jvm && tar -C /usr/lib/jvm -zxvf /var/tmp/{{ oraclejdk_file }}
creates=/usr/lib/jvm/{{ oraclejdk_base }}
tar -C /usr/lib/jvm -zxvf /var/tmp/{{ oraclejdk_file }}
executable=/bin/bash
sudo: true
when: oraclejdk_present|failed
creates=/usr/lib/jvm/{{ oraclejdk_base }}
- name: create symlink expected by elasticsearch
file: src=/usr/lib/jvm/{{ oraclejdk_base }} dest={{ oraclejdk_link }} state=link
when: oraclejdk_present|failed
- name: update alternatives java
shell: >
update-alternatives --install "/usr/bin/java" "java" "/usr/lib/jvm/{{ oraclejdk_base }}/bin/java" 1
register: update_alt
changed_when: update_alt.stdout != ""
- name: update alternatives javac
shell: >
update-alternatives --install "/usr/bin/javac" "javac" "/usr/lib/jvm/{{ oraclejdk_base }}/bin/javac" 1
register: update_alt
changed_when: update_alt.stdout != ""
- name: update alternatives javaws
shell: >
update-alternatives --install "/usr/bin/javaws" "javaws" "/usr/lib/jvm/{{ oraclejdk_base }}/bin/javaws" 1
register: update_alt
changed_when: update_alt.stdout != ""
- name: add JAVA_HOME for Oracle Java
template: src=java.sh.j2 dest=/etc/profile.d/java.sh owner=root group=root mode=0755
when: oraclejdk_present|failed
# Variables for rabbitmq
---
rabbit_app_dir: "{{ COMMON_APP_DIR }}/rabbitmq"
rabbit_data_dir: "{{ COMMON_DATA_DIR }}/rabbitmq"
rabbit_log_dir: "{{ COMMON_LOG_DIR }}/rabbitmq"
rabbit_cfg_dir: "{{ COMMON_CFG_DIR }}/rabbitmq"
rabbitmq_app_dir: "{{ COMMON_APP_DIR }}/rabbitmq"
rabbitmq_data_dir: "{{ COMMON_DATA_DIR }}/rabbitmq"
rabbitmq_log_dir: "{{ COMMON_LOG_DIR }}/rabbitmq"
rabbitmq_cfg_dir: "{{ COMMON_CFG_DIR }}/rabbitmq"
rabbitmq_user: "rabbitmq"
rabbitmq_group: "rabbitmq"
# Environment specific vars
RABBIT_ERLANG_COOKIE: 'DEFAULT_COOKIE'
......
......@@ -36,6 +36,30 @@
- name: send sigterm to any running rabbitmq processes
shell: pkill -u rabbitmq || true
# Create the rabbitmq directories
- name: create rabbitmq edx directories
file:
path={{ item }}
owner={{ rabbitmq_user }}
mode=0755
state=directory
with_items:
- "{{ rabbitmq_app_dir }}"
- "{{ rabbitmq_log_dir }}"
- name: add queue monitoring script
template:
src="edx/app/rabbitmq/log-rabbitmq-queues.sh.j2"
dest="{{ rabbitmq_app_dir }}/log-rabbitmq-queues.sh"
owner="{{ rabbitmq_user }}"
group="{{ rabbitmq_group }}"
mode=0755
- name: set up a cron job to run the script
cron:
name: "log-queue-lenghts"
job: "{{ rabbitmq_app_dir }}/log-rabbitmq-queues.sh >/dev/null 2>&1"
# Defaulting to /var/lib/rabbitmq
- name: create cookie directory
file: >
......
#!/usr/bin/env bash
set -x
vpc_name={{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}
log_directory={{ rabbitmq_log_dir }}
{% raw %}
OLD_IFS=$IFS
IFS=$'\n'
vhosts=`/usr/sbin/rabbitmqctl list_vhosts | grep "^/"`
for vhost in $vhosts; do
queues=`/usr/sbin/rabbitmqctl list_queues -p $vhost | awk 'NF==2{ print }'`
mkdir -p ${log_directory}/${vhost}
for queue in $queues; do
queue_name=`echo $queue | awk '{ print $1 }'`
echo $queue | sed 's/\s*/ /' | awk -v date="$(date)" -v vhost="$vhost" '{ print "date=\x27"date"\x27","vhost=\x27"vhost"\x27","queue=\x27"$1"\x27","length="$2}' >> ${log_directory}/${vhost}/${queue_name}.log
done
done
IFS=$OLD_IFS
{% endraw %}
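# Illustrative log line (hypothetical values), one per queue per vhost:
#   date='Mon Jun 2 12:00:00 UTC 2014' vhost='/' queue='celery' length=42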
......@@ -16,12 +16,15 @@
#
splunk_role_name: 'splunk'
SPLUNKFORWARDER_SERVER: 'localhost:9997'
SPLUNKFORWARDER_PACKAGE_URL: !!null
SPLUNKFORWARDER_DEB: !!null
SPLUNKFORWARDER_PASSWORD: !!null
SPLUNKFORWARDER_SERVERS:
- target_group: "default_output_server"
server: "localhost:9997"
default: true
SPLUNKFORWARDER_LOG_ITEMS:
- source: '{{ COMMON_LOG_DIR }}/lms'
recursive: true
......
......@@ -41,7 +41,7 @@
# Create splunk user
- name: create splunk user
user: name=splunk createhome=no state=present append=yes groups=syslog
user: name=splunk createhome=no state=present append=yes groups=syslog,adm
when: download_deb.changed
# Need to start splunk manually so that it can create various files
......
[default]
host = {{ ansible_hostname }}
# {{ ansible_managed }}
{% for loggable in SPLUNKFORWARDER_LOG_ITEMS %}
[monitor://{{loggable.source}}]
recursive = {{loggable.recursive|default(false)}}
[monitor://{{ loggable.source }}]
recursive = {{ loggable.recursive|default(false) }}
{% if loggable.sourcetype is defined %}
sourcetype = {{loggable.sourcetype}}
sourcetype = {{ loggable.sourcetype }}
{% endif %}
{% if loggable.index is defined %}
index = {{loggable.index}}
index = {{ loggable.index }}
{% endif %}
{% if loggable._TCP_ROUTING is defined %}
_TCP_ROUTING = {{ loggable._TCP_ROUTING }}
{% endif %}
{% endfor %}
[tcpout]
defaultGroup = default_output_server
# {{ ansible_managed }}
[tcpout:default_output_server]
server = {{SPLUNKFORWARDER_SERVER}}
{% for server in SPLUNKFORWARDER_SERVERS|selectattr("default", "defined") %}
[tcpout]
defaultGroup = {{ server.target_group }}
{% endfor %}
[tcpout-server://{{SPLUNKFORWARDER_SERVER}}]
# forwarder receivers
{% for server in SPLUNKFORWARDER_SERVERS %}
[tcpout:{{ server.target_group }}]
server = {{ server.server }}
{% endfor %}
\ No newline at end of file
......@@ -15,3 +15,8 @@
# vars are namespace with the module name.
#
stop_all_edx_services_role_name: stop_all_edx_services
# set this to "--no-wait" if you do not want to wait for all
# supervisor jobs to finish. Useful when used in conjunction
# with minos.
STOP_ALL_EDX_SERVICES_EXTRA_ARGS: ""
......@@ -19,34 +19,28 @@
#
#
- name: stop supervisor
service: name=supervisor state=stopped
service: name=supervisor state=stopped arguments="{{ STOP_ALL_EDX_SERVICES_EXTRA_ARGS }}"
- name: stop supervisor.devpi
service: name=supervisor.devpi state=stopped
service: name=supervisor.devpi state=stopped arguments="{{ STOP_ALL_EDX_SERVICES_EXTRA_ARGS }}"
- name: stop nginx
service: name=nginx state=stopped
service: name=nginx state=stopped arguments="{{ STOP_ALL_EDX_SERVICES_EXTRA_ARGS }}"
- name: stop rabbitmq-server
service: name=rabbitmq-server state=stopped
service: name=rabbitmq-server state=stopped arguments="{{ STOP_ALL_EDX_SERVICES_EXTRA_ARGS }}"
- name: stop mysql
service: name=mysql state=stopped
service: name=mysql state=stopped arguments="{{ STOP_ALL_EDX_SERVICES_EXTRA_ARGS }}"
- name: stop memcached
service: name=memcached state=stopped
service: name=memcached state=stopped arguments="{{ STOP_ALL_EDX_SERVICES_EXTRA_ARGS }}"
- name: stop supervisor.devpi
service: name=supervisor.devpi state=stopped
- name: stop nginx
service: name=nginx state=stopped
- name: stop rabbitmq-server
service: name=rabbitmq-server state=stopped
service: name=supervisor.devpi state=stopped arguments="{{ STOP_ALL_EDX_SERVICES_EXTRA_ARGS }}"
- name: stop mongodb
service: name=mongodb state=stopped
service: name=mongodb state=stopped arguments="{{ STOP_ALL_EDX_SERVICES_EXTRA_ARGS }}"
- name: kill processes by user
shell: pkill -u {{ item }} || true
......
......@@ -72,6 +72,15 @@
with_items:
- "{{ supervisor_app_dir }}"
- "{{ supervisor_venv_dir }}"
- name: create service user accessible dirs
file: >
name={{ item }}
state=directory
owner={{ supervisor_user }}
group={{ supervisor_service_user }}
mode="775"
with_items:
- "{{ supervisor_cfg_dir }}"
- "{{ supervisor_available_dir }}"
......
......@@ -7,5 +7,7 @@ start on runlevel [2345]
{% endif %}
stop on runlevel [!2345]
kill timeout 432000
setuid {{ supervisor_service_user }}
exec {{ supervisor_venv_dir }}/bin/supervisord --nodaemon --configuration {{ supervisor_cfg }}
exec {{ supervisor_venv_dir }}/bin/supervisord -n --configuration {{ supervisor_cfg }}
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role test_build_server
#
#
# vars are namespace with the module name.
#
test_build_server_user: jenkins
test_build_server_repo_path: /home/jenkins
#!/usr/bin/env bash
################################################################################
# This executes a small subset of the edx-platform tests. It is intended as
# a means of testing newly provisioned AMIs for our jenkins workers.
#
# The two main things that happen here:
# 1. The setup from edx-platform/scripts/all-tests.sh, the script that is
# run by the jenkins workers to kick off tests.
# 2. The paver commands for tests, coverage, and quality reports are run.
# For the tests, it runs only a small number of test cases for each
# test suite.
###############################################################################
# Doing this rather than copying the file into the scripts folder so that
# this file doesn't get cleaned out by the 'git clean' in all-tests.sh.
cd edx-platform-clone
# This will run all of the setup it usually runs, but none of the
# tests because TEST_SUITE isn't defined.
source scripts/all-tests.sh
# Now we can run a subset of the tests via paver.
# Run some of the common/lib unit tests
paver test_lib -t common/lib/xmodule/xmodule/tests/test_stringify.py
# Generate some coverage reports
paver coverage
# Run some of the djangoapp unit tests
paver test_system -t lms/djangoapps/courseware/tests/tests.py
paver test_system -t cms/djangoapps/course_creators/tests/test_views.py
# Run some of the javascript unit tests
paver test_js_run -s xmodule
# Run some of the bok-choy tests
paver test_bokchoy -t test_lms.py:RegistrationTest
# Run some of the lettuce acceptance tests
# paver test_acceptance -s lms --extra_args="lms/djangoapps/courseware/features/problems.feature"
# paver test_acceptance -s cms --extra_args="cms/djangoapps/contentstore/features/html-editor.feature"
# Generate quality reports
paver run_quality
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Role includes for role test_build_server
#
# Example:
#
# dependencies:
# - {
# role: my_role
# my_role_var0: "foo"
# my_role_var1: "bar"
# }
#### INTENTIONALLY LEFT BLANK ####
# Since this is a test role, it should not install anything extra onto the
# target machine, which would alter the system under test. Be careful when
# adding dependencies.
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role test_build_server
#
# Overview:
#
#
# Dependencies:
#
#
# Example play:
#
- name: Create clone of edx-platform
git: >
repo=https://github.com/edx/edx-platform.git
dest={{ test_build_server_repo_path }}/edx-platform-clone
version=master
sudo_user: "{{ test_build_server_user }}"
- name: Copy test-development-environment.sh to somewhere the jenkins user can access it
copy: >
src=test-development-environment.sh
dest="{{ test_build_server_repo_path }}"
mode=0755
sudo_user: "{{ test_build_server_user }}"
- name: Validate build environment
shell: "bash test-development-environment.sh"
args:
chdir: "{{ test_build_server_repo_path }}/"
sudo_user: "{{ test_build_server_user }}"
......@@ -30,7 +30,10 @@ XQUEUE_MYSQL_USER: 'xqueue001'
XQUEUE_MYSQL_PASSWORD: 'password'
XQUEUE_MYSQL_HOST: 'localhost'
XQUEUE_MYSQL_PORT: '3306'
XQUEUE_NEWRELIC_APPNAME: "edX-xqueue"
XQUEUE_NEWRELIC_APPNAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-xqueue"
# Set the number of workers explicitly for xqueue
XQUEUE_WORKERS: !!null
XQUEUE_WORKERS_PER_QUEUE: 12
# Internal vars below this line
#############################################
......@@ -51,7 +54,7 @@ xqueue_gunicorn_host: 127.0.0.1
xqueue_env_config:
XQUEUES: $XQUEUE_QUEUES
XQUEUE_WORKERS_PER_QUEUE: 12
XQUEUE_WORKERS_PER_QUEUE: $XQUEUE_WORKERS_PER_QUEUE
LOGGING_ENV : $XQUEUE_LOGGING_ENV
SYSLOG_SERVER: $XQUEUE_SYSLOG_SERVER
LOG_DIR : "{{ COMMON_DATA_DIR }}/logs/xqueue"
......@@ -81,7 +84,6 @@ xqueue_version: 'HEAD'
xqueue_pre_requirements_file: "{{ xqueue_code_dir }}/pre-requirements.txt"
xqueue_post_requirements_file: "{{ xqueue_code_dir }}/requirements.txt"
# These packages are required for the xqueue server,
# copied from the LMS role for now since there is a lot
# of overlap
......@@ -99,7 +101,6 @@ xqueue_debian_pkgs:
# misc
- curl
- ipython
- npm
- ntp
# for shapely
- libgeos-dev
......
......@@ -32,6 +32,7 @@
dest={{ xqueue_code_dir }} repo={{ xqueue_source_repo }} version={{ xqueue_version }}
accept_hostkey=yes
sudo_user: "{{ xqueue_user }}"
register: xqueue_checkout
notify:
- restart xqueue
......@@ -114,4 +115,7 @@
- python
- pip
- include: tag_ec2.yml tags=deploy
when: COMMON_TAG_EC2_INSTANCE
- set_fact: xqueue_installed=true
---
- name: get instance information
action: ec2_facts
- name: tag instance
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:xqueue" : "{{ xqueue_source_repo }} {{ xqueue_checkout.after|truncate(7,True,'') }}"
when: xqueue_checkout.after is defined
[program:xqueue]
{% if COMMON_ENABLE_NEWRELIC %}
{% if COMMON_ENABLE_NEWRELIC_APP %}
{% set executable = xqueue_venv_bin + '/newrelic-admin run-program ' + xqueue_venv_bin + '/gunicorn' %}
{% else %}
{% set executable = xqueue_venv_bin + '/gunicorn' %}
{% endif %}
{% if XQUEUE_WORKERS -%}
command={{ executable }} --preload -b {{ xqueue_gunicorn_host }}:{{ xqueue_gunicorn_port }} -w {{ XQUEUE_WORKERS }} --timeout=300 --pythonpath={{ xqueue_code_dir }} xqueue.wsgi
{% else -%}
{% if ansible_processor|length > 0 %}
command={{ executable }} --preload -b {{ xqueue_gunicorn_host }}:{{ xqueue_gunicorn_port }} -w {{ ansible_processor|length * 2 }} --timeout=300 --pythonpath={{ xqueue_code_dir }} xqueue.wsgi
{% else %}
{% else -%}
command={{ executable }} --preload -b {{ xqueue_gunicorn_host }}:{{ xqueue_gunicorn_port }} -w 2 --timeout=300 --pythonpath={{ xqueue_code_dir }} xqueue.wsgi
{% endif %}
{% endif -%}
{% endif -%}
user={{ common_web_user }}
directory={{ xqueue_code_dir }}
environment={% if COMMON_ENABLE_NEWRELIC %}NEW_RELIC_APP_NAME={{ XQUEUE_NEWRELIC_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}PID=/var/tmp/xqueue.pid,PORT={{ xqueue_gunicorn_port }},ADDRESS={{ xqueue_gunicorn_host }},LANG={{ XQUEUE_LANG }},DJANGO_SETTINGS_MODULE=xqueue.aws_settings,SERVICE_VARIANT="xqueue"
environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ XQUEUE_NEWRELIC_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}PID=/var/tmp/xqueue.pid,PORT={{ xqueue_gunicorn_port }},ADDRESS={{ xqueue_gunicorn_host }},LANG={{ XQUEUE_LANG }},DJANGO_SETTINGS_MODULE=xqueue.aws_settings,SERVICE_VARIANT="xqueue"
stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log
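; Illustrative rendering (assuming XQUEUE_WORKERS is unset, a 4-core host,
; and New Relic app monitoring enabled), the command above expands roughly to
;   <venv>/bin/newrelic-admin run-program <venv>/bin/gunicorn --preload
;   -b 127.0.0.1:<port> -w 8 --timeout=300 --pythonpath=<code dir> xqueue.wsgi
; i.e. the worker count defaults to 2 x CPU count when not set explicitly.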
......
......@@ -19,6 +19,7 @@ XQWATCHER_COURSES:
- COURSE: "exampleX-101x"
GIT_REPO: "git@github.com:foo/graders-exampleX-101x.git"
GIT_REF: "master"
PYTHON_REQUIREMENTS: []
QUEUE_NAME: "exampleX-101x"
QUEUE_CONFIG:
SERVER: "https://xqueue.example.com"
......@@ -35,6 +36,7 @@ XQWATCHER_COURSES:
- COURSE: "exampleX-202x"
GIT_REPO: "git@github.com:foo/graders-exampleX-202x.git"
GIT_REF: "master"
PYTHON_REQUIREMENTS: []
QUEUE_NAME: "exampleX-202x"
QUEUE_CONFIG:
SERVER: "https://xqueue.example.com"
......@@ -56,7 +58,7 @@ XQWATCHER_GIT_IDENTITY: |
# depends upon Newrelic being enabled via COMMON_ENABLE_NEWRELIC
# and a key being provided via NEWRELIC_LICENSE_KEY
XQWATCHER_NEWRELIC_APPNAME: "your Newrelic appname"
XQWATCHER_NEWRELIC_APPNAME: "{{ COMMON_DEPLOYMENT }}-{{ COMMON_ENVIRONMENT }}-xqwatcher"
XQWATCHER_PIP_EXTRA_ARGS: "-i {{ COMMON_PYPI_MIRROR_URL }}"
#
#
......@@ -68,12 +70,11 @@ xqwatcher_user: "xqwatcher"
xqwatcher_module: "xqueue_watcher"
xqwatcher_app_dir: "{{ COMMON_APP_DIR }}/{{ xqwatcher_service_name }}"
xqwatcher_home: "{{ COMMON_APP_DIR }}/{{ xqwatcher_service_name }}"
xqwatcher_venv_base: "{{ xqwatcher_home }}/venvs"
xqwatcher_app_data: "{{ xqwatcher_app_dir }}/data"
xqwatcher_venv_base: "{{ xqwatcher_app_dir }}/venvs"
xqwatcher_venv_dir: "{{ xqwatcher_venv_base }}/{{ xqwatcher_service_name }}"
xqwatcher_code_dir: "{{ xqwatcher_app_dir }}/src"
xqwatcher_conf_dir: "{{ xqwatcher_home }}"
xqwatcher_data_dir: "{{ xqwatcher_home }}/data"
xqwatcher_conf_dir: "{{ xqwatcher_app_dir }}"
xqwatcher_source_repo: "git@{{ COMMON_GIT_MIRROR }}:edx/xqueue-watcher.git"
xqwatcher_git_ssh_opts: "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i {{ xqwatcher_git_identity }}"
......@@ -87,6 +88,7 @@ xqwatcher_log_dir: "{{ COMMON_LOG_DIR }}/{{ xqwatcher_service_name }}"
# supervisor related config
#
xqwatcher_supervisor_app_dir: "{{ xqwatcher_app_dir }}/supervisor"
xqwatcher_supervisor_http_port: 9003
xqwatcher_supervisor_data_dir: "{{ COMMON_DATA_DIR }}/{{ xqwatcher_service_name }}"
xqwatcher_supervisor_log_dir: "{{ xqwatcher_log_dir }}"
xqwatcher_supervisor_venv_dir: "{{ xqwatcher_venv_base }}/supervisor"
......
......@@ -24,4 +24,4 @@ dependencies:
supervisor_service_user: "{{ xqwatcher_supervisor_user }}"
supervisor_available_dir: "{{ xqwatcher_supervisor_available_dir }}"
supervisor_service: "supervisor.xqwatcher"
supervisor_http_bind_port: '9003'
supervisor_http_bind_port: "{{ xqwatcher_supervisor_http_port }}"
......@@ -18,17 +18,10 @@
mode=0644 owner=root group=root
with_items: XQWATCHER_COURSES
- name: write out sudoers config jail user
template: >
src=etc/sudoers.d/95-jailed-user.j2
dest=/etc/sudoers.d/95-{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}
mode=0440 owner=root group=root validate='visudo -c -f %s'
with_items: XQWATCHER_COURSES
- name: write out sudoers for watcher
template: >
src=etc/sudoers.d/95-xqwatcher.j2
dest=/etc/sudoers.d/95-xqwatcher-{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}
dest=/etc/sudoers.d/95-xqwatcher-{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user|replace('.', '') }}
mode=0440 owner=root group=root validate='visudo -c -f %s'
with_items: XQWATCHER_COURSES
......@@ -44,13 +37,13 @@
- name: write out requirements.txt
template: >
src=edx/app/xqwatcher/data/requirements.txt.j2
dest={{ xqwatcher_data_dir }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}-requirements.txt
dest={{ xqwatcher_app_data }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}-requirements.txt
mode=0440 owner=root group=root
with_items: XQWATCHER_COURSES
- name: install course-specific python requirements
pip: >
requirements="{{ xqwatcher_data_dir }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}-requirements.txt"
requirements="{{ xqwatcher_app_data }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}-requirements.txt"
virtualenv="{{ xqwatcher_venv_base }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
state=present
extra_args="{{ XQWATCHER_PIP_EXTRA_ARGS }}"
......@@ -74,9 +67,10 @@
# environment where untrusted users can submit code
- name: put code jail into aa-complain
command: /usr/sbin/aa-complain "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
when: CODE_JAIL_COMPLAIN
when: CODE_JAIL_COMPLAIN|bool
with_items: XQWATCHER_COURSES
- name: put code sandbox into aa-enforce
command: /usr/sbin/aa-enforce "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
when: CODE_JAIL_COMPAIN is not defined | not CODE_JAIL_COMPLAIN
\ No newline at end of file
when: not CODE_JAIL_COMPLAIN|bool
with_items: XQWATCHER_COURSES
......@@ -4,7 +4,7 @@
- name: checkout grader code
git: >
dest={{ xqwatcher_data_dir }}/{{ item.COURSE }} repo={{ item.GIT_REPO }}
dest={{ xqwatcher_app_data }}/{{ item.COURSE }} repo={{ item.GIT_REPO }}
version={{ item.GIT_REF }}
ssh_opts="{{ xqwatcher_git_ssh_opts }}"
with_items: XQWATCHER_COURSES
......@@ -7,6 +7,7 @@
dest={{ xqwatcher_code_dir }} repo={{ xqwatcher_source_repo }} version={{ XQWATCHER_VERSION }}
accept_hostkey=yes
ssh_opts="{{ xqwatcher_git_ssh_opts }}"
register: xqwatcher_checkout
- name: install application requirements
pip: >
......@@ -40,3 +41,6 @@
shell: "{{ xqwatcher_supervisor_ctl }} -c {{ xqwatcher_supervisor_app_dir }}/supervisord.conf update"
when: not disable_edx_services
notify: restart xqwatcher
- include: tag_ec2.yml tags=deploy
when: COMMON_TAG_EC2_INSTANCE
......@@ -48,6 +48,7 @@
# - COURSE: "exampleX-101x"
# GIT_REPO: "git@github.com:foo/graders-exampleX-101x.git"
# GIT_REF: "master"
# PYTHON_REQUIREMENTS: []
# QUEUE_NAME: "exampleX-101x"
# QUEUE_CONFIG:
# SERVER: "https://xqueue.example.com"
......@@ -64,6 +65,7 @@
# - COURSE: "exampleX-202x"
# GIT_REPO: "git@github.com:foo/graders-exampleX-202x.git"
# GIT_REF: "master"
# PYTHON_REQUIREMENTS: []
# QUEUE_NAME: "exampleX-202x"
# QUEUE_CONFIG:
# SERVER: "https://xqueue.example.com"
......@@ -84,8 +86,6 @@
# -----END RSA PRIVATE KEY-----
#
- include: code_jail.yml CODE_JAIL_COMPLAIN=false
- name: create conf dir
file: >
path="{{ xqwatcher_conf_dir }}"
......@@ -100,4 +100,13 @@
owner="{{ xqwatcher_user }}"
group="{{ xqwatcher_user }}"
- name: create app data dir
file: >
path="{{ xqwatcher_app_data }}"
state=directory
owner="{{ xqwatcher_user }}"
group="{{ xqwatcher_user }}"
- include: code_jail.yml CODE_JAIL_COMPLAIN=false
- include: deploy.yml tags=deploy
---
- name: get instance information
action: ec2_facts
- name: tag instance
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:xqwatcher" : "{{ xqwatcher_source_repo }} {{ xqwatcher_checkout.after|truncate(7,True,'') }}"
when: xqwatcher_checkout.after is defined
; {{ ansible_managed }}
;
{% if COMMON_ENABLE_NEWRELIC %}
{% if COMMON_ENABLE_NEWRELIC_APP %}
{% set executable = xqwatcher_venv_dir + '/bin/newrelic-admin run-program ' + xqwatcher_venv_dir + '/bin/python' %}
{% else %}
{% set executable = xqwatcher_venv_dir + '/bin/python' %}
{% endif %}
[program:xqwatcher_service_name]
[program:{{ xqwatcher_service_name }}]
command={{ executable }} -m {{ xqwatcher_module }} -d {{ xqwatcher_conf_dir }}
process_name=%(program_name)s
user={{ xqwatcher_user }}
directory={{ xqwatcher_code_dir }}
stdout_logfile={{ xqwatcher_supervisor_log_dir }}/%(program_name)-stdout.log
stderr_logfile={{ xqwatcher_supervisor_log_dir }}/%(program_name)-stderr.log
environment={% if COMMON_ENABLE_NEWRELIC %}NEW_RELIC_APP_NAME={{ XQWATCHER_NEWRELIC_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}
environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ XQWATCHER_NEWRELIC_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}
killasgroup=true
stopasgroup=true
......@@ -21,7 +21,7 @@
/usr/lib/python2.7/lib-dynload/datetime.so mr,
/usr/lib/python2.7/lib-dynload/_elementtree.so mr,
/usr/lib/python2.7/lib-dynload/pyexpat.so mr,
/usr/lib/python2.7/lib-dynload/future_builtins.so mr,
#
# Allow access to selections from /proc
#
......
{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }} ALL=({{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}) SETENV:NOPASSWD:{{ xqwatcher_venv_base }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}/bin/python
{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }} ALL=(ALL) NOPASSWD:/bin/kill
{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill
{{ xqwatcher_user }} ALL=({{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}) SETENV:NOPASSWD:{{ xqwatcher_venv_base }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}/bin/python
{{ xqwatcher_user }} ALL=(ALL) NOPASSWD:/bin/kill
{{ xqwatcher_user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill
{{ xqwatcher_user }} ALL=({{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}) NOPASSWD:/bin/kill
{{ xqwatcher_user }} ALL=({{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}) NOPASSWD:/usr/bin/pkill
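# Illustrative rendering (hypothetical values: xqwatcher_user=xqwatcher,
# CODEJAIL user=sandbox, CODEJAIL name=exampleX-101x), the per-course lines
# become roughly:
#   xqwatcher ALL=(sandbox) SETENV:NOPASSWD:<venv base>/exampleX-101x/bin/python
#   xqwatcher ALL=(sandbox) NOPASSWD:/bin/kill
#   xqwatcher ALL=(sandbox) NOPASSWD:/usr/bin/pkill
# i.e. the watcher may run only the jailed interpreter, kill, and pkill as
# the sandbox user, and nothing else.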
......@@ -16,6 +16,7 @@
dest={{xserver_code_dir}} repo={{xserver_source_repo}} version={{xserver_version}}
accept_hostkey=yes
sudo_user: "{{ xserver_user }}"
register: xserver_checkout
notify: restart xserver
- name: install requirements
......@@ -56,6 +57,7 @@
environment:
GIT_SSH: /tmp/git_ssh.sh
notify: restart xserver
register: xserver_grader_checkout
sudo_user: "{{ xserver_user }}"
- name: remove read-only ssh key for the content repo
......@@ -92,3 +94,6 @@
- name: enforce app-armor rules
command: aa-enforce {{ xserver_venv_sandbox_dir }}
- include: ec2.yml tags=deploy
when: COMMON_TAG_EC2_INSTANCE
---
- name: get instance information
action: ec2_facts
- name: tag instance for xserver
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:xserver" : "{{ xserver_source_repo }} {{ xserver_checkout.after|truncate(7,True,'') }}"
when: xserver_checkout.after is defined
- name: tag instance for xserver grader
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:xserver_grader" : "{{ XSERVER_GRADER_SOURCE }} {{ xserver_grader_checkout.after|truncate(7,True,'') }}"
when: xserver_grader_checkout.after is defined
......@@ -5,6 +5,6 @@
#
- hosts: all
sudo: True
gather_facts: False
gather_facts: True
roles:
- "{{role}}"
......@@ -12,6 +12,7 @@
EDXAPP_NO_PREREQ_INSTALL: 0
COMMON_MOTD_TEMPLATE: 'devstack_motd.tail.j2'
COMMON_SSH_PASSWORD_AUTH: "yes"
ENABLE_LEGACY_ORA: !!null
vars_files:
- "group_vars/all"
roles:
......@@ -22,8 +23,10 @@
- oraclejdk
- elasticsearch
- forum
- ora
- role: ora
when: ENABLE_LEGACY_ORA
- browsers
- browsermob-proxy
- local_dev
- demo
- role: analytics-api
......
......@@ -58,5 +58,9 @@ if __name__ == '__main__':
if disposition.lower() == "on disk":
all_xml_mappings[slug] = 'xml'
edxapp_xml_courses = { "EDXAPP_XML_COURSES": all_course_data, "EDXAPP_XML_MAPPINGS": all_xml_mappings }
edxapp_xml_courses = {
"EDXAPP_XML_COURSES": all_course_data,
"EDXAPP_XML_MAPPINGS": all_xml_mappings,
"EDXAPP_XML_FROM_GIT": True
}
print yaml.safe_dump(edxapp_xml_courses, default_flow_style=False)
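# Illustrative output (hypothetical course id), as consumed by the edxapp role:
#   EDXAPP_XML_COURSES:
#   - ...one dict per on-disk course...
#   EDXAPP_XML_FROM_GIT: true
#   EDXAPP_XML_MAPPINGS:
#     org/course/run: xml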
......@@ -17,6 +17,12 @@ if [[ ! "$(lsb_release -d | cut -f2)" =~ $'Ubuntu 12.04' ]]; then
fi
##
## Update and Upgrade apt packages
##
sudo apt-get update -y
sudo apt-get upgrade -y
##
## Install system pre-requisites
##
sudo apt-get install -y build-essential software-properties-common python-software-properties curl git-core libxml2-dev libxslt1-dev python-pip python-apt python-dev
......
......@@ -64,7 +64,11 @@ if [[ -z $region ]]; then
fi
if [[ -z $zone ]]; then
zone="us-east-1b"
zone="us-east-1c"
fi
if [[ -z $vpc_subnet_id ]]; then
vpc_subnet_id="subnet-cd867aba"
fi
if [[ -z $elb ]]; then
......@@ -81,16 +85,16 @@ fi
if [[ -z $ami ]]; then
if [[ $server_type == "full_edx_installation" ]]; then
ami="ami-f0814498"
ami="ami-c01dc6a8"
elif [[ $server_type == "ubuntu_12.04" || $server_type == "full_edx_installation_from_scratch" ]]; then
ami="ami-59a4a230"
ami="ami-8eb061e6"
elif [[ $server_type == "ubuntu_14.04(experimental)" ]]; then
ami="ami-408c7f28"
ami="ami-a0ff23c8"
fi
fi
if [[ -z $instance_type ]]; then
instance_type="m1.medium"
instance_type="t2.medium"
fi
if [[ -z $enable_monitoring ]]; then
......@@ -162,7 +166,7 @@ COMMON_USER_INFO:
github: true
type: admin
USER_CMD_PROMPT: '[$name_tag] '
COMMON_ENABLE_NEWRELIC: $enable_monitoring
COMMON_ENABLE_NEWRELIC_APP: $enable_monitoring
COMMON_ENABLE_DATADOG: $enable_monitoring
FORUM_NEW_RELIC_ENABLE: $enable_monitoring
EDXAPP_NEWRELIC_LMS_APPNAME: sandbox-${dns_name}-edxapp-lms
......@@ -201,7 +205,7 @@ EOF
# run the tasks to launch an ec2 instance from AMI
cat $extra_vars_file
ansible-playbook edx_provision.yml -i inventory.ini $extra_var_arg --user ubuntu -v
ansible-playbook edx_provision.yml -i inventory.ini $extra_var_arg --user ubuntu
if [[ $server_type == "full_edx_installation" ]]; then
# additional tasks that need to be run if the
......@@ -229,7 +233,7 @@ if [[ $reconfigure != "true" && $server_type == "full_edx_installation" ]]; then
for i in $roles; do
if [[ ${deploy[$i]} == "true" ]]; then
cat $extra_vars_file
ansible-playbook ${i}.yml -i "${deploy_host}," $extra_var_arg --user ubuntu --tags deploy -v
ansible-playbook ${i}.yml -i "${deploy_host}," $extra_var_arg --user ubuntu --tags deploy
fi
done
fi
......
......@@ -29,11 +29,6 @@ if [[ -z "$BUILD_NUMBER" ]]; then
exit -1
fi
if [[ -z "$refs" ]]; then
echo "refs not specified."
exit -1
fi
if [[ -z "$deployment" ]]; then
echo "deployment not specified."
exit -1
......@@ -65,6 +60,10 @@ cd $WORKSPACE/configuration
configuration=`git rev-parse --short HEAD`
cd $WORKSPACE
cd $WORKSPACE/configuration-secure
configuration_secure=`git rev-parse --short HEAD`
cd $WORKSPACE
base_params=""
if [[ -n "$base_ami" ]]; then
base_params="-b $base_ami"
......@@ -75,9 +74,10 @@ if [[ "$use_blessed" == "true" ]]; then
blessed_params="--blessed"
fi
playbookdir_params=""
if [[ ! -z "$playbook_dir" ]]; then
playbookdir_params="--playbook-dir $playbook_dir"
if [[ -e "configuration/playbooks/edx-east/${play}.yml" ]]; then
playbookdir_params="--playbook-dir configuration/playbooks/edx-east"
else
playbookdir_params="--playbook-dir ansible-private"
fi
configurationprivate_params=""
......@@ -103,10 +103,7 @@ pip install -r requirements.txt
cd util/vpc-tools/
echo "$refs" > /var/tmp/$BUILD_ID-refs.yml
cat /var/tmp/$BUILD_ID-refs.yml
echo "$vars" > /var/tmp/$BUILD_ID-extra-vars.yml
cat /var/tmp/$BUILD_ID-extra-vars.yml
python -u abbey.py -p $play -t c3.large -d $deployment -e $environment -i /edx/var/jenkins/.ssh/id_rsa $base_params $blessed_params $playbookdir_params --vars /var/tmp/$BUILD_ID-extra-vars.yml --refs /var/tmp/$BUILD_ID-refs.yml -c $BUILD_NUMBER --configuration-version $configuration --configuration-secure-version $configuration_secure -k $jenkins_admin_ec2_key --configuration-secure-repo $jenkins_admin_configuration_secure_repo $configurationprivate_params $hipchat_params $cleanup_params
python -u abbey.py -p $play -t c3.large -d $deployment -e $environment -i /edx/var/jenkins/.ssh/id_rsa $base_params $blessed_params $playbookdir_params --vars /var/tmp/$BUILD_ID-extra-vars.yml -c $BUILD_NUMBER --configuration-version $configuration --configuration-secure-version $configuration_secure -k $jenkins_admin_ec2_key --configuration-secure-repo $jenkins_admin_configuration_secure_repo $configurationprivate_params $hipchat_params $cleanup_params
#!/bin/bash
cd configuration
pip install -r requirements.txt
env
ip=`python playbooks/ec2.py | jq -r '."tag_Name_prod-edx-worker"[0] | strings'`
if [ "$report" = "true" ]; then
ssh ubuntu@$ip "cd /edx/app/edxapp/edx-platform && sudo -u www-data /edx/bin/python.edxapp ./manage.py lms gen_cert_report -c $course_id --settings aws"
else
ssh ubuntu@$ip "cd /edx/app/edxapp/edx-platform && sudo -u www-data /edx/bin/python.edxapp ./manage.py lms ungenerated_certs -c $course_id --settings aws"
if [ ! -z "$force_certificate_state" ]; then
ssh ubuntu@$ip "cd /edx/app/edxapp/edx-platform && sudo -u www-data /edx/bin/python.edxapp ./manage.py lms ungenerated_certs -c $course_id -f $force_certificate_state --settings aws"
fi
fi
#!/usr/bin/env bash
# A simple wrapper to add ssh keys.
# This assumes that you will be running on one or more servers
# that are tagged with Name: <environment>-<deployment>-<play>
if [[
-z $WORKSPACE ||
-z $environment_tag ||
-z $deployment_tag ||
-z $play ||
-z $first_in ||
-z $public_key ||
-z $serial_count
]]; then
echo "Environment incorrect for this wrapper script"
env
exit 1
fi
cd $WORKSPACE/configuration/playbooks/edx-east
export AWS_PROFILE=$deployment_tag
ansible_extra_vars+=" -e serial_count=$serial_count -e elb_pre_post=$elb_pre_post"
if [[ ! -z "$extra_vars" ]]; then
ansible_extra_vars+=" -e $extra_vars"
fi
if [[ $check_mode == "true" ]]; then
ansible_extra_vars+=" --check"
fi
if [[ ! -z "$run_on_single_ip" ]]; then
ansible_limit+="$run_on_single_ip"
else
if [[ $first_in == "true" ]]; then
ansible_limit+="first_in_"
fi
ansible_limit+="tag_environment_${environment_tag}:&tag_deployment_${deployment_tag}"
fi
ansible_extra_vars+=" -e public_key=$public_key"
export PYTHONUNBUFFERED=1
env
ansible-playbook -v -D -u ubuntu $play -i ./ec2.py $ansible_task_tags --limit $ansible_limit -e@"$WORKSPACE/configuration-secure/ansible/vars/ubuntu-public-keys.yml" $ansible_extra_vars
rm -f $extra_vars_file
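# Illustrative invocation (hypothetical values): with environment_tag=stage,
# deployment_tag=edx, first_in=true and run_on_single_ip unset, the limit
# expands to
#   first_in_tag_environment_stage:&tag_deployment_edx
# so the public key is pushed only to hosts carrying both tags, starting
# with the first_in group; serial_count bounds the batch size.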
......@@ -24,7 +24,9 @@ cd $WORKSPACE/configuration/playbooks/edx-east
ansible_extra_vars+=" -e serial_count=$serial_count -e elb_pre_post=$elb_pre_post"
if [ ! -z "$extra_vars" ]; then
ansible_extra_vars+=" -e $extra_vars"
for arg in $extra_vars; do
ansible_extra_vars+=" -e $arg"
done
fi
if [[ $run_migrations == "true" ]]; then
......@@ -48,6 +50,12 @@ if [[ ! -z "$task_tags" ]]; then
ansible_task_tags+="--tags $task_tags"
fi
if [[ -z "$ssh_user" ]]; then
ansible_ssh_user="ubuntu"
else
ansible_ssh_user="${ssh_user}"
fi
export PYTHONUNBUFFERED=1
env
ansible-playbook -v -D -u ubuntu $ansible_play -i ./ec2.py $ansible_task_tags --limit $ansible_limit -e@"$WORKSPACE/configuration-secure/ansible/vars/${deployment_tag}.yml" -e@"$WORKSPACE/configuration-secure/ansible/vars/${environment_tag}-${deployment_tag}.yml" $ansible_extra_vars
ansible-playbook -v -D -u $ansible_ssh_user $ansible_play -i ./ec2.py $ansible_task_tags --limit $ansible_limit -e@"$WORKSPACE/configuration-secure/ansible/vars/${deployment_tag}.yml" -e@"$WORKSPACE/configuration-secure/ansible/vars/${environment_tag}-${deployment_tag}.yml" $ansible_extra_vars
{
"variables": {
"aws_access_key": "{{env `AWS_ACCESS_KEY_ID`}}",
"aws_secret_key": "{{env `AWS_SECRET_ACCESS_KEY`}}",
"github_oauth_token": "{{env `GITHUB_TOKEN`}}",
"playbook_remote_dir": "/tmp/packer-edx-playbooks",
"ami": "{{env `JENKINS_WORKER_AMI`}}"
},
"builders": [{
"type": "amazon-ebs",
"access_key": "{{user `aws_access_key`}}",
"secret_key": "{{user `aws_secret_key`}}",
"region": "us-east-1",
"source_ami": "{{user `ami`}}",
"instance_type": "m3.large",
"ssh_username": "ubuntu",
"ami_name": "jenkins_worker-{{isotime | clean_ami_name}}"
}],
"provisioners": [{
"type": "shell",
"inline": ["rm -rf {{user `playbook_remote_dir`}}"]
}, {
"type": "file",
"source": "../../../configuration/playbooks",
"destination": "{{user `playbook_remote_dir`}}"
}, {
"type": "file",
"source": "../../../configuration/requirements.txt",
"destination": "{{user `playbook_remote_dir`}}/requirements.txt"
}, {
"type": "shell",
"inline": ["cd {{user `playbook_remote_dir`}}",
"virtualenv packer-venv",
". packer-venv/bin/activate",
"pip install -q -r requirements.txt",
"echo '[jenkins_worker]' > inventory.ini",
"echo 'localhost' >> inventory.ini",
"ansible-playbook edx-east/jenkins_worker.yml -i inventory.ini -c local -e 'github_oauth_token={{user `github_oauth_token` }}' -vvvv"]
}, {
"type": "shell",
"inline": ["cd {{user `playbook_remote_dir`}}",
". packer-venv/bin/activate",
"ansible-playbook run_role.yml -i inventory.ini -c local -e role=test_build_server -vvvv"]
}]
}
......@@ -18,7 +18,7 @@ except ImportError:
from pprint import pprint
AMI_TIMEOUT = 600 # time to wait for AMIs to complete
AMI_TIMEOUT = 1800 # time to wait for AMIs to complete(30 minutes)
EC2_RUN_TIMEOUT = 180 # time to wait for ec2 state transition
EC2_STATUS_TIMEOUT = 300 # time to wait for ec2 system status checks
NUM_TASKS = 5 # number of tasks for time summary report
......@@ -76,19 +76,17 @@ def parse_args():
help="don't cleanup on failures")
parser.add_argument('--vars', metavar="EXTRA_VAR_FILE",
help="path to extra var file", required=False)
parser.add_argument('--refs', metavar="GIT_REFS_FILE",
help="path to a var file with app git refs", required=False)
parser.add_argument('--configuration-version', required=False,
help="configuration repo branch(no hashes)",
help="configuration repo gitref",
default="master")
parser.add_argument('--configuration-secure-version', required=False,
help="configuration-secure repo branch(no hashes)",
help="configuration-secure repo gitref",
default="master")
parser.add_argument('--configuration-secure-repo', required=False,
default="git@github.com:edx-ops/prod-secure",
help="repo to use for the secure files")
parser.add_argument('--configuration-private-version', required=False,
help="configuration-private repo branch(no hashes)",
help="configuration-private repo gitref",
default="master")
parser.add_argument('--configuration-private-repo', required=False,
default="git@github.com:edx-ops/ansible-private",
......@@ -119,6 +117,10 @@ def parse_args():
default=None,
help="The API ID of the Hipchat room to post"
"status messages to")
parser.add_argument("--ansible-hipchat-room-id", required=False,
default='Hammer',
help="The room used by the abbey instance for "
"printing verbose ansible run data.")
parser.add_argument("--hipchat-api-token", required=False,
default=None,
help="The API token for Hipchat integration")
......@@ -220,7 +222,7 @@ config_secure={config_secure}
git_repo_name="configuration"
git_repo="https://github.com/edx/$git_repo_name"
git_repo_secure="{configuration_secure_repo}"
git_repo_secure_name="{configuration_secure_repo_basename}"
git_repo_secure_name=$(basename $git_repo_secure .git)
git_repo_private="{configuration_private_repo}"
git_repo_private_name=$(basename $git_repo_private .git)
secure_vars_file={secure_vars_file}
......@@ -287,18 +289,13 @@ cat << EOF >> $extra_vars
# of all the repositories
{extra_vars_yml}
{git_refs_yml}
# abbey will always run fake migrations
# this is so that the application can come
# up healthy
fake_migrations: true
# Use the build number in the dynamic cache key.
EDXAPP_UPDATE_STATIC_FILES_KEY: true
edxapp_dynamic_cache_key: {deployment}-{environment}-{play}-{cache_id}
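# e.g. a hypothetical build might render the cache key as
# edx-stage-edxapp-42, i.e. deployment-environment-play-build number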
disable_edx_services: true
COMMON_TAG_EC2_INSTANCE: true
# abbey should never take instances in
# and out of elbs
......@@ -353,12 +350,10 @@ rm -rf $base_dir
""".format(
hipchat_token=args.hipchat_api_token,
hipchat_room=args.hipchat_room_id,
hipchat_room=args.ansible_hipchat_room_id,
configuration_version=args.configuration_version,
configuration_secure_version=args.configuration_secure_version,
configuration_secure_repo=args.configuration_secure_repo,
configuration_secure_repo_basename=os.path.basename(
args.configuration_secure_repo),
configuration_private_version=args.configuration_private_version,
configuration_private_repo=args.configuration_private_repo,
environment=args.environment,
......@@ -369,7 +364,6 @@ rm -rf $base_dir
identity_contents=identity_contents,
queue_name=run_id,
extra_vars_yml=extra_vars_yml,
git_refs_yml=git_refs_yml,
secure_vars_file=secure_vars_file,
cache_id=args.cache_id)
......@@ -528,17 +522,20 @@ def create_ami(instance_id, name, description):
time.sleep(AWS_API_WAIT_TIME)
img.add_tag("play", args.play)
time.sleep(AWS_API_WAIT_TIME)
img.add_tag("configuration_ref", args.configuration_version)
time.sleep(AWS_API_WAIT_TIME)
img.add_tag("configuration_secure_ref", args.configuration_secure_version)
conf_tag = "{} {}".format("http://github.com/edx/configuration", args.configuration_version)
img.add_tag("version:configuration", conf_tag)
time.sleep(AWS_API_WAIT_TIME)
img.add_tag("configuration_secure_repo", args.configuration_secure_repo)
conf_secure_tag = "{} {}".format(args.configuration_secure_repo, args.configuration_secure_version)
img.add_tag("version:configuration_secure", conf_secure_tag)
time.sleep(AWS_API_WAIT_TIME)
img.add_tag("cache_id", args.cache_id)
time.sleep(AWS_API_WAIT_TIME)
for repo, ref in git_refs.items():
key = "refs:{}".format(repo)
img.add_tag(key, ref)
# Get versions from the instance.
tags = ec2.get_all_tags(filters={'resource-id': instance_id})
for tag in tags:
if tag.name.startswith('version:'):
img.add_tag(tag.name, tag.value)
time.sleep(AWS_API_WAIT_TIME)
break
else:
......@@ -674,14 +671,6 @@ if __name__ == '__main__':
extra_vars_yml = ""
extra_vars = {}
if args.refs:
with open(args.refs) as f:
git_refs_yml = f.read()
git_refs = yaml.load(git_refs_yml)
else:
git_refs_yml = ""
git_refs = {}
if args.secure_vars_file:
# explicit path to a single
# secure var file
......
__author__ = 'e0d'
"""
Retrieves AWS Auto-scaling lifecycle messages from an SQS queue and processes them. For
the LifeCycleTransition type of autoscaling:EC2_INSTANCE_TERMINATING, ec2 instances are inspected
for an ok_to_retire tag. If that tag exists, the termination state transition is continued; if not, the
lifecycle timeout is extended.
Because the lifecycle commands are not yet available in boto, these commands are, unfortunately,
run via a subprocess call to the awscli. This should be fixed when boto is updated.
This script is meant to be run periodically via some process automation, say, Jenkins.
It relies on some component applying the proper tags and performing pre-retirement activities.
./sqs.py -q autoscaling-lifecycle-queue -b /home/you/.virtualenvs/aws/bin --hook MyLifeCycleHook
"""
import argparse
import boto
import json
import subprocess
from boto.sqs.message import RawMessage
import logging
class LifecycleHandler:
INSTANCE_TERMINATION = 'autoscaling:EC2_INSTANCE_TERMINATING'
TEST_NOTIFICATION = 'autoscaling:TEST_NOTIFICATION'
NUM_MESSAGES = 10
WAIT_TIME_SECONDS = 10
def __init__(self, profile, queue, hook, bin_directory, dry_run):
logging.basicConfig(level=logging.INFO)
self.profile = profile
self.queue = queue
self.hook = hook
self.bin_directory = bin_directory
self.dry_run = dry_run
self.ec2 = boto.connect_ec2(profile_name=self.profile)
def process_lifecycle_messages(self):
sqs_con = boto.connect_sqs()
queue = sqs_con.get_queue(self.queue)
# Needed to get unencoded message for ease of processing
queue.set_message_class(RawMessage)
for sqs_message in queue.get_messages(LifecycleHandler.NUM_MESSAGES,
wait_time_seconds=LifecycleHandler.WAIT_TIME_SECONDS):
body = json.loads(sqs_message.get_body_encoded())
as_message = json.loads(body['Message'])
logging.info("Proccessing message {message}.".format(message=as_message))
if 'LifecycleTransition' in as_message and as_message['LifecycleTransition'] \
== LifecycleHandler.INSTANCE_TERMINATION:
# Convenience vars, set here so they are only read for messages that
# meet the criteria in the if condition above.
instance_id = as_message['EC2InstanceId']
asg = as_message['AutoScalingGroupName']
token = as_message['LifecycleActionToken']
if self.verify_ok_to_retire(as_message['EC2InstanceId']):
logging.info("Host is marked as OK to retire, retiring {instance}".format(
instance=instance_id))
self.continue_lifecycle(asg, token, self.hook)
if not self.dry_run:
logging.info("Deleting message with body {message}".format(message=as_message))
sqs_con.delete_message(queue, sqs_message)
else:
logging.info("Would have deleted message with body {message}".format(message=as_message))
else:
logging.info("Recording lifecycle heartbeat for instance {instance}".format(
instance=instance_id))
self.record_lifecycle_action_heartbeat(asg, token, self.hook)
# These notifications are sent when a new lifecycle hook is configured;
# they can be safely deleted.
elif as_message['Event'] == LifecycleHandler.TEST_NOTIFICATION:
if not self.dry_run:
logging.info("Deleting message with body {message}".format(message=as_message))
sqs_con.delete_message(queue, sqs_message)
else:
logging.info("Would have deleted message with body {message}".format(message=as_message))
else:
raise NotImplemented("Encountered message, {message_id}, of unexpected type.".format(
message_id=as_message['MessageId']))
def record_lifecycle_action_heartbeat(self, asg, token, hook):
command = "{path}/python " \
"{path}/aws " \
"autoscaling record-lifecycle-action-heartbeat " \
"--lifecycle-hook-name {hook} " \
"--auto-scaling-group-name {asg} " \
"--lifecycle-action-token {token}".format(
path=self.bin_directory, hook=hook, asg=asg, token=token)
self.run_subprocess_command(command, self.dry_run)
def continue_lifecycle(self, asg, token, hook):
command = "{path}/python " \
"{path}/aws autoscaling complete-lifecycle-action --lifecycle-hook-name {hook} " \
"--auto-scaling-group-name {asg} --lifecycle-action-token {token} --lifecycle-action-result " \
"CONTINUE".format(
path=self.bin_directory, hook=hook, asg=asg, token=token)
self.run_subprocess_command(command, self.dry_run)
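Rendered with hypothetical values, continue_lifecycle hands run_subprocess_command a single-line string like the following (wrapped here for readability):
# /home/you/.virtualenvs/aws/bin/python /home/you/.virtualenvs/aws/bin/aws
#     autoscaling complete-lifecycle-action --lifecycle-hook-name MyLifeCycleHook
#     --auto-scaling-group-name my-asg
#     --lifecycle-action-token 11111111-2222-3333-4444-555555555555
#     --lifecycle-action-result CONTINUE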
def run_subprocess_command(self, command, dry_run):
logging.info("Running command {command}.".format(command=command))
if not dry_run:
try:
output = subprocess.check_output(command.split(' '))
logging.info("Output was {output}".format(output=output))
except Exception as e:
logging.exception(e)
raise
def get_ec2_instance_by_id(self, instance_id):
"""
Simple boto call to get the instance based on the instance-id
"""
instances = self.ec2.get_only_instances([instance_id])
if len(instances) == 1:
return instances[0]
else:
return None
def verify_ok_to_retire(self, instance_id):
"""
Ensure that the safe_to_retire tag has been added to the instance in question
with the value 'true'
"""
instance = self.get_ec2_instance_by_id(instance_id)
if instance:
if 'safe_to_retire' in instance.tags and instance.tags['safe_to_retire'].lower() == 'true':
logging.info("Instance with id {id} is safe to retire.".format(id=instance_id))
return True
else:
logging.info("Instance with id {id} is not safe to retire.".format(id=instance_id))
return False
else:
# No instance for the id in the SQS message; this can happen if something else
# has terminated the instance outside of this workflow.
logging.warn("Instance with id {id} is referenced in an SQS message, but does not exist.".format(id=instance_id))
return True
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--profile',
help='The boto profile to use.',
default=None)
parser.add_argument('-b', '--bin', required=True,
help='The bin directory of the virtual env '
'from which to run the AWS CLI')
parser.add_argument('-q', '--queue', required=True,
help="The SQS queue containing the lifecyle messages")
parser.add_argument('--hook', required=True,
help="The lifecyle hook to act upon.")
parser.add_argument('-d', "--dry-run", dest="dry_run", action="store_true",
help='Print the commands, but do not do anything')
parser.set_defaults(dry_run=False)
args = parser.parse_args()
lh = LifecycleHandler(args.profile, args.queue, args.hook, args.bin, args.dry_run)
lh.process_lifecycle_messages()
#!/usr/bin/env python
#!/usr/bin/env python -u
import boto
import boto.route53
import boto.route53.record
......@@ -37,25 +37,6 @@ RDS_SIZES = [
'db.m2.4xlarge',
]
# These are the groups for the different
# stack names that will be assigned once
# the corresponding db is cloned
SG_GROUPS = {
'stage-edx': 'sg-d2f623b7',
}
# This group must already be created
# and allows for full access to port
# 3306 from within the vpc.
# This group is assigned temporarily
# for cleaning the db
SG_GROUPS_FULL = {
'stage-edx': 'sg-0abf396f',
}
def parse_args(args=sys.argv[1:]):
stack_names = all_stack_names()
......@@ -64,9 +45,12 @@ def parse_args(args=sys.argv[1:]):
for db in rds.describe_db_instances()['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']]
parser = ArgumentParser(description=description, formatter_class=RawTextHelpFormatter)
parser.add_argument('-s', '--stack-name', choices=stack_names,
default=None,
help='Stack name for where you want this RDS instance launched')
parser.add_argument('--vpc', default=None, action="store_true",
help='launch the new RDS instance in a VPC')
parser.add_argument('--security-group', default=None,
help='security group name that should be assigned to the new RDS instance (vpc only!)')
parser.add_argument('--subnet', default=None,
help='subnet that should be used for the RDS instance (vpc only!)')
parser.add_argument('-t', '--type', choices=RDS_SIZES,
default='db.m1.small', help='RDS size to create instances of')
parser.add_argument('-d', '--db-source', choices=dbs,
......@@ -86,8 +70,8 @@ def parse_args(args=sys.argv[1:]):
parser.add_argument('--dump', action="store_true",
default=False,
help="create a sql dump after launching it into the vpc")
parser.add_argument('--secret-var-file',
help="using a secret var file run ansible against the host to update db users")
parser.add_argument('-s', '--secret-var-files', action="append", required=True,
help="use one or more secret var files to run ansible against the host to update db users")
return parser.parse_args(args)
......@@ -99,10 +83,11 @@ def wait_on_db_status(db_name, region='us-east-1', wait_on='available', aws_id=N
if len(statuses) > 1:
raise Exception("More than one instance returned for {0}".format(db_name))
if statuses[0]['DBInstanceStatus'] == wait_on:
print("Status is: {}".format(wait_on))
break
sys.stdout.write(".")
sys.stdout.write("status is {}..\n".format(statuses[0]['DBInstanceStatus']))
sys.stdout.flush()
time.sleep(2)
time.sleep(10)
return
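A hypothetical call; it polls every 10 seconds, printing the interim status, and returns once the instance reports 'available':
# wait_on_db_status('edxapp-clone-20140829')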
if __name__ == '__main__':
......@@ -119,22 +104,21 @@ if __name__ == '__main__':
use_latest_restorable_time=True,
db_instance_class=args.type,
)
if args.stack_name:
subnet_name = rds_subnet_group_name_for_stack_name(args.stack_name)
restore_args['db_subnet_group_name'] = subnet_name
if args.vpc:
restore_args['db_subnet_group_name'] = args.subnet
rds.restore_db_instance_to_point_in_time(**restore_args)
wait_on_db_status(restore_dbid)
print("Getting db host")
db_host = rds.describe_db_instances(restore_dbid)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances'][0]['Endpoint']['Address']
if args.password or args.stack_name:
modify_args = dict(
apply_immediately=True
)
if args.password:
modify_args['master_user_password'] = args.password
if args.stack_name:
modify_args['vpc_security_group_ids'] = [SG_GROUPS[args.stack_name], SG_GROUPS_FULL[args.stack_name]]
if args.vpc:
modify_args['vpc_security_group_ids'] = [args.security_group]
else:
# dev-edx is the default security group for dbs that
# are not in the vpc, it allows connections from the various
......@@ -142,8 +126,11 @@ if __name__ == '__main__':
modify_args['db_security_groups'] = ['dev-edx']
# Update the db immediately
print("Updating db instance: {}".format(modify_args))
rds.modify_db_instance(restore_dbid, **modify_args)
print("Waiting 15 seconds before checking to see if db is available")
time.sleep(15)
wait_on_db_status(restore_dbid)
if args.clean_wwc:
# Run the mysql clean sql file
sanitize_cmd = """mysql -u root -p{root_pass} -h{db_host} wwc < {sanitize_wwc_sql_file} """.format(
......@@ -162,12 +149,17 @@ if __name__ == '__main__':
print("Running {}".format(sanitize_cmd))
os.system(sanitize_cmd)
if args.secret_var_file:
db_cmd = """cd {play_path} && ansible-playbook -c local -i 127.0.0.1, update_edxapp_db_users.yml """ \
"""-e @{secret_var_file} -e "edxapp_db_root_user=root edxapp_db_root_pass={root_pass} """ \
if args.secret_var_files:
extra_args = ""
for secret_var_file in args.secret_var_files:
extra_args += " -e@{}".format(secret_var_file)
db_cmd = """cd {play_path} && ansible-playbook -c local -i 127.0.0.1, create_dbs.yml """ \
"""{extra_args} -e "edxapp_db_root_user=root xqueue_db_root_user=root" """ \
""" -e "db_root_pass={root_pass}" """ \
"""EDXAPP_MYSQL_HOST={db_host}" """.format(
root_pass=args.password,
secret_var_file=args.secret_var_file,
extra_args=extra_args,
db_host=db_host,
play_path=play_path)
print("Running {}".format(db_cmd))
......@@ -181,6 +173,3 @@ if __name__ == '__main__':
db_host=db_host)
print("Running {}".format(dns_cmd))
os.system(dns_cmd)
if args.stack_name:
rds.modify_db_instance(restore_dbid, vpc_security_group_ids=[SG_GROUPS[args.stack_name]])
......@@ -31,7 +31,7 @@ BASTION_CONFIG = """Host {jump_box}
HOST_CONFIG = """# Instance ID: {instance_id}
Host {name}
ProxyCommand ssh {config_file} -W %h:%p {jump_box}
ProxyCommand ssh -q {config_file} -W %h:%p {jump_box}
HostName {ip}
ForwardAgent yes
User {user}
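Filled in with hypothetical values (the exact {config_file} expansion depends on the surrounding script), the template renders an entry like the following; the added -q keeps the jump box's ssh diagnostics out of the proxied stream:
# Instance ID: i-0123456789abcdef0
Host edxapp-001
ProxyCommand ssh -q -F /path/to/generated_config -W %h:%p bastion.example.com
HostName 10.0.1.23
ForwardAgent yes
User ubuntu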
......
......@@ -5,7 +5,7 @@ end
VAGRANTFILE_API_VERSION = "2"
MEMORY = 2048
MEMORY = 4096
CPU_COUNT = 2
edx_platform_mount_dir = "edx-platform"
......@@ -50,8 +50,11 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
create: true, owner: "edxapp", group: "www-data"
config.vm.synced_folder "#{forum_mount_dir}", "/edx/app/forum/cs_comments_service",
create: true, owner: "forum", group: "www-data"
if ENV['ENABLE_LEGACY_ORA']
config.vm.synced_folder "#{ora_mount_dir}", "/edx/app/ora/ora",
create: true, owner: "ora", group: "www-data"
end
else
config.vm.synced_folder "#{edx_platform_mount_dir}", "/edx/app/edxapp/edx-platform",
create: true, nfs: true
......@@ -59,9 +62,12 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
create: true, nfs: true
config.vm.synced_folder "#{forum_mount_dir}", "/edx/app/forum/cs_comments_service",
create: true, nfs: true
if ENV['ENABLE_LEGACY_ORA']
config.vm.synced_folder "#{ora_mount_dir}", "/edx/app/ora/ora",
create: true, nfs: true
end
end
config.vm.provider :virtualbox do |vb|
......@@ -95,5 +101,9 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.provision :ansible do |ansible|
ansible.playbook = "../../../playbooks/vagrant-devstack.yml"
ansible.verbose = "vvvv"
if ENV['ENABLE_LEGACY_ORA']
ansible.extra_vars = { ENABLE_LEGACY_ORA: true }
end
end
end
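Both the synced folders and the extra var are now gated on ENABLE_LEGACY_ORA; a hypothetical invocation that turns the legacy ORA support back on:
ENABLE_LEGACY_ORA=true vagrant up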
......@@ -2,7 +2,7 @@ Vagrant.require_version ">= 1.5.3"
VAGRANTFILE_API_VERSION = "2"
MEMORY = 2048
MEMORY = 4096
CPU_COUNT = 2
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
......
......@@ -43,8 +43,8 @@ end
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# Creates an edX devstack VM from an official release
config.vm.box = "johnnycake-devstack"
config.vm.box_url = "http://files.edx.org/vagrant-images/20140625-johnnycake-devstack.box"
config.vm.box = "kifli-devstack"
config.vm.box_url = "http://files.edx.org/vagrant-images/20140826-kifli-devstack.box"
config.vm.network :private_network, ip: "192.168.33.10"
config.vm.network :forwarded_port, guest: 8000, host: 8000
......@@ -92,8 +92,8 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
["vmware_fusion", "vmware_workstation"].each do |vmware_provider|
config.vm.provider vmware_provider do |v, override|
override.vm.box = "johnnycake-devstack-vmware"
override.vm.box_url = "http://files.edx.org/vagrant-images/20140630-johnnycake-devstack-vmware.box"
override.vm.box = "kifli-devstack-vmware"
override.vm.box_url = "http://files.edx.org/vagrant-images/20140829-kifli-devstack-vmware.box"
v.vmx["memsize"] = MEMORY.to_s
v.vmx["numvcpus"] = CPU_COUNT.to_s
end
......
......@@ -8,8 +8,8 @@ CPU_COUNT = 2
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# Creates an edX fullstack VM from an official release
config.vm.box = "johnnycake-fullstack"
config.vm.box_url = "http://files.edx.org/vagrant-images/20140625-johnnycake-fullstack.box"
config.vm.box = "kifli-fullstack"
config.vm.box_url = "http://files.edx.org/vagrant-images/20140826-kifli-fullstack.box"
config.vm.synced_folder ".", "/vagrant", disabled: true
config.ssh.insert_key = true
......@@ -28,8 +28,8 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
["vmware_fusion", "vmware_workstation"].each do |vmware_provider|
config.vm.provider vmware_provider do |v, override|
override.vm.box = "johnnycake-fullstack-vmware"
override.vm.box_url = "http://files.edx.org/vagrant-images/20140630-johnnycake-fullstack-vmware.box"
override.vm.box = "kifli-fullstack-vmware"
override.vm.box_url = "http://files.edx.org/vagrant-images/20140829-kifli-fullstack-vmware.box"
v.vmx["memsize"] = MEMORY.to_s
v.vmx["numvcpus"] = CPU_COUNT.to_s
end
......