Commit 29f6483e by Jason Zhu

Merge pull request #2 from edx/master

up to date
parents 1e9b3ebe 5a9076ee
......@@ -3,7 +3,7 @@ language: python
python:
- "2.7"
install:
- "sudo apt-get install -y npm python-demjson"
- "sudo apt-get install -y nodejs python-demjson"
- "pip install --allow-all-external -r requirements.txt"
- "pip install --allow-all-external demjson"
script:
......
......@@ -32,3 +32,4 @@ Ray Hooker <ray.hooker@gmail.com>
David Pollack <david@sologourmand.com>
Rodolphe Quiedeville <rodolphe@quiedeville.org>
Matjaz Gregoric <mtyaka@gmail.com>
Ben Patterson <bpatterson@edx.org>
- Role: analytics-api
- Added a new role for the analytics-api Django app, which currently lives in a private repo.
- Logrotation now happens hourly by default for all logs.
- Role: xqwatcher, xqueue, nginx, edxapp, common
- Moving nginx basic authorization flag and credentials to the common role
- Basic auth will be turned on by default
- Role: Edxapp
- Turn on code sandboxing by default and allow the jailed code to write
......@@ -9,6 +15,6 @@
- The repo.txt requirements file is no longer being processed in any way. This file was removed from edx-platform
via pull #3487 (https://github.com/edx/edx-platform/pull/3487).
- Update CMS_HOSTNAME default to allow any hostname that starts with `studio` along with `prod-studio` or `stage-studio`.
- Update `CMS_HOSTNAME` default to allow any hostname that starts with `studio` along with `prod-studio` or `stage-studio`.
- Start a change log to keep track of backwards incompatible changes and deprecations.
#
# Overview:
# This play needs to be run per environment-deployment and you will need to
# provide the boto environment and vpc_id as arguments
#
# ansible-playbook -i 'localhost,' ./vpc-migrate-analytics_api-edge-stage.yml \
# -e 'profile=edge vpc_id=vpc-416f9b24'
#
# Caveats
#
# - This requires ansible 1.6
# - Requires the following branch of Ansible, e0d/add-instance-profile, from
# https://github.com/e0d/ansible.git
# - This play isn't fully idempotent because of an ec2 module update issue
# with ASGs. This can be worked around by deleting the ASG and re-running
# the play
# - The instance_profile_name will need to be created in advance as there
# isn't a way to do so from ansible.
#
# Prerequisites:
# Create an IAM EC2 role
#
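Since the instance profile cannot be created from Ansible here, a minimal boto sketch for pre-creating it might look like the following; the role and profile names are illustrative, not taken from this play.

import boto

# Connect using AWS credentials from the environment (or ~/.boto).
iam = boto.connect_iam()

role_name = 'analytics-api-role'        # hypothetical name
profile_name = 'analytics-api-profile'  # pass this as instance_profile_name

# boto supplies a default EC2 assume-role policy when none is given.
iam.create_role(role_name)
iam.create_instance_profile(profile_name)
iam.add_role_to_instance_profile(profile_name, role_name)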
- name: Add resources for the Analytics API
hosts: localhost
connection: local
gather_facts: False
tasks:
# Fails intermittently with the following error:
# The specified rule does not exist in this security group
- name: Create instance security group
ec2_group:
profile: "{{ profile }}"
description: "Open up SSH access"
name: "{{ security_group }}"
vpc_id: "{{ vpc_id }}"
region: "{{ ec2_region }}"
rules:
- proto: tcp
from_port: "{{ sec_group_ingress_from_port }}"
to_port: "{{ sec_group_ingress_to_port }}"
cidr_ip: "{{ item }}"
with_items: sec_group_ingress_cidrs
register: created_sec_group
ignore_errors: True
- name: debug
debug:
msg: "Registered created_sec_group: {{ created_sec_group }}"
# Needs ansible 1.7 for vpc support of elbs
# - name: Create elb security group
# ec2_group:
# profile: "{{ profile }}"
# description: "ELB security group"
# name: "ELB-{{ security_group }}"
# vpc_id: "{{ vpc_id }}"
# region: "{{ ec2_region }}"
# rules:
# - proto: tcp
# from_port: "443"
# to_port: "443"
# cidr_ip: "0.0.0.0/0"
# register: created_elb_sec_group
# ignore_errors: True
# Needs 1.7 for VPC support
# - name: "Create ELB"
# ec2_elb_lb:
# profile: "{{ profile }}"
# region: "{{ ec2_region }}"
# zones:
# - us-east-1b
# - us-east-1c
# name: "{{ edp }}"
# state: present
# security_group_ids: "{{ created_elb_sec_group.group_id }}"
# listeners:
# - protocol: https
# load_balancer_port: 443
# instance_protocol: http # optional, defaults to value of protocol setting
# instance_port: 80
# # ssl certificate required for https or ssl
# ssl_certificate_id: "{{ ssl_cert }}"
# instance_profile_name was added by me in my fork
- name: Create the launch configuration
ec2_lc:
profile: "{{ profile }}"
region: "{{ ec2_region }}"
name: "{{ lc_name }}"
image_id: "{{ lc_ami }}"
key_name: "{{ key_name }}"
security_groups: "{{ created_sec_group.results[0].group_id }}"
instance_type: "{{ instance_type }}"
instance_profile_name: "{{ instance_profile_name }}"
volumes:
- device_name: "/dev/sda1"
volume_size: "{{ instance_volume_size }}"
- name: Create ASG
ec2_asg:
profile: "{{ profile }}"
region: "{{ ec2_region }}"
name: "{{ asg_name }}"
launch_config_name: "{{ lc_name }}"
load_balancers: "{{ elb_name }}"
availability_zones:
- us-east-1b
- us-east-1c
min_size: 0
max_size: 2
desired_capacity: 1
vpc_zone_identifier: "{{ subnets|join(',') }}"
instance_tags:
Name: "{{ env }}-{{ deployment }}-{{ play }}"
autostack: "true"
environment: "{{ env }}"
deployment: "{{ deployment }}"
play: "{{ play }}"
services: "{{ play }}"
register: asg
- name: debug
debug:
msg: "DEBUG: {{ asg }}"
- name: Create scale up policy
ec2_scaling_policy:
state: present
profile: "{{ profile }}"
region: "{{ ec2_region }}"
name: "{{ edp }}-ScaleUpPolicy"
adjustment_type: "ChangeInCapacity"
asg_name: "{{ asg_name }}"
scaling_adjustment: 1
min_adjustment_step: 1
cooldown: 60
register: scale_up_policy
- name: debug
debug:
msg: "Registered scale_up_policy: {{ scale_up_policy }}"
- name: Create scale down policy
ec2_scaling_policy:
state: present
profile: "{{ profile }}"
region: "{{ ec2_region }}"
name: "{{ edp }}-ScaleDownPolicy"
adjustment_type: "ChangeInCapacity"
asg_name: "{{ asg_name }}"
scaling_adjustment: -1
min_adjustment_step: 1
cooldown: 60
register: scale_down_policy
- name: debug
debug:
msg: "Registered scale_down_policy: {{ scale_down_policy }}"
#
# Sometimes the scaling policy reports itself changed, but
# does not return data about the policy. It's bad enough
# that consistent data isn't returned when things
# have and have not changed; this makes writing idempotent
# tasks difficult.
- name: create high-cpu alarm
ec2_metric_alarm:
state: present
region: "{{ ec2_region }}"
name: "cpu-high"
metric: "CPUUtilization"
namespace: "AWS/EC2"
statistic: Average
comparison: ">="
threshold: 90.0
period: 300
evaluation_periods: 2
unit: "Percent"
description: "Scale-up if CPU > 90% for 10 minutes"
dimensions: {"AutoScalingGroupName":"{{ asg_name }}"}
alarm_actions: ["{{ scale_up_policy.arn }}"]
when: scale_up_policy.arn is defined
- name: create low-cpu alarm
ec2_metric_alarm:
state: present
region: "{{ ec2_region }}"
name: "cpu-low"
metric: "CPUUtilization"
namespace: "AWS/EC2"
statistic: Average
comparison: "<="
threshold: 50.0
period: 300
evaluation_periods: 2
unit: "Percent"
description: "Scale-down if CPU < 50% for 10 minutes"
dimensions: {"AutoScalingGroupName":"{{ asg_name }}"}
alarm_actions: ["{{ scale_down_policy.arn }}"]
when: scale_down_policy.arn is defined
\ No newline at end of file
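For reference, the "10 minutes" in the alarm descriptions falls directly out of the period and evaluation settings; a quick check:

# Each alarm breaches only after `evaluation_periods` consecutive
# periods of `period` seconds: 300s * 2 = 600s = 10 minutes.
period = 300
evaluation_periods = 2
print(period * evaluation_periods / 60.0)  # 10.0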
......@@ -2865,15 +2865,13 @@
"Key":"environment",
"Value":{
"Ref":"EnvironmentTag"
},
"PropagateAtLaunch":true
}
},
{
"Key":"deployment",
"Value":{
"Ref":"DeploymentTag"
},
"PropagateAtLaunch":true
}
}
],
"UserData": { "Fn::Base64" : { "Fn::Join" : ["", [
......
{
"AWSTemplateFormatVersion":"2010-09-09",
"Description":"Separate VPC for database clones and replicas.",
"Parameters":{
"EnvironmentTag":{
"Type":"String",
"Description":"A tag value applied to the hosts in the VPC indicating which environment to use during the configuration phase, e.g., stage, prod, sandbox",
"Default":"prod"
},
"DeploymentTag":{
"Type":"String",
"Description":"A tag value applied to the hosts in the VPC indicating which deployment this is, e.g., edx, edge, <university>, <org>",
"Default":"edx"
},
"KeyName":{
"Type":"String",
"Description":"Name of an existing EC2 KeyPair to enable SSH access to the web server",
"Default":"deployment-201407"
},
"ClassB":{
"Default":"1",
"Description":"The second octet of the Class B to be allocated for this VPC. 10.?.xxx.xxx",
"Type":"Number",
"MinValue":"0",
"MaxValue":"255",
"ConstraintDescription":"ClassB value must be between 0 and 255."
}
},
"Mappings":{
"SubnetConfig":{
"VPC": { "CIDR":".0.0/16" },
"Data01": { "CIDR":".50.0/24" },
"Data02": { "CIDR":".51.0/24" }
},
"MapRegionsToAvailZones":{
"us-east-1": { "AZone2":"us-east-1d", "AZone0":"us-east-1b", "AZone1":"us-east-1c" },
"us-west-1": { "AZone0":"us-west-1a", "AZone2":"us-west-1b", "AZone1":"us-west-1c" },
"us-west-2": { "AZone0":"us-west-2a", "AZone1":"us-west-2b", "AZone2":"us-west-2c" },
"eu-west-1": { "AZone0":"eu-west-1a", "AZone1":"eu-west-1b", "AZone2":"eu-west-1c" },
"sa-east-1": { "AZone0":"sa-east-1a", "AZone1":"sa-east-1b", "AZone2":"sa-east-1c" },
"ap-southeast-1": { "AZone0":"ap-southeast-1a", "AZone1":"ap-southeast-1b", "AZone2":"ap-southeast-1c" },
"ap-southeast-2": { "AZone0":"ap-southeast-2a", "AZone1":"ap-southeast-2b", "AZone2":"ap-southeast-2c" },
"ap-northeast-1": { "AZone0":"ap-northeast-1a", "AZone1":"ap-northeast-1b", "AZone2":"ap-northeast-1c" }
}
},
"Resources":{
"EdxVPC":{
"Type":"AWS::EC2::VPC",
"Properties":{
"EnableDnsSupport" : "true",
"EnableDnsHostnames" : "true",
"CidrBlock": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "VPC", "CIDR"]}]]},
"InstanceTenancy":"default"
}
},
"Data01":{
"Type":"AWS::EC2::Subnet",
"Properties":{
"VpcId":{
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Data01",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
"MapRegionsToAvailZones",
{ "Ref":"AWS::Region" },
"AZone0"
]
},
"Tags":[
{
"Key":"Name",
"Value":"Subnet-for-sanitized-dbs"
}
]
}
},
"Data02":{
"Type":"AWS::EC2::Subnet",
"Properties":{
"VpcId":{
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Data02",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
"MapRegionsToAvailZones",
{ "Ref":"AWS::Region" },
"AZone1"
]
},
"Tags":[
{
"Key":"Name",
"Value":"Subnet-for-non-sanitized-clones"
}
]
}
},
"PrivateRouteTable":{
"Type":"AWS::EC2::RouteTable",
"Properties":{
"VpcId":{
"Ref":"EdxVPC"
},
"Tags":[
{
"Key":"Application",
"Value":{
"Ref":"AWS::StackId"
}
},
{
"Key":"Network",
"Value":"Private"
}
]
}
},
"PrivateSubnetRouteTableAssociationData01":{
"Type":"AWS::EC2::SubnetRouteTableAssociation",
"Properties":{
"SubnetId":{
"Ref":"Data01"
},
"RouteTableId":{
"Ref":"PrivateRouteTable"
}
}
},
"PrivateSubnetRouteTableAssociationData02":{
"Type":"AWS::EC2::SubnetRouteTableAssociation",
"Properties":{
"SubnetId":{
"Ref":"Data02"
},
"RouteTableId":{
"Ref":"PrivateRouteTable"
}
}
},
"PrivateNetworkAcl":{
"Type":"AWS::EC2::NetworkAcl",
"Properties":{
"VpcId":{
"Ref":"EdxVPC"
},
"Tags":[
{
"Key":"Application",
"Value":{
"Ref":"AWS::StackId"
}
},
{
"Key":"Network",
"Value":"Private"
}
]
}
},
"InboundPrivateNetworkAclEntry":{
"Type":"AWS::EC2::NetworkAclEntry",
"Properties":{
"NetworkAclId":{
"Ref":"PrivateNetworkAcl"
},
"RuleNumber":"100",
"Protocol":"6",
"RuleAction":"allow",
"Egress":"false",
"CidrBlock":"0.0.0.0/0",
"PortRange":{
"From":"0",
"To":"65535"
}
}
},
"OutBoundPrivateNetworkAclEntry":{
"Type":"AWS::EC2::NetworkAclEntry",
"Properties":{
"NetworkAclId":{
"Ref":"PrivateNetworkAcl"
},
"RuleNumber":"100",
"Protocol":"6",
"RuleAction":"allow",
"Egress":"true",
"CidrBlock":"0.0.0.0/0",
"PortRange":{
"From":"0",
"To":"65535"
}
}
},
"PrivateSubnetNetworkAclAssociationData01":{
"Type":"AWS::EC2::SubnetNetworkAclAssociation",
"Properties":{
"SubnetId":{
"Ref":"Data01"
},
"NetworkAclId":{
"Ref":"PrivateNetworkAcl"
}
}
},
"PrivateSubnetNetworkAclAssociationData02":{
"Type":"AWS::EC2::SubnetNetworkAclAssociation",
"Properties":{
"SubnetId":{
"Ref":"Data02"
},
"NetworkAclId":{
"Ref":"PrivateNetworkAcl"
}
}
},
"EdxDataSecurityGroup":{
"Type":"AWS::EC2::SecurityGroup",
"Properties":{
"GroupDescription":"Open up access to the data subnet",
"VpcId":{
"Ref":"EdxVPC"
},
"SecurityGroupIngress":[
{
"IpProtocol":"tcp",
"FromPort":"3306",
"ToPort":"3306",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":"27017",
"ToPort":"27017",
"CidrIp":"0.0.0.0/0"
}
]
}
},
"EdxDBSubnetGroup":{
"Type":"AWS::RDS::DBSubnetGroup",
"Properties":{
"DBSubnetGroupDescription":"Subnets available for the RDS DB Instance",
"SubnetIds":[
{
"Ref":"Data01"
},
{
"Ref":"Data02"
}
]
}
},
"DBSecurityGroup":{
"Type":"AWS::RDS::DBSecurityGroup",
"Properties":{
"EC2VpcId":{
"Ref":"EdxVPC"
},
"GroupDescription":"Data access"
}
}
}
}
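The template builds every CIDR the same way: Fn::Join concatenates "10.", the ClassB parameter, and a suffix from the SubnetConfig mapping. A small sketch of that logic, using the mapping values above:

# Mirrors the template's Fn::Join: "10." + ClassB + SubnetConfig suffix.
SUBNET_CONFIG = {
    "VPC": ".0.0/16",
    "Data01": ".50.0/24",
    "Data02": ".51.0/24",
}

def cidr(class_b, name):
    return "10.{}{}".format(class_b, SUBNET_CONFIG[name])

assert cidr(1, "VPC") == "10.1.0.0/16"      # default ClassB of 1
assert cidr(1, "Data01") == "10.1.50.0/24"
assert cidr(1, "Data02") == "10.1.51.0/24"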
......@@ -225,12 +225,16 @@ class Ec2Inventory(object):
cache_path = config.get('ec2', 'cache_path')
if not os.path.exists(cache_path):
os.makedirs(cache_path)
self.cache_path_cache = cache_path + "/ansible-ec2.cache"
self.cache_path_tags = cache_path + "/ansible-ec2.tags.cache"
self.cache_path_index = cache_path + "/ansible-ec2.index"
self.cache_max_age = config.getint('ec2', 'cache_max_age')
if 'AWS_PROFILE' in os.environ:
aws_profile = "{}-".format(os.environ.get('AWS_PROFILE'))
else:
aws_profile = ""
self.cache_path_cache = cache_path + "/{}ansible-ec2.cache".format(aws_profile)
self.cache_path_tags = cache_path + "/{}ansible-ec2.tags.cache".format(aws_profile)
self.cache_path_index = cache_path + "/{}ansible-ec2.index".format(aws_profile)
self.cache_max_age = config.getint('ec2', 'cache_max_age')
def parse_cli_args(self):
''' Command line argument processing '''
......
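The effect of the change above is that the inventory cache files are namespaced by the active AWS profile, so results for different accounts no longer clobber each other. A standalone sketch of the same logic:

import os

def cache_paths(cache_path):
    # Same logic as Ec2Inventory above: prefix cache file names with
    # "<profile>-" when AWS_PROFILE is set, otherwise leave them bare.
    if 'AWS_PROFILE' in os.environ:
        aws_profile = "{}-".format(os.environ.get('AWS_PROFILE'))
    else:
        aws_profile = ""
    return [cache_path + "/{}{}".format(aws_profile, name)
            for name in ("ansible-ec2.cache",
                         "ansible-ec2.tags.cache",
                         "ansible-ec2.index")]

os.environ['AWS_PROFILE'] = 'edge'
print(cache_paths('/tmp'))
# ['/tmp/edge-ansible-ec2.cache', '/tmp/edge-ansible-ec2.tags.cache',
#  '/tmp/edge-ansible-ec2.index']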
# A simple utility play to add a public key to the authorized key
# file for the ubuntu user.
# You must pass in the entire line that you are adding.
# Example: ansible-playbook add-ubuntu-key.yml -c local -i 127.0.0.1, \
# -e "public_key=deployment-201407" \
# -e owner=jarv -e keyfile=/home/jarv/.ssh/authorized_keys
- hosts: all
vars:
# Number of instances to operate on at a time
serial_count: 1
owner: ubuntu
keyfile: "/home/{{ owner }}/.ssh/authorized_keys"
serial: "{{ serial_count }}"
tasks:
- fail: msg="You must pass in a public_key"
when: public_key is not defined
- fail: msg="public does not exist in secrets"
when: ubuntu_public_keys[public_key] is not defined
- command: mktemp
register: mktemp
- name: Validate the public key before we add it to authorized_keys
copy: >
content="{{ ubuntu_public_keys[public_key] }}"
dest={{ mktemp.stdout }}
# This tests the public key and will not continue if it does not look valid
- command: ssh-keygen -l -f {{ mktemp.stdout }}
- file: >
path={{ mktemp.stdout }}
state=absent
- lineinfile: >
dest={{ keyfile }}
line="{{ ubuntu_public_keys[public_key] }}"
- file: >
path={{ keyfile }}
owner={{ owner }}
mode=0600
- name: Deploy aide IDS
hosts: all
sudo: True
gather_facts: True
roles:
- aide
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
- name: Deploy Analytics API
hosts: all
sudo: True
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- role: nginx
nginx_sites:
- analytics-api
- aws
- analytics-api
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
# ansible-playbook -i ec2.py commoncluster.yml --limit tag_Name_stage-edx-commoncluster -e@/path/to/vars/env-deployment.yml -T 30 --list-hosts
- hosts: all
sudo: True
serial: 1
vars:
# By default take instances in and out of the elb(s) they
# are attached to
# To skip elb operations use "-e elb_pre_post=false"
elb_pre_post: true
# Number of instances to operate on at a time
serial_count: 1
serial: "{{ serial_count }}"
pre_tasks:
- action: ec2_facts
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
wait_timeout: 60
sudo: False
when: elb_pre_post
tasks:
- debug: msg="{{ ansible_ec2_local_ipv4 }}"
with_items: list.results
- shell: echo "rabbit@ip-{{ item|replace('.', '-') }}"
when: item != ansible_ec2_local_ipv4
with_items: hostvars.keys()
register: list
- command: rabbitmqctl stop_app
- command: rabbitmqctl join_cluster {{ item.stdout }}
when: item.stdout is defined
with_items: list.results
- command: rabbitmqctl start_app
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
sudo: False
when: elb_pre_post
......@@ -22,25 +22,24 @@
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
wait_timeout: 60
sudo: False
when: elb_pre_post
roles:
- aws
- role: nginx
nginx_sites:
- xqueue
- role: xqueue
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
- role: nginx
nginx_sites:
- xqueue
- xqueue
- oraclejdk
- elasticsearch
- rabbitmq
- datadog
- splunkforwarder
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
......@@ -51,6 +50,7 @@
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
sudo: False
when: elb_pre_post
......
......@@ -4,91 +4,6 @@
sudo: True
tasks:
- name: Switch the mongo db to use ephemeral storage
file: >
name=/mnt/mongodb
state=directory
owner=mongodb
group=mongodb
tags: update_mongo_data
- name: update the mongo config to use the new mongo dir
shell: >
sed -i 's#^dbpath=.*#dbpath=/mnt/mongodb#' /etc/mongodb.conf
tags: update_mongo_data
- name: restart mongodb
service: >
name=mongodb
state=restarted
tags: update_mongo_data
- name: grab the most recent backup from s3 for forums
shell: >
/edx/bin/s3cmd ls s3://edx-mongohq/mongohq_backups/ | grep comment | sort | tail -1 | awk '{ print $4 }'
register: s3cmd_out_forum
tags: update_mongo_data
- name: download the most recent backup from s3 for forums
shell: >
/edx/bin/s3cmd get {{ s3cmd_out_forum.stdout }} --skip-existing
chdir=/mnt
tags: update_mongo_data
when: s3cmd_out_forum.stdout is defined
- name: untar the s3 backup
shell: >
tar zxf {{ s3cmd_out_forum.stdout|basename }}
chdir=/mnt
when: s3cmd_out_forum.stdout is defined
tags: update_mongo_data
- name: grab the most recent backup from s3 for prod-edx
shell: >
/edx/bin/s3cmd ls s3://edx-mongohq/mongohq_backups/ | grep prod-edx | sort | tail -1 | awk '{ print $4 }'
register: s3cmd_out_modulestore
tags: update_mongo_data
- name: download the most recent backup from s3 for prod-edx
shell: >
/edx/bin/s3cmd get {{ s3cmd_out_modulestore.stdout }} --skip-existing
chdir=/mnt
tags: update_mongo_data
when: s3cmd_out_modulestore.stdout is defined
- name: untar the s3 backup
shell: >
tar zxf {{ s3cmd_out_modulestore.stdout|basename }}
chdir=/mnt
tags: update_mongo_data
when: s3cmd_out_modulestore.stdout is defined
- name: Restore the mongo data for the forums
shell: >
mongorestore --drop -d cs_comments_service /mnt/comments-prod
tags: update_mongo_data
- name: Restore the mongo data for the modulestore
shell: >
mongorestore --drop -d edxapp /mnt/prod-edx
tags: update_mongo_data
# recreate users after the restore
- name: create mongodb users
mongodb_user: >
database={{ item.database }}
name={{ item.user }}
password={{ item.password }}
state=present
with_items:
- user: cs_comments_service
password: password
database: cs_comments_service
- user: edxapp
password: password
database: edxapp
# WARNING - calling lineinfile on a symlink
# will convert the symlink to a file!
# don't use /edx/etc/server-vars.yml here
......@@ -108,6 +23,17 @@
- "EDXAPP_MYSQL_PASSWORD: {{ EDXAPP_MYSQL_PASSWORD }}"
tags: update_edxapp_mysql_host
- name: Update mongo to point to the sandbox mongo clone
lineinfile: >
dest=/edx/app/edx_ansible/server-vars.yml
line="{{ item }}"
with_items:
- "EDXAPP_MONGO_HOSTS: {{ EDXAPP_MONGO_HOSTS }}"
- "EDXAPP_MONGO_DB_NAME: {{ EDXAPP_MONGO_DB_NAME }}"
- "EDXAPP_MONGO_USER: {{ EDXAPP_MONGO_USER }}"
- "EDXAPP_MONGO_PASS: {{ EDXAPP_MONGO_PASS }}"
tags: update_edxapp_mysql_host
- name: call update on edx-platform
shell: >
/edx/bin/update edx-platform master
......
#
# Requires MySQL-python to be installed for the system python
# This play will create databases and users for an application.
# It can be run like so:
#
# ansible-playbook -i 'localhost,' create_analytics_reports_dbs.yml -e@./db.yml
#
# where the content of db.yml contains the following dictionaries:
#
# database_connection: &default_connection
# login_host: "mysql.example.org"
# login_user: "root"
# login_password: "super-secure-password"
# DEFAULT_ENCODING: "utf8"
# databases:
# reports:
# state: "present"
# encoding: "{{ DEFAULT_ENCODING }}"
# <<: *default_connection
# application:
# state: "present"
# encoding: "{{ DEFAULT_ENCODING }}"
# <<: *default_connection
# database_users:
# migrate:
# state: "present"
# password: "user-with-ddl-privs"
# host: "%"
# privileges:
# - "reports.*:SELECT,INSERT,UPDATE,DELETE,ALTER,CREATE,DROP,INDEX"
# - "wwc.*:SELECT,INSERT,UPDATE,DELETE,ALTER,CREATE,DROP,INDEX"
# <<: *default_connection
# runtime:
# state: "present"
# password: "user-with-dml-privs"
# host: "%"
# privileges:
# - "reports.*:SELECT"
# - "wwc.*:SELECT,INSERT,UPDATE,DELETE"
# <<: *default_connection
- name: Create databases and users
hosts: all
connection: local
gather_facts: False
tasks:
# Install the required library; currently this needs to be available
# to the system python.
- name: install python mysqldb module
pip: name={{item}} state=present
sudo: yes
with_items:
- MySQL-python
- name: create mysql databases
mysql_db: >
db={{ item.key }}
state={{ item.value.state }}
encoding={{ item.value.encoding }}
login_host={{ item.value.login_host }}
login_user={{ item.value.login_user }}
login_password={{ item.value.login_password }}
with_dict: databases
- name: create mysql users and assign privileges
mysql_user: >
name="{{ item.key }}"
priv="{{ '/'.join(item.value.privileges) }}"
password="{{ item.value.password }}"
host={{ item.value.host }}
login_host={{ item.value.login_host }}
login_user={{ item.value.login_user }}
login_password={{ item.value.login_password }}
append_privs=yes
with_dict: database_users
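The mysql_user task above joins each user's privilege entries with "/" to build the priv string. With the runtime user's privileges from the example db.yml header, it resolves like this:

# Privileges from the example above, joined the way the play does it.
privileges = [
    "reports.*:SELECT",
    "wwc.*:SELECT,INSERT,UPDATE,DELETE",
]
print('/'.join(privileges))
# reports.*:SELECT/wwc.*:SELECT,INSERT,UPDATE,DELETE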
......@@ -16,14 +16,16 @@
- xqueue
- xserver
- certs
- analytics-api
nginx_default_sites:
- lms
- edxlocal
- role: edxlocal
tags: edxlocal
- mongo
- { role: 'edxapp', celery_worker: True }
- edxapp
- role: demo
tags: ['demo']
tags: demo
- { role: 'rabbitmq', rabbitmq_ip: '127.0.0.1' }
- oraclejdk
- elasticsearch
......@@ -33,6 +35,7 @@
- ora
- certs
- edx_ansible
- analytics-api
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
......
......@@ -4,12 +4,12 @@
gather_facts: False
vars:
keypair: continuous-integration
instance_type: m1.medium
security_group: sandbox
instance_type: t2.medium
security_group: sandbox-vpc
# ubuntu 12.04
ami: ami-d0f89fb9
ami: ami-f478849c
region: us-east-1
zone: us-east-1b
zone: us-east-1c
instance_tags:
environment: sandbox
github_username: temp
......@@ -21,6 +21,7 @@
dns_zone: m.sandbox.edx.org
name_tag: sandbox-temp
elb: false
vpc_subnet_id: subnet-cd867aba
roles:
- role: launch_ec2
keypair: "{{ keypair }}"
......@@ -33,6 +34,8 @@
dns_name: "{{ dns_name }}"
dns_zone: "{{ dns_zone }}"
zone: "{{ zone }}"
vpc_subnet_id: "{{ vpc_subnet_id }}"
assign_public_ip: yes
terminate_instance: true
instance_profile_name: sandbox
......
......@@ -9,7 +9,7 @@
- name: syncdb and migrate
shell: >
chdir={{ edxapp_code_dir }}
python manage.py {{ item }} migrate --noinput --settings=aws_migrate {{ db_dry_run }}
python manage.py {{ item }} syncdb --migrate --noinput --settings=aws_migrate {{ db_dry_run }}
environment:
DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}"
DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
......
......@@ -22,6 +22,7 @@
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
wait_timeout: 60
sudo: False
when: elb_pre_post
roles:
......@@ -38,6 +39,7 @@
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
sudo: False
when: elb_pre_post
# Configure an admin instance with jenkins and asgard.
# Configure an instance with the admin jenkins.
- name: Configure instance(s)
hosts: all
sudo: True
gather_facts: True
roles:
- aws
- edx_ansible
- user
- jenkins_admin
- hotg
- alton
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
# Configure a Jenkins master instance
# Configure a Jenkins master instance for testeng
# This has the Jenkins Java app, but none of the requirements
# to run the tests.
......@@ -8,6 +8,9 @@
gather_facts: True
vars:
COMMON_DATA_DIR: "/mnt"
COMMON_ENABLE_DATADOG: True
roles:
- common
- role: datadog
when: COMMON_ENABLE_DATADOG
- jenkins_master
- name: Deploy MongoDB
hosts: all
sudo: True
gather_facts: True
roles:
- mongo
- mongo_mms
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
......@@ -29,6 +29,8 @@
notify:
- "restart edxapp"
- "restart workers"
tags:
- deploy
- name: syncdb and migrate
shell: >
......@@ -40,6 +42,8 @@
notify:
- "restart edxapp"
- "restart workers"
tags:
- deploy
handlers:
- name: restart edxapp
......
......@@ -24,6 +24,7 @@
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
wait_timeout: 60
sudo: False
when: elb_pre_post
roles:
......@@ -39,6 +40,7 @@
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
sudo: False
when: elb_pre_post
# A simple utility play to remove a public key from the authorized key
# file for the ubuntu user
# You must pass in the entire line that you are adding
- hosts: all
vars:
# Number of instances to operate on at a time
serial_count: 1
owner: ubuntu
keyfile: "/home/{{ owner }}/.ssh/authorized_keys"
serial: "{{ serial_count }}"
tasks:
- fail: msg="You must pass in a public_key"
when: public_key is not defined
- fail: msg="public does not exist in secrets"
when: ubuntu_public_keys[public_key] is not defined
- command: mktemp
register: mktemp
# This command will fail if grep returns zero lines, which prevents
# the last key from being removed
- shell: >
grep -Fv '{{ ubuntu_public_keys[public_key] }}' {{ keyfile }} > {{ mktemp.stdout }}
- shell: >
while read line; do ssh-keygen -lf /dev/stdin <<<$line; done <{{ mktemp.stdout }}
executable=/bin/bash
register: keycheck
- fail: msg="public key check failed!"
when: keycheck.stderr != ""
- command: cp {{ mktemp.stdout }} {{ keyfile }}
- file: >
path={{ keyfile }}
owner={{ owner }}
mode=0600
- file: >
path={{ mktemp.stdout }}
state=absent
- shell: wc -l < {{ keyfile }}
register: line_count
- fail: msg="There should only be one line in ubuntu's authorized_keys"
when: line_count.stdout|int != 1
- name: Deploy snort IDS
hosts: all
sudo: True
gather_facts: True
roles:
- snort
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
......@@ -19,6 +19,7 @@
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
wait_timeout: 60
sudo: False
when: elb_pre_post
tasks:
......@@ -33,6 +34,7 @@
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
sudo: False
when: elb_pre_post
# ansible-playbook -c ssh -vvvv --user=ubuntu -i ec2.py deployer.yml -e "@/path/to/secure/ansible/vars/edx_admin.yml" --limit="tag_aws_cloudformation_stack-name_<admin_stack_name>"
# You will need to create a gh_users.yml that contains the github names of users that should have login access to the machines.
# Setup user login on the bastion
- name: Configure Bastion
hosts: tag_play_bastion
sudo: True
gather_facts: False
roles:
- aws
# ansible-playbook -vvv -c ssh -i admin_url, vpc_admin.yml -e "@path_to_common_overrides" -e "@path_to_deployment_specific_overrides"
# Configure an admin instance with jenkins and asgard.
- name: Configure instance(s)
hosts: tag_play_admin
hosts: all
sudo: True
gather_facts: True
roles:
- aws
- edx_ansible
- jenkins_admin
- hotg
- newrelic
- aws
- edx_ansible
- user
- jenkins_admin
- hotg
- alton
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
......@@ -21,6 +21,7 @@
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
wait_timeout: 60
sudo: False
when: elb_pre_post
roles:
......@@ -45,6 +46,7 @@
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
wait_timeout: 60
with_items: ec2_elbs
sudo: False
when: elb_pre_post
......@@ -25,6 +25,7 @@ ALTON_HANDLE: 'alton'
ALTON_REDIS_URL: 'redis://fakeuser:redispassword@localhost:6379'
ALTON_HTTPSERVER_PORT: '8081'
ALTON_WORLD_WEATHER_KEY: !!null
ALTON_AWS_CREDENTIALS: !!null
# Needed if you want to build AMIs from alton.
ALTON_JENKINS_URL: !!null
......@@ -55,6 +56,7 @@ alton_environment:
WILL_HTTPSERVER_PORT: "{{ ALTON_HTTPSERVER_PORT }}"
WORLD_WEATHER_ONLINE_KEY: "{{ ALTON_WORLD_WEATHER_KEY }}"
JENKINS_URL: "{{ ALTON_JENKINS_URL }}"
BOTO_CONFIG: "{{ alton_app_dir }}/.boto"
#
# OS packages
......
- name: configure the boto profiles for alton
template: >
src="boto.j2"
dest="{{ alton_app_dir }}/.boto"
owner="{{ alton_user }}"
group="{{ common_web_user }}"
mode="0640"
notify: restart alton
- name: checkout the code
git: >
dest="{{ alton_code_dir }}" repo="{{ alton_source_repo }}"
version="{{ alton_version }}" accept_hostkey=yes
sudo_user: "{{ alton_user }}"
register: alton_checkout
notify: restart alton
- name: install the requirements
......@@ -55,3 +65,5 @@
state=started
when: not disable_edx_services
- include: tag_ec2.yml tags=deploy
when: COMMON_TAG_EC2_INSTANCE
---
- name: get instance information
action: ec2_facts
tags:
- deploy
- name: tag instance
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:alton" : "{{ alton_source_repo }} {{ alton_checkout.after |truncate(7,True,'')}}"
when: alton_checkout.after is defined
tags:
- deploy
{% for deployment, creds in ALTON_AWS_CREDENTIALS.iteritems() %}
[profile {{deployment}}]
aws_access_key_id = {{ creds.access_id }}
aws_secret_access_key = {{ creds.secret_key }}
{% endfor %}
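A minimal sketch of what this template renders to, using jinja2 directly with a made-up credentials mapping (the template itself uses iteritems because the role runs under Python 2; items() is the equivalent here):

from jinja2 import Template

template = Template(
    "{% for deployment, creds in ALTON_AWS_CREDENTIALS.items() %}"
    "[profile {{ deployment }}]\n"
    "aws_access_key_id = {{ creds.access_id }}\n"
    "aws_secret_access_key = {{ creds.secret_key }}\n"
    "{% endfor %}"
)
print(template.render(ALTON_AWS_CREDENTIALS={
    'edge': {'access_id': 'AKIAEXAMPLE', 'secret_key': 'example-secret'},
}))
# [profile edge]
# aws_access_key_id = AKIAEXAMPLE
# aws_secret_access_key = example-secret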
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role analytics-api
#
ANALYTICS_API_GIT_IDENTITY: !!null
# depends upon Newrelic being enabled via COMMON_ENABLE_NEWRELIC
# and a key being provided via NEWRELIC_LICENSE_KEY
ANALYTICS_API_NEWRELIC_APPNAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-analytics-api"
ANALYTICS_API_PIP_EXTRA_ARGS: "-i {{ COMMON_PYPI_MIRROR_URL }}"
ANALYTICS_API_NGINX_PORT: "18100"
ANALYTICS_API_VERSION: "master"
# Default dummy user, override this!!
ANALYTICS_API_USERS:
"dummy-api-user": "changeme"
ANALYTICS_API_CONFIG:
ANALYTICS_DATABASE: 'reports'
SECRET_KEY: 'Your secret key here'
TIME_ZONE: 'America/New_York'
LANGUAGE_CODE: 'en-us'
# email config
EMAIL_HOST: 'smtp.example.com'
EMAIL_HOST_PASSWORD: ""
EMAIL_HOST_USER: ""
EMAIL_PORT: 587
API_AUTH_TOKEN: 'put-your-api-token-here'
STATICFILES_DIRS: []
STATIC_ROOT: "{{ COMMON_DATA_DIR }}/{{ analytics_api_service_name }}/staticfiles"
# db config
DATABASE_OPTIONS:
connect_timeout: 10
DATABASES:
# rw user
default:
ENGINE: 'django.db.backends.mysql'
NAME: 'analytics-api'
USER: 'api001'
PASSWORD: 'password'
HOST: 'localhost'
PORT: '3306'
# read-only user
reports:
ENGINE: 'django.db.backends.mysql'
NAME: 'reports'
USER: 'reports001'
PASSWORD: 'password'
HOST: 'localhost'
PORT: '3306'
ANALYTICS_API_GUNICORN_WORKERS: "2"
#
# vars are namespaced with the module name.
#
analytics_api_environment:
DJANGO_SETTINGS_MODULE: "analyticsdataserver.settings.production"
ANALYTICS_API_CFG: "{{ COMMON_CFG_DIR }}/{{ analytics_api_service_name }}.yaml"
analytics_api_role_name: "analytics-api"
analytics_api_service_name: "analytics-api"
analytics_api_user: "analytics-api"
analytics_api_app_dir: "{{ COMMON_APP_DIR }}/{{ analytics_api_service_name }}"
analytics_api_home: "{{ COMMON_APP_DIR }}/{{ analytics_api_service_name }}"
analytics_api_venv_base: "{{ analytics_api_home }}/venvs"
analytics_api_venv_dir: "{{ analytics_api_venv_base }}/{{ analytics_api_service_name }}"
analytics_api_venv_bin: "{{ analytics_api_venv_dir }}/bin"
analytics_api_code_dir: "{{ analytics_api_app_dir }}/edx-analytics-data-api"
analytics_api_conf_dir: "{{ analytics_api_home }}"
analytics_api_gunicorn_host: "127.0.0.1"
analytics_api_gunicorn_port: "8100"
analytics_api_gunicorn_timeout: "300"
analytics_api_django_settings: "production"
analytics_api_source_repo: "git@{{ COMMON_GIT_MIRROR }}:edx/edx-analytics-data-api"
analytics_api_git_ssh_opts: "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i {{ analytics_api_git_identity_file }}"
analytics_api_git_identity_file: "{{ analytics_api_app_dir }}/git-identity"
analytics_api_log_dir: "{{ COMMON_LOG_DIR }}/{{ analytics_api_service_name }}"
analytics_api_requirements_base: "{{ analytics_api_code_dir }}/requirements"
analytics_api_requirements:
- base.txt
- production.txt
- optional.txt
#
# OS packages
#
analytics_api_debian_pkgs:
- 'libmysqlclient-dev'
analytics_api_redhat_pkgs: []
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role analytics-api
#
# Overview:
#
#
- name: "restart the analytics service"
supervisorctl_local: >
name={{ analytics_api_service_name }}
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
when: not disable_edx_services
\ No newline at end of file
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
# Role includes for role analytics-api
#
# Example:
#
# dependencies:
# - {
# role: my_role
# my_role_var0: "foo"
# my_role_var1: "bar"
# }
dependencies:
- role: edx_service
edx_role_name: "{{ analytics_api_role_name }}"
edx_service_name: "{{ analytics_api_service_name }}"
- supervisor
---
- name: install read-only ssh key
copy: >
content="{{ ANALYTICS_API_GIT_IDENTITY }}" dest={{ analytics_api_git_identity_file }}
owner={{ analytics_api_user }} group={{ analytics_api_user }} mode=0600
- name: setup the analytics-api env file
template: >
src="edx/app/analytics-api/analytics_api_env.j2"
dest="{{ analytics_api_app_dir }}/analytics_api_env"
owner={{ analytics_api_user }}
group={{ analytics_api_user }}
mode=0644
- name: checkout code
git: >
dest={{ analytics_api_code_dir }} repo={{ analytics_api_source_repo }} version={{ ANALYTICS_API_VERSION }}
accept_hostkey=yes
ssh_opts="{{ analytics_api_git_ssh_opts }}"
register: analytics_api_code_checkout
notify: "restart the analytics service"
sudo_user: "{{ analytics_api_user }}"
- name: write out app config file
template: >
src=edx/app/analytics-api/analytics-api.yaml.j2
dest={{ COMMON_CFG_DIR }}/{{ analytics_api_service_name }}.yaml
mode=0644 owner={{ analytics_api_user }} group={{ analytics_api_user }}
notify: restart the analytics service
- name: install application requirements
pip: >
requirements="{{ analytics_api_requirements_base }}/{{ item }}"
virtualenv="{{ analytics_api_venv_dir }}" state=present
sudo_user: "{{ analytics_api_user }}"
notify: restart the analytics service
with_items: analytics_api_requirements
- name: syncdb and migrate
shell: >
chdir={{ analytics_api_code_dir }}
DB_MIGRATION_USER={{ COMMON_MYSQL_MIGRATE_USER }}
DB_MIGRATION_PASS={{ COMMON_MYSQL_MIGRATE_PASS }}
{{ analytics_api_venv_bin }}/python ./manage.py syncdb --migrate --noinput
sudo_user: "{{ analytics_api_user }}"
environment: "{{ analytics_api_environment }}"
when: migrate_db is defined and migrate_db|lower == "yes"
- name: run collectstatic
shell: >
chdir={{ analytics_api_code_dir }}
{{ analytics_api_venv_bin }}/python manage.py collectstatic --noinput
sudo_user: "{{ analytics_api_user }}"
environment: "{{ analytics_api_environment }}"
- name: create api users
shell: >
chdir={{ analytics_api_code_dir }}
{{ analytics_api_venv_bin }}/python manage.py set_api_key {{ item.key }} {{ item.value }}
sudo_user: "{{ analytics_api_user }}"
environment: "{{ analytics_api_environment }}"
with_dict: ANALYTICS_API_USERS
- name: write out the supervisor wrapper
template: >
src=edx/app/analytics-api/analytics-api.sh.j2
dest={{ analytics_api_app_dir }}/{{ analytics_api_service_name }}.sh
mode=0650 owner={{ supervisor_user }} group={{ common_web_user }}
notify: restart the analytics service
- name: write supervisord config
template: >
src=edx/app/supervisor/conf.d.available/analytics-api.conf.j2
dest="{{ supervisor_available_dir }}/{{ analytics_api_service_name }}.conf"
owner={{ supervisor_user }} group={{ common_web_user }} mode=0644
notify: restart the analytics service
- name: enable supervisor script
file: >
src={{ supervisor_available_dir }}/{{ analytics_api_service_name }}.conf
dest={{ supervisor_cfg_dir }}/{{ analytics_api_service_name }}.conf
state=link
force=yes
notify: restart the analytics service
when: not disable_edx_services
- name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
when: not disable_edx_services
- name: create symlinks from the venv bin dir
file: >
src="{{ analytics_api_venv_bin }}/{{ item }}"
dest="{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.analytics-api"
state=link
with_items:
- python
- pip
- django-admin.py
- name: create symlinks from the repo dir
file: >
src="{{ analytics_api_code_dir }}/{{ item }}"
dest="{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.analytics-api"
state=link
with_items:
- manage.py
- name: remove read-only ssh key for the analytics repo
file: path={{ analytics_api_git_identity_file }} state=absent
- include: tag_ec2.yml tags=deploy
when: COMMON_TAG_EC2_INSTANCE
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
# Tasks for role analytics-api
#
# Overview:
#
# Install the Analytics Data API server, a Python
# Django application that runs under gunicorn
#
# Dependencies:
#
# Example play:
# - name: Deploy Analytics API
# hosts: all
# sudo: True
# gather_facts: True
# vars:
# ENABLE_DATADOG: False
# ENABLE_SPLUNKFORWARDER: False
# ENABLE_NEWRELIC: False
# roles:
# - aws
# - analytics-api
#
# ansible-playbook -i 'api.example.com,' ./analyticsapi.yml -e@/ansible/vars/deployment.yml -e@/ansible/vars/env-deployment.yml
#
- fail: msg="You must provide an private key for the analytics repo"
when: not ANALYTICS_API_GIT_IDENTITY
- include: deploy.yml tags=deploy
---
- name: get instance information
action: ec2_facts
- name: tag instance
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:analytics_api" : "{{ analytics_api_source_repo }} {{ analytics_api_code_checkout.after |truncate(7,True,'')}}"
when: analytics_api_code_checkout.after is defined
#!/usr/bin/env bash
# {{ ansible_managed }}
{% if COMMON_ENABLE_NEWRELIC %}
{% set executable = analytics_api_venv_bin + '/newrelic-admin run-program ' + analytics_api_venv_bin + '/gunicorn' %}
{% else %}
{% set executable = analytics_api_venv_bin + '/gunicorn' %}
{% endif %}
{% if COMMON_ENABLE_NEWRELIC %}
export NEW_RELIC_APP_NAME="{{ ANALYTICS_API_NEWRELIC_APPNAME }}"
export NEW_RELIC_LICENSE_KEY="{{ NEWRELIC_LICENSE_KEY }}"
{% endif -%}
source {{ analytics_api_app_dir }}/analytics_api_env
{{ executable }} --pythonpath={{ analytics_api_code_dir }} -b {{ analytics_api_gunicorn_host }}:{{ analytics_api_gunicorn_port }} -w {{ ANALYTICS_API_GUNICORN_WORKERS }} --timeout={{ analytics_api_gunicorn_timeout }} analyticsdataserver.wsgi:application
---
# {{ ansible_managed }}
{{ ANALYTICS_API_CONFIG | to_nice_yaml }}
# {{ ansible_managed }}
{% for name,value in analytics_api_environment.items() -%}
{%- if value -%}
export {{ name }}="{{ value }}"
{% endif %}
{%- endfor %}
# {{ ansible_managed }}
[program:{{ analytics_api_service_name }}]
command={{ analytics_api_app_dir }}/analytics-api.sh
user={{ common_web_user }}
directory={{ analytics_api_code_dir }}
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
killasgroup=true
stopasgroup=true
......@@ -18,7 +18,7 @@ AS_DB_ANALYTICS_HOST: 'localhost'
AS_SERVER_PORT: '9000'
AS_ENV_LANG: 'en_US.UTF-8'
AS_LOG_LEVEL: 'INFO'
AS_WORKERS: '4'
AS_WORKERS: '2'
# add public keys to enable the automator user
# for running manage.py commands
......
......@@ -18,7 +18,7 @@ ANALYTICS_DB_ANALYTICS_HOST: 'localhost'
ANALYTICS_SERVER_PORT: '9000'
ANALYTICS_ENV_LANG: 'en_US.UTF-8'
ANALYTICS_LOG_LEVEL: 'INFO'
ANALYTICS_WORKERS: '4'
ANALYTICS_WORKERS: '2'
DATABASES:
default: &databases_default
......
---
apache_port: 80
apache_ports:
- 80
apache_sites:
- lms
apache_template_dir: '.'
---
- name: restart apache
service: name=apache2 state=restarted
tags: deploy
---
dependencies:
- common
# Requires nginx package
---
- name: Copying apache config {{ site_name }}
template: src={{ item }} dest=/etc/apache2/sites-available/{{ site_name }}
first_available_file:
- "{{ local_dir }}/apache/templates/{{ site_name }}.j2"
# seems like paths in first_available_file must be relative to the playbooks dir
- "roles/apache/templates/{{ site_name }}.j2"
notify: restart apache
when: apache_role_run is defined
tags:
- apache
- update
- name: Creating apache2 config link {{ site_name }}
file: src=/etc/apache2/sites-available/{{ site_name }} dest=/etc/apache2/sites-enabled/{{ site_name }} state={{ state }} owner=root group=root
notify: restart apache
when: apache_role_run is defined
tags:
- apache
- update
#Installs apache and runs the lms wsgi
# Installs apache and runs the lms wsgi by default
---
- name: Installs apache and mod_wsgi from apt
......@@ -7,30 +7,29 @@
- apache2
- libapache2-mod-wsgi
notify: restart apache
tags:
- apache
- install
- name: disables default site
command: a2dissite 000-default
notify: restart apache
tags:
- apache
- install
- name: rewrite apache ports conf
template: dest=/etc/apache2/ports.conf src=ports.conf.j2 owner=root group=root
notify: restart apache
tags:
- apache
- install
- name: Register the fact that apache role has run
command: echo True
register: apache_role_run
tags:
- apache
- install
- debug: msg={{ apache_sites }}
- name: Copying apache configs for {{ apache_sites }}
template: >
src={{ apache_template_dir }}/{{ item }}.j2
dest=/etc/apache2/sites-available/{{ item }}
owner=root group={{ common_web_user }} mode=0640
notify: restart apache
with_items: apache_sites
- include: apache_site.yml state=link site_name=lms
- name: Creating apache2 config links for {{ apache_sites }}
file: >
src=/etc/apache2/sites-available/{{ item }}
dest=/etc/apache2/sites-enabled/{{ item }}
state=link owner=root group=root
notify: restart apache
with_items: apache_sites
NameVirtualHost *:{{ apache_port }}
Listen {{ apache_port }}
{%- for port in apache_ports -%}
NameVirtualHost *:{{ port }}
Listen {{ port }}
{% endfor %}
......@@ -56,7 +56,7 @@ aws_debian_pkgs:
aws_pip_pkgs:
- https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz
- awscli
- boto==2.20.1
- boto==2.29.1
aws_redhat_pkgs: []
aws_s3cmd_version: s3cmd-1.5.0-beta1
......
......@@ -52,6 +52,7 @@
sudo_user: "{{ certs_user }}"
environment:
GIT_SSH: "{{ certs_git_ssh }}"
register: certs_checkout
notify: restart certs
- name: remove read-only ssh key for the certs repo
......@@ -96,4 +97,7 @@
- python
- pip
- include: tag_ec2.yml tags=deploy
when: COMMON_TAG_EC2_INSTANCE
- set_fact: certs_installed=true
---
- name: get instance information
action: ec2_facts
- name: tag instance
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:certs" : "{{ CERT_REPO }} {{ certs_checkout.after|truncate(7,True,'') }}"
when: certs_checkout.after is defined
......@@ -4,7 +4,7 @@
# where edX is installed
# Set global htpasswd credentials
COMMON_ENABLE_BASIC_AUTH: True
COMMON_ENABLE_BASIC_AUTH: False
COMMON_HTPASSWD_USER: edx
COMMON_HTPASSWD_PASS: edx
......@@ -44,17 +44,18 @@ COMMON_SSH_PASSWORD_AUTH: "no"
# the migrate user is granted table alter privs on all dbs
COMMON_MYSQL_READ_ONLY_USER: 'read_only'
COMMON_MYSQL_READ_ONLY_PASS: !!null
COMMON_MYSQL_READ_ONLY_PASS: 'password'
COMMON_MYSQL_ADMIN_USER: 'admin'
COMMON_MYSQL_ADMIN_PASS: !!null
COMMON_MYSQL_ADMIN_PASS: 'password'
COMMON_MYSQL_MIGRATE_USER: 'migrate'
COMMON_MYSQL_MIGRATE_PASS: !!null
COMMON_MYSQL_MIGRATE_PASS: 'password'
COMMON_MONGO_READ_ONLY_USER: 'read_only'
COMMON_MONGO_READ_ONLY_PASS: !!null
COMMON_ENABLE_DATADOG: False
COMMON_ENABLE_SPLUNKFORWARDER: False
COMMON_ENABLE_NEWRELIC: False
COMMON_TAG_EC2_INSTANCE: False
common_debian_pkgs:
- ntp
- ack-grep
......
......@@ -60,17 +60,17 @@
owner=root group=root mode=644
notify: restart rsyslogd
- name: Install logrotate configuration for edX
template: >
dest=/etc/logrotate.d/edx-services
src=etc/logrotate.d/edx_logrotate.j2
owner=root group=root mode=644
# This is in common to keep all logrotation config
# in the same role
- name: Create hourly subdirectory in logrotate.d
file: path=/etc/logrotate.d/hourly state=directory
- name: Install logrotate configuration for edX
template: >
dest=/etc/logrotate.d/hourly/edx-services
src=etc/logrotate.d/hourly/edx_logrotate.j2
owner=root group=root mode=644
- name: Install logrotate configuration for tracking file
template: >
dest=/etc/logrotate.d/hourly/tracking.log
......@@ -83,10 +83,6 @@
src=etc/cron.hourly/logrotate
owner=root group=root mode=555
# This can be removed after the next release of edX
- name: Remove old tracking.log config from /etc/logrotate.d
file: path=/etc/logrotate.d/tracking.log state=absent
- name: update /etc/hosts
template: src=hosts.j2 dest=/etc/hosts
when: COMMON_HOSTNAME
......
*******************************************************************
* *
* _ __ __ *
* _ _| |\ \/ / *
* / -_) _` | > < *
* \___\__,_|/_/\_\ *
......
......@@ -4,9 +4,13 @@
copytruncate
delaycompress
dateext
dateformat -%Y%m%d-%s
missingok
notifempty
daily
rotate 90
size 1M
postrotate
/usr/bin/killall -HUP rsyslogd
endscript
}
......@@ -40,5 +40,5 @@ edx_ansible_user: "edx-ansible"
edx_ansible_source_repo: https://github.com/edx/configuration.git
edx_ansible_requirements_file: "{{ edx_ansible_code_dir }}/requirements.txt"
# edX configuration repo
configuration_version: master
configuration_version: release
edx_ansible_var_file: "{{ edx_ansible_app_dir }}/server-vars.yml"
......@@ -12,7 +12,7 @@ IFS=","
-v add verbosity to edx_ansible run
-h this
<repo> - must be one of edx-platform, xqueue, cs_comments_service, xserver, ease, edx-ora, configuration, read-only-certificate-code
<repo> - must be one of edx-platform, xqueue, cs_comments_service, xserver, ease, edx-ora, configuration, read-only-certificate-code, edx-analytics-data-api
<version> - can be a commit or tag
EO
......@@ -49,6 +49,8 @@ repos_to_cmd["ease"]="$edx_ansible_cmd discern.yml -e 'discern_ease_version=$2'
repos_to_cmd["edx-ora"]="$edx_ansible_cmd ora.yml -e 'ora_version=$2'"
repos_to_cmd["configuration"]="$edx_ansible_cmd edx_ansible.yml -e 'configuration_version=$2'"
repos_to_cmd["read-only-certificate-code"]="$edx_ansible_cmd certs.yml -e 'certs_version=$2'"
repos_to_cmd["edx-analytics-data-api"]="$edx_ansible_cmd analyticsapi.yml -e 'ANALYTICS_API_VERSION=$2'"
repos_to_cmd["edx-ora2"]="$edx_ansible_cmd ora2.yml -e 'ora2_version=$2'"
if [[ -z $1 || -z $2 ]]; then
......
......@@ -10,7 +10,7 @@
#
#
# Tasks for role edx_service
#
#
# Overview:
#
# This role performs the repetitive tasks that most edX roles
......@@ -43,14 +43,35 @@
with_items:
- "{{ COMMON_APP_DIR }}/{{ edx_service_name }}"
- "{{ COMMON_APP_DIR }}/{{ edx_service_name }}/venvs"
- "{{ COMMON_APP_DIR }}/{{ edx_service_name }}/data"
- name: create edx_service data and staticfiles dir
file: >
path="{{ item }}"
state=directory
owner="{{ edx_service_name }}"
group="{{ common_web_group }}"
with_items:
- "{{ COMMON_DATA_DIR }}/{{ edx_service_name }}/data"
- "{{ COMMON_DATA_DIR }}/{{ edx_service_name }}/staticfiles"
- name: create edx_service log dir
file: >
path="{{ item }}"
state=directory
owner="syslog"
group="syslog"
with_items:
- "{{ COMMON_LOG_DIR }}/{{ edx_service_name }}"
# Replace dashes with underscores to support roles that use
# dashes (the role vars will contain underscores)
- name: install a bunch of system packages on which edx_service relies
apt: pkg={{ item }} state=present
with_items: "{{ edx_service_name }}_debian_pkgs"
with_items: "{{ edx_service_name.replace('-', '_') }}_debian_pkgs"
when: ansible_distribution in common_debian_variants
- name: install a bunch of system packages on which edx_service relies
yum: pkg={{ item }} state=present
with_items: "{{ edx_service_name }}_redhat_pkgs"
when: ansible_distribution in common_redhat_variants
\ No newline at end of file
with_items: "{{ edx_service_name.replace('-', '_') }}_redhat_pkgs"
when: ansible_distribution in common_redhat_variants
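What the replace buys us for a dashed role like analytics-api: the package-list var lookup lands on the underscored name that its defaults actually define (analytics_api_debian_pkgs above). A one-liner check:

edx_service_name = "analytics-api"
# with_items resolves the var name by swapping dashes for underscores.
print("{}_debian_pkgs".format(edx_service_name.replace('-', '_')))
# analytics_api_debian_pkgs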
......@@ -29,12 +29,14 @@
# Do A Checkout
- name: checkout edx-platform repo into {{edxapp_code_dir}}
git: >
dest={{edxapp_code_dir}} repo={{edx_platform_repo}} version={{edx_platform_version}}
dest={{edxapp_code_dir}}
repo={{edx_platform_repo}}
version={{edx_platform_version}}
accept_hostkey=yes
register: chkout
sudo_user: "{{ edxapp_user }}"
environment:
GIT_SSH: "{{ edxapp_git_ssh }}"
register: edxapp_platform_checkout
notify:
- "restart edxapp"
- "restart edxapp_workers"
......@@ -48,12 +50,15 @@
- name: checkout theme
git: >
dest={{ edxapp_app_dir }}/themes/{{edxapp_theme_name}} repo={{edxapp_theme_source_repo}} version={{edxapp_theme_version}}
dest={{ edxapp_app_dir }}/themes/{{edxapp_theme_name}}
repo={{edxapp_theme_source_repo}}
version={{edxapp_theme_version}}
accept_hostkey=yes
when: edxapp_theme_name != ''
sudo_user: "{{ edxapp_user }}"
environment:
GIT_SSH: "{{ edxapp_git_ssh }}"
register: edxapp_theme_checkout
notify:
- "restart edxapp"
- "restart edxapp_workers"
......@@ -183,6 +188,25 @@
- "restart edxapp_workers"
when: not inst.stat.exists or new.stat.md5 != inst.stat.md5
# Install the python custom requirements into {{ edxapp_venv_dir }}
- stat: path="{{ custom_requirements_file }}"
register: custom_requirements
sudo_user: "{{ edxapp_user }}"
- name: install python custom-requirements
pip: >
requirements="{{ custom_requirements_file }}"
virtualenv="{{ edxapp_venv_dir }}"
state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
when: custom_requirements.stat.exists and new.stat.md5 != inst.stat.md5
# Install the final python modules into {{ edxapp_venv_dir }}
- name: install python post-post requirements
# Need to use shell rather than pip so that we can maintain the context of our current working directory; some
......@@ -248,33 +272,6 @@
- "restart edxapp"
- "restart edxapp_workers"
# The next few tasks install xml courses.
# Install the xml courses from an s3 bucket
- name: get s3 one time url
s3: >
bucket="{{ EDXAPP_XML_S3_BUCKET }}"
object="{{ EDXAPP_XML_S3_KEY }}"
mode="geturl"
expiration=300
when: not EDXAPP_XML_FROM_GIT and EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
register: s3_one_time_url
- name: download from one time url
get_url: url="{{ s3_one_time_url.url }}" dest="/tmp/{{ EDXAPP_XML_S3_KEY|basename }}"
when: not EDXAPP_XML_FROM_GIT and EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
register: download_xml_s3
- name: unzip the data to the data dir
shell: >
tar xzf /tmp/{{ EDXAPP_XML_S3_KEY|basename }}
chdir="{{ edxapp_data_dir }}"
when: download_xml_s3.changed
- include: xml.yml
tags: deploy
when: EDXAPP_XML_FROM_GIT
# The next few tasks set up the python code sandbox
# need to disable this profile, otherwise the pip inside the sandbox venv has no permissions
......@@ -355,6 +352,32 @@
- "restart edxapp"
- "restart edxapp_workers"
# The next few tasks install xml courses.
# Install the xml courses from an s3 bucket
- name: get s3 one time url
s3: >
bucket="{{ EDXAPP_XML_S3_BUCKET }}"
object="{{ EDXAPP_XML_S3_KEY }}"
mode="geturl"
expiration=30
when: not EDXAPP_XML_FROM_GIT and EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
register: s3_one_time_url
- name: download from one time url
get_url:
url="{{ s3_one_time_url.url }}"
dest="{{ edxapp_data_dir }}/{{ EDXAPP_XML_S3_KEY|basename }}"
mode=0600
when: not EDXAPP_XML_FROM_GIT and EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
register: download_xml_s3
- name: unzip the data to the data dir
shell: >
tar xzf {{ edxapp_data_dir }}/{{ EDXAPP_XML_S3_KEY|basename }}
chdir="{{ edxapp_data_dir }}"
when: download_xml_s3.changed
# creates the supervisor jobs for the
# service variants configured, runs
# gather_assets and db migrations
......@@ -363,6 +386,10 @@
- service_variant_config
- deploy
- include: xml.yml
tags: deploy
when: EDXAPP_XML_FROM_GIT
# call supervisorctl update. this reloads
# the supervisorctl config and restarts
# the services if any of the configurations
......@@ -417,4 +444,7 @@
file: path={{ edxapp_git_identity }} state=absent
when: EDXAPP_USE_GIT_IDENTITY
- include: tag_ec2.yml tags=deploy
when: COMMON_TAG_EC2_INSTANCE
- set_fact: edxapp_installed=true
......@@ -47,8 +47,12 @@
- "{{ edxapp_course_data_dir }}"
- "{{ edxapp_upload_dir }}"
# adding chris-lea nodejs repo
- name: add ppas for current versions of nodejs
apt_repository: repo="{{ edxapp_chrislea_ppa }}"
- name: install system packages on which LMS and CMS rely
apt: pkg={{','.join(edxapp_debian_pkgs)}} state=present
apt: pkg={{','.join(edxapp_debian_pkgs)}} state=present update_cache=yes
notify:
- "restart edxapp"
- "restart edxapp_workers"
......
......@@ -75,24 +75,6 @@
when: celery_worker is defined and not disable_edx_services
sudo_user: "{{ supervisor_user }}"
# Fake syncdb with migrate, only when fake_migrations is defined
# This overrides the database name to be the test database which
# the default application user has full write access to.
#
# This is run in cases where you want to test to see if migrations
# work without actually running them (when creating AMIs for example).
- name: syncdb and migrate
shell: >
chdir={{ edxapp_code_dir }}
{{ edxapp_venv_bin}}/python manage.py lms syncdb --migrate --noinput --settings=aws_migrate
when: fake_migrations is defined and migrate_db is defined and migrate_db|lower == "yes"
sudo_user: "{{ edxapp_user }}"
environment:
DB_MIGRATION_NAME: "{{ EDXAPP_TEST_MIGRATE_DB_NAME }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
# Syncdb with migrate when the migrate user is overridden in extra vars
- name: syncdb and migrate
shell: >
......@@ -107,54 +89,6 @@
- "restart edxapp"
- "restart edxapp_workers"
# Syncdb with migrate when the default migrate user is not set;
# in this case the EDXAPP_MYSQL_USER_MIGRATE user runs the migrations
- name: syncdb and migrate
shell: >
chdir={{ edxapp_code_dir }}
{{ edxapp_venv_bin}}/python manage.py lms syncdb --migrate --noinput --settings=aws_migrate
when: fake_migrations is not defined and migrate_db is defined and migrate_db|lower == "yes" and not COMMON_MYSQL_MIGRATE_PASS
environment:
DB_MIGRATION_USER: "{{ EDXAPP_MYSQL_USER_MIGRATE }}"
DB_MIGRATION_PASS: "{{ EDXAPP_MYSQL_PASSWORD_MIGRATE }}"
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
# Fake migrate, only when fake_migrations is defined
# This overrides the database name to be the test database which
# the default application user has full write access to
- name: db migrate
shell: >
chdir={{ edxapp_code_dir }}
{{ edxapp_venv_bin}}/python manage.py lms migrate --noinput --settings=aws_migrate
when: fake_migrations is defined and migrate_only is defined and migrate_only|lower == "yes"
sudo_user: "{{ edxapp_user }}"
environment:
DB_MIGRATION_NAME: "{{ EDXAPP_TEST_MIGRATE_DB_NAME }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
# Regular migrations
- name: db migrate
shell: >
chdir={{ edxapp_code_dir }}
{{ edxapp_venv_bin}}/python manage.py lms migrate --noinput --settings=aws_migrate
when: fake_migrations is not defined and migrate_only is defined and migrate_only|lower == "yes"
environment:
DB_MIGRATION_USER: "{{ EDXAPP_MYSQL_USER_MIGRATE }}"
DB_MIGRATION_PASS: "{{ EDXAPP_MYSQL_PASSWORD_MIGRATE }}"
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
# Gather assets using rake if possible
- name: gather {{ item }} static assets with rake
......
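# The gather-assets task above is truncated by the diff; a sketch of what
# such a task typically looks like (rake task name and variables assumed):
- name: gather {{ item }} static assets with rake
  shell: >
    chdir={{ edxapp_code_dir }}
    SERVICE_VARIANT={{ item }} rake {{ item }}:gather_assets
  sudo_user: "{{ edxapp_user }}"
  with_items: service_variants_enabled
  notify:
    - "restart edxapp"
    - "restart edxapp_workers"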
---
- name: get instance information
action: ec2_facts
- name: tag instance with edx_platform version
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:edx_platform" : "{{ edx_platform_repo }} {{ edxapp_platform_checkout.after|truncate(7,True,'') }}"
when: edxapp_platform_checkout.after is defined
- name: tag instance with edxapp theme version
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:edxapp_theme" : "{{ edxapp_theme_source_repo }} {{ edxapp_theme_checkout.after|truncate(7,True,'') }}"
when: edxapp_theme_checkout.after is defined
......@@ -20,9 +20,11 @@
shell: >
executable=/bin/bash
if [[ -d {{ edxapp_course_data_dir }}/{{ item.repo_name }}/static ]]; then
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }}/static {{ edxapp_course_static_dir }}/{{ item.repo_name }}
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }}/static {{ edxapp_course_static_dir }}/{{ item.repo_name }}
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }}/static {{ edxapp_course_static_dir }}/{{ item.course }}
else
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }} {{ edxapp_course_static_dir }}/{{ item.repo_name }}
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }} {{ edxapp_course_static_dir }}/{{ item.repo_name }}
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }} {{ edxapp_course_static_dir }}/{{ item.course }}
fi
with_items: EDXAPP_XML_COURSES
when: item.disposition == "on disk" or item.disposition == "no static import"
......@@ -56,13 +58,8 @@
with_items: EDXAPP_XML_COURSES
when: item.disposition == "import"
- name: delete .git repos
file: path="{{ edxapp_course_data_dir }}/{{ item.repo_name }}/.git" state=absent
with_items: EDXAPP_XML_COURSES
when: item.disposition == "on disk" or item.disposition == "no static import"
- name: create an archive of course data and course static dirs
shell: tar czf /tmp/static_course_content.tar.gz -C {{ edxapp_data_dir }} {{ edxapp_course_data_dir|basename }} {{ edxapp_course_static_dir|basename }}
shell: tar czf /tmp/static_course_content.tar.gz -C {{ edxapp_data_dir }} --exclude ".git" {{ edxapp_course_data_dir|basename }} {{ edxapp_course_static_dir|basename }}
- name: upload archive to s3
s3: >
......
{% if devstack %}
{{ edxapp_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:{{ edxapp_sandbox_venv_dir }}/bin/python
{{ edxapp_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:/bin/rm /tmp/codejail-*/tmp
{{ edxapp_user }} ALL=(ALL) NOPASSWD:/bin/kill
{{ edxapp_user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill
{% else %}
{{ common_web_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:{{ edxapp_sandbox_venv_dir }}/bin/python
{{ common_web_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:/bin/rm /tmp/codejail-*/tmp
{{ common_web_user }} ALL=(ALL) NOPASSWD:/bin/kill
{{ common_web_user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill
{% endif %}
{% do cms_auth_config.update(EDXAPP_AUTH_EXTRA) %}
{% for key, value in cms_auth_config.iteritems() %}
{% if value == 'None' %}
{% do cms_auth_config.update({key: None }) %}
{% endif %}
{% endfor %}
{{ cms_auth_config | to_nice_json }}
......@@ -6,11 +6,16 @@
{% set executable = edxapp_venv_dir + '/bin/gunicorn' %}
{% endif %}
{% if ansible_processor|length > 0 %}
{% if EDXAPP_WORKERS -%}
command={{ executable }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ EDXAPP_WORKERS.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
{% else -%}
{# This is for backwards compatibility; set workers explicitly using EDXAPP_WORKERS #}
{% if ansible_processor|length > 0 -%}
command={{ executable }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
{% else %}
{% else -%}
command={{ executable }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
{% endif %}
{% endif -%}
{% endif -%}
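{# Example override (hypothetical values), e.g. set in extra vars:
   EDXAPP_WORKERS: { cms: 2, lms: 4 }
   When EDXAPP_WORKERS is unset, the worker count falls back to
   ansible_processor|length * worker_core_mult.cms, as above. #}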
user={{ common_web_user }}
directory={{ edxapp_code_dir }}
......
{% do cms_env_config.update(EDXAPP_ENV_EXTRA) %}
{% if EDXAPP_UPDATE_STATIC_FILES_KEY %}
{%- do cms_env_config['CACHES']['staticfiles'].update({'KEY_PREFIX': edxapp_dynamic_cache_key}) %}
{% endif %}
{% for key, value in cms_env_config.iteritems() %}
{% if value == 'None' %}
{% do cms_env_config.update({key: None }) %}
{% endif %}
{% endfor %}
{{ cms_env_config | to_nice_json }}
{% do lms_auth_config.update(EDXAPP_AUTH_EXTRA) %}
{% for key, value in lms_auth_config.iteritems() %}
{% if value == 'None' %}
{% do lms_auth_config.update({key: None }) %}
{% endif %}
{% endfor %}
{{ lms_auth_config | to_nice_json }}
......@@ -6,11 +6,16 @@
{% set executable = edxapp_venv_dir + '/bin/gunicorn' %}
{% endif %}
{% if ansible_processor|length > 0 %}
{% if EDXAPP_WORKERS -%}
command={{ executable }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ EDXAPP_WORKERS.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% else -%}
{# This is for backwards compatibility; set workers explicitly using EDXAPP_WORKERS #}
{% if ansible_processor|length > 0 -%}
command={{ executable }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% else %}
{% else -%}
command={{ executable }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% endif %}
{% endif %}
user={{ common_web_user }}
directory={{ edxapp_code_dir }}
......
{% do lms_env_config.update(EDXAPP_ENV_EXTRA) %}
{% if EDXAPP_UPDATE_STATIC_FILES_KEY %}
{%- do lms_env_config['CACHES']['staticfiles'].update({'KEY_PREFIX': edxapp_dynamic_cache_key}) %}
{% endif %}
{% for key, value in lms_env_config.iteritems() %}
{% if value == 'None' %}
{% do lms_env_config.update({key: None }) %}
{% endif %}
{% endfor %}
{{ lms_env_config | to_nice_json }}
......@@ -13,17 +13,12 @@
- name: install packages needed for single server
apt: pkg={{','.join(edxlocal_debian_pkgs)}} install_recommends=yes state=present
- name: setup the migration db user
mysql_user: >
name={{ EDXAPP_MYSQL_USER_MIGRATE }}
password={{ EDXAPP_MYSQL_PASSWORD_MIGRATE}}
priv='{{EDXAPP_MYSQL_DB_NAME}}.*:ALL'
- name: setup the edxapp db user
mysql_user: >
name={{ EDXAPP_MYSQL_USER }}
password={{ EDXAPP_MYSQL_PASSWORD }}
priv='{{EDXAPP_MYSQL_DB_NAME}}.*:ALL'
when: EDXAPP_MYSQL_USER is defined
- name: create a database for edxapp
mysql_db: >
......@@ -31,26 +26,27 @@
state=present
encoding=utf8
when: EDXAPP_MYSQL_USER is defined
- name: setup the xqueue db user
mysql_user: >
name={{ XQUEUE_MYSQL_USER }}
password={{ XQUEUE_MYSQL_PASSWORD }}
priv='{{XQUEUE_MYSQL_DB_NAME}}.*:ALL'
when: XQUEUE_MYSQL_USER is defined and not disable_edx_services
when: XQUEUE_MYSQL_USER is defined
- name: create a database for xqueue
mysql_db: >
db=xqueue
state=present
encoding=utf8
when: XQUEUE_MYSQL_USER is defined and not disable_edx_services
when: XQUEUE_MYSQL_USER is defined
- name: setup the ora db user
mysql_user: >
name={{ ORA_MYSQL_USER }}
password={{ ORA_MYSQL_PASSWORD }}
priv='{{ORA_MYSQL_DB_NAME}}.*:ALL'
when: ORA_MYSQL_USER is defined
- name: create a database for ora
mysql_db: >
......@@ -59,20 +55,66 @@
encoding=utf8
when: ORA_MYSQL_USER is defined
- name: setup the discern db user
mysql_user: >
name={{ DISCERN_MYSQL_USER }}
password={{ DISCERN_MYSQL_PASSWORD }}
priv='{{DISCERN_MYSQL_DB_NAME}}.*:ALL'
when: DISCERN_MYSQL_USER is defined and not disable_edx_services
- name: create a database for discern
- name: create databases for analytics api
mysql_db: >
db=discern
db={{ item }}
state=present
encoding=utf8
when: DISCERN_MYSQL_USER is defined and not disable_edx_services
when: ANALYTICS_API_CONFIG is defined
with_items:
- "{{ ANALYTICS_API_CONFIG['DATABASES']['default']['NAME'] }}"
- "{{ ANALYTICS_API_CONFIG['DATABASES']['reports']['NAME'] }}"
- name: create api user for the analytics api
mysql_user: >
name=api001
password=password
priv='{{ ANALYTICS_API_CONFIG['DATABASES']['default']['NAME'] }}.*:ALL/reports.*:SELECT'
when: ANALYTICS_API_CONFIG is defined
- name: create read-only reports user for the analytics-api
mysql_user: >
name=reports001
password=password
priv='{{ ANALYTICS_API_CONFIG['DATABASES']['reports']['NAME'] }}.*:SELECT'
when: ANALYTICS_API_CONFIG is defined
- name: setup the migration db user
mysql_user: >
name={{ COMMON_MYSQL_MIGRATE_USER }}
password={{ COMMON_MYSQL_MIGRATE_PASS }}
priv='{{ item }}.*:ALL'
append_privs=yes
when: item != 'None'
with_items:
- "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
- "{{ XQUEUE_MYSQL_DB_NAME|default('None') }}"
- "{{ ORA_MYSQL_DB_NAME|default('None') }}"
- name: setup the migration db user for analytics
mysql_user: >
name={{ COMMON_MYSQL_MIGRATE_USER }}
password={{ COMMON_MYSQL_MIGRATE_PASS }}
priv='{{ item }}.*:ALL'
append_privs=yes
when: ANALYTICS_API_CONFIG is defined
with_items:
- "{{ ANALYTICS_API_CONFIG['DATABASES']['default']['NAME'] }}"
- "{{ ANALYTICS_API_CONFIG['DATABASES']['reports']['NAME'] }}"
- name: setup the read-only db user
mysql_user: >
name={{ COMMON_MYSQL_READ_ONLY_USER }}
password={{ COMMON_MYSQL_READ_ONLY_PASS }}
priv='*.*:SELECT'
- name: setup the admin db user
mysql_user: >
name={{ COMMON_MYSQL_ADMIN_USER }}
password={{ COMMON_MYSQL_ADMIN_PASS }}
priv='*.*:CREATE USER'
- name: install memcached
......
......@@ -16,6 +16,11 @@ path.logs: {{elasticsearch_log_dir}}
#
bootstrap.mlockall: true
# Disable dynamic scripting as it is insecure and we don't use it
# See: http://bouk.co/blog/elasticsearch-rce/
# CVE: CVE-2014-3120
script.disable_dynamic: true
# Unicast discovery allows you to explicitly control which nodes will be
# used to discover the cluster. It can be used when multicast is not
# available, or to restrict cluster communication.
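# For example, with two known master-eligible nodes (hypothetical addresses):
# discovery.zen.ping.unicast.hosts: ['10.0.0.10', '10.0.0.11']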
......@@ -37,4 +42,4 @@ bootstrap.mlockall: true
discovery.zen.ping.unicast.hosts: ['{{hosts|join("\',\'") }}']
{% endif -%}
\ No newline at end of file
{% endif -%}
......@@ -30,7 +30,7 @@ FORUM_ELASTICSEARCH_URL: "http://{{ FORUM_ELASTICSEARCH_HOST }}:{{ FORUM_ELASTIC
# This needs to be a string; set to 'false' to disable
FORUM_NEW_RELIC_ENABLE: 'true'
FORUM_NEW_RELIC_LICENSE_KEY: "new-relic-license-key"
FORUM_NEW_RELIC_APP_NAME: "forum-newrelic-app"
FORUM_NEW_RELIC_APP_NAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-forum"
FORUM_WORKER_PROCESSES: "4"
FORUM_LISTEN_HOST: "0.0.0.0"
......
......@@ -34,6 +34,7 @@
dest={{ forum_code_dir }} repo={{ forum_source_repo }} version={{ forum_version }}
accept_hostkey=yes
sudo_user: "{{ forum_user }}"
register: forum_checkout
notify: restart the forum service
# TODO: This is done as the common_web_user
......@@ -66,4 +67,7 @@
- include: test.yml tags=deploy
- include: tag_ec2.yml tags=deploy
when: COMMON_TAG_EC2_INSTANCE
- set_fact: forum_installed=true
---
- name: get instance information
action: ec2_facts
- name: tag instance
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:forum" : "{{ forum_source_repo }} {{ forum_checkout.after|truncate(7,True,'') }}"
when: forum_checkout.after is defined
......@@ -17,29 +17,29 @@
JENKINS_ADMIN_NAME: 'default_jenkins_name'
# A dictionary of AWS credentials used to generate
# a boto file for jenkins.
JENKINS_ADMIN_AWS_CREDENTIALS: !!null
# jenkins_admin also requires other variables that are not defined by default.
# JENKINS_ADMIN_S3_PROFILE: !!null
# JENKINS_ADMIN_CONFIGURATION_REPO: !!null
# JENKINS_ADMIN_CONFIGURATION_SECURE_REPO: !!null
#
# # git key used to check out secure repos on jenkins and in abbey
# JENKINS_ADMIN_GIT_KEY: !!null
#
# # EC2 Key to use when bringing up the abbey instance in ec2 (aws key-pair)
# JENKINS_ADMIN_EC2_KEY: !!null
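# Example structure (hypothetical values); each top-level key becomes a
# "[profile <deployment>]" stanza in jenkins' .boto file via boto.j2:
# JENKINS_ADMIN_AWS_CREDENTIALS:
#   edx:
#     access_id: AKIAXXXXXXXXXXXXXXXX
#     secret_key: xxxxxxxxxxxxxxxxxxxx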
jenkins_admin_role_name: jenkins_admin
# repo for nodejs
jenkins_chrislea_ppa: "ppa:chris-lea/node.js"
#
# OS packages
#
jenkins_admin_debian_repos:
- "deb http://cosmos.cites.illinois.edu/pub/ubuntu/ precise-backports main universe"
jenkins_admin_debian_pkgs:
# These are copied from the edxapp
# role so that we can create virtualenvs
# on the jenkins server for edxapp
- npm
# for compiling the virtualenv
# (only needed if wheel files aren't available)
- build-essential
......@@ -62,7 +62,7 @@ jenkins_admin_debian_pkgs:
# misc
- curl
- ipython
- npm
- nodejs
- ntp
# for shapely
- libgeos-dev
......@@ -77,6 +77,8 @@ jenkins_admin_debian_pkgs:
- ruby1.9.1
# for check-migrations
- mysql-client
# for aws cli scripting
- jq
jenkins_admin_gem_pkgs:
# for generating status.edx.org
......@@ -141,4 +143,3 @@ jenkins_admin_plugins:
jenkins_admin_jobs:
- 'backup-jenkins'
- 'build-ami'
......@@ -21,29 +21,21 @@
#
#
- fail: "JENKINS_ADMIN_S3_PROFILE is not defined."
- fail: msg="JENKINS_ADMIN_S3_PROFILE is not defined."
when: JENKINS_ADMIN_S3_PROFILE is not defined
- fail: "JENKINS_ADMIN_S3_PROFILE.name is not defined."
- fail: msg="JENKINS_ADMIN_S3_PROFILE.name is not defined."
when: JENKINS_ADMIN_S3_PROFILE.name is not defined
- fail: "JENKINS_ADMIN_S3_PROFILE.access_key is not defined."
- fail: msg="JENKINS_ADMIN_S3_PROFILE.access_key is not defined."
when: JENKINS_ADMIN_S3_PROFILE.access_key is not defined
- fail: "JENKINS_ADMIN_S3_PROFILE.secret_key is not defined."
- fail: msg="JENKINS_ADMIN_S3_PROFILE.secret_key is not defined."
when: JENKINS_ADMIN_S3_PROFILE.secret_key is not defined
- fail: "JENKINS_ADMIN_CONFIGURATION_REPO is not defined."
when: JENKINS_ADMIN_CONFIGURATION_REPO is not defined
- fail: "JENKINS_ADMIN_CONFIGURATION_SECURE_REPO is not defined."
when: JENKINS_ADMIN_CONFIGURATION_SECURE_REPO is not defined
- fail: "JENKINS_ADMIN_GIT_KEY is not defined."
when: JENKINS_ADMIN_GIT_KEY is not defined
- fail: "JENKINS_ADMIN_EC2_KEY is not defined."
when: JENKINS_ADMIN_EC2_KEY is not defined
- name: add admin specific apt repositories
apt_repository: repo="{{ item }}" state=present update_cache=yes
with_items: jenkins_admin_debian_repos
# We first download the plugins to a temp directory and include
# the version in the file name. That way, if we increment
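# A sketch of the versioned download described above (plugin-mirror URL
# layout and variable names assumed):
- name: download Jenkins plugins to a temp directory
  get_url: >
    url=http://updates.jenkins-ci.org/download/plugins/{{ item.name }}/{{ item.version }}/{{ item.name }}.hpi
    dest=/tmp/{{ item.name }}_{{ item.version }}.hpi
  with_items: jenkins_admin_plugins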
......@@ -72,6 +64,14 @@
group={{ jenkins_group }}
mode=0644
- name: configure the boto profiles for jenkins
template: >
src="./{{ jenkins_home }}/boto.j2"
dest="{{ jenkins_home }}/.boto"
owner="{{ jenkins_user }}"
group="{{ jenkins_group }}"
mode="0600"
- name: create the ssh directory
file: >
path={{ jenkins_home }}/.ssh
......@@ -86,14 +86,6 @@
shell: >
ssh-keygen -f {{ jenkins_home }}/.ssh/known_hosts -H -F github.com | grep -q found || ssh-keyscan -H github.com > {{ jenkins_home }}/.ssh/known_hosts
- name: drop the secure credentials
copy: >
content="{{ JENKINS_ADMIN_GIT_KEY }}"
dest={{ jenkins_home }}/.ssh/id_rsa
owner={{ jenkins_user }}
group={{ jenkins_group }}
mode=0600
- name: create job directory
file: >
path="{{ jenkins_home }}/jobs"
......@@ -120,9 +112,12 @@
mode=0644
with_items: jenkins_admin_jobs
# adding chris-lea nodejs repo
- name: add ppas for current versions of nodejs
apt_repository: repo="{{ jenkins_chrislea_ppa }}"
- name: install system packages for edxapp virtualenvs
apt: pkg={{ item }} state=present
with_items: jenkins_admin_debian_pkgs
apt: pkg={{','.join(jenkins_admin_debian_pkgs)}} state=present update_cache=yes
# This is necessary so that ansible can run with
# sudo set to True (as the jenkins user) on jenkins
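# One plausible shape for the task this comment introduces (hypothetical;
# the actual task is elided from this diff):
- name: allow the jenkins user passwordless sudo
  lineinfile: >
    dest=/etc/sudoers state=present
    regexp='^{{ jenkins_user }} ALL='
    line='{{ jenkins_user }} ALL=(ALL) NOPASSWD:ALL'
    validate='visudo -cf %s'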
......
{% for deployment, creds in JENKINS_ADMIN_AWS_CREDENTIALS.iteritems() %}
[profile {{deployment}}]
aws_access_key_id = {{ creds.access_id }}
aws_secret_access_key = {{ creds.secret_key }}
{% endfor %}
......@@ -27,7 +27,6 @@ mkdir -p $BUILD_ID/jobs
# Copy global configuration files into the workspace
cp $JENKINS_HOME/*.xml $BUILD_ID/
# Copy keys and secrets into the workspace
cp $JENKINS_HOME/identity.key $BUILD_ID/
cp $JENKINS_HOME/secret.key $BUILD_ID/
cp $JENKINS_HOME/secret.key.not-so-secret $BUILD_ID/
cp -r $JENKINS_HOME/secrets $BUILD_ID/
......
<?xml version='1.0' encoding='UTF-8'?>
<project>
<actions/>
<description></description>
<keepDependencies>false</keepDependencies>
<properties>
<hudson.model.ParametersDefinitionProperty>
<parameterDefinitions>
<hudson.model.StringParameterDefinition>
<name>play</name>
<description></description>
<defaultValue></defaultValue>
</hudson.model.StringParameterDefinition>
<hudson.model.StringParameterDefinition>
<name>deployment</name>
<description></description>
<defaultValue></defaultValue>
</hudson.model.StringParameterDefinition>
<hudson.model.StringParameterDefinition>
<name>environment</name>
<description></description>
<defaultValue></defaultValue>
</hudson.model.StringParameterDefinition>
<hudson.model.TextParameterDefinition>
<name>refs</name>
<description></description>
<defaultValue></defaultValue>
</hudson.model.TextParameterDefinition>
<hudson.model.TextParameterDefinition>
<name>vars</name>
<description></description>
<defaultValue></defaultValue>
</hudson.model.TextParameterDefinition>
<hudson.model.StringParameterDefinition>
<name>configuration</name>
<description>The GITREF of configuration to use. Leave blank to default to master.</description>
<defaultValue></defaultValue>
</hudson.model.StringParameterDefinition>
<hudson.model.StringParameterDefinition>
<name>configuration_secure</name>
<description>The GITREF of configuration-secure repository to use. Leave blank to default to master.</description>
<defaultValue></defaultValue>
</hudson.model.StringParameterDefinition>
<hudson.model.StringParameterDefinition>
<name>base_ami</name>
<description></description>
<defaultValue></defaultValue>
</hudson.model.StringParameterDefinition>
<hudson.model.BooleanParameterDefinition>
<name>use_blessed</name>
<description></description>
<defaultValue>true</defaultValue>
</hudson.model.BooleanParameterDefinition>
</parameterDefinitions>
</hudson.model.ParametersDefinitionProperty>
<com.sonyericsson.rebuild.RebuildSettings plugin="rebuild@1.20">
<autoRebuild>false</autoRebuild>
</com.sonyericsson.rebuild.RebuildSettings>
</properties>
<scm class="org.jenkinsci.plugins.multiplescms.MultiSCM" plugin="multiple-scms@0.2">
<scms>
<hudson.plugins.git.GitSCM plugin="git@1.5.0">
<configVersion>2</configVersion>
<userRemoteConfigs>
<hudson.plugins.git.UserRemoteConfig>
<name></name>
<refspec></refspec>
<url>{{ JENKINS_ADMIN_CONFIGURATION_REPO }}</url>
</hudson.plugins.git.UserRemoteConfig>
</userRemoteConfigs>
<branches>
<hudson.plugins.git.BranchSpec>
<name>*/master</name>
</hudson.plugins.git.BranchSpec>
</branches>
<disableSubmodules>false</disableSubmodules>
<recursiveSubmodules>false</recursiveSubmodules>
<doGenerateSubmoduleConfigurations>false</doGenerateSubmoduleConfigurations>
<authorOrCommitter>false</authorOrCommitter>
<clean>false</clean>
<wipeOutWorkspace>false</wipeOutWorkspace>
<pruneBranches>false</pruneBranches>
<remotePoll>false</remotePoll>
<ignoreNotifyCommit>false</ignoreNotifyCommit>
<useShallowClone>false</useShallowClone>
<abortIfNoNewRevs>false</abortIfNoNewRevs>
<cutoffHours></cutoffHours>
<buildChooser class="hudson.plugins.git.util.DefaultBuildChooser"/>
<gitTool>Default</gitTool>
<submoduleCfg class="list"/>
<relativeTargetDir>configuration</relativeTargetDir>
<reference></reference>
<excludedRegions></excludedRegions>
<excludedUsers></excludedUsers>
<gitConfigName></gitConfigName>
<gitConfigEmail></gitConfigEmail>
<skipTag>true</skipTag>
<includedRegions></includedRegions>
<scmName>configuration</scmName>
</hudson.plugins.git.GitSCM>
<hudson.plugins.git.GitSCM plugin="git@1.5.0">
<configVersion>2</configVersion>
<userRemoteConfigs>
<hudson.plugins.git.UserRemoteConfig>
<name></name>
<refspec></refspec>
<url>{{ JENKINS_ADMIN_CONFIGURATION_SECURE_REPO }}</url>
</hudson.plugins.git.UserRemoteConfig>
</userRemoteConfigs>
<branches>
<hudson.plugins.git.BranchSpec>
<name>*/master</name>
</hudson.plugins.git.BranchSpec>
</branches>
<disableSubmodules>false</disableSubmodules>
<recursiveSubmodules>false</recursiveSubmodules>
<doGenerateSubmoduleConfigurations>false</doGenerateSubmoduleConfigurations>
<authorOrCommitter>false</authorOrCommitter>
<clean>false</clean>
<wipeOutWorkspace>false</wipeOutWorkspace>
<pruneBranches>false</pruneBranches>
<remotePoll>false</remotePoll>
<ignoreNotifyCommit>false</ignoreNotifyCommit>
<useShallowClone>false</useShallowClone>
<abortIfNoNewRevs>false</abortIfNoNewRevs>
<cutoffHours></cutoffHours>
<buildChooser class="hudson.plugins.git.util.DefaultBuildChooser"/>
<gitTool>Default</gitTool>
<submoduleCfg class="list"/>
<relativeTargetDir>configuration-secure</relativeTargetDir>
<reference></reference>
<excludedRegions></excludedRegions>
<excludedUsers></excludedUsers>
<gitConfigName></gitConfigName>
<gitConfigEmail></gitConfigEmail>
<skipTag>true</skipTag>
<includedRegions></includedRegions>
<scmName>configuration-secure</scmName>
</hudson.plugins.git.GitSCM>
</scms>
</scm>
<canRoam>true</canRoam>
<disabled>false</disabled>
<blockBuildWhenDownstreamBuilding>false</blockBuildWhenDownstreamBuilding>
<blockBuildWhenUpstreamBuilding>false</blockBuildWhenUpstreamBuilding>
<authToken>MULTIPASS</authToken>
<triggers/>
<concurrentBuild>true</concurrentBuild>
<builders>
<jenkins.plugins.shiningpanda.builders.VirtualenvBuilder plugin="shiningpanda@0.20">
<pythonName>System-CPython-2.7</pythonName>
<home></home>
<clear>false</clear>
<useDistribute>true</useDistribute>
<systemSitePackages>false</systemSitePackages>
<nature>shell</nature>
<command>
#!/bin/bash -x
export jenkins_admin_ec2_key="{{ JENKINS_ADMIN_EC2_KEY }}"
export jenkins_admin_configuration_secure_repo="{{ JENKINS_ADMIN_CONFIGURATION_SECURE_REPO }}"
configuration/util/jenkins/build-ami.sh
</command>
<ignoreExitCode>false</ignoreExitCode>
</jenkins.plugins.shiningpanda.builders.VirtualenvBuilder>
<hudson.tasks.Shell>
<command>#!/bin/bash -x
if [[ &quot;$play&quot; == &quot;&quot; ]]; then
echo &quot;No Play Specified. Nothing to Do.&quot;
exit 0
fi
rm -f /var/tmp/$BUILD_ID-extra-vars.yml
rm -f /var/tmp/$BUILD_ID-refs.yml</command>
</hudson.tasks.Shell>
</builders>
<publishers/>
</project>
......@@ -4,7 +4,7 @@ jenkins_group: "edx"
jenkins_server_name: "jenkins.testeng.edx.org"
jenkins_port: 8080
jenkins_version: 1.538
jenkins_version: 1.571
jenkins_deb_url: "http://pkg.jenkins-ci.org/debian/binary/jenkins_{{ jenkins_version }}_all.deb"
jenkins_deb: "jenkins_{{ jenkins_version }}_all.deb"
......@@ -17,7 +17,7 @@ jenkins_plugins:
- { name: "copy-to-slave", version: "1.4.3" }
- { name: "credentials", version: "1.8.3" }
- { name: "dashboard-view", version: "2.9.1" }
- { name: "ec2", version: "1.19" }
- { name: "ec2", version: "1.23" }
- { name: "github", version: "1.8" }
- { name: "github-api", version: "1.44" }
- { name: "github-oauth", version: "0.14" }
......@@ -28,10 +28,12 @@ jenkins_plugins:
- { name: "mailer", version: "1.5" }
- { name: "nested-view", version: "1.10" }
- { name: "next-build-number", version: "1.0" }
- { name: "node-iterator-api", version: "1.5" }
- { name: "notification", version: "1.5" }
- { name: "pam-auth", version: "1.0" }
- { name: "parameterized-trigger", version: "2.20" }
- { name: "postbuild-task", version: "1.8" }
- { name: "PrioritySorter", version: "2.8" }
- { name: "sauce-ondemand", version: "1.61" }
- { name: "s3", version: "0.5" }
- { name: "ssh-agent", version: "1.3" }
......@@ -45,6 +47,7 @@ jenkins_plugins:
- { name: "multiple-scms", version: "0.2" }
- { name: "timestamper", version: "1.5.7" }
- { name: "thinBackup", version: "1.7.4"}
- { name: "xunit", version: "1.89"}
jenkins_bundled_plugins:
- "credentials"
......
---
dependencies:
- common
- role: datadog
COMMON_ENABLE_DATADOG: True
......@@ -128,3 +128,4 @@
notify: start nginx
- include: datadog.yml tags=datadog
when: COMMON_ENABLE_DATADOG
......@@ -3,6 +3,9 @@ jenkins_user: "jenkins"
jenkins_group: "jenkins"
jenkins_home: /home/jenkins
# repo for nodejs
jenkins_chrislea_ppa: "ppa:chris-lea/node.js"
# System packages
jenkins_debian_pkgs:
- build-essential
......@@ -15,7 +18,7 @@ jenkins_debian_pkgs:
- libxml2-dev
- libgeos-dev
- libxslt1-dev
- npm
- nodejs
- pkg-config
- gettext
......@@ -69,7 +72,7 @@ jenkins_wheels:
- { pkg: "mongoengine==0.7.10", wheel: "mongoengine-0.7.10-py27-none-any.whl" }
- { pkg: "networkx==1.7", wheel: "networkx-1.7-py27-none-any.whl" }
- { pkg: "nltk==2.0.4", wheel: "nltk-2.0.4-py27-none-any.whl" }
- { pkg: "oauthlib==0.5.1", wheel: "oauthlib-0.5.1-py27-none-any.whl" }
- { pkg: "oauthlib==0.6.3", wheel: "oauthlib-0.6.3-py27-none-any.whl" }
- { pkg: "paramiko==1.9.0", wheel: "paramiko-1.9.0-py27-none-any.whl" }
- { pkg: "path.py==3.0.1", wheel: "path.py-3.0.1-py27-none-any.whl" }
- { pkg: "Pillow==1.7.8", wheel: "Pillow-1.7.8-cp27-none-linux_x86_64.whl" }
......@@ -86,7 +89,7 @@ jenkins_wheels:
- { pkg: "pytz==2012h", wheel: "pytz-2012h-py27-none-any.whl" }
- { pkg: "pysrt==0.4.7", wheel: "pysrt-0.4.7-py27-none-any.whl" }
- { pkg: "PyYAML==3.10", wheel: "PyYAML-3.10-cp27-none-linux_x86_64.whl" }
- { pkg: "requests==1.2.3", wheel: "requests-1.2.3-py27-none-any.whl" }
- { pkg: "requests==2.3.0", wheel: "requests-2.3.0-py27-none-any.whl" }
- { pkg: "scipy==0.11.0", wheel: "scipy-0.11.0-cp27-none-linux_x86_64.whl" }
- { pkg: "Shapely==1.2.16", wheel: "Shapely-1.2.16-cp27-none-linux_x86_64.whl" }
- { pkg: "singledispatch==3.4.0.2", wheel: "singledispatch-3.4.0.2-py27-none-any.whl" }
......
......@@ -26,6 +26,10 @@
owner={{ jenkins_user }} group={{ jenkins_group }} mode=400
ignore_errors: yes
# adding chris-lea nodejs repo
- name: add ppas for current versions of nodejs
apt_repository: repo="{{ jenkins_chrislea_ppa }}"
- name: Install system packages
apt: pkg={{','.join(jenkins_debian_pkgs)}}
state=present update_cache=yes
......
......@@ -41,6 +41,8 @@
group: "{{ security_group }}"
instance_type: "{{ instance_type }}"
image: "{{ ami }}"
vpc_subnet_id: "{{ vpc_subnet_id }}"
assign_public_ip: yes
wait: true
region: "{{ region }}"
instance_tags: "{{instance_tags}}"
......
......@@ -53,3 +53,10 @@
regexp=". {{ localdev_home }}/share_x11"
line=". {{ localdev_home }}/share_x11"
state=present
# Create scripts to add paver autocomplete
- name: add paver autocomplete
template:
src=paver_autocomplete dest={{ item.home }}/.paver_autocomplete
owner={{ item.user }} mode=755
with_items: localdev_accounts
......@@ -15,3 +15,5 @@ else
fi
cd "{{ item.home }}/{{ item.repo }}"
source "{{ item.home }}/.paver_autocomplete"
# Courtesy of Gregory Nicholas
_paver()
{
local cur
COMPREPLY=()
# Variable to hold the current word
cur="${COMP_WORDS[COMP_CWORD]}"
# Build a list of the available tasks from: `paver --help --quiet`
local cmds=$(paver -hq | awk '/^ ([a-zA-Z][a-zA-Z0-9_]+)/ {print $1}')
# Generate possible matches and store them in the
# array variable COMPREPLY
COMPREPLY=($(compgen -W "${cmds}" -- "${cur}"))
}
# Assign the auto-completion function for our command.
complete -F _paver paver
\ No newline at end of file
......@@ -41,7 +41,7 @@
- name: copy mongodb key file
copy: >
src={{ secure_dir }}/files/mongo_key
content="{{ MONGO_CLUSTER_KEY }}"
dest={{ mongo_key_file }}
mode=0600
owner=mongodb
......
mms_agent_version: "2.2.0.70-1"
mms_agent_url: "https://mms.mongodb.com/download/agent/monitoring/mongodb-mms-monitoring-agent_{{ mms_agent_version }}_amd64.deb"
---
- name: restart mms
service: name=mongodb-mms-monitoring-agent state=restarted
---
# mongo_mms
#
# Example play:
#
# roles:
# - mongo_mms
- fail: msg="MMSAPIKEY is required"
when: MMSAPIKEY is not defined
# This workaround can be removed in Ansible 1.6, whose apt module can install local deb files.
- name: install required packages
apt: name={{ item }} state=present
with_items:
- gdebi
- name: download mongo mms agent
get_url: >
url="{{ mms_agent_url }}"
dest="/tmp/mongodb-mms-monitoring-agent-{{ mms_agent_version }}.deb"
register: download_mms_deb
- name: install mongo mms agent
shell: "gdebi -nq /tmp/mongodb-mms-monitoring-agent-{{ mms_agent_version }}.deb"
when: download_mms_deb.changed
notify: restart mms
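# With Ansible >= 1.6, apt can install a local deb directly, so the gdebi
# workaround above could become a single task (sketch):
- name: install mongo mms agent
  apt: deb="/tmp/mongodb-mms-monitoring-agent-{{ mms_agent_version }}.deb"
  notify: restart mms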
- name: add key to monitoring-agent.config
lineinfile: >
dest=/etc/mongodb-mms/monitoring-agent.config
regexp="^mmsApiKey="
line="mmsApiKey={{ MMSAPIKEY }}"
notify: restart mms
- name: start mms service
service: name=mongodb-mms-monitoring-agent state=started
......@@ -16,9 +16,9 @@
#
newrelic_role_name: newrelic
NEWRELIC_REPO: 'deb http://apt.newrelic.com/debian/ newrelic non-free'
NEWRELIC_KEY_ID: '548C16BF'
NEWRELIC_KEY_URL: 'https://download.newrelic.com/{{ NEWRELIC_KEY_ID }}.gpg'
NEWRELIC_DEBIAN_REPO: 'deb http://apt.newrelic.com/debian/ newrelic non-free'
NEWRELIC_DEBIAN_KEY_ID: '548C16BF'
NEWRELIC_DEBIAN_KEY_URL: 'https://download.newrelic.com/{{ NEWRELIC_DEBIAN_KEY_ID }}.gpg'
NEWRELIC_LICENSE_KEY: "SPECIFY_KEY_HERE"
#
......@@ -28,4 +28,5 @@ NEWRELIC_LICENSE_KEY: "SPECIFY_KEY_HERE"
newrelic_debian_pkgs:
- newrelic-sysmond
newrelic_redhat_pkgs: []
newrelic_redhat_pkgs:
- newrelic-sysmond
......@@ -27,18 +27,33 @@
- name: add apt key
apt_key: >
id="{{ NEWRELIC_KEY_ID }}" url="{{ NEWRELIC_KEY_URL }}"
id="{{ NEWRELIC_DEBIAN_KEY_ID }}" url="{{ NEWRELIC_DEBIAN_KEY_URL }}"
state=present
when: ansible_distribution == 'Ubuntu'
- name: Configure the New Relic Servers yum repository
shell: >
rpm -Uvh https://yum.newrelic.com/pub/newrelic/el5/x86_64/newrelic-repo-5-3.noarch.rpm
creates=/etc/yum.repos.d/newrelic.repo
when: ansible_distribution == 'Amazon'
- name: install apt repository
apt_repository: repo="{{ NEWRELIC_REPO }}" update_cache=yes
apt_repository: repo="{{ NEWRELIC_DEBIAN_REPO }}" update_cache=yes
when: ansible_distribution == 'Ubuntu'
- name: install newrelic agent
- name: install newrelic agent (apt)
apt: pkg="newrelic-sysmond"
when: ansible_distribution == 'Ubuntu'
- name: Install newrelic related system packages.
- name: Install newrelic related system packages for Ubuntu
apt: pkg={{ item }} install_recommends=yes state=present
with_items: newrelic_debian_pkgs
when: ansible_distribution == 'Ubuntu'
- name: Install newrelic related system packages for Amazon
yum: pkg={{ item }} state=present
with_items: newrelic_redhat_pkgs
when: ansible_distribution == 'Amazon'
- name: configure the agent with the license key
shell: >
......
......@@ -51,6 +51,8 @@ nginx_lms_preview_gunicorn_hosts:
- 127.0.0.1
nginx_cms_gunicorn_hosts:
- 127.0.0.1
nginx_analytics_api_gunicorn_hosts:
- 127.0.0.1
nginx_cfg:
# - link - turn on
......