Commit e7fae47e by Han Su Kim

Merge pull request #1239 from edx/rc/Johnnycake

Release ... rc/Johnnycake
parents 6298c7b2 b0447bcb
......@@ -9,3 +9,4 @@
vagrant/*/devstack/edx-platform
vagrant/*/devstack/cs_comments_service
vagrant/*/devstack/ora
vagrant_ansible_inventory_default
......@@ -29,3 +29,6 @@ Ker Ruben Ramos <xdiscent@gmail.com>
Fred Smith <derf@edx.org>
Wang Peifeng <pku9104038@hotmail.com>
Ray Hooker <ray.hooker@gmail.com>
David Pollack <david@sologourmand.com>
Rodolphe Quiedeville <rodolphe@quiedeville.org>
Matjaz Gregoric <mtyaka@gmail.com>
- Role: xqwatcher, xqueue, nginx, edxapp, common
- Moving nginx basic authorization flag and credentials to the common role
- Role: Edxapp
- Turn on code sandboxing by default and allow the jailed code to be able to write
files to the tmp directory created for it by codejail.
- Role: Edxapp
- The repo.txt requirements file is no longer processed in any way. This file was removed from edx-platform
via pull #3487 (https://github.com/edx/edx-platform/pull/3487)
- Update CMS_HOSTNAME default to allow any hostname that starts with `studio` along with `prod-studio` or `stage-studio`.
- Start a change log to keep track of backwards incompatible changes and deprecations.
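The CMS_HOSTNAME entry above describes a hostname pattern; as a sanity check, an equivalent regex can be exercised directly. This is an illustrative sketch only: the exact default lives in the edxapp role, and the pattern below is an assumption reconstructed from the changelog wording.

import re

# Assumed pattern, reconstructed from the changelog entry above; the
# authoritative default is defined in the edxapp role, not here.
CMS_HOSTNAME = re.compile(r'^((stage|prod)-)?studio.*')

for host in ['studio.example.com', 'prod-studio.example.com',
             'stage-studio.example.com', 'cms.example.com']:
    print host, bool(CMS_HOSTNAME.match(host))
# Prints True for the first three hosts and False for cms.example.com.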
......@@ -26,9 +26,9 @@ Private Cloud with hosts for the core edX services. This template
will build quite a number of AWS resources that cost money, so please
consider this before you start.
The configuration phase is manged by [Ansible](http://ansible.cc/).
The configuration phase is managed by [Ansible](http://ansible.com/).
We have provided a number of playbooks that will configure each of
the edX service.
the edX services.
This project is a re-write of the current edX provisioning and
configuration tools; we will be migrating features to this project
......@@ -36,3 +36,5 @@ over time, so expect frequent changes.
For more information, including installation instructions, please see the [Configuration Wiki](https://github.com/edx/configuration/wiki).
For info on any large recent changes please see the [change log](https://github.com/edx/configuration/blob/master/CHANGELOG.md).
#
# Overview:
# This play needs to be run per environment-deployment and you will need to
# provide the boto profile and vpc_id as arguments
#
# ansible-playbook -i 'localhost,' ./vpc-migrate-analytics_api-edge-stage.yml \
# -e 'profile=edge vpc_id=vpc-416f9b24'
#
# Caveats
#
# - This requires ansible 1.6
# - Requires the following branch of Ansible: e0d/add-instance-profile from
# https://github.com/e0d/ansible.git
# - This play isn't fully idempotent because of an ec2 module update issue
# with ASGs. This can be worked around by deleting the ASG and re-running
# the play.
# - The instance_profile_name will need to be created in advance as there
# isn't a way to do so from Ansible.
#
# Prerequisites:
# Create an IAM EC2 role
#
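Since the play cannot create the instance profile itself, the IAM role and profile must exist before the run. A minimal boto sketch of that prerequisite follows; the role and profile names are placeholders, to be matched to your instance_profile_name variable.

import json
import boto

# Create the IAM role and instance profile the play expects to find.
# 'analytics-api-role' / 'analytics-api-profile' are placeholder names.
iam = boto.connect_iam()

assume_role_policy = json.dumps({
    "Statement": [{
        "Effect": "Allow",
        "Principal": {"Service": ["ec2.amazonaws.com"]},
        "Action": ["sts:AssumeRole"],
    }]
})

iam.create_role('analytics-api-role',
                assume_role_policy_document=assume_role_policy)
iam.create_instance_profile('analytics-api-profile')
iam.add_role_to_instance_profile('analytics-api-profile',
                                 'analytics-api-role')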
- name: Add resources for the Analytics API
hosts: localhost
connection: local
gather_facts: False
tasks:
# Fails intermittently with the following error:
# The specified rule does not exist in this security group
- name: Create instance security group
ec2_group:
profile: "{{ profile }}"
description: "Open up SSH access"
name: "{{ security_group }}"
vpc_id: "{{ vpc_id }}"
region: "{{ ec2_region }}"
rules:
- proto: tcp
from_port: "{{ sec_group_ingress_from_port }}"
to_port: "{{ sec_group_ingress_to_port }}"
cidr_ip: "{{ item }}"
with_items: sec_group_ingress_cidrs
register: created_sec_group
ignore_errors: True
- name: debug
debug:
msg: "Registered created_sec_group: {{ created_sec_group }}"
# Needs Ansible 1.7 for VPC support of ELBs
# - name: Create elb security group
# ec2_group:
# profile: "{{ profile }}"
# description: "ELB security group"
# name: "ELB-{{ security_group }}"
# vpc_id: "{{ vpc_id }}"
# region: "{{ ec2_region }}"
# rules:
# - proto: tcp
# from_port: "443"
# to_port: "443"
# cidr_ip: "0.0.0.0/0"
# register: created_elb_sec_group
# ignore_errors: True
# Needs 1.7 for VPC support
# - name: "Create ELB"
# ec2_elb_lb:
# profile: "{{ profile }}"
# region: "{{ ec2_region }}"
# zones:
# - us-east-1b
# - us-east-1c
# name: "{{ edp }}"
# state: present
# security_group_ids: "{{ created_elb_sec_group.group_id }}"
# listeners:
# - protocol: https
# load_balancer_port: 443
# instance_protocol: http # optional, defaults to value of protocol setting
# instance_port: 80
# # ssl certificate required for https or ssl
# ssl_certificate_id: "{{ ssl_cert }}"
# instance_profile_name was added by me in my fork
- name: Create the launch configuration
ec2_lc:
profile: "{{ profile }}"
region: "{{ ec2_region }}"
name: "{{ lc_name }}"
image_id: "{{ lc_ami }}"
key_name: "{{ key_name }}"
security_groups: "{{ created_sec_group.results[0].group_id }}"
instance_type: "{{ instance_type }}"
instance_profile_name: "{{ instance_profile_name }}"
volumes:
- device_name: "/dev/sda1"
volume_size: "{{ instance_volume_size }}"
- name: Create ASG
ec2_asg:
profile: "{{ profile }}"
region: "{{ ec2_region }}"
name: "{{ asg_name }}"
launch_config_name: "{{ lc_name }}"
load_balancers: "{{ elb_name }}"
availability_zones:
- us-east-1b
- us-east-1c
min_size: 0
max_size: 2
desired_capacity: 1
vpc_zone_identifier: "{{ subnets|join(',') }}"
instance_tags:
Name: "{{ env }}-{{ deployment }}-{{ play }}"
autostack: "true"
environment: "{{ env }}"
deployment: "{{ deployment }}"
play: "{{ play }}"
services: "{{ play }}"
register: asg
- name: debug
debug:
msg: "DEBUG: {{ asg }}"
- name: Create scale up policy
ec2_scaling_policy:
state: present
profile: "{{ profile }}"
region: "{{ ec2_region }}"
name: "{{ edp }}-ScaleUpPolicy"
adjustment_type: "ChangeInCapacity"
asg_name: "{{ asg_name }}"
scaling_adjustment: 1
min_adjustment_step: 1
cooldown: 60
register: scale_up_policy
- name: debug
debug:
msg: "Registered scale_up_policy: {{ scale_up_policy }}"
- name: Create scale down policy
ec2_scaling_policy:
state: present
profile: "{{ profile }}"
region: "{{ ec2_region }}"
name: "{{ edp }}-ScaleDownPolicy"
adjustment_type: "ChangeInCapacity"
asg_name: "{{ asg_name }}"
scaling_adjustment: -1
min_adjustment_step: 1
cooldown: 60
register: scale_down_policy
- name: debug
debug:
msg: "Registered scale_down_policy: {{ scale_down_policy }}"
#
# Sometimes the scaling policy reports itself changed, but
# does not return data about the policy. It's bad enough
# that consistent data isn't returned when things
# have and have not changed; this makes writing idempotent
# tasks difficult.
- name: create high-cpu alarm
ec2_metric_alarm:
state: present
region: "{{ ec2_region }}"
name: "cpu-high"
metric: "CPUUtilization"
namespace: "AWS/EC2"
statistic: Average
comparison: ">="
threshold: 90.0
period: 300
evaluation_periods: 2
unit: "Percent"
description: "Scale-up if CPU > 90% for 10 minutes"
dimensions: {"AutoScalingGroupName":"{{ asg_name }}"}
alarm_actions: ["{{ scale_up_policy.arn }}"]
when: scale_up_policy.arn is defined
- name: create low-cpu alarm
ec2_metric_alarm:
state: present
region: "{{ ec2_region }}"
name: "cpu-low"
metric: "CPUUtilization"
namespace: "AWS/EC2"
statistic: Average
comparison: "<="
threshold: 50.0
period: 300
evaluation_periods: 2
unit: "Percent"
description: "Scale-down if CPU < 50% for 10 minutes"
dimensions: {"AutoScalingGroupName":"{{ asg_name }}"}
alarm_actions: ["{{ scale_down_policy.arn }}"]
when: scale_down_policy.arn is defined
\ No newline at end of file
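The idempotence caveat above (the ec2 module update issue with ASGs) is worked around manually by deleting the group before re-running the play. A boto sketch of that step; the region and group name are placeholders:

import boto.ec2.autoscale

# Delete the ASG so the next run of the play recreates it cleanly.
# force_delete also terminates any instances still attached to the group.
conn = boto.ec2.autoscale.connect_to_region('us-east-1')  # placeholder region
conn.delete_auto_scaling_group('stage-edge-analytics-api', force_delete=True)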
#
# Overview:
# This play needs to be run per environment-deployment and you will need to
# provide the boto profile and vpc_id as arguments
#
# ansible-playbook -i 'localhost,' ./vpc-migrate-xqwatcher-edge-stage.yml \
# -e 'profile=edge vpc_id=vpc-416f9b24'
#
# Caveats
#
# - This requires ansible 1.6
# - Requires the following branch of Ansible: e0d/add-instance-profile from
# https://github.com/e0d/ansible.git
# - This play isn't fully idempotent because of an ec2 module update issue
# with ASGs. This can be worked around by deleting the ASG and re-running
# the play.
# - The instance_profile_name will need to be created in advance as there
# isn't a way to do so from Ansible.
#
# Prerequisites:
# Create an IAM EC2 role
#
- name: Add resources for the XQWatcher
hosts: localhost
connection: local
gather_facts: False
tasks:
# ignore_errors is used here because this module is not idempotent:
# if the tags already exist, the task will fail with the message
# "Tags already exists in subnet"
- name: Update subnet tags
ec2_tag:
resource: "{{ item }}"
region: "{{ ec2_region }}"
state: present
tags:
Name: "{{ edp }}-subnet"
play: xqwatcher
immutable_metadata: "{'purpose':'{{ environment }}-{{ deployment }}-internal-{{ play }}','target':'ec2'}"
with_items: subnets
ignore_errors: True
# Fails intermittently with the following error:
# The specified rule does not exist in this security group
- name: Create security group
ec2_group:
profile: "{{ profile }}"
description: "Open up SSH access"
name: "{{ security_group }}"
vpc_id: "{{ vpc_id }}"
region: "{{ ec2_region }}"
rules:
- proto: tcp
from_port: "{{ sec_group_ingress_from_port }}"
to_port: "{{ sec_group_ingress_to_port }}"
cidr_ip: "{{ item }}"
with_items: sec_group_ingress_cidrs
register: created_sec_group
ignore_errors: True
- name: debug
debug:
msg: "Registered created_sec_group: {{ created_sec_group }}"
# instance_profile_name was added by me in my fork
- name: Create the launch configuration
ec2_lc:
profile: "{{ profile }}"
region: "{{ ec2_region }}"
name: "{{ lc_name }}"
image_id: "{{ lc_ami }}"
key_name: "{{ key_name }}"
security_groups: "{{ created_sec_group.results[0].group_id }}"
instance_type: "{{ instance_type }}"
instance_profile_name: "{{ instance_profile_name }}"
volumes:
- device_name: "/dev/sda1"
volume_size: "{{ instance_volume_size }}"
- name: Create ASG
ec2_asg:
profile: "{{ profile }}"
region: "{{ ec2_region }}"
name: "{{ asg_name }}"
launch_config_name: "{{ lc_name }}"
min_size: 0
max_size: 0
desired_capacity: 0
vpc_zone_identifier: "{{ subnets|join(',') }}"
instance_tags:
Name: "{{ env }}-{{ deployment }}-{{ play }}"
autostack: "true"
environment: "{{ env }}"
deployment: "{{ deployment }}"
play: "{{ play }}"
services: "{{ play }}"
register: asg
- name: debug
debug:
msg: "DEBUG: {{ asg }}"
- name: Create scale up policy
ec2_scaling_policy:
state: present
profile: "{{ profile }}"
region: "{{ ec2_region }}"
name: "{{ edp }}-ScaleUpPolicy"
adjustment_type: "ChangeInCapacity"
asg_name: "{{ asg_name }}"
scaling_adjustment: 1
min_adjustment_step: 1
cooldown: 60
register: scale_up_policy
tags:
- foo
- name: debug
debug:
msg: "Registered scale_up_policy: {{ scale_up_policy }}"
- name: Create scale down policy
ec2_scaling_policy:
state: present
profile: "{{ profile }}"
region: "{{ ec2_region }}"
name: "{{ edp }}-ScaleDownPolicy"
adjustment_type: "ChangeInCapacity"
asg_name: "{{ asg_name }}"
scaling_adjustment: -1
min_adjustment_step: 1
cooldown: 60
register: scale_down_policy
- name: debug
debug:
msg: "Registered scale_down_policy: {{ scale_down_policy }}"
#
# Sometimes the scaling policy reports itself changed, but
# does not return data about the policy. It's bad enough
# that consistent data isn't returned when things
# have and have not changed; this makes writing idempotent
# tasks difficult.
- name: create high-cpu alarm
ec2_metric_alarm:
state: present
region: "{{ ec2_region }}"
name: "cpu-high"
metric: "CPUUtilization"
namespace: "AWS/EC2"
statistic: Average
comparison: ">="
threshold: 90.0
period: 300
evaluation_periods: 2
unit: "Percent"
description: "Scale-up if CPU > 90% for 10 minutes"
dimensions: {"AutoScalingGroupName":"{{ asg_name }}"}
alarm_actions: ["{{ scale_up_policy.arn }}"]
when: scale_up_policy.arn is defined
- name: create low-cpu alarm
ec2_metric_alarm:
state: present
region: "{{ ec2_region }}"
name: "cpu-low"
metric: "CPUUtilization"
namespace: "AWS/EC2"
statistic: Average
comparison: "<="
threshold: 50.0
period: 300
evaluation_periods: 2
unit: "Percent"
description: "Scale-down if CPU < 50% for 10 minutes"
dimensions: {"AutoScalingGroupName":"{{ asg_name }}"}
alarm_actions: ["{{ scale_down_policy.arn }}"]
when: scale_down_policy.arn is defined
\ No newline at end of file
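The subnet-tagging task above relies on ignore_errors because ec2_tag is not idempotent. A defensive alternative, sketched with boto and placeholder IDs, is to create only the tags that are missing:

import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')   # placeholder region
subnet_id = 'subnet-00000000'                    # placeholder subnet

# Create only the tags that are not already present, so a re-run does
# not trip over "Tags already exists in subnet".
existing = set(t.name for t in
               conn.get_all_tags(filters={'resource-id': subnet_id}))
desired = {'Name': 'stage-edge-xqwatcher-subnet', 'play': 'xqwatcher'}
missing = dict((k, v) for k, v in desired.items() if k not in existing)
if missing:
    conn.create_tags([subnet_id], missing)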
......@@ -24,13 +24,24 @@
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
......@@ -56,13 +67,24 @@
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
......@@ -79,13 +101,24 @@
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
......@@ -117,25 +150,39 @@
},
"Mappings":{
"AWSInstanceType2Arch":{
"t1.micro": { "Arch":"64" },
"m1.small": { "Arch":"64" },
"m1.medium": { "Arch":"64" },
"m1.large": { "Arch":"64" },
"m1.xlarge": { "Arch":"64" },
"m2.xlarge": { "Arch":"64" },
"m2.2xlarge": { "Arch":"64" },
"m2.4xlarge": { "Arch":"64" },
"m3.xlarge": { "Arch":"64" },
"m3.2xlarge": { "Arch":"64" },
"c1.medium": { "Arch":"64" },
"c1.xlarge": { "Arch":"64" },
"cg1.4xlarge": { "Arch":"64HVM" }
"t1.micro" : { "Arch" : "64" },
"m1.small" : { "Arch" : "64" },
"m1.medium" : { "Arch" : "64" },
"m1.large" : { "Arch" : "64" },
"m1.xlarge" : { "Arch" : "64" },
"m2.xlarge" : { "Arch" : "64" },
"m2.2xlarge" : { "Arch" : "64" },
"m2.4xlarge" : { "Arch" : "64" },
"cr1.8xlarge" : { "Arch" : "64" },
"cc2.8xlarge" : { "Arch" : "64" },
"c1.medium" : { "Arch" : "64" },
"c1.xlarge" : { "Arch" : "64" },
"m3.medium" : { "Arch" : "64" },
"m3.large" : { "Arch" : "64" },
"m3.xlarge" : { "Arch" : "64" },
"m3.2xlarge" : { "Arch" : "64" },
"m3.4xlarge" : { "Arch" : "64" },
"c3.large" : { "Arch" : "64" },
"c3.xlarge" : { "Arch" : "64" },
"c3.2xlarge" : { "Arch" : "64" },
"c3.4xlarge" : { "Arch" : "64" },
"c3.8xlarge" : { "Arch" : "64" },
"r3.large" : { "Arch" : "64" },
"r3.xlarge" : { "Arch" : "64" },
"r3.2xlarge" : { "Arch" : "64" },
"r3.4xlarge" : { "Arch" : "64" },
"r3.8xlarge" : { "Arch" : "64" }
},
"AWSRegionArch2AMI":{
"us-east-1": { "32":"ami-def89fb7", "64":"ami-d0f89fb9", "64HVM":"ami-b93264d0" },
"us-east-1": { "32":"ami-def89fb7", "64":"ami-d0f89fb9" },
"us-west-1": { "32":"ami-fc002cb9", "64":"ami-fe002cbb" },
"us-west-2": { "32":"ami-0ef96e3e", "64":"ami-70f96e40", "64HVM":"ami-6cad335c" },
"eu-west-1": { "32":"ami-c27b6fb6", "64":"ami-ce7b6fba", "64HVM":"ami-8c987efb" },
"us-west-2": { "32":"ami-0ef96e3e", "64":"ami-70f96e40" },
"eu-west-1": { "32":"ami-c27b6fb6", "64":"ami-ce7b6fba" },
"sa-east-1": { "32":"ami-a1da00bc", "64":"ami-a3da00be" },
"ap-southeast-1": { "32":"ami-66084734", "64":"ami-64084736" },
"ap-southeast-2": { "32":"ami-06ea7a3c", "64":"ami-04ea7a3e" },
......@@ -183,7 +230,7 @@
"Fn::FindInMap":[
"MapRegionsToAvailZones",
{ "Ref":"AWS::Region" },
"AZone0"
"AZone1"
]
},
"Tags":[
......@@ -205,7 +252,7 @@
"Fn::FindInMap":[
"MapRegionsToAvailZones",
{ "Ref":"AWS::Region" },
"AZone0"
"AZone1"
]
},
"Tags":[
......@@ -364,7 +411,7 @@
}
}
},
"InboundEmphemeralPublicNetworkAclEntry":{
"InboundSMTPPublicNetworkAclEntry":{
"Type":"AWS::EC2::NetworkAclEntry",
"Properties":{
"NetworkAclId":{
......@@ -376,6 +423,23 @@
"Egress":"false",
"CidrBlock":"0.0.0.0/0",
"PortRange":{
"From":"587",
"To":"587"
}
}
},
"InboundEmphemeralPublicNetworkAclEntry":{
"Type":"AWS::EC2::NetworkAclEntry",
"Properties":{
"NetworkAclId":{
"Ref":"PublicNetworkAcl"
},
"RuleNumber":"104",
"Protocol":"6",
"RuleAction":"allow",
"Egress":"false",
"CidrBlock":"0.0.0.0/0",
"PortRange":{
"From":"1024",
"To":"65535"
}
......@@ -582,6 +646,18 @@
"FromPort":"443",
"ToPort":"443",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":"587",
"ToPort":"587",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":"5222",
"ToPort":"5222",
"CidrIp":"0.0.0.0/0"
}
],
"SecurityGroupEgress":[
......@@ -604,6 +680,18 @@
"FromPort":"443",
"ToPort":"443",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":"587",
"ToPort":"587",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":"5222",
"ToPort":"5222",
"CidrIp":"0.0.0.0/0"
}
]
}
......@@ -688,10 +776,11 @@
"IpProtocol":"tcp",
"FromPort":"22",
"ToPort":"22",
"CidrIp":"10.0.0.0/16"
"CidrIp":"10.254.0.0/16"
},
{
"IpProtocol":"tcp",
"FromPort":"80",
"ToPort":"80",
"CidrIp":"0.0.0.0/0"
},
......@@ -827,7 +916,23 @@
]
]
}
},
"BlockDeviceMappings": [
{
"DeviceName": "/dev/sda1",
"Ebs":{
"VolumeSize": 100
}
},
{
"DeviceName": "/dev/sdb",
"VirtualName": "ephemeral0"
},
{
"DeviceName": "/dev/sdc",
"VirtualName": "ephemeral1"
}
]
}
},
"AdminSecurityGroup":{
......
......@@ -29,13 +29,24 @@
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
......@@ -52,13 +63,24 @@
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
......@@ -75,13 +97,24 @@
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
......@@ -98,13 +131,24 @@
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
......@@ -121,13 +165,24 @@
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
......@@ -144,13 +199,24 @@
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
......@@ -167,13 +233,58 @@
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"c1.medium",
"c1.xlarge",
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
"XQWatcherInstanceType":{
"Description":"Xserver server EC2 instance type",
"Type":"String",
"Default":"m1.small",
"AllowedValues":[
"t1.micro",
"m1.small",
"m1.medium",
"m1.large",
"m1.xlarge",
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
......@@ -229,13 +340,24 @@
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
......@@ -252,13 +374,24 @@
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
......@@ -272,6 +405,11 @@
"Type":"Number",
"Default":"2"
},
"XQWatcherDesiredCapacity":{
"Description":"The Auto-scaling group desired capacity for the xqueue watcher hosts",
"Type":"Number",
"Default":"2"
},
"CommonClusterDesiredCapacity":{
"Description":"The Auto-scaling group desired capacity for the CommonCluster hosts",
"Type":"Number",
......@@ -309,7 +447,22 @@
"cache.m2.xlarge",
"cache.m2.2xlarge",
"cache.m2.4xlarge",
"cache.c1.xlarge"
"cache.c1.medium",
"cache.c1.xlarge",
"cache.m3.medium",
"cache.m3.large",
"cache.m3.xlarge",
"cache.m3.2xlarge",
"cache.c3.large",
"cache.c3.xlarge",
"cache.c3.2xlarge",
"cache.c3.4xlarge",
"cache.c3.8xlarge",
"cache.r3.large",
"cache.r3.xlarge",
"cache.r3.2xlarge",
"cache.r3.4xlarge",
"cache.r3.8xlarge"
],
"ConstraintDescription":"must select a valid Cache Node type."
},
......@@ -354,13 +507,29 @@
"Description":"Database instance class",
"Type":"String",
"AllowedValues":[
"db.m1.micro",
"db.t1.micro",
"db.m1.small",
"db.m1.large",
"db.m1.xlarge",
"db.m2.xlarge",
"db.m2.2xlarge",
"db.m2.4xlarge"
"db.m2.4xlarge",
"db.c1.medium",
"db.c1.xlarge",
"db.m3.medium",
"db.m3.large",
"db.m3.xlarge",
"db.m3.2xlarge",
"db.c3.large",
"db.c3.xlarge",
"db.c3.2xlarge",
"db.c3.4xlarge",
"db.c3.8xlarge",
"db.r3.large",
"db.r3.xlarge",
"db.r3.2xlarge",
"db.r3.4xlarge",
"db.r3.8xlarge"
],
"ConstraintDescription":"must select a valid database instance type."
},
......@@ -401,25 +570,39 @@
},
"Mappings":{
"AWSInstanceType2Arch":{
"t1.micro": { "Arch":"64" },
"m1.small": { "Arch":"64" },
"m1.medium": { "Arch":"64" },
"m1.large": { "Arch":"64" },
"m1.xlarge": { "Arch":"64" },
"m2.xlarge": { "Arch":"64" },
"m2.2xlarge": { "Arch":"64" },
"m2.4xlarge": { "Arch":"64" },
"m3.xlarge": { "Arch":"64" },
"m3.2xlarge": { "Arch":"64" },
"c1.medium": { "Arch":"64" },
"c1.xlarge": { "Arch":"64" },
"cg1.4xlarge": { "Arch":"64HVM" }
"t1.micro" : { "Arch" : "64" },
"m1.small" : { "Arch" : "64" },
"m1.medium" : { "Arch" : "64" },
"m1.large" : { "Arch" : "64" },
"m1.xlarge" : { "Arch" : "64" },
"m2.xlarge" : { "Arch" : "64" },
"m2.2xlarge" : { "Arch" : "64" },
"m2.4xlarge" : { "Arch" : "64" },
"cr1.8xlarge" : { "Arch" : "64" },
"cc2.8xlarge" : { "Arch" : "64" },
"c1.medium" : { "Arch" : "64" },
"c1.xlarge" : { "Arch" : "64" },
"m3.medium" : { "Arch" : "64" },
"m3.large" : { "Arch" : "64" },
"m3.xlarge" : { "Arch" : "64" },
"m3.2xlarge" : { "Arch" : "64" },
"m3.4xlarge" : { "Arch" : "64" },
"c3.large" : { "Arch" : "64" },
"c3.xlarge" : { "Arch" : "64" },
"c3.2xlarge" : { "Arch" : "64" },
"c3.4xlarge" : { "Arch" : "64" },
"c3.8xlarge" : { "Arch" : "64" },
"r3.large" : { "Arch" : "64" },
"r3.xlarge" : { "Arch" : "64" },
"r3.2xlarge" : { "Arch" : "64" },
"r3.4xlarge" : { "Arch" : "64" },
"r3.8xlarge" : { "Arch" : "64" }
},
"AWSRegionArch2AMI":{
"us-east-1": { "32":"ami-def89fb7", "64":"ami-d0f89fb9", "64HVM":"ami-b93264d0" },
"us-east-1": { "32":"ami-def89fb7", "64":"ami-d0f89fb9" },
"us-west-1": { "32":"ami-fc002cb9", "64":"ami-fe002cbb" },
"us-west-2": { "32":"ami-0ef96e3e", "64":"ami-70f96e40", "64HVM":"ami-6cad335c" },
"eu-west-1": { "32":"ami-c27b6fb6", "64":"ami-ce7b6fba", "64HVM":"ami-8c987efb" },
"us-west-2": { "32":"ami-0ef96e3e", "64":"ami-70f96e40" },
"eu-west-1": { "32":"ami-c27b6fb6", "64":"ami-ce7b6fba" },
"sa-east-1": { "32":"ami-a1da00bc", "64":"ami-a3da00be" },
"ap-southeast-1": { "32":"ami-66084734", "64":"ami-64084736" },
"ap-southeast-2": { "32":"ami-06ea7a3c", "64":"ami-04ea7a3e" },
......@@ -443,9 +626,11 @@
"Edxapp02": { "CIDR":".11.0/24" },
"XServerJail01": { "CIDR":".20.0/24" },
"XServerJail02": { "CIDR":".21.0/24" },
"CommonCluster01": { "CIDR":".46.0/24"},
"CommonCluster02": { "CIDR":".47.0/24"},
"CommonCluster03": { "CIDR":".48.0/24"},
"XQWatcherJail01": { "CIDR":".30.0/24" },
"XQWatcherJail02": { "CIDR":".31.0/24" },
"CommonCluster01": { "CIDR":".46.0/24" },
"CommonCluster02": { "CIDR":".47.0/24" },
"CommonCluster03": { "CIDR":".48.0/24" },
"Data01": { "CIDR":".50.0/24" },
"Data02": { "CIDR":".51.0/24" },
"Cache01": { "CIDR":".60.0/24" },
......@@ -457,8 +642,8 @@
"Mongo01": { "CIDR":".90.0/24" },
"Mongo02": { "CIDR":".91.0/24" },
"Mongo03": { "CIDR":".92.0/24" },
"Notifier01": { "CIDR":".100.0/24" },
"Admin": { "CIDR":".200.0/24" }
"Notifier01": { "CIDR":".100.0/24"},
"Admin": { "CIDR":".200.0/24"}
},
"MapRegionsToAvailZones":{
"us-east-1": { "AZone2":"us-east-1d", "AZone0":"us-east-1b", "AZone1":"us-east-1c" },
......@@ -932,6 +1117,102 @@
]
}
},
"XQWatcherSubnet01":{
"Type":"AWS::EC2::Subnet",
"Properties":{
"VpcId":{
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"XQWatcherJail01",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
"MapRegionsToAvailZones",
{ "Ref":"AWS::Region" },
"AZone0"
]
},
"Tags":[
{
"Key":"play",
"Value":"xqwatcher"
},
{
"Key":"Network",
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
"-",
{"Ref":"DeploymentTag"},
"-",
"internal-xqwatcher','target':'ec2'}"
]
]
}
}
]
}
},
"XQWatcherSubnet02":{
"Type":"AWS::EC2::Subnet",
"Properties":{
"VpcId":{
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"XQWatcherJail02",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
"MapRegionsToAvailZones",
{ "Ref":"AWS::Region" },
"AZone1"
]
},
"Tags":[
{
"Key":"play",
"Value":"xqwatcher"
},
{
"Key":"Network",
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
"-",
{"Ref":"DeploymentTag"},
"-",
"internal-xqwatcher','target':'ec2'}"
]
]
}
}
]
}
},
"Data01":{
"Type":"AWS::EC2::Subnet",
"Properties":{
......@@ -1855,6 +2136,28 @@
}
}
},
"PrivateSubnetRouteTableAssociationXQWatcher01":{
"Type":"AWS::EC2::SubnetRouteTableAssociation",
"Properties":{
"SubnetId":{
"Ref":"XQWatcherSubnet01"
},
"RouteTableId":{
"Ref":"PrivateRouteTable"
}
}
},
"PrivateSubnetRouteTableAssociationXQWatcher02":{
"Type":"AWS::EC2::SubnetRouteTableAssociation",
"Properties":{
"SubnetId":{
"Ref":"XQWatcherSubnet02"
},
"RouteTableId":{
"Ref":"PrivateRouteTable"
}
}
},
"PrivateSubnetRouteTableAssociationData01":{
"Type":"AWS::EC2::SubnetRouteTableAssociation",
"Properties":{
......@@ -2129,6 +2432,28 @@
}
}
},
"PrivateSubnetNetworkAclAssociationXQWatcher01":{
"Type":"AWS::EC2::SubnetNetworkAclAssociation",
"Properties":{
"SubnetId":{
"Ref":"XQWatcherSubnet01"
},
"NetworkAclId":{
"Ref":"PrivateNetworkAcl"
}
}
},
"PrivateSubnetNetworkAclAssociationXQWatcher02":{
"Type":"AWS::EC2::SubnetNetworkAclAssociation",
"Properties":{
"SubnetId":{
"Ref":"XQWatcherSubnet02"
},
"NetworkAclId":{
"Ref":"PrivateNetworkAcl"
}
}
},
"PrivateSubnetNetworkAclAssociationData01":{
"Type":"AWS::EC2::SubnetNetworkAclAssociation",
"Properties":{
......@@ -2817,6 +3142,44 @@
} ]
}
},
"XQWatcherRole": {
"Type": "AWS::IAM::Role",
"Properties": {
"AssumeRolePolicyDocument": {
"Statement": [ {
"Effect": "Allow",
"Principal": {
"Service": [ "ec2.amazonaws.com" ]
},
"Action": [ "sts:AssumeRole" ]
} ]
},
"Path": "/",
"Policies": [ {
"PolicyName": "XQWatcherBasePolicy",
"PolicyDocument": {
"Statement":[
{
"Effect":"Allow",
"Action":[
"ec2:DescribeTags"
],
"Resource":"*"
}
]
}
} ]
}
},
"XQWatcherInstanceProfile": {
"Type": "AWS::IAM::InstanceProfile",
"Properties": {
"Path": "/",
"Roles": [ {
"Ref": "XQWatcherRole"
} ]
}
},
"ForumRole": {
"Type": "AWS::IAM::Role",
"Properties": {
......@@ -4216,6 +4579,229 @@
]
}
},
"XQWatcherServer":{
"Type":"AWS::AutoScaling::LaunchConfiguration",
"Properties":{
"IamInstanceProfile":{ "Ref":"XQWatcherInstanceProfile" },
"SecurityGroups":[
{
"Ref":"XQWatcherServerSecurityGroup"
}
],
"ImageId":{
"Fn::FindInMap":[
"AWSRegionArch2AMI",
{
"Ref":"AWS::Region"
},
{
"Fn::FindInMap":[
"AWSInstanceType2Arch",
{
"Ref":"XQWatcherInstanceType"
},
"Arch"
]
}
]
},
"KeyName":{
"Ref":"KeyName"
},
"InstanceType":{
"Ref":"XQWatcherInstanceType"
},
"BlockDeviceMappings":[
{
"DeviceName":"/dev/sda1",
"Ebs":{
"VolumeSize":"100"
}
}
]
}
},
"XQWatcherServerAsGroup":{
"Type":"AWS::AutoScaling::AutoScalingGroup",
"Properties":{
"AvailabilityZones":[
{
"Fn::GetAtt":[
"XQWatcherSubnet01",
"AvailabilityZone"
]
},
{
"Fn::GetAtt":[
"XQWatcherSubnet02",
"AvailabilityZone"
]
}
],
"VPCZoneIdentifier":[
{
"Ref":"XQWatcherSubnet01"
},
{
"Ref":"XQWatcherSubnet02"
}
],
"Tags":[
{
"Key":"Name",
"Value": {"Fn::Join": ["-",[{"Ref": "EnvironmentTag"},{"Ref": "DeploymentTag"},"xqwatcher"]]},
"PropagateAtLaunch":true
},
{
"Key":"play",
"Value":"xqwatcher",
"PropagateAtLaunch":true
},
{
"Key":"services",
"Value":"xqwatcher",
"PropagateAtLaunch":true
},
{
"Key":"environment",
"Value":{
"Ref":"EnvironmentTag"
},
"PropagateAtLaunch":true
},
{
"Key":"deployment",
"Value":{
"Ref":"DeploymentTag"
},
"PropagateAtLaunch":true
}
],
"LaunchConfigurationName":{
"Ref":"XQWatcherServer"
},
"MinSize":{
"Ref":"XQWatcherDesiredCapacity"
},
"MaxSize":{
"Ref":"XQWatcherDesiredCapacity"
},
"DesiredCapacity":{
"Ref":"XQWatcherDesiredCapacity"
}
}
},
"XQWatcherServerScaleUpPolicy":{
"Type":"AWS::AutoScaling::ScalingPolicy",
"Properties":{
"AdjustmentType":"ChangeInCapacity",
"AutoScalingGroupName":{
"Ref":"XQWatcherServerAsGroup"
},
"Cooldown":"60",
"ScalingAdjustment":"1"
}
},
"XQWatcherServerScaleDownPolicy":{
"Type":"AWS::AutoScaling::ScalingPolicy",
"Properties":{
"AdjustmentType":"ChangeInCapacity",
"AutoScalingGroupName":{
"Ref":"XQWatcherServerAsGroup"
},
"Cooldown":"60",
"ScalingAdjustment":"-1"
}
},
"XQWatcherCPUAlarmHigh":{
"Type":"AWS::CloudWatch::Alarm",
"Properties":{
"AlarmDescription":"Scale-up if CPU > 90% for 10 minutes",
"MetricName":"CPUUtilization",
"Namespace":"AWS/EC2",
"Statistic":"Average",
"Period":"300",
"EvaluationPeriods":"2",
"Threshold":"90",
"AlarmActions":[
{
"Ref":"XQWatcherServerScaleUpPolicy"
}
],
"Dimensions":[
{
"Name":"AutoScalingGroupName",
"Value":{
"Ref":"XQWatcherServerAsGroup"
}
}
],
"ComparisonOperator":"GreaterThanThreshold"
}
},
"XQWatcherCPUAlarmLow":{
"Type":"AWS::CloudWatch::Alarm",
"Properties":{
"AlarmDescription":"Scale-down if CPU < 70% for 10 minutes",
"MetricName":"CPUUtilization",
"Namespace":"AWS/EC2",
"Statistic":"Average",
"Period":"300",
"EvaluationPeriods":"2",
"Threshold":"70",
"AlarmActions":[
{
"Ref":"XQWatcherServerScaleDownPolicy"
}
],
"Dimensions":[
{
"Name":"AutoScalingGroupName",
"Value":{
"Ref":"XQWatcherServerAsGroup"
}
}
],
"ComparisonOperator":"LessThanThreshold"
}
},
"XQWatcherServerSecurityGroup":{
"Type":"AWS::EC2::SecurityGroup",
"Properties":{
"GroupDescription":"Open up SSH access.",
"VpcId":{
"Ref":"EdxVPC"
},
"SecurityGroupIngress":[
{
"IpProtocol":"tcp",
"FromPort":"22",
"ToPort":"22",
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "VPC", "CIDR"]}]]}
}
],
"Tags":[
{
"Key":"play",
"Value":"xqwatcher"
},
{
"Key":"environment",
"Value":{
"Ref":"EnvironmentTag"
}
},
{
"Key":"deployment",
"Value":{
"Ref":"DeploymentTag"
}
}
]
}
},
"EdxDataSecurityGroup":{
"Type":"AWS::EC2::SecurityGroup",
"Properties":{
......
......@@ -47,10 +47,25 @@
"m2.xlarge" : { "Arch" : "64" },
"m2.2xlarge" : { "Arch" : "64" },
"m2.4xlarge" : { "Arch" : "64" },
"cr1.8xlarge" : { "Arch" : "64" },
"cc2.8xlarge" : { "Arch" : "64" },
"c1.medium" : { "Arch" : "64" },
"c1.xlarge" : { "Arch" : "64" },
"m3.medium" : { "Arch" : "64" },
"m3.large" : { "Arch" : "64" },
"m3.xlarge" : { "Arch" : "64" },
"m3.2xlarge" : { "Arch" : "64" },
"c1.medium" : { "Arch" : "64" },
"c1.xlarge" : { "Arch" : "64" }
"m3.4xlarge" : { "Arch" : "64" },
"c3.large" : { "Arch" : "64" },
"c3.xlarge" : { "Arch" : "64" },
"c3.2xlarge" : { "Arch" : "64" },
"c3.4xlarge" : { "Arch" : "64" },
"c3.8xlarge" : { "Arch" : "64" },
"r3.large" : { "Arch" : "64" },
"r3.xlarge" : { "Arch" : "64" },
"r3.2xlarge" : { "Arch" : "64" },
"r3.4xlarge" : { "Arch" : "64" },
"r3.8xlarge" : { "Arch" : "64" }
},
"AWSRegionArch2AMI" : {
......
......@@ -46,10 +46,25 @@
"m2.xlarge" : { "Arch" : "64" },
"m2.2xlarge" : { "Arch" : "64" },
"m2.4xlarge" : { "Arch" : "64" },
"cr1.8xlarge" : { "Arch" : "64" },
"cc2.8xlarge" : { "Arch" : "64" },
"c1.medium" : { "Arch" : "64" },
"c1.xlarge" : { "Arch" : "64" },
"m3.medium" : { "Arch" : "64" },
"m3.large" : { "Arch" : "64" },
"m3.xlarge" : { "Arch" : "64" },
"m3.2xlarge" : { "Arch" : "64" },
"c1.medium" : { "Arch" : "64" },
"c1.xlarge" : { "Arch" : "64" }
"m3.4xlarge" : { "Arch" : "64" },
"c3.large" : { "Arch" : "64" },
"c3.xlarge" : { "Arch" : "64" },
"c3.2xlarge" : { "Arch" : "64" },
"c3.4xlarge" : { "Arch" : "64" },
"c3.8xlarge" : { "Arch" : "64" },
"r3.large" : { "Arch" : "64" },
"r3.xlarge" : { "Arch" : "64" },
"r3.2xlarge" : { "Arch" : "64" },
"r3.4xlarge" : { "Arch" : "64" },
"r3.8xlarge" : { "Arch" : "64" }
},
"AWSRegionArch2AMI" : {
......
......@@ -17,7 +17,7 @@
DOCUMENTATION = '''
---
module: ec2
short_description: create or terminate an instance in ec2, return instanceid
short_description: create, terminate, start or stop an instance in ec2, return instanceid
description:
- Creates or terminates ec2 instances. When created optionally waits for it to be 'running'. This module has a dependency on python-boto >= 2.5
version_added: "0.9"
......@@ -25,7 +25,7 @@ options:
key_name:
description:
- key pair to use on the instance
required: true
required: false
default: null
aliases: ['keypair']
id:
......@@ -67,6 +67,13 @@ options:
required: true
default: null
aliases: []
spot_price:
version_added: "1.5"
description:
- Maximum spot price to bid. If not set, a regular on-demand instance is requested. A spot request is made with this maximum bid. When it is filled, the instance is started.
required: false
default: null
aliases: []
image:
description:
- I(emi) (or I(ami)) to use for the instance
......@@ -97,24 +104,12 @@ options:
- how long before wait gives up, in seconds
default: 300
aliases: []
ec2_url:
spot_wait_timeout:
version_added: "1.5"
description:
- Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Must be specified if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used
required: false
default: null
- how long to wait for the spot instance request to be fulfilled
default: 600
aliases: []
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
count:
description:
- number of instances to launch
......@@ -156,6 +151,13 @@ options:
required: false
default: null
aliases: []
assign_public_ip:
version_added: "1.5"
description:
- when provisioning within a VPC, assign a public IP address. Boto library must be 2.13.0+
required: false
default: null
aliases: []
private_ip:
version_added: "1.2"
description:
......@@ -173,10 +175,16 @@ options:
instance_ids:
version_added: "1.3"
description:
- list of instance ids, currently only used when state='absent'
- "list of instance ids, currently used for states: absent, running, stopped"
required: false
default: null
aliases: []
source_dest_check:
version_added: "1.6"
description:
- Enable or Disable the Source/Destination checks (for NAT instances and Virtual Routers)
required: false
default: true
state:
version_added: "1.3"
description:
......@@ -184,16 +192,36 @@ options:
required: false
default: 'present'
aliases: []
root_ebs_size:
volumes:
version_added: "1.5"
description:
- a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume.
required: false
default: null
aliases: []
ebs_optimized:
version_added: "1.6"
description:
- whether instance is using optimized EBS volumes, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html)
required: false
default: false
exact_count:
version_added: "1.5"
desription:
- size of the root volume in gigabytes
description:
- An integer value which indicates how many instances matching the 'count_tag' parameter should be running. Instances are either created or terminated based on this value.
required: false
default: null
aliases: []
count_tag:
version_added: "1.5"
description:
- Used with 'exact_count' to determine how many nodes matching a specific tag criterion should be running. This can be expressed in multiple ways and is shown in the EXAMPLES section. For instance, one can request 25 servers that are tagged with "class=webserver".
required: false
default: null
aliases: []
requirements: [ "boto" ]
author: Seth Vidal, Tim Gerla, Lester Wade
extends_documentation_fragment: aws
'''
EXAMPLES = '''
......@@ -203,7 +231,7 @@ EXAMPLES = '''
# Basic provisioning example
- local_action:
module: ec2
keypair: mykey
key_name: mykey
instance_type: c1.medium
image: emi-40603AD1
wait: yes
......@@ -213,39 +241,88 @@ EXAMPLES = '''
# Advanced example with tagging and CloudWatch
- local_action:
module: ec2
keypair: mykey
key_name: mykey
group: databases
instance_type: m1.large
image: ami-6e649707
wait: yes
wait_timeout: 500
count: 5
instance_tags: '{"db":"postgres"}'
monitoring=yes
instance_tags:
db: postgres
monitoring: yes
# Single instance with additional IOPS volume from snapshot and volume delete on termination
local_action:
module: ec2
key_name: mykey
group: webserver
instance_type: m1.large
image: ami-6e649707
wait: yes
wait_timeout: 500
volumes:
- device_name: /dev/sdb
snapshot: snap-abcdef12
device_type: io1
iops: 1000
volume_size: 100
delete_on_termination: true
monitoring: yes
# Multiple groups example
local_action:
module: ec2
keypair: mykey
key_name: mykey
group: ['databases', 'internal-services', 'sshable', 'and-so-forth']
instance_type: m1.large
image: ami-6e649707
wait: yes
wait_timeout: 500
count: 5
instance_tags: '{"db":"postgres"}'
monitoring=yes
instance_tags:
db: postgres
monitoring: yes
# Multiple instances with additional volume from snapshot
local_action:
module: ec2
key_name: mykey
group: webserver
instance_type: m1.large
image: ami-6e649707
wait: yes
wait_timeout: 500
count: 5
volumes:
- device_name: /dev/sdb
snapshot: snap-abcdef12
volume_size: 10
monitoring: yes
# VPC example
- local_action:
module: ec2
keypair: mykey
key_name: mykey
group_id: sg-1dc53f72
instance_type: m1.small
image: ami-6e649707
wait: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Spot instance example
- local_action:
module: ec2
spot_price: 0.24
spot_wait_timeout: 600
keypair: mykey
group_id: sg-1dc53f72
instance_type: m1.small
image: ami-6e649707
wait: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Launch instances, runs some tasks
# and then terminate them
......@@ -255,14 +332,14 @@ local_action:
hosts: localhost
gather_facts: False
vars:
keypair: my_keypair
key_name: my_keypair
instance_type: m1.small
security_group: my_securitygroup
image: my_ami_id
region: us-east-1
tasks:
- name: Launch instance
local_action: ec2 keypair={{ keypair }} group={{ security_group }} instance_type={{ instance_type }} image={{ image }} wait=true region={{ region }}
local_action: ec2 key_name={{ keypair }} group={{ security_group }} instance_type={{ instance_type }} image={{ image }} wait=true region={{ region }}
register: ec2
- name: Add new instance to host group
local_action: add_host hostname={{ item.public_ip }} groupname=launched
......@@ -287,29 +364,186 @@ local_action:
local_action:
module: ec2
state: 'absent'
instance_ids: {{ec2.instance_ids}}
instance_ids: '{{ ec2.instance_ids }}'
# Start a few existing instances, run some tasks
# and stop the instances
- name: Start sandbox instances
hosts: localhost
gather_facts: false
connection: local
vars:
instance_ids:
- 'i-xxxxxx'
- 'i-xxxxxx'
- 'i-xxxxxx'
region: us-east-1
tasks:
- name: Start the sandbox instances
local_action:
module: ec2
instance_ids: '{{ instance_ids }}'
region: '{{ region }}'
state: running
wait: True
roles:
- do_neat_stuff
- do_more_neat_stuff
- name: Stop sandbox instances
hosts: localhost
gather_facts: false
connection: local
vars:
instance_ids:
- 'i-xxxxxx'
- 'i-xxxxxx'
- 'i-xxxxxx'
region: us-east-1
tasks:
- name: Stop the sandbox instances
local_action:
module: ec2
instance_ids: '{{ instance_ids }}'
region: '{{ region }}'
state: stopped
wait: True
#
# Enforce that 5 instances with a tag "foo" are running
#
- local_action:
module: ec2
key_name: mykey
instance_type: c1.medium
image: emi-40603AD1
wait: yes
group: webserver
instance_tags:
foo: bar
exact_count: 5
count_tag: foo
#
# Enforce that 5 instances named "database" with a "dbtype" of "postgres" are running
#
- local_action:
module: ec2
key_name: mykey
instance_type: c1.medium
image: emi-40603AD1
wait: yes
group: webserver
instance_tags:
Name: database
dbtype: postgres
exact_count: 5
count_tag:
Name: database
dbtype: postgres
#
# count_tag complex argument examples
#
# instances with tag foo
count_tag:
foo:
# instances with tag foo=bar
count_tag:
foo: bar
# instances with tags foo=bar & baz
count_tag:
foo: bar
baz:
# instances with tags foo & bar & baz=bang
count_tag:
- foo
- bar
- baz: bang
'''
import sys
import time
AWS_REGIONS = ['ap-northeast-1',
'ap-southeast-1',
'ap-southeast-2',
'eu-west-1',
'sa-east-1',
'us-east-1',
'us-west-1',
'us-west-2']
from ast import literal_eval
try:
import boto.ec2
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
from boto.exception import EC2ResponseError
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def find_running_instances_by_count_tag(module, ec2, count_tag):
# get reservations for instances that match tag(s) and are running
reservations = get_reservations(module, ec2, tags=count_tag, state="running")
instances = []
for res in reservations:
if hasattr(res, 'instances'):
for inst in res.instances:
instances.append(inst)
return reservations, instances
def _set_none_to_blank(dictionary):
result = dictionary
for k in result.iterkeys():
if type(result[k]) == dict:
result[k] = _set_none_to_blank(result[k])
elif not result[k]:
result[k] = ""
return result
def get_reservations(module, ec2, tags=None, state=None):
# TODO: filters do not work with tags that have underscores
filters = dict()
if tags is not None:
if type(tags) is str:
try:
tags = literal_eval(tags)
except:
pass
# if string, we only care that a tag of that name exists
if type(tags) is str:
filters.update({"tag-key": tags})
# if list, append each item to filters
if type(tags) is list:
for x in tags:
if type(x) is dict:
x = _set_none_to_blank(x)
filters.update(dict(("tag:"+tn, tv) for (tn,tv) in x.iteritems()))
else:
filters.update({"tag-key": x})
# if dict, add the key and value to the filter
if type(tags) is dict:
tags = _set_none_to_blank(tags)
filters.update(dict(("tag:"+tn, tv) for (tn,tv) in tags.iteritems()))
if state:
# http://stackoverflow.com/questions/437511/what-are-the-valid-instancestates-for-the-amazon-ec2-api
filters.update({'instance-state-name': state})
results = ec2.get_all_instances(filters=filters)
return results
def get_instance_info(inst):
"""
......@@ -328,6 +562,7 @@ def get_instance_info(inst):
'image_id': inst.image_id,
'key_name': inst.key_name,
'placement': inst.placement,
'region': inst.placement[:-1],
'kernel': inst.kernel,
'ramdisk': inst.ramdisk,
'launch_time': inst.launch_time,
......@@ -335,7 +570,8 @@ def get_instance_info(inst):
'root_device_type': inst.root_device_type,
'root_device_name': inst.root_device_name,
'state': inst.state,
'hypervisor': inst.hypervisor}
'hypervisor': inst.hypervisor,
'ebs_optimized': inst.ebs_optimized}
try:
instance_info['virtualization_type'] = getattr(inst,'virtualization_type')
except AttributeError:
......@@ -343,6 +579,24 @@ def get_instance_info(inst):
return instance_info
def boto_supports_associate_public_ip_address(ec2):
"""
Check if Boto library has associate_public_ip_address in the NetworkInterfaceSpecification
class. Added in Boto 2.13.0
ec2: authenticated ec2 connection object
Returns:
True if Boto library accepts associate_public_ip_address argument, else false
"""
try:
network_interface = boto.ec2.networkinterface.NetworkInterfaceSpecification()
getattr(network_interface, "associate_public_ip_address")
return True
except AttributeError:
return False
def boto_supports_profile_name_arg(ec2):
"""
Check if Boto library has instance_profile_name argument. instance_profile_name has been added in Boto 2.5.0
......@@ -355,8 +609,94 @@ def boto_supports_profile_name_arg(ec2):
run_instances_method = getattr(ec2, 'run_instances')
return 'instance_profile_name' in run_instances_method.func_code.co_varnames
def create_block_device(module, ec2, volume):
# Not aware of a way to determine this programmatically
# http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/
MAX_IOPS_TO_SIZE_RATIO = 30
if 'snapshot' not in volume and 'ephemeral' not in volume:
if 'volume_size' not in volume:
module.fail_json(msg = 'Size must be specified when creating a new volume or modifying the root volume')
if 'snapshot' in volume:
if 'device_type' in volume and volume.get('device_type') == 'io1' and 'iops' not in volume:
module.fail_json(msg = 'io1 volumes must have an iops value set')
if 'iops' in volume:
snapshot = ec2.get_all_snapshots(snapshot_ids=[volume['snapshot']])[0]
size = volume.get('volume_size', snapshot.volume_size)
if int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * size:
module.fail_json(msg = 'IOPS must be at most %d times greater than size' % MAX_IOPS_TO_SIZE_RATIO)
if 'ephemeral' in volume:
if 'snapshot' in volume:
module.fail_json(msg = 'Cannot set both ephemeral and snapshot')
return BlockDeviceType(snapshot_id=volume.get('snapshot'),
ephemeral_name=volume.get('ephemeral'),
size=volume.get('volume_size'),
volume_type=volume.get('device_type'),
delete_on_termination=volume.get('delete_on_termination', False),
iops=volume.get('iops'))
def boto_supports_param_in_spot_request(ec2, param):
"""
Check if Boto library has a <param> in its request_spot_instances() method. For example, the placement_group parameter wasn't added until 2.3.0.
ec2: authenticated ec2 connection object
Returns:
True if boto library has the named param as an argument on the request_spot_instances method, else False
"""
method = getattr(ec2, 'request_spot_instances')
return param in method.func_code.co_varnames
def enforce_count(module, ec2):
exact_count = module.params.get('exact_count')
count_tag = module.params.get('count_tag')
reservations, instances = find_running_instances_by_count_tag(module, ec2, count_tag)
changed = None
checkmode = False
instance_dict_array = None
changed_instance_ids = None
if len(instances) == exact_count:
changed = False
elif len(instances) < exact_count:
changed = True
to_create = exact_count - len(instances)
if not checkmode:
(instance_dict_array, changed_instance_ids, changed) \
= create_instances(module, ec2, override_count=to_create)
for inst in instance_dict_array:
instances.append(inst)
elif len(instances) > exact_count:
changed = True
to_remove = len(instances) - exact_count
if not checkmode:
all_instance_ids = sorted([ x.id for x in instances ])
remove_ids = all_instance_ids[0:to_remove]
instances = [ x for x in instances if x.id not in remove_ids]
(changed, instance_dict_array, changed_instance_ids) \
= terminate_instances(module, ec2, remove_ids)
terminated_list = []
for inst in instance_dict_array:
inst['state'] = "terminated"
terminated_list.append(inst)
instance_dict_array = terminated_list
# ensure all instances are dictionaries
all_instances = []
for inst in instances:
if type(inst) is not dict:
inst = get_instance_info(inst)
all_instances.append(inst)
return (all_instances, instance_dict_array, changed_instance_ids, changed)
def create_instances(module, ec2):
def create_instances(module, ec2, override_count=None):
"""
Creates new instances
......@@ -374,29 +714,30 @@ def create_instances(module, ec2):
group_id = module.params.get('group_id')
zone = module.params.get('zone')
instance_type = module.params.get('instance_type')
spot_price = module.params.get('spot_price')
image = module.params.get('image')
if override_count:
count = override_count
else:
count = module.params.get('count')
monitoring = module.params.get('monitoring')
kernel = module.params.get('kernel')
ramdisk = module.params.get('ramdisk')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
placement_group = module.params.get('placement_group')
user_data = module.params.get('user_data')
instance_tags = module.params.get('instance_tags')
vpc_subnet_id = module.params.get('vpc_subnet_id')
assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
private_ip = module.params.get('private_ip')
instance_profile_name = module.params.get('instance_profile_name')
root_ebs_size = module.params.get('root_ebs_size')
if root_ebs_size:
dev_sda1 = boto.ec2.blockdevicemapping.EBSBlockDeviceType()
dev_sda1.size = root_ebs_size
bdm = boto.ec2.blockdevicemapping.BlockDeviceMapping()
bdm['/dev/sda1'] = dev_sda1
else:
bdm = None
volumes = module.params.get('volumes')
ebs_optimized = module.params.get('ebs_optimized')
exact_count = module.params.get('exact_count')
count_tag = module.params.get('count_tag')
source_dest_check = module.boolean(module.params.get('source_dest_check'))
# group_id and group_name are exclusive of each other
if group_id and group_name:
......@@ -447,19 +788,15 @@ def create_instances(module, ec2):
try:
params = {'image_id': image,
'key_name': key_name,
'client_token': id,
'min_count': count_remaining,
'max_count': count_remaining,
'monitoring_enabled': monitoring,
'placement': zone,
'placement_group': placement_group,
'instance_type': instance_type,
'kernel_id': kernel,
'ramdisk_id': ramdisk,
'subnet_id': vpc_subnet_id,
'private_ip_address': private_ip,
'user_data': user_data,
'block_device_map': bdm}
'user_data': user_data}
if ebs_optimized:
params['ebs_optimized'] = ebs_optimized
if boto_supports_profile_name_arg(ec2):
params['instance_profile_name'] = instance_profile_name
......@@ -468,19 +805,70 @@ def create_instances(module, ec2):
module.fail_json(
msg="instance_profile_name parameter requires Boto version 2.5.0 or higher")
if assign_public_ip:
if not boto_supports_associate_public_ip_address(ec2):
module.fail_json(
msg="assign_public_ip parameter requires Boto version 2.13.0 or higher.")
elif not vpc_subnet_id:
module.fail_json(
msg="assign_public_ip only available with vpc_subnet_id")
else:
if private_ip:
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
subnet_id=vpc_subnet_id,
private_ip_address=private_ip,
groups=group_id,
associate_public_ip_address=assign_public_ip)
else:
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
subnet_id=vpc_subnet_id,
groups=group_id,
associate_public_ip_address=assign_public_ip)
interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
params['network_interfaces'] = interfaces
else:
params['subnet_id'] = vpc_subnet_id
if vpc_subnet_id:
params['security_group_ids'] = group_id
else:
params['security_groups'] = group_name
res = ec2.run_instances(**params)
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
if volumes:
bdm = BlockDeviceMapping()
for volume in volumes:
if 'device_name' not in volume:
module.fail_json(msg = 'Device name must be set for volume')
# Minimum volume size is 1GB. We'll use volume size explicitly set to 0
# to be a signal not to create this volume
if 'volume_size' not in volume or int(volume['volume_size']) > 0:
bdm[volume['device_name']] = create_block_device(module, ec2, volume)
params['block_device_map'] = bdm
# check to see if we're using spot pricing first before starting instances
if not spot_price:
if assign_public_ip and private_ip:
params.update(dict(
min_count = count_remaining,
max_count = count_remaining,
client_token = id,
placement_group = placement_group,
))
else:
params.update(dict(
min_count = count_remaining,
max_count = count_remaining,
client_token = id,
placement_group = placement_group,
private_ip_address = private_ip,
))
res = ec2.run_instances(**params)
instids = [ i.id for i in res.instances ]
while True:
try:
res.connection.get_all_instances(instids)
ec2.get_all_instances(instids)
break
except boto.exception.EC2ResponseError as e:
if "<Code>InvalidInstanceID.NotFound</Code>" in str(e):
......@@ -488,23 +876,58 @@ def create_instances(module, ec2):
continue
else:
module.fail_json(msg = str(e))
else:
if private_ip:
module.fail_json(
msg='private_ip only available with on-demand (non-spot) instances')
if boto_supports_param_in_spot_request(ec2, 'placement_group'):
params['placement_group'] = placement_group
elif placement_group :
module.fail_json(
msg="placement_group parameter requires Boto version 2.3.0 or higher.")
params.update(dict(
count = count_remaining,
))
res = ec2.request_spot_instances(spot_price, **params)
# Now we have to do the intermediate waiting
if wait:
spot_req_inst_ids = dict()
spot_wait_timeout = time.time() + spot_wait_timeout
while spot_wait_timeout > time.time():
reqs = ec2.get_all_spot_instance_requests()
for sirb in res:
if sirb.id in spot_req_inst_ids:
continue
for sir in reqs:
if sir.id == sirb.id and sir.instance_id is not None:
spot_req_inst_ids[sirb.id] = sir.instance_id
if len(spot_req_inst_ids) < count:
time.sleep(5)
else:
break
if spot_wait_timeout <= time.time():
module.fail_json(msg = "wait for spot requests timeout on %s" % time.asctime())
instids = spot_req_inst_ids.values()
except boto.exception.BotoServerError, e:
module.fail_json(msg = "Instance creation failed => %s: %s" % (e.error_code, e.error_message))
if instance_tags:
try:
ec2.create_tags(instids, instance_tags)
except boto.exception.EC2ResponseError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
except boto.exception.EC2ResponseError, e:
module.fail_json(msg = "Instance tagging failed => %s: %s" % (e.error_code, e.error_message))
# wait here until the instances are up
this_res = []
num_running = 0
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and num_running < len(instids):
res_list = res.connection.get_all_instances(instids)
if len(res_list) > 0:
this_res = res_list[0]
num_running = len([ i for i in this_res.instances if i.state=='running' ])
else:
res_list = ec2.get_all_instances(instids)
num_running = 0
for res in res_list:
num_running += len([ i for i in res.instances if i.state=='running' ])
if len(res_list) <= 0:
# got a bad response of some sort, possibly due to
# stale/cached data. Wait a second and then try again
time.sleep(1)
......@@ -518,8 +941,14 @@ def create_instances(module, ec2):
# waiting took too long
module.fail_json(msg = "wait for instances running timeout on %s" % time.asctime())
for inst in this_res.instances:
running_instances.append(inst)
# We do this after the loop ends so that we end up with one list
for res in res_list:
running_instances.extend(res.instances)
# Enabled by default by Amazon
if not source_dest_check:
for inst in res.instances:
inst.modify_attribute('sourceDestCheck', False)
instance_dict_array = []
created_instance_ids = []
......@@ -561,12 +990,12 @@ def terminate_instances(module, ec2, instance_ids):
terminated_instance_ids = []
for res in ec2.get_all_instances(instance_ids):
for inst in res.instances:
if inst.state == 'running':
if inst.state == 'running' or inst.state == 'stopped':
terminated_instance_ids.append(inst.id)
instance_dict_array.append(get_instance_info(inst))
try:
ec2.terminate_instances([inst.id])
except EC2ResponseError as e:
except EC2ResponseError, e:
module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e))
changed = True
......@@ -596,73 +1025,143 @@ def terminate_instances(module, ec2, instance_ids):
return (changed, instance_dict_array, terminated_instance_ids)
def startstop_instances(module, ec2, instance_ids, state):
"""
Starts or stops a list of existing instances
module: Ansible module object
ec2: authenticated ec2 connection object
instance_ids: The list of instances to start in the form of
[ {id: <inst-id>}, ..]
state: Intended state ("running" or "stopped")
Returns a dictionary of instance information
about the instances started/stopped.
If the instance was not able to change state,
"changed" will be set to False.
"""
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
changed = False
instance_dict_array = []
if not isinstance(instance_ids, list) or len(instance_ids) < 1:
module.fail_json(msg='instance_ids should be a list of instances, aborting')
# Check that our instances are not in the state we want to take them to
# and change them to our desired state
running_instances_array = []
for res in ec2.get_all_instances(instance_ids):
for inst in res.instances:
if inst.state != state:
instance_dict_array.append(get_instance_info(inst))
try:
if state == 'running':
inst.start()
else:
inst.stop()
except EC2ResponseError, e:
module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
changed = True
## Wait for all the instances to finish starting or stopping
wait_timeout = time.time() + wait_timeout
while wait and wait_timeout > time.time():
matched_instances = []
for res in ec2.get_all_instances(instance_ids):
for i in res.instances:
if i.state == state:
matched_instances.append(i)
if len(matched_instances) < len(instance_ids):
time.sleep(5)
else:
break
if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "wait for instances running timeout on %s" % time.asctime())
return (changed, instance_dict_array, instance_ids)
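# Illustrative sketch (instance id is a placeholder): driving the new
# running/stopped states from a playbook task.
#
#   - ec2:
#       instance_ids: ['i-0123abcd']
#       state: stopped
#       wait: yes
#       wait_timeout: 300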
def main():
module = AnsibleModule(
argument_spec = dict(
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
key_name = dict(aliases = ['keypair']),
id = dict(),
group = dict(type='list'),
group_id = dict(type='list'),
region = dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS),
zone = dict(aliases=['aws_zone', 'ec2_zone']),
instance_type = dict(aliases=['type']),
spot_price = dict(),
image = dict(),
kernel = dict(),
count = dict(default='1'),
count = dict(type='int', default='1'),
monitoring = dict(type='bool', default=False),
ramdisk = dict(),
wait = dict(type='bool', default=False),
wait_timeout = dict(default=300),
ec2_url = dict(),
ec2_secret_key = dict(aliases=['aws_secret_key', 'secret_key'], no_log=True),
ec2_access_key = dict(aliases=['aws_access_key', 'access_key']),
spot_wait_timeout = dict(default=600),
placement_group = dict(),
user_data = dict(),
instance_tags = dict(type='dict'),
vpc_subnet_id = dict(),
assign_public_ip = dict(type='bool', default=False),
private_ip = dict(),
instance_profile_name = dict(),
instance_ids = dict(type='list'),
source_dest_check = dict(type='bool', default=True),
state = dict(default='present'),
root_ebs_size = dict(default=None),
exact_count = dict(type='int', default=None),
count_tag = dict(),
volumes = dict(type='list'),
ebs_optimized = dict(),
)
)
# def get_ec2_creds(module):
# return ec2_url, ec2_access_key, ec2_secret_key, region
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive = [
['exact_count', 'count'],
['exact_count', 'state'],
['exact_count', 'instance_ids']
],
)
# If we have a region specified, connect to its endpoint.
if region:
try:
ec2 = boto.ec2.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg = str(e))
# If we specified an ec2_url then try connecting to it
elif ec2_url:
try:
ec2 = boto.connect_ec2_endpoint(ec2_url, aws_access_key, aws_secret_key)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg = str(e))
else:
module.fail_json(msg="Either region or ec2_url must be specified")
ec2 = ec2_connect(module)
tagged_instances = []
if module.params.get('state') == 'absent':
state = module.params.get('state')
if state == 'absent':
instance_ids = module.params.get('instance_ids')
if not isinstance(instance_ids, list):
module.fail_json(msg='termination_list needs to be a list of instances to terminate')
(changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids)
elif module.params.get('state') == 'present':
elif state in ('running', 'stopped'):
instance_ids = module.params.get('instance_ids')
if not isinstance(instance_ids, list):
module.fail_json(msg='running list needs to be a list of instances to run: %s' % instance_ids)
(changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state)
elif state == 'present':
# Changed is always set to true when provisioning new instances
if not module.params.get('key_name'):
module.fail_json(msg='key_name parameter is required for new instance')
if not module.params.get('image'):
module.fail_json(msg='image parameter is required for new instance')
if module.params.get('exact_count') is None:
(instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2)
else:
(tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2)
module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array)
module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array, tagged_instances=tagged_instances)
# import module snippets
from ansible.module_utils.basic import *
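# Illustrative sketch (AMI id, key and tag values are placeholders):
# exercising the exact_count/count_tag options added above. Note that
# exact_count is mutually exclusive with count, state and instance_ids.
#
#   - ec2:
#       key_name: mykey
#       image: ami-123456
#       instance_type: m1.small
#       exact_count: 4
#       count_tag:
#         Name: web
#       wait: yes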
......
import os
import prettytable
import hipchat
import time
import random
from ansible import utils
class CallbackModule(object):
"""Send status updates to a HipChat channel during playbook execution.
This plugin makes use of the following environment variables:
HIPCHAT_TOKEN (required): HipChat API token
HIPCHAT_ROOM (optional): HipChat room to post in. Default: ansible
HIPCHAT_FROM (optional): Name to post as. Default: ansible
HIPCHAT_NOTIFY (optional): Add notify flag to important messages ("true" or "false"). Default: true
HIPCHAT_MSG_PREFIX (optional): Prefix to add to all hipchat messages
HIPCHAT_MSG_COLOR (optional): Color for hipchat messages
HIPCHAT_CONDENSED (optional): Condense the task summary output
Requires:
prettytable
"""
def __init__(self):
if 'HIPCHAT_TOKEN' in os.environ:
self.start_time = time.time()
self.task_report = []
self.last_task = None
self.last_task_changed = False
self.last_task_count = 0
self.last_task_delta = 0
self.last_task_start = time.time()
# HIPCHAT_CONDENSED arrives as a string; compare against 'true' rather than the boolean True
self.condensed_task_report = (os.getenv('HIPCHAT_CONDENSED', 'true').lower() == 'true')
self.room = os.getenv('HIPCHAT_ROOM', 'ansible')
self.from_name = os.getenv('HIPCHAT_FROM', 'ansible')
self.allow_notify = (os.getenv('HIPCHAT_NOTIFY') != 'false')
try:
self.hipchat_conn = hipchat.HipChat(token=os.getenv('HIPCHAT_TOKEN'))
except Exception as e:
utils.warning("Unable to connect to hipchat: {}".format(e))
self.hipchat_msg_prefix = os.getenv('HIPCHAT_MSG_PREFIX', '')
self.hipchat_msg_color = os.getenv('HIPCHAT_MSG_COLOR', '')
self.printed_playbook = False
self.playbook_name = None
self.enabled = True
else:
self.enabled = False
def _send_hipchat(self, message, room=None, from_name=None, color=None, message_format='text'):
if not room:
room = self.room
if not from_name:
from_name = self.from_name
if not color:
color = self.hipchat_msg_color
try:
self.hipchat_conn.message_room(room, from_name, message, color=color, message_format=message_format)
except Exception as e:
utils.warning("Could not submit message to hipchat: {}".format(e))
def _flush_last_task(self):
if self.last_task:
delta = time.time() - self.last_task_start
self.task_report.append(dict(
changed=self.last_task_changed,
count=self.last_task_count,
delta="{:0>.1f}".format(self.last_task_delta),
task=self.last_task))
self.last_task_count = 0
self.last_task_changed = False
self.last_task = None
self.last_task_delta = 0
def _process_message(self, msg, msg_type='STATUS'):
if msg_type == 'OK' and self.last_task:
if msg.get('changed', True):
self.last_task_changed = True
if msg.get('delta', False):
(hour, minute, sec) = msg['delta'].split(':')
# delta is H:MM:SS; an hour is 3600 seconds, not 1200
total = float(hour) * 3600 + float(minute) * 60 + float(sec)
self.last_task_delta += total
self.last_task_count += 1
else:
self._flush_last_task()
if msg_type == 'TASK_START':
self.last_task = msg
self.last_task_start = time.time()
elif msg_type == 'FAILED':
self.last_task_start = time.time()
if 'msg' in msg:
self._send_hipchat('/code {}: The ansible run returned the following error:\n\n {}'.format(
self.hipchat_msg_prefix, msg['msg']), color='red', message_format='text')
else:
# move forward the last task start time
self.last_task_start = time.time()
def on_any(self, *args, **kwargs):
pass
def runner_on_failed(self, host, res, ignore_errors=False):
if self.enabled:
self._process_message(res, 'FAILED')
def runner_on_ok(self, host, res):
if self.enabled:
# don't send the setup results
if res['invocation']['module_name'] != "setup":
self._process_message(res, 'OK')
def runner_on_error(self, host, msg):
if self.enabled:
self._process_message(msg, 'ERROR')
def runner_on_skipped(self, host, item=None):
if self.enabled:
self._process_message(item, 'SKIPPED')
def runner_on_unreachable(self, host, res):
pass
def runner_on_no_hosts(self):
pass
def runner_on_async_poll(self, host, res, jid, clock):
if self.enabled:
self._process_message(res, 'ASYNC_POLL')
def runner_on_async_ok(self, host, res, jid):
if self.enabled:
self._process_message(res, 'ASYNC_OK')
def runner_on_async_failed(self, host, res, jid):
if self.enabled:
self._process_message(res, 'ASYNC_FAILED')
def playbook_on_start(self):
pass
def playbook_on_notify(self, host, handler):
pass
def playbook_on_no_hosts_matched(self):
pass
def playbook_on_no_hosts_remaining(self):
pass
def playbook_on_task_start(self, name, is_conditional):
if self.enabled:
self._process_message(name, 'TASK_START')
def playbook_on_vars_prompt(self, varname, private=True, prompt=None,
encrypt=None, confirm=False, salt_size=None,
salt=None, default=None):
pass
def playbook_on_setup(self):
pass
def playbook_on_import_for_host(self, host, imported_file):
pass
def playbook_on_not_import_for_host(self, host, missing_file):
pass
def playbook_on_play_start(self, pattern):
if self.enabled:
"""Display Playbook and play start messages"""
self.start_time = time.time()
self.playbook_name, _ = os.path.splitext(os.path.basename(self.play.playbook.filename))
host_list = self.play.playbook.inventory.host_list
inventory = os.path.basename(os.path.realpath(host_list))
subset = self.play.playbook.inventory._subset
msg = "<b>{description}</b>: Starting ansible run for play <b><i>{play}</i></b>".format(description=self.hipchat_msg_prefix, play=self.playbook_name)
if self.play.playbook.only_tags and 'all' not in self.play.playbook.only_tags:
msg = msg + " with tags <b><i>{}</i></b>".format(','.join(self.play.playbook.only_tags))
if subset:
msg = msg + " on hosts <b><i>{}</i></b>".format(','.join(subset))
self._send_hipchat(msg, message_format='html')
def playbook_on_stats(self, stats):
if self.enabled:
self._flush_last_task()
delta = time.time() - self.start_time
self.start_time = time.time()
"""Display info about playbook statistics"""
hosts = sorted(stats.processed.keys())
task_column = '{} - Task'.format(self.hipchat_msg_prefix)
task_summary = prettytable.PrettyTable([task_column, 'Time', 'Count', 'Changed'])
task_summary.align[task_column] = "l"
task_summary.align['Time'] = "r"
task_summary.align['Count'] = "r"
task_summary.align['Changed'] = "r"
for task in self.task_report:
if self.condensed_task_report:
# for the condensed task report skip all tasks
# that are not marked as changed and that have
# a time delta less than 1
if not task['changed'] and float(task['delta']) < 1:
continue
task_summary.add_row([task['task'], task['delta'], str(task['count']), str(task['changed'])])
summary_table = prettytable.PrettyTable(['Ok', 'Changed', 'Unreachable', 'Failures'])
self._send_hipchat("/code " + str(task_summary) )
summary_all_host_output = []
for host in hosts:
# summarize per host without rebinding the stats object we are iterating over
host_summary = stats.summarize(host)
summary_output = "<b>{}</b>: <i>{}</i> - ".format(self.hipchat_msg_prefix, host)
for summary_item in ['ok', 'changed', 'unreachable', 'failures']:
if host_summary[summary_item] != 0:
summary_output += "<b>{}</b> - {} ".format(summary_item, host_summary[summary_item])
summary_all_host_output.append(summary_output)
self._send_hipchat("<br />".join(summary_all_host_output), message_format='html')
msg = "<b>{description}</b>: Finished Ansible run for <b><i>{play}</i> in {min:02} minutes, {sec:02} seconds</b><br /><br />".format(
description=self.hipchat_msg_prefix,
play=self.playbook_name,
min=int(delta / 60),
sec=int(delta % 60))
self._send_hipchat(msg, message_format='html')
......@@ -128,5 +128,8 @@ class CallbackModule(object):
if len(payload[msg_type][output]) > 1000:
payload[msg_type][output] = "(clipping) ... " \
+ payload[msg_type][output][-1000:]
if 'stdout_lines' in payload[msg_type]:
# only keep the last 20 or so lines to avoid payload size errors
if len(payload[msg_type]['stdout_lines']) > 20:
payload[msg_type]['stdout_lines'] = ['(clipping) ... '] + payload[msg_type]['stdout_lines'][-20:]
self.sqs.send_message(self.queue, json.dumps(payload))
......@@ -11,7 +11,7 @@
# AWS regions to make calls to. Set this to 'all' to make requests to all regions
# in AWS and merge the results together. Alternatively, set this to a comma
# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2'
regions = all
regions = us-east-1
regions_exclude = us-gov-west-1
# When generating inventory, Ansible needs to know how to address a server.
......
......@@ -217,7 +217,14 @@ class Ec2Inventory(object):
config.get('ec2', 'route53_excluded_zones', '').split(','))
# Cache related
if 'EC2_CACHE_PATH' in os.environ:
cache_path = os.environ['EC2_CACHE_PATH']
elif self.args.cache_path:
cache_path = self.args.cache_path
else:
cache_path = config.get('ec2', 'cache_path')
if not os.path.exists(cache_path):
os.makedirs(cache_path)
self.cache_path_cache = cache_path + "/ansible-ec2.cache"
self.cache_path_tags = cache_path + "/ansible-ec2.tags.cache"
self.cache_path_index = cache_path + "/ansible-ec2.index"
......@@ -241,6 +248,10 @@ class Ec2Inventory(object):
default_inifile = os.environ.get("ANSIBLE_EC2_INI", os.path.dirname(os.path.realpath(__file__))+'/ec2.ini')
parser.add_argument('--inifile', dest='inifile', help='Path to the ini file to use', default=default_inifile)
parser.add_argument(
'--cache-path',
help='Override the cache path set in ini file',
required=False)
self.args = parser.parse_args()
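# Illustrative usage (paths are placeholders): the cache location is resolved
# from EC2_CACHE_PATH first, then --cache-path, then the ini file, e.g.
#
#   EC2_CACHE_PATH=/tmp/ec2-cache ./ec2.py --list
#   ./ec2.py --cache-path /tmp/ec2-cache --list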
......
# Configure an admin instance with jenkins and asgard.
- name: Configure instance(s)
hosts: all
sudo: True
gather_facts: True
roles:
- alton
- name: Deploy Analytics API
hosts: all
sudo: True
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- role: nginx
nginx_sites:
- analytics-api
- aws
- analytics-api
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
......@@ -2,16 +2,12 @@
hosts: all
sudo: True
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- aws
- certs
- role: datadog
when: ENABLE_DATADOG
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
when: COMMON_ENABLE_NEWRELIC
......@@ -2,15 +2,11 @@
hosts: all
sudo: True
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- common
- role: datadog
when: ENABLE_DATADOG
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
when: COMMON_ENABLE_NEWRELIC
......@@ -4,9 +4,26 @@
sudo: True
serial: 1
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
# By default take instances in and out of the elb(s) they
# are attached to
# To skip elb operations use "-e elb_pre_post=false"
elb_pre_post: true
# Number of instances to operate on at a time
serial_count: 1
serial: "{{ serial_count }}"
pre_tasks:
- action: ec2_facts
when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
sudo: False
when: elb_pre_post
roles:
- aws
- role: nginx
......@@ -14,17 +31,29 @@
- xqueue
- role: xqueue
- role: datadog
when: ENABLE_DATADOG
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
when: COMMON_ENABLE_NEWRELIC
- oraclejdk
- elasticsearch
- rabbitmq
- datadog
- splunkforwarder
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
with_items: ec2_elbs
sudo: False
when: elb_pre_post
#
# In order to reconfigure the host resolution we are issuing a
# reboot.
......
- name: connect a sandbox to production data
hosts: all
gather_facts: False
sudo: True
tasks:
- name: Switch the mongo db to use ephemeral
file: >
name=/mnt/mongodb
state=directory
owner=mongodb
group=mongodb
tags: update_mongo_data
- name: update the mongo config to use the new mongo dir
shell: >
sed -i 's#^dbpath=.*#dbpath=/mnt/mongodb#' /etc/mongodb.conf
tags: update_mongo_data
- name: restart mongodb
service: >
name=mongodb
state=restarted
tags: update_mongo_data
- name: grab the most recent backup from s3 for forums
shell: >
/edx/bin/s3cmd ls s3://edx-mongohq/mongohq_backups/ | grep comment | sort | tail -1 | awk '{ print $4 }'
register: s3cmd_out_forum
tags: update_mongo_data
- name: grab the most recent backup from s3 for forums
shell: >
/edx/bin/s3cmd get {{ s3cmd_out_forum.stdout }} --skip-existing
chdir=/mnt
tags: update_mongo_data
when: s3cmd_out_forum.stdout is defined
- name: untar the s3 backup
shell: >
tar zxf {{ s3cmd_out_forum.stdout|basename }}
chdir=/mnt
when: s3cmd_out_forum.stdout is defined
tags: update_mongo_data
- name: grab the most recent backup from s3 for prod-edx
shell: >
/edx/bin/s3cmd ls s3://edx-mongohq/mongohq_backups/ | grep prod-edx | sort | tail -1 | awk '{ print $4 }'
register: s3cmd_out_modulestore
tags: update_mongo_data
- name: grab the most recent backup from s3 for prod-edx
shell: >
/edx/bin/s3cmd get {{ s3cmd_out_modulestore.stdout }} --skip-existing
chdir=/mnt
tags: update_mongo_data
when: s3cmd_out_modulestore.stdout is defined
- name: untar the s3 backup
shell: >
tar zxf {{ s3cmd_out_modulestore.stdout|basename }}
chdir=/mnt
tags: update_mongo_data
when: s3cmd_out_modulestore.stdout is defined
- name: Restore the mongo data for the forums
shell: >
mongorestore --drop -d cs_comments_service /mnt/comments-prod
tags: update_mongo_data
- name: Restore the mongo data for the modulestore
shell: >
mongorestore --drop -d edxapp /mnt/prod-edx
tags: update_mongo_data
# recreate users after the restore
- name: create mongodb users
mongodb_user: >
database={{ item.database }}
name={{ item.user }}
password={{ item.password }}
state=present
with_items:
- user: cs_comments_service
password: password
database: cs_comments_service
- user: edxapp
password: password
database: edxapp
# WARNING - calling lineinfile on a symlink
# will convert the symlink to a file!
# don't use /edx/etc/server-vars.yml here
#
# What we are doing here is updating the sandbox
# server-vars config file so that when update
# is called it will use the new MYSQL connection
# info.
- name: Update RDS to point to the sandbox clone
lineinfile: >
dest=/edx/app/edx_ansible/server-vars.yml
line="{{ item }}"
with_items:
- "EDXAPP_MYSQL_HOST: {{ EDXAPP_MYSQL_HOST }}"
- "EDXAPP_MYSQL_DB_NAME: {{ EDXAPP_MYSQL_DB_NAME }}"
- "EDXAPP_MYSQL_USER: {{ EDXAPP_MYSQL_USER }}"
- "EDXAPP_MYSQL_PASSWORD: {{ EDXAPP_MYSQL_PASSWORD }}"
tags: update_edxapp_mysql_host
- name: call update on edx-platform
shell: >
/edx/bin/update edx-platform master
tags: update_edxapp_mysql_host
#
# Requires MySQL-python to be installed for the system python.
# This play will create databases and users for an application.
# It can be run like so:
#
# ansible-playbook -i 'localhost,' create_analytics_reports_dbs.yml -e@./db.yml
#
# where db.yml contains the following dictionaries
#
# database_connection: &default_connection
# login_host: "mysql.example.org"
# login_user: "root"
# login_password: "super-secure-password"
# DEFAULT_ENCODING: "utf8"
# databases:
# reports:
# state: "present"
# encoding: "{{ DEFAULT_ENCODING }}"
# <<: *default_connection
# application:
# state: "present"
# encoding: "{{ DEFAULT_ENCODING }}"
# <<: *default_connection
# database_users:
# migrate:
# state: "present"
# password: "user-with-ddl-privs"
# host: "%"
# privileges:
# - "reports.*:SELECT,INSERT,UPDATE,DELETE,ALTER,CREATE,DROP,INDEX"
# - "wwc.*:SELECT,INSERT,UPDATE,DELETE,ALTER,CREATE,DROP,INDEX"
# <<: *default_connection
# runtime:
# state: "present"
# password: "user-with-dml-privs"
# host: "%"
# privileges:
# - "reports.*:SELECT"
# - "wwc.*:SELECT,INSERT,UPDATE,DELETE"
# <<: *default_connection
- name: Create databases and users
hosts: all
connection: local
gather_facts: False
tasks:
# Install the required library; currently this needs to be available
# to the system python.
- name: install python mysqldb module
pip: name={{item}} state=present
sudo: yes
with_items:
- MySQL-python
- name: create mysql databases
mysql_db: >
db={{ item.key }}
state={{ item.value.state }}
encoding={{ item.value.encoding }}
login_host={{ item.value.login_host }}
login_user={{ item.value.login_user }}
login_password={{ item.value.login_password }}
with_dict: databases
- name: create mysql users and assign privileges
mysql_user: >
name="{{ item.key }}"
priv="{{ '/'.join(item.value.privileges) }}"
password="{{ item.value.password }}"
host={{ item.value.host }}
login_host={{ item.value.login_host }}
login_user={{ item.value.login_user }}
login_password={{ item.value.login_password }}
append_privs=yes
with_dict: database_users
......@@ -2,15 +2,11 @@
hosts: all
sudo: True
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- demo
- role: datadog
when: ENABLE_DATADOG
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
when: COMMON_ENABLE_NEWRELIC
......@@ -2,10 +2,6 @@
hosts: all
sudo: True
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- aws
- role: nginx
......@@ -13,8 +9,8 @@
- discern
- discern
- role: datadog
when: ENABLE_DATADOG
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
when: COMMON_ENABLE_NEWRELIC
......@@ -6,10 +6,6 @@
vars:
migrate_db: "yes"
openid_workaround: True
ENABLE_DATADOG: True
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
edx_internal: False
roles:
- aws
- role: nginx
......@@ -20,27 +16,30 @@
- xqueue
- xserver
- certs
- analytics-api
nginx_default_sites:
- lms
- edxlocal
- role: edxlocal
tags: edxlocal
- mongo
- { role: 'edxapp', celery_worker: True }
- edxapp
- role: demo
tags: ['demo']
tags: demo
- { role: 'rabbitmq', rabbitmq_ip: '127.0.0.1' }
- oraclejdk
- elasticsearch
- forum
- { role: "xqueue", update_users: True }
- { role: xserver, when: edx_internal }
- xserver
- ora
- discern
- certs
- edx_ansible
- analytics-api
- role: datadog
when: ENABLE_DATADOG
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
when: COMMON_ENABLE_NEWRELIC
- flower
......@@ -2,10 +2,6 @@
hosts: all
sudo: True
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- aws
- role: nginx
......@@ -16,8 +12,8 @@
- lms
- edxapp
- role: datadog
when: ENABLE_DATADOG
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
when: COMMON_ENABLE_NEWRELIC
- name: Run edxapp migrations
hosts: all
sudo: False
gather_facts: False
vars:
db_dry_run: "--db-dry-run"
tasks:
# Syncdb with migrate when the migrate user is overridden in extra vars
- name: syncdb and migrate
shell: >
chdir={{ edxapp_code_dir }}
python manage.py {{ item }} migrate --noinput --settings=aws_migrate {{ db_dry_run }}
environment:
DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}"
DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
with_items:
- lms
- cms
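# A sketch of a typical invocation (playbook filename and var file are
# assumptions): the default --db-dry-run flag only reports pending
# migrations; override it with an empty value to apply them for real.
#
#   ansible-playbook -i 'localhost,' edxapp_migrate.yml \
#     -e@/path/to/secrets.yml -e db_dry_run=""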
......@@ -3,7 +3,41 @@
vars_files:
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/stage/stage-edx.yml"
vars:
# By default take instances in and out of the elb(s) they
# are attached to
# To skip elb operations use "-e elb_pre_post=false"
elb_pre_post: true
# Number of instances to operate on at a time
serial_count: 1
serial: "{{ serial_count }}"
pre_tasks:
- action: ec2_facts
when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
sudo: False
when: elb_pre_post
roles:
- common
- oraclejdk
- elasticsearch
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
with_items: ec2_elbs
sudo: False
when: elb_pre_post
- name: Deploy celery flower (monitoring tool)
hosts: all
sudo: True
gather_facts: True
roles:
- flower
......@@ -2,10 +2,6 @@
hosts: all
sudo: True
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- aws
- role: nginx
......@@ -13,10 +9,8 @@
- forum
- forum
- role: datadog
when: ENABLE_DATADOG
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
- role: newrelic
when: ENABLE_NEWRELIC
when: COMMON_ENABLE_NEWRELIC
# Configure an admin instance with jenkins and asgard.
# Configure an instance with the admin jenkins.
- name: Configure instance(s)
hosts: all
sudo: True
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- aws
- edx_ansible
- user
- jenkins_admin
- hotg
- role: datadog
when: ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
# Configure a Jenkins master instance
# Configure a Jenkins master instance for testeng
# This has the Jenkins Java app, but none of the requirements
# to run the tests.
......@@ -8,6 +8,9 @@
gather_facts: True
vars:
COMMON_DATA_DIR: "/mnt"
COMMON_ENABLE_DATADOG: True
roles:
- common
- role: datadog
when: COMMON_ENABLE_DATADOG
- jenkins_master
......@@ -28,6 +28,7 @@
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
- "restart workers"
- name: syncdb and migrate
shell: >
......@@ -38,10 +39,11 @@
DB_MIGRATION_PASS: "{{ edxapp_mysql_password }}"
notify:
- "restart edxapp"
- "restart workers"
handlers:
- name: restart edxapp
shell: "{{ supervisorctl_path }} restart edxapp:{{ item }}"
with_items:
- lms
- cms
shell: "{{ supervisorctl_path }} restart edxapp:"
- name: restart workers
shell: "{{ supervisorctl_path }} restart edxapp_worker:"
......@@ -5,6 +5,40 @@
# ansible_default_ipv4 so
# gather_facts must be set to True
gather_facts: True
vars:
# By default take instances in and out of the elb(s) they
# are attached to
# To skip elb operations use "-e elb_pre_post=false"
elb_pre_post: true
# Number of instances to operate on at a time
serial_count: 1
serial: "{{ serial_count }}"
pre_tasks:
- action: ec2_facts
when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
sudo: False
when: elb_pre_post
roles:
- aws
- rabbitmq
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
with_items: ec2_elbs
sudo: False
when: elb_pre_post
- hosts: all
sudo: true
vars:
# By default take instances in and out of the elb(s) they
# are attached to
# To skip elb operations use "-e elb_pre_post=false"
elb_pre_post: true
# Number of instances to operate on at a time
serial_count: 1
serial: "{{ serial_count }}"
pre_tasks:
- action: ec2_facts
when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
sudo: False
when: elb_pre_post
tasks:
- shell: echo "test"
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
with_items: ec2_elbs
sudo: False
when: elb_pre_post
# This is a utility play to setup the db users on the edxapp db
#
# The mysql root user MUST be passed in as an extra var
#
# the environment and deployment must be passed in as COMMON_ENVIRONMENT
# and COMMON_DEPLOYMENT. These two vars should be set in the secret
# var file for the corresponding vpc stack
#
# Example invocation:
#
# Create the databases for edxapp and xqueue:
#
# ansible-playbook -i localhost, create_db_users.yml -e@/path/to/secrets.yml -e "edxapp_db_root_user=root edxapp_db_root_pass=password"
#
- name: Update db users on the edxapp db
hosts: all
gather_facts: False
vars:
edxapp_db_root_user: 'None'
edxapp_db_root_pass: 'None'
tasks:
- fail: msg="COMMON_ENVIRONMENT and COMMON_DEPLOYMENT need to be defined to use this play"
when: COMMON_ENVIRONMENT is not defined or COMMON_DEPLOYMENT is not defined
- name: assign mysql user permissions for read_only user
mysql_user:
name: "{{ COMMON_MYSQL_READ_ONLY_USER }}"
priv: "*.*:SELECT"
password: "{{ COMMON_MYSQL_READ_ONLY_PASS }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
append_privs: yes
host: '%'
when: item.db_user != 'None'
with_items:
- db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ edxapp_db_root_pass }}"
- name: assign mysql user permissions for migrate user
mysql_user:
name: "{{ COMMON_MYSQL_MIGRATE_USER }}"
priv: "{{ item.db_name }}.*:SELECT,INSERT,UPDATE,DELETE,ALTER,CREATE,DROP,INDEX"
password: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
append_privs: yes
host: '%'
when: item.db_user != 'None'
with_items:
- db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ edxapp_db_root_pass }}"
- name: assign mysql user permissions for admin user
mysql_user:
name: "{{ COMMON_MYSQL_ADMIN_USER }}"
priv: "*.*:CREATE USER"
password: "{{ COMMON_MYSQL_ADMIN_PASS }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
append_privs: yes
host: '%'
when: item.db_user != 'None'
with_items:
- db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ edxapp_db_root_pass }}"
- name: assign mysql user permissions for db users
mysql_user:
name: "{{ item.db_user_to_modify }}"
priv: "{{ item.db_name }}.*:SELECT,INSERT,UPDATE,DELETE"
password: "{{ item.db_user_to_modify_pass }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
host: '%'
when: item.db_user != 'None'
with_items:
# These defaults are needed, otherwise ansible will throw
# variable undefined errors when they are not defined
# in secret vars
- db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user|default('None') }}"
db_pass: "{{ edxapp_db_root_pass|default('None') }}"
db_user_to_modify: "{{ EDXAPP_MYSQL_USER }}"
db_user_to_modify_pass: "{{ EDXAPP_MYSQL_PASSWORD }}"
# The second call to mysql_user needs to have append_privs set to
# yes otherwise it will overwrite the previous run.
# This means that both tasks will report changed on every ansible
# run
- name: assign mysql user permissions for db test user
mysql_user:
append_privs: yes
name: "{{ item.db_user_to_modify }}"
priv: "{{ COMMON_ENVIRONMENT }}_{{ COMMON_DEPLOYMENT }}_test_{{ item.db_name }}.*:ALL"
password: "{{ item.db_user_to_modify_pass }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
host: '%'
when: item.db_user != 'None'
with_items:
# These defaults are needed, otherwise ansible will throw
# variable undefined errors when they are not defined
# in secret vars
- db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user|default('None') }}"
db_pass: "{{ edxapp_db_root_pass|default('None') }}"
db_user_to_modify: "{{ EDXAPP_MYSQL_USER }}"
db_user_to_modify_pass: "{{ EDXAPP_MYSQL_PASSWORD }}"
# ansible-playbook -c ssh -vvvv --user=ubuntu -i ec2.py deployer.yml -e "@/path/to/secure/ansible/vars/edx_admin.yml" --limit="tag_aws_cloudformation_stack-name_<admin_stack_name>"
# You will need to create a gh_users.yml that contains the github names of users that should have login access to the machines.
# Setup user login on the bastion
- name: Configure Bastion
hosts: tag_play_bastion
sudo: True
gather_facts: False
roles:
- aws
# ansible-playbook -vvv -c ssh -i admin_url, vpc_admin.yml -e "@path_to_common_overrides" -e "@path_to_deployment_specific_overrides"
# Configure an admin instance with jenkins and asgard.
- name: Configure instance(s)
hosts: tag_play_admin
hosts: all
sudo: True
gather_facts: True
roles:
- aws
- edx_ansible
- user
- jenkins_admin
- hotg
- newrelic
- alton
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
......@@ -2,17 +2,13 @@
hosts: all
sudo: True
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- aws
- role: edxapp
celery_worker: True
- role: datadog
when: ENABLE_DATADOG
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
when: COMMON_ENABLE_NEWRELIC
......@@ -3,9 +3,26 @@
sudo: True
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
# By default take instances in and out of the elb(s) they
# are attached to
# To skip elb operations use "-e elb_pre_post=false"
elb_pre_post: true
# Number of instances to operate on at a time
serial_count: 1
serial: "{{ serial_count }}"
pre_tasks:
- action: ec2_facts
when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
sudo: False
when: elb_pre_post
roles:
- aws
- role: nginx
......@@ -13,8 +30,21 @@
- xqueue
- role: xqueue
- role: datadog
when: ENABLE_DATADOG
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
when: COMMON_ENABLE_NEWRELIC
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
with_items: ec2_elbs
sudo: False
when: elb_pre_post
- name: Deploy xqueue-watcher
hosts: all
sudo: True
gather_facts: True
vars:
COMMON_APP_DIR: "/edx/app"
common_web_group: "www-data"
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- aws
- xqwatcher
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
\ No newline at end of file
......@@ -2,10 +2,6 @@
hosts: all
sudo: True
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- aws
- role: nginx
......@@ -13,8 +9,8 @@
- xserver
- role: xserver
- role: datadog
when: ENABLE_DATADOG
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
when: COMMON_ENABLE_NEWRELIC
../ansible.cfg
\ No newline at end of file
*
!prod
!stage
!data
!.gitignore
This temp directory is created here so that we can make sure it doesn't
collide with other users doing ansible operations on the same machine,
or with concurrent installs to different environments, say to prod and stage.
# config file for ansible -- http://ansible.github.com
# nearly all parameters can be overridden in ansible-playbook or with command line flags
# ansible will read ~/.ansible.cfg or /etc/ansible/ansible.cfg, whichever it finds first
[defaults]
jinja2_extensions=jinja2.ext.do
hash_behaviour=merge
host_key_checking = False
# These are environment-specific defaults
forks=10
transport=ssh
hostfile=./ec2.py
extra_vars='key=deployment region=us-west-1'
user=ubuntu
[ssh_connection]
# example from https://github.com/ansible/ansible/blob/devel/examples/ansible.cfg
ssh_args= -o ControlMaster=auto -o ControlPersist=60s -o ControlPath=/tmp/ansible-ssh-%h-%p-%r
scp_if_ssh=True
......@@ -3,6 +3,6 @@ regions=us-west-1
regions_exclude = us-gov-west-1
destination_variable=public_dns_name
vpc_destination_variable=private_dns_name
cache_path=/tmp
cache_path=ec2_cache/prod
cache_max_age=300
route53=False
[ec2]
regions=us-west-1
regions_exclude = us-gov-west-1
destination_variable=public_dns_name
vpc_destination_variable=private_dns_name
cache_path=ec2_cache/stage
cache_max_age=300
route53=False
......@@ -13,10 +13,11 @@
openid_workaround: True
EDXAPP_LMS_NGINX_PORT: '80'
edx_platform_version: 'master'
# Set to false if deployed behind another proxy/load balancer.
NGINX_SET_X_FORWARDED_HEADERS: True
# These should stay false for the public AMI
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
COMMON_ENABLE_DATADOG: False
COMMON_ENABLE_SPLUNKFORWARDER: False
roles:
- role: nginx
nginx_sites:
......@@ -41,8 +42,8 @@
- certs
- edx_ansible
- role: datadog
when: ENABLE_DATADOG
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
when: COMMON_ENABLE_NEWRELIC
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: ec2_elb
short_description: De-registers or registers instances from EC2 ELBs
description:
- This module de-registers or registers an AWS EC2 instance from the ELBs
that it belongs to.
- Returns fact "ec2_elbs" which is a list of elbs attached to the instance
if state=absent is passed as an argument.
- Will be marked changed when called only if there are ELBs found to operate on.
version_added: "1.2"
author: John Jarvis
options:
state:
description:
- register or deregister the instance
required: true
choices: ['present', 'absent']
instance_id:
description:
- EC2 Instance ID
required: true
ec2_elbs:
description:
- List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register.
required: false
default: None
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
enable_availability_zone:
description:
- Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already
been enabled. If set to no, the task will fail if the availability zone is not enabled on the ELB.
required: false
default: yes
choices: [ "yes", "no" ]
wait:
description:
- Wait for instance registration or deregistration to complete successfully before returning.
required: false
default: yes
choices: [ "yes", "no" ]
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
aliases: []
version_added: "1.5"
wait_timeout:
description:
- Number of seconds to wait for an instance to change state. If 0 then this module may return an error if a transient error occurs. If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no.
required: false
default: 0
version_added: "1.6"
extends_documentation_fragment: aws
"""
EXAMPLES = """
# basic pre_task and post_task example
pre_tasks:
- name: Gathering ec2 facts
ec2_facts:
- name: Instance De-register
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
state: 'absent'
roles:
- myrole
post_tasks:
- name: Instance Register
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
state: 'present'
with_items: ec2_elbs
"""
import time
import sys
import os
try:
import boto
import boto.ec2
import boto.ec2.elb
from boto.regioninfo import RegionInfo
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def connect_to_aws(aws_module, region, **params):
conn = aws_module.connect_to_region(region, **params)
if params.get('profile_name'):
conn = boto_fix_security_token_in_profile(conn, params['profile_name'])
return conn
def get_aws_connection_info(module):
# Check module args for credentials, then check environment vars
# access_key
ec2_url = module.params.get('ec2_url')
access_key = module.params.get('aws_access_key')
secret_key = module.params.get('aws_secret_key')
security_token = module.params.get('security_token')
region = module.params.get('region')
profile_name = module.params.get('profile')
validate_certs = module.params.get('validate_certs')
if not ec2_url:
if 'EC2_URL' in os.environ:
ec2_url = os.environ['EC2_URL']
elif 'AWS_URL' in os.environ:
ec2_url = os.environ['AWS_URL']
if not access_key:
if 'EC2_ACCESS_KEY' in os.environ:
access_key = os.environ['EC2_ACCESS_KEY']
elif 'AWS_ACCESS_KEY_ID' in os.environ:
access_key = os.environ['AWS_ACCESS_KEY_ID']
elif 'AWS_ACCESS_KEY' in os.environ:
access_key = os.environ['AWS_ACCESS_KEY']
else:
# in case access_key came in as empty string
access_key = None
if not secret_key:
if 'EC2_SECRET_KEY' in os.environ:
secret_key = os.environ['EC2_SECRET_KEY']
elif 'AWS_SECRET_ACCESS_KEY' in os.environ:
secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
elif 'AWS_SECRET_KEY' in os.environ:
secret_key = os.environ['AWS_SECRET_KEY']
else:
# in case secret_key came in as empty string
secret_key = None
if not region:
if 'EC2_REGION' in os.environ:
region = os.environ['EC2_REGION']
elif 'AWS_REGION' in os.environ:
region = os.environ['AWS_REGION']
else:
# boto.config.get returns None if config not found
region = boto.config.get('Boto', 'aws_region')
if not region:
region = boto.config.get('Boto', 'ec2_region')
if not security_token:
if 'AWS_SECURITY_TOKEN' in os.environ:
security_token = os.environ['AWS_SECURITY_TOKEN']
else:
# in case security_token came in as empty string
security_token = None
boto_params = dict(aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
security_token=security_token)
# profile_name only works as a key in boto >= 2.24
# so only set profile_name if passed as an argument
if profile_name:
if not boto_supports_profile_name():
module.fail_json("boto does not support profile_name before 2.24")
boto_params['profile_name'] = profile_name
if validate_certs and HAS_LOOSE_VERSION and LooseVersion(boto.Version) >= LooseVersion("2.6.0"):
boto_params['validate_certs'] = validate_certs
return region, ec2_url, boto_params
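# Illustrative sketch (credential values are placeholders): because module
# args take precedence over the environment, which takes precedence over the
# boto config file, a run can be configured entirely from the shell:
#
#   AWS_ACCESS_KEY_ID=AKIA... AWS_SECRET_ACCESS_KEY=... EC2_REGION=us-east-1 \
#       ansible-playbook -i 'localhost,' deploy.yml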
class ElbManager:
"""Handles EC2 instance ELB registration and de-registration"""
def __init__(self, module, instance_id=None, ec2_elbs=None,
region=None, **aws_connect_params):
self.module = module
self.instance_id = instance_id
self.region = region
self.aws_connect_params = aws_connect_params
self.lbs = self._get_instance_lbs(ec2_elbs)
self.changed = False
def deregister(self, wait, timeout):
"""De-register the instance from all ELBs and wait for the ELB
to report it out-of-service"""
for lb in self.lbs:
initial_state = self._get_instance_health(lb)
if initial_state is None:
# The instance isn't registered with this ELB so just
# return unchanged
return
lb.deregister_instances([self.instance_id])
# The ELB is changing state in some way. Either an instance that's
# InService is moving to OutOfService, or an instance that's
# already OutOfService is being deregistered.
self.changed = True
if wait:
self._await_elb_instance_state(lb, 'OutOfService', initial_state, timeout)
def register(self, wait, enable_availability_zone, timeout):
"""Register the instance for all ELBs and wait for the ELB
to report the instance in-service"""
for lb in self.lbs:
initial_state = self._get_instance_health(lb)
if enable_availability_zone:
self._enable_availability_zone(lb)
lb.register_instances([self.instance_id])
if wait:
self._await_elb_instance_state(lb, 'InService', initial_state, timeout)
else:
# We cannot assume no change was made if we don't wait
# to find out
self.changed = True
def exists(self, lbtest):
""" Verify that the named ELB actually exists """
found = False
for lb in self.lbs:
if lb.name == lbtest:
found=True
break
return found
def _enable_availability_zone(self, lb):
"""Enable the current instance's availability zone in the provided lb.
Returns True if the zone was enabled or False if no change was made.
lb: load balancer"""
instance = self._get_instance()
if instance.placement in lb.availability_zones:
return False
lb.enable_zones(zones=instance.placement)
# If successful, the new zone will have been added to
# lb.availability_zones
return instance.placement in lb.availability_zones
def _await_elb_instance_state(self, lb, awaited_state, initial_state, timeout):
"""Wait for an ELB to change state
lb: load balancer
awaited_state : state to poll for (string)"""
wait_timeout = time.time() + timeout
while True:
instance_state = self._get_instance_health(lb)
if not instance_state:
msg = ("The instance %s could not be put in service on %s."
" Reason: Invalid Instance")
self.module.fail_json(msg=msg % (self.instance_id, lb))
if instance_state.state == awaited_state:
# Check the current state against the initial state, and only set
# changed if they are different.
if (initial_state is None) or (instance_state.state != initial_state.state):
self.changed = True
break
elif self._is_instance_state_pending(instance_state):
# If it's pending, we'll skip further checks and continue waiting
pass
elif (awaited_state == 'InService'
and instance_state.reason_code == "Instance"
and time.time() >= wait_timeout):
# If the reason_code for the instance being out of service is
# "Instance" this indicates a failure state, e.g. the instance
# has failed a health check or the ELB does not have the
# instance's availability zone enabled. The exact reason why is
# described in InstanceState.description.
msg = ("The instance %s could not be put in service on %s."
" Reason: %s")
self.module.fail_json(msg=msg % (self.instance_id,
lb,
instance_state.description))
time.sleep(1)
def _is_instance_state_pending(self, instance_state):
"""
Determines whether the instance_state is "pending", meaning there is
an operation under way to bring it in service.
"""
# This is messy, because AWS provides no way to distinguish between
# an instance that is OutOfService because it's pending vs. OutOfService
# because it's failing health checks. So we're forced to analyze the
# description, which is likely to be brittle.
return (instance_state and 'pending' in instance_state.description)
def _get_instance_health(self, lb):
"""
Check instance health, should return status object or None under
certain error conditions.
"""
try:
status = lb.get_instance_health([self.instance_id])[0]
except boto.exception.BotoServerError, e:
if e.error_code == 'InvalidInstance':
return None
else:
raise
return status
def _get_instance_lbs(self, ec2_elbs=None):
"""Returns a list of ELBs attached to self.instance_id
ec2_elbs: an optional list of elb names that will be used
for elb lookup instead of returning what elbs
are attached to self.instance_id"""
try:
elb = connect_to_aws(boto.ec2.elb, self.region,
**self.aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
self.module.fail_json(msg="{} {} {}".format(e, self.region, self.aws_connect_params))
elbs = elb.get_all_load_balancers()
if ec2_elbs:
lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs)
else:
lbs = []
for lb in elbs:
for info in lb.instances:
if self.instance_id == info.id:
lbs.append(lb)
return lbs
def _get_instance(self):
"""Returns a boto.ec2.InstanceObject for self.instance_id"""
try:
ec2 = connect_to_aws(boto.ec2, self.region,
**self.aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
self.module.fail_json(msg=str(e))
return ec2.get_only_instances(instance_ids=[self.instance_id])[0]
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state={'required': True},
instance_id={'required': True},
ec2_elbs={'default': None, 'required': False, 'type':'list'},
enable_availability_zone={'default': True, 'required': False, 'type': 'bool'},
wait={'required': False, 'default': True, 'type': 'bool'},
wait_timeout={'required': False, 'default': 0, 'type': 'int'}
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
ec2_elbs = module.params['ec2_elbs']
wait = module.params['wait']
enable_availability_zone = module.params['enable_availability_zone']
timeout = module.params['wait_timeout']
if module.params['state'] == 'present' and 'ec2_elbs' not in module.params:
module.fail_json(msg="ELBs are required for registration")
instance_id = module.params['instance_id']
elb_man = ElbManager(module, instance_id, ec2_elbs,
region=region, **aws_connect_params)
if ec2_elbs is not None:
for elb in ec2_elbs:
if not elb_man.exists(elb):
msg="ELB %s does not exist" % elb
module.fail_json(msg=msg)
if module.params['state'] == 'present':
elb_man.register(wait, enable_availability_zone, timeout)
elif module.params['state'] == 'absent':
elb_man.deregister(wait, timeout)
ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]}
ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts)
module.exit_json(**ec2_facts_result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
---
# Build a kibana/logstash/elasticsearch server for capturing and
# analyzing logs.
- name: Configure syslog server
hosts: all
sudo: yes
roles:
- common
- oraclejdk
- elasticsearch
- logstash
- kibana
- role: nginx
nginx_sites:
- kibana
---
AIDE_REPORT_EMAIL: 'root'
---
# install and configure aide IDS
#
- name: install aide
apt: pkg="aide" state="present"
- name: configure aide defaults
template: >
src=etc/default/aide.j2 dest=/etc/default/aide
owner=root group=root mode=0644
- name: open read permissions on aide logs
file: >
name="/var/log/aide"
recurse="yes"
state="directory"
mode="755"
- name: aide initial scan (this can take a long time)
command: >
aideinit -y -f
creates=/var/lib/aide/aide.db
sudo: yes
# These settings are mainly for the wrapper scripts around aide,
# such as aideinit and /etc/cron.daily/aide
# send reports to syslog
REPORT_URL=syslog:LOG_LOCAL1
# This is used as the host name in the AIDE reports that are sent out
# via e-mail. It defaults to the output of $(hostname --fqdn), but can
# be set to arbitrary values.
# FQDN=
# This is used as the subject for the e-mail reports.
# If your mail system only threads by subject, you might want to add
# some variable content here (for example $(date +%Y-%m-%d)).
MAILSUBJ="Daily AIDE report for $FQDN"
# This is the email address reports get mailed to
# default is root
# This variable is expanded before it is used, so you can use variables
# here. For example, MAILTO=$FQDN-aide@domain.example will send the
# report to host.name.example-aide@domain.example if the local FQDN is
# host.name.example.
MAILTO={{ AIDE_REPORT_EMAIL }}
# Set this to yes to suppress mailings when no changes have been
# detected during the AIDE run and no error output was given.
#QUIETREPORTS=no
# This parameter defines which AIDE command to run from the cron script.
# Sensible values are "update" and "check".
# Default is "check", ensuring backwards compatibility.
# Since "update" does not take any longer, it is recommended to use "update",
# so that a new database is created every day. The new database needs to be
# manually copied over the current one, though.
COMMAND=update
# This parameter defines what to do with a new database created by
# COMMAND=update. It is ignored if COMMAND!=update.
# no: Do not copy new database to old database. This is the default.
# yes: Copy new database to old database. This means that changes to the
# file system are only reported once. Possibly dangerous.
# ifnochange: Copy new database to old database if no changes have
# been reported. This is needed for ANF/ARF to work reliably.
COPYNEWDB=no
# Set this to yes to truncate the detailed changes part in the mail. The full
# output will still be listed in the log file.
TRUNCATEDETAILS=yes
# Set this to yes to suppress file changes by package and security
# updates from appearing in the e-mail report. Filtered file changes will
# still be listed in the log file. This option parses the /var/log/dpkg.log
# file and implies TRUNCATEDETAILS=yes
FILTERUPDATES=yes
# Set this to yes to suppress file changes by package installations
# from appearing in the e-mail report. Filtered file changes will still
# be listed in the log file. This option parses the /var/log/dpkg.log file and
# implies TRUNCATEDETAILS=yes.
FILTERINSTALLATIONS=yes
# This parameter defines how many lines to return per e-mail. Output longer
# than this value will be truncated in the e-mail sent out.
# Set value to "0" to disable this option.
LINES=1000
# This parameter gives a grep regular expression. If given, all output lines
# that _don't_ match the regexp are listed first in the script's output. This
# allows you to easily remove noise from the AIDE report.
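# For example, to push changes under /var/spool to the end of the report:
#   NOISE="^/var/spool"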
NOISE=""
# This parameter defines which options are given to aide in the daily
# cron job. The default is "-V4".
AIDEARGS=""
# These parameters control update-aide.conf and give the defaults for
# the --confdir, --confd and --settingsd options
# UPAC_CONFDIR="/etc/aide"
# UPAC_CONFD="$UPAC_CONFDIR/aide.conf.d"
# UPAC_SETTINGSD="$UPAC_CONFDIR/aide.settings.d"
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role alton
#
#
# vars are namespaced with the module name.
#
ALTON_USERNAME: '1234_1234@chat.hipchat.com'
ALTON_PASSWORD: 'password'
ALTON_V2_TOKEN: 'HIPCHAT_V2_TOKEN'
ALTON_ROOMS: 'Hammer'
ALTON_NAME: 'Alton W. Daemon'
ALTON_HANDLE: 'alton'
ALTON_REDIS_URL: 'redis://fakeuser:redispassword@localhost:6379'
ALTON_HTTPSERVER_PORT: '8081'
ALTON_WORLD_WEATHER_KEY: !!null
# Needed if you want to build AMIs from alton.
ALTON_JENKINS_URL: !!null
alton_role_name: alton
alton_user: alton
alton_app_dir: "{{ COMMON_APP_DIR }}/alton"
alton_code_dir: "{{ alton_app_dir }}/alton"
alton_venvs_dir: "{{ alton_app_dir }}/venvs"
alton_venv_dir: "{{ alton_venvs_dir }}/alton"
alton_venv_bin: "{{ alton_venv_dir }}/bin"
alton_source_repo: "https://github.com/edx/alton.git"
alton_version: "HEAD"
alton_requirements_file: "{{ alton_code_dir }}/requirements.txt"
alton_supervisor_wrapper: "{{ alton_app_dir }}/alton-supervisor.sh"
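# alton is a bot built on the "will" framework, hence the WILL_* variable
# names exported below.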
alton_environment:
WILL_USERNAME: "{{ ALTON_USERNAME }}"
WILL_PASSWORD: "{{ ALTON_PASSWORD }}"
WILL_V2_TOKEN: "{{ ALTON_V2_TOKEN }}"
WILL_ROOMS: "{{ ALTON_ROOMS }}"
WILL_NAME: "{{ ALTON_NAME }}"
WILL_HANDLE: "{{ ALTON_HANDLE }}"
WILL_REDIS_URL: "{{ ALTON_REDIS_URL }}"
WILL_HTTPSERVER_PORT: "{{ ALTON_HTTPSERVER_PORT }}"
WORLD_WEATHER_ONLINE_KEY: "{{ ALTON_WORLD_WEATHER_KEY }}"
JENKINS_URL: "{{ ALTON_JENKINS_URL }}"
#
# OS packages
#
alton_debian_pkgs: []
alton_redhat_pkgs: []
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role alton
#
# Overview:
#
#
- name: restart alton
supervisorctl_local: >
name=alton
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
when: not disable_edx_services
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Role includes for role alton
#
# Example:
#
# dependencies:
# - {
# role: my_role
# my_role_var0: "foo"
# my_role_var1: "bar"
# }
dependencies:
- supervisor
- redis
- name: checkout the code
git: >
dest="{{ alton_code_dir }}" repo="{{ alton_source_repo }}"
version="{{ alton_version }}" accept_hostkey=yes
sudo_user: "{{ alton_user }}"
notify: restart alton
- name: install the requirements
pip: >
requirements="{{ alton_requirements_file }}"
virtualenv="{{ alton_venv_dir }}"
state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ alton_user }}"
notify: restart alton
- name: create the supervisor wrapper
template: >
src="{{ alton_supervisor_wrapper|basename }}.j2"
dest="{{ alton_supervisor_wrapper }}"
mode=0755
sudo_user: "{{ alton_user }}"
notify: restart alton
- name: create a supervisor config
template: >
src=alton.conf.j2 dest="{{ supervisor_available_dir }}/alton.conf"
owner="{{ supervisor_user }}"
group="{{ supervisor_user }}"
sudo_user: "{{ supervisor_user }}"
notify: restart alton
- name: enable the supervisor config
file: >
src="{{ supervisor_available_dir }}/alton.conf"
dest="{{ supervisor_cfg_dir }}/alton.conf"
state=link
force=yes
mode=0644
sudo_user: "{{ supervisor_user }}"
when: not disable_edx_services
notify: restart alton
- name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != ""
when: not disable_edx_services
- name: ensure alton is started
supervisorctl_local: >
name=alton
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=started
when: not disable_edx_services
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role alton
#
# Overview:
#
#
# Dependencies:
#
#
# Example play:
#
#
- name: create application user
user: >
name="{{ alton_user }}" home="{{ alton_app_dir }}"
createhome=no shell=/bin/false
- name: create alton user dirs
file: >
path="{{ item }}" state=directory
owner="{{ alton_user }}" group="{{ common_web_group }}"
with_items:
- "{{ alton_app_dir }}"
- "{{ alton_venvs_dir }}"
- name: setup the alton env
template: >
src="alton_env.j2" dest="{{ alton_app_dir }}/alton_env"
owner="{{ alton_user }}" group="{{ common_web_user }}"
mode=0644
notify: restart alton
- include: deploy.yml tags=deploy
#!/bin/bash
source {{ alton_app_dir }}/alton_env
cd {{ alton_code_dir }}
{{ alton_venv_bin }}/python run_alton.py
[program:alton]
command={{ alton_supervisor_wrapper }}
priority=999
user={{ common_web_user }}
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
killasgroup=true
stopasgroup=true
stopsignal=QUIT
# {{ ansible_managed }}
{% for name,value in alton_environment.items() -%}
{%- if value -%}
export {{ name }}="{{ value }}"
{% endif %}
{%- endfor %}
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role analytics-api
#
ANALYTICS_API_GIT_IDENTITY: !!null
# depends upon Newrelic being enabled via COMMON_ENABLE_NEWRELIC
# and a key being provided via NEWRELIC_LICENSE_KEY
ANALYTICS_API_NEWRELIC_APPNAME: "your Newrelic appname"
ANALYTICS_API_PIP_EXTRA_ARGS: "-i {{ COMMON_PYPI_MIRROR_URL }}"
ANALYTICS_API_NGINX_PORT: "18100"
ANALYTICS_API_VERSION: "master"
# Default dummy user, override this!!
ANALYTICS_API_USERS:
"dummy-api-user": "changeme"
ANALYTICS_API_CONFIG:
ANALYTICS_DATABASE: 'reports'
SECRET_KEY: 'Your secret key here'
TIME_ZONE: 'America/New_York'
LANGUAGE_CODE: 'en-us'
# email config
EMAIL_HOST: 'smtp.example.com'
EMAIL_HOST_PASSWORD: ""
EMAIL_HOST_USER: ""
EMAIL_PORT: 587
API_AUTH_TOKEN: 'put-your-api-token-here'
STATICFILES_DIRS: []
STATIC_ROOT: "{{ COMMON_DATA_DIR }}/{{ analytics_api_service_name }}/staticfiles"
# db config
DATABASE_OPTIONS:
connect_timeout: 10
DATABASES:
# rw user
default:
ENGINE: 'django.db.backends.mysql'
NAME: 'analytics-api'
USER: 'api001'
PASSWORD: 'password'
HOST: 'localhost'
PORT: '3306'
# read-only user
reports:
ENGINE: 'django.db.backends.mysql'
NAME: 'reports'
USER: 'reports001'
PASSWORD: 'password'
HOST: 'localhost'
PORT: '3306'
#
# vars are namespaced with the module name.
#
analytics_api_environment:
DJANGO_SETTINGS_MODULE: "analyticsdataserver.settings.production"
ANALYTICS_API_CFG: "{{ COMMON_CFG_DIR }}/{{ analytics_api_service_name }}.yaml"
analytics_api_role_name: "analytics-api"
analytics_api_service_name: "analytics-api"
analytics_api_user: "analytics-api"
analytics_api_app_dir: "{{ COMMON_APP_DIR }}/{{ analytics_api_service_name }}"
analytics_api_home: "{{ COMMON_APP_DIR }}/{{ analytics_api_service_name }}"
analytics_api_venv_base: "{{ analytics_api_home }}/venvs"
analytics_api_venv_dir: "{{ analytics_api_venv_base }}/{{ analytics_api_service_name }}"
analytics_api_venv_bin: "{{ analytics_api_venv_dir }}/bin"
analytics_api_code_dir: "{{ analytics_api_app_dir }}/edx-analytics-data-api"
analytics_api_conf_dir: "{{ analytics_api_home }}"
analytics_api_gunicorn_host: "127.0.0.1"
analytics_api_gunicorn_port: "8100"
analytics_api_gunicorn_workers: "8"
analytics_api_gunicorn_timeout: "300"
analytics_api_django_settings: "production"
analytics_api_source_repo: "git@{{ COMMON_GIT_MIRROR }}:edx/edx-analytics-data-api"
analytics_api_git_ssh_opts: "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i {{ analytics_api_git_identity_file }}"
analytics_api_git_identity_file: "{{ analytics_api_app_dir }}/git-identity"
analytics_api_log_dir: "{{ COMMON_LOG_DIR }}/{{ analytics_api_service_name }}"
analytics_api_requirements_base: "{{ analytics_api_code_dir }}/requirements"
analytics_api_requirements:
- base.txt
- production.txt
- optional.txt
#
# OS packages
#
analytics_api_debian_pkgs:
- 'libmysqlclient-dev'
analytics_api_redhat_pkgs: []
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role analytics-api
#
# Overview:
#
#
- name: "restart the analytics service"
supervisorctl_local: >
name={{ analytics_api_service_name }}
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
# Role includes for role analytics-api
#
# Example:
#
# dependencies:
# - {
# role: my_role
# my_role_var0: "foo"
# my_role_var1: "bar"
# }
dependencies:
- role: edx_service
edx_role_name: "{{ analytics_api_role_name }}"
edx_service_name: "{{ analytics_api_service_name }}"
- supervisor
---
- name: install read-only ssh key
copy: >
content="{{ ANALYTICS_API_GIT_IDENTITY }}" dest={{ analytics_api_git_identity_file }}
owner={{ analytics_api_user }} group={{ analytics_api_user }} mode=0600
- name: setup the analytics-api env file
template: >
src="edx/app/analytics-api/analytics_api_env.j2"
dest="{{ analytics_api_app_dir }}/analytics_api_env"
owner={{ analytics_api_user }}
group={{ analytics_api_user }}
mode=0644
- name: checkout code
git: >
dest={{ analytics_api_code_dir }} repo={{ analytics_api_source_repo }} version={{ ANALYTICS_API_VERSION }}
accept_hostkey=yes
ssh_opts="{{ analytics_api_git_ssh_opts }}"
register: analytics_api_code_checkout
notify: "restart the analytics service"
sudo_user: "{{ analytics_api_user }}"
- name: write out app config file
template: >
src=edx/app/analytics-api/analytics-api.yaml.j2
dest={{ COMMON_CFG_DIR }}/{{ analytics_api_service_name }}.yaml
mode=0644 owner={{ analytics_api_user }} group={{ analytics_api_user }}
notify: restart the analytics service
- name: install application requirements
pip: >
requirements="{{ analytics_api_requirements_base }}/{{ item }}"
virtualenv="{{ analytics_api_venv_dir }}" state=present
sudo_user: "{{ analytics_api_user }}"
notify: restart the analytics service
with_items: analytics_api_requirements
- name: syncdb and migrate
shell: >
chdir={{ analytics_api_code_dir }}
DB_MIGRATION_USER={{ COMMON_MYSQL_MIGRATE_USER }}
DB_MIGRATION_PASS={{ COMMON_MYSQL_MIGRATE_PASS }}
{{ analytics_api_venv_bin }}/python ./manage.py syncdb --migrate --noinput
sudo_user: "{{ analytics_api_user }}"
environment: "{{ analytics_api_environment }}"
when: migrate_db is defined and migrate_db|lower == "yes"
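# Migrations only run when migrate_db is passed in, e.g.:
#   ansible-playbook -i 'api.example.com,' ./analyticsapi.yml -e 'migrate_db=yes'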
- name: run collectstatic
shell: >
chdir={{ analytics_api_code_dir }}
{{ analytics_api_venv_bin }}/python manage.py collectstatic --noinput
sudo_user: "{{ analytics_api_user }}"
environment: "{{ analytics_api_environment }}"
- name: create api users
shell: >
chdir={{ analytics_api_code_dir }}
{{ analytics_api_venv_bin }}/python manage.py set_api_key {{ item.key }} {{ item.value }} --create-user
sudo_user: "{{ analytics_api_user }}"
environment: "{{ analytics_api_environment }}"
with_dict: ANALYTICS_API_USERS
- name: write out the supervisor wrapper
template: >
src=edx/app/analytics-api/analytics-api.sh.j2
dest={{ analytics_api_app_dir }}/{{ analytics_api_service_name }}.sh
mode=0650 owner={{ supervisor_user }} group={{ common_web_user }}
notify: restart the analytics service
- name: write supervisord config
template: >
src=edx/app/supervisor/conf.d.available/analytics-api.conf.j2
dest="{{ supervisor_available_dir }}/{{ analytics_api_service_name }}.conf"
owner={{ supervisor_user }} group={{ common_web_user }} mode=0644
notify: restart the analytics service
- name: enable supervisor script
file: >
src={{ supervisor_available_dir }}/{{ analytics_api_service_name }}.conf
dest={{ supervisor_cfg_dir }}/{{ analytics_api_service_name }}.conf
state=link
force=yes
notify: restart the analytics service
when: not disable_edx_services
- name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
when: not disable_edx_services
- name: create symlinks from the venv bin dir
file: >
src="{{ analytics_api_venv_bin }}/{{ item }}"
dest="{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.analytics-api"
state=link
with_items:
- python
- pip
- django-admin.py
- name: create symlinks from the repo dir
file: >
src="{{ analytics_api_code_dir }}/{{ item }}"
dest="{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.analytics-api"
state=link
with_items:
- manage.py
- name: remove read-only ssh key for the content repo
file: path={{ analytics_api_git_identity_file }} state=absent
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
# Tasks for role analytics-api
#
# Overview:
#
# Install the Analytics Data API server, a python
# django application that runs under gunicorn
#
# Dependencies:
#
# Example play:
# - name: Deploy Analytics API
# hosts: all
# sudo: True
# gather_facts: True
# vars:
# ENABLE_DATADOG: False
# ENABLE_SPLUNKFORWARDER: False
# ENABLE_NEWRELIC: False
# roles:
# - aws
# - analytics-api
#
# ansible-playbook -i 'api.example.com,' ./analyticsapi.yml -e@/ansible/vars/deployment.yml -e@/ansible/vars/env-deployment.yml
#
- fail: msg="You must provide an private key for the analytics repo"
when: not ANALYTICS_API_GIT_IDENTITY
- include: deploy.yml tags=deploy
#!/usr/bin/env bash
# {{ ansible_managed }}
{% if COMMON_ENABLE_NEWRELIC %}
{% set executable = analytics_api_venv_bin + '/newrelic-admin run-program ' + analytics_api_venv_bin + '/gunicorn' %}
{% else %}
{% set executable = analytics_api_venv_bin + '/gunicorn' %}
{% endif %}
{% if COMMON_ENABLE_NEWRELIC %}
export NEW_RELIC_APP_NAME="{{ ANALYTICS_API_NEWRELIC_APPNAME }}"
export NEW_RELIC_LICENSE_KEY="{{ NEWRELIC_LICENSE_KEY }}"
{% endif -%}
source {{ analytics_api_app_dir }}/analytics_api_env
{{ executable }} --pythonpath={{ analytics_api_code_dir }} -b {{ analytics_api_gunicorn_host }}:{{ analytics_api_gunicorn_port }} -w {{ analytics_api_gunicorn_workers }} --timeout={{ analytics_api_gunicorn_timeout }} analyticsdataserver.wsgi:application
---
# {{ ansible_managed }}
{{ ANALYTICS_API_CONFIG | to_nice_yaml }}
# {{ ansible_managed }}
{% for name,value in analytics_api_environment.items() -%}
{%- if value -%}
export {{ name }}="{{ value }}"
{% endif %}
{%- endfor %}
# {{ ansible_managed }}
[program:{{ analytics_api_service_name }}]
command={{ analytics_api_app_dir }}/analytics-api.sh
user={{ common_web_user }}
directory={{ analytics_api_code_dir }}
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
killasgroup=true
stopasgroup=true
---
apache_port: 80
apache_ports:
- 80
apache_sites:
- lms
apache_template_dir: '.'
---
- name: restart apache
service: name=apache2 state=restarted
tags: deploy
---
dependencies:
- common
# Requires nginx package
---
- name: Copying apache config {{ site_name }}
template: src={{ item }} dest=/etc/apache2/sites-available/{{ site_name }}
first_available_file:
- "{{ local_dir }}/apache/templates/{{ site_name }}.j2"
# seems like paths in first_available_file must be relative to the playbooks dir
- "roles/apache/templates/{{ site_name }}.j2"
notify: restart apache
when: apache_role_run is defined
tags:
- apache
- update
- name: Creating apache2 config link {{ site_name }}
file: src=/etc/apache2/sites-available/{{ site_name }} dest=/etc/apache2/sites-enabled/{{ site_name }} state={{ state }} owner=root group=root
notify: restart apache
when: apache_role_run is defined
tags:
- apache
- update
#Installs apache and runs the lms wsgi
# Installs apache and runs the lms wsgi by default
---
- name: Installs apache and mod_wsgi from apt
apt: pkg={{item}} install_recommends=no state=present update_cache=yes
apt: pkg={{ item }} install_recommends=no state=present update_cache=yes
with_items:
- apache2
- libapache2-mod-wsgi
notify: restart apache
tags:
- apache
- install
- name: disables default site
command: a2dissite 000-default
notify: restart apache
tags:
- apache
- install
- name: rewrite apache ports conf
template: dest=/etc/apache2/ports.conf src=ports.conf.j2 owner=root group=root
notify: restart apache
tags:
- apache
- install
- name: Register the fact that apache role has run
command: echo True
register: apache_role_run
tags:
- apache
- install
- debug: msg={{ apache_sites }}
- name: Copying apache configs for {{ apache_sites }}
template: >
src={{ apache_template_dir }}/{{ item }}.j2
dest=/etc/apache2/sites-available/{{ item }}
owner=root group={{ common_web_user }} mode=0640
notify: restart apache
with_items: apache_sites
- include: apache_site.yml state=link site_name=lms
- name: Creating apache2 config links for {{ apache_sites }}
file: >
src=/etc/apache2/sites-available/{{ item }}
dest=/etc/apache2/sites-enabled/{{ item }}
state=link owner=root group=root
notify: restart apache
with_items: apache_sites
WSGIPythonHome {{ edxapp_venv_dir }}
WSGIRestrictEmbedded On
<VirtualHost *:{{apache_port}}>
<VirtualHost *:{{ apache_port }}>
ServerName https://{{ lms_env_config.SITE_NAME }}
ServerAlias *.{{ lms_env_config.SITE_NAME }}
UseCanonicalName On
......@@ -46,6 +46,6 @@ WSGIRestrictEmbedded On
ErrorLog ${APACHE_LOG_DIR}/apache-edx-error.log
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\" %D" apache-edx
CustomLog ${APACHE_LOG_DIR}/apache-edx-access.log apache-edx
CustomLog {{ APACHE_LOG_DIR }}/apache-edx-access.log apache-edx
</VirtualHost>
NameVirtualHost *:{{apache_port}}
Listen {{apache_port}}
{%- for port in apache_ports -%}
NameVirtualHost *:{{ port }}
Listen {{ port }}
{% endfor %}
......@@ -56,6 +56,7 @@ aws_debian_pkgs:
aws_pip_pkgs:
- https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz
- awscli
- boto==2.20.1
aws_redhat_pkgs: []
aws_s3cmd_version: s3cmd-1.5.0-beta1
......
......@@ -47,7 +47,7 @@
- db_host: "{{ ORA_MYSQL_HOST }}"
db_name: "{{ ORA_MYSQL_DB_NAME }}"
script_name: ora-rds.sh
when: COMMON_MYSQL_READ_ONLY_PASS
when: COMMON_MYSQL_READ_ONLY_PASS is defined
# These templates rely on there being a global
# read_only mongo user, you must override the default
......@@ -67,4 +67,4 @@
db_name: "{{ FORUM_MONGO_DATABASE }}"
db_port: "{{ FORUM_MONGO_PORT }}"
script_name: forum-mongo.sh
when: COMMON_MONGO_READ_ONLY_PASS
when: COMMON_MONGO_READ_ONLY_PASS is defined
......@@ -60,7 +60,9 @@
notify: restart certs
- name : install python requirements
pip: requirements="{{ certs_requirements_file }}" virtualenv="{{ certs_venv_dir }}" state=present
pip: >
requirements="{{ certs_requirements_file }}" virtualenv="{{ certs_venv_dir }}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ certs_user }}"
notify: restart certs
......@@ -73,7 +75,7 @@
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
sudo_user: "{{ supervisor_service_user }}"
changed_when: supervisor_update.stdout != ""
changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != ""
when: not disable_edx_services
- name: ensure certs has started
......
......@@ -3,6 +3,11 @@
# to change the base directory
# where edX is installed
# Set global htpasswd credentials
COMMON_ENABLE_BASIC_AUTH: True
COMMON_HTPASSWD_USER: edx
COMMON_HTPASSWD_PASS: edx
COMMON_BASE_DIR: /edx
COMMON_DATA_DIR: "{{ COMMON_BASE_DIR}}/var"
COMMON_APP_DIR: "{{ COMMON_BASE_DIR}}/app"
......@@ -18,6 +23,7 @@ COMMON_CFG_DIR: "{{ COMMON_BASE_DIR }}/etc"
COMMON_ENVIRONMENT: 'default_env'
COMMON_DEPLOYMENT: 'default_deployment'
COMMON_PYPI_MIRROR_URL: 'https://pypi.python.org/simple'
COMMON_NPM_MIRROR_URL: 'http://registry.npmjs.org'
# do not include http/https
COMMON_GIT_MIRROR: 'github.com'
# override this var to set a different hostname
......@@ -38,15 +44,17 @@ COMMON_SSH_PASSWORD_AUTH: "no"
# the migrate user is granted table alter privs on all dbs
COMMON_MYSQL_READ_ONLY_USER: 'read_only'
COMMON_MYSQL_READ_ONLY_PASS: !!null
COMMON_MYSQL_READ_ONLY_PASS: 'password'
COMMON_MYSQL_ADMIN_USER: 'admin'
COMMON_MYSQL_ADMIN_PASS: !!null
COMMON_MYSQL_ADMIN_PASS: 'password'
COMMON_MYSQL_MIGRATE_USER: 'migrate'
COMMON_MYSQL_MIGRATE_PASS: !!null
COMMON_MYSQL_MIGRATE_PASS: 'password'
COMMON_MONGO_READ_ONLY_USER: 'read_only'
COMMON_MONGO_READ_ONLY_PASS: !!null
COMMON_ENABLE_DATADOG: False
COMMON_ENABLE_SPLUNKFORWARDER: False
COMMON_ENABLE_NEWRELIC: False
common_debian_pkgs:
- ntp
- ack-grep
......@@ -66,8 +74,9 @@ common_debian_pkgs:
- curl
common_pip_pkgs:
- pip==1.5.4
- virtualenv==1.11.4
- pip==1.5.6
- setuptools==3.6
- virtualenv==1.11.6
- virtualenvwrapper
common_web_user: www-data
......
......@@ -15,6 +15,12 @@
- "{{ COMMON_BIN_DIR }}"
- "{{ COMMON_CFG_DIR }}"
# Determine if machine is provisioned via vagrant
# Some EC2-specific steps would need to be skipped
- name: check if instance is vagrant
stat: path=/home/vagrant
register: vagrant_home_dir
# Need to install python-pycurl to use Ansible's apt_repository module
- name: Install python-pycurl
apt: pkg=python-pycurl state=present update_cache=yes
......@@ -56,10 +62,14 @@
- name: Install logrotate configuration for edX
template: >
dest=/etc/logrotate.d/edx-services
src=etc/logrotate.d/edx_logrotate.j2
dest=/etc/logrotate.d/hourly/edx-services
src=etc/logrotate.d/hourly/edx_logrotate.j2
owner=root group=root mode=644
# This can be removed after new release of edX
- name: Remove old edx-services config from /etc/logrotate.d
file: path=/etc/logrotate.d/edx-services state=absent
# This is in common to keep all log rotation config
# in the same role
- name: Create hourly subdirectory in logrotate.d
......@@ -103,13 +113,15 @@
# Remove some of the default motd display on ubuntu
# and add a custom motd. These do not require an
# ssh restart
# Only needed for EC2 instances.
- name: update the ssh motd on Ubuntu
file: >
mode=0644
path={{ item }}
when: vagrant_home_dir.stat.exists == false
with_items:
- "/etc/update-motd.d/10-help-text"
- "/usr/share/landscape/50-landscape-sysinfo"
- "/usr/share/landscape/landscape-sysinfo.wrapper"
- "/etc/update-motd.d/51-cloudguest"
- "/etc/update-motd.d/91-release-upgrade"
......
......@@ -4,9 +4,13 @@
copytruncate
delaycompress
dateext
dateformat -%Y%m%d-%s
missingok
notifempty
daily
rotate 90
size 1M
postrotate
/usr/bin/killall -HUP rsyslogd
endscript
}
---
DATADOG_API_KEY: "PUT_YOUR_API_KEY_HERE"
DATADOG_API_KEY: "SPECIFY_KEY_HERE"
datadog_apt_key: "http://keyserver.ubuntu.com/pks/lookup?op=get&search=0x226AE980C7A7DA52"
datadog_debian_pkgs:
- apparmor-utils
- build-essential
......
......@@ -86,7 +86,7 @@
- name: create a symlink for venv supervisor
file: >
src="{{ devpi_supervisor_venv_bin }}/supervisorctl"
dest={{ COMMON_BIN_DIR }}/{{ item }}.devpi
dest={{ COMMON_BIN_DIR }}/supervisorctl.devpi
state=link
- name: create a symlink for supervisor config
......@@ -103,7 +103,7 @@
- name: update devpi supervisor configuration
shell: "{{ devpi_supervisor_ctl }} -c {{ devpi_supervisor_cfg }} update"
register: supervisor_update
changed_when: supervisor_update.stdout != ""
changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != ""
- name: ensure devpi is started
supervisorctl_local: >
......
......@@ -123,7 +123,7 @@
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
sudo_user: "{{ supervisor_service_user }}"
changed_when: supervisor_update.stdout != ""
changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != ""
when: not disable_edx_services
- name: ensure discern, discern_celery has started
......
......@@ -12,7 +12,7 @@ IFS=","
-v add verbosity to edx_ansible run
-h this
<repo> - must be one of edx-platform, xqueue, cs_comments_service, xserver, ease, edx-ora, configuration, read-only-certificate-code
<repo> - must be one of edx-platform, xqueue, cs_comments_service, xserver, ease, edx-ora, configuration, read-only-certificate-code, edx-analytics-data-api
<version> - can be a commit or tag
EO
......@@ -43,13 +43,13 @@ edx_ansible_cmd="{{ edx_ansible_venv_bin }}/ansible-playbook -i localhost, -c lo
repos_to_cmd["edx-platform"]="$edx_ansible_cmd edxapp.yml -e 'edx_platform_version=$2'"
repos_to_cmd["xqueue"]="$edx_ansible_cmd xqueue.yml -e 'xqueue_version=$2'"
repos_to_cmd["xserver"]="$edx_ansible_cmd xserver.yml -e 'xserver_version=$2'"
repos_to_cmd["cs_comments_service"]="$edx_ansible_cmd forum.yml -e 'forum_version=$2'"
repos_to_cmd["xserver"]="$edx_ansible_cmd forums.yml -e 'xserver_version=$2'"
repos_to_cmd["xserver"]="$edx_ansible_cmd xserver.yml -e 'xserver_version=$2'"
repos_to_cmd["ease"]="$edx_ansible_cmd discern.yml -e 'discern_ease_version=$2' && $edx_ansible_cmd ora.yml -e 'ora_ease_version=$2'"
repos_to_cmd["edx-ora"]="$edx_ansible_cmd ora.yml -e 'ora_version=$2'"
repos_to_cmd["configuration"]="$edx_ansible_cmd edx_ansible.yml -e 'configuration_version=$2'"
repos_to_cmd["read-only-certificate-code"]="$edx_ansible_cmd certs.yml -e 'certs_version=$2'"
repos_to_cmd["edx-analytics-data-api"]="$edx_ansible_cmd analyticsapi.yml -e 'ANALYTICS_API_VERSION=$2'"
if [[ -z $1 || -z $2 ]]; then
......
......@@ -44,12 +44,34 @@
- "{{ COMMON_APP_DIR }}/{{ edx_service_name }}"
- "{{ COMMON_APP_DIR }}/{{ edx_service_name }}/venvs"
- name: create edx_service data and staticfiles dir
file: >
path="{{ item }}"
state=directory
owner="{{ edx_service_name }}"
group="{{ common_web_group }}"
with_items:
- "{{ COMMON_DATA_DIR }}/{{ edx_service_name }}/data"
- "{{ COMMON_DATA_DIR }}/{{ edx_service_name }}/staticfiles"
- name: create edx_service log dir
file: >
path="{{ item }}"
state=directory
owner="syslog"
group="syslog"
with_items:
- "{{ COMMON_LOG_DIR }}/{{ edx_service_name }}"
# Replace dashes with underscores to support roles that use
# dashes (the role vars will contain underscores)
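# For example, edx_service_name "analytics-api" resolves to the
# analytics_api_debian_pkgs and analytics_api_redhat_pkgs variables.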
- name: install a bunch of system packages on which edx_service relies
apt: pkg={{ item }} state=present
with_items: "{{ edx_service_name }}_debian_pkgs"
with_items: "{{ edx_service_name.replace('-', '_') }}_debian_pkgs"
when: ansible_distribution in common_debian_variants
- name: install a bunch of system packages on which edx_service relies
yum: pkg={{ item }} state=present
with_items: "{{ edx_service_name }}_redhat_pkgs"
with_items: "{{ edx_service_name.replace('-', '_') }}_redhat_pkgs"
when: ansible_distribution in common_redhat_variants
......@@ -17,7 +17,7 @@ EDXAPP_PREVIEW_LMS_BASE: ''
EDXAPP_CMS_BASE: ''
EDXAPP_AWS_ACCESS_KEY_ID: ''
EDXAPP_AWS_SECRET_ACCESS_KEY: ''
EDXAPP_XQUEUE_BASIC_AUTH: [ 'edx', 'edx' ]
EDXAPP_XQUEUE_BASIC_AUTH: [ "{{ COMMON_HTPASSWD_USER }}", "{{ COMMON_HTPASSWD_PASS }}" ]
EDXAPP_XQUEUE_DJANGO_AUTH:
username: 'lms'
password: 'password'
......@@ -32,11 +32,14 @@ EDXAPP_MONGO_DB_NAME: 'edxapp'
EDXAPP_MYSQL_DB_NAME: 'edxapp'
EDXAPP_MYSQL_USER: 'edxapp001'
EDXAPP_MYSQL_USER_ADMIN: 'root'
EDXAPP_MYSQL_USER_MIGRATE: 'migrate'
EDXAPP_MYSQL_PASSWORD: 'password'
EDXAPP_MYSQL_PASSWORD_READ_ONLY: 'password'
EDXAPP_MYSQL_PASSWORD_ADMIN: 'password'
EDXAPP_MYSQL_PASSWORD_MIGRATE: 'password'
EDXAPP_MYSQL_REPLICA_DB_NAME: "{{ EDXAPP_MYSQL_DB_NAME }}"
EDXAPP_MYSQL_REPLICA_USER: "{{ EDXAPP_MYSQL_USER }}"
EDXAPP_MYSQL_REPLICA_PASSWORD: "{{ EDXAPP_MYSQL_PASSWORD }}"
EDXAPP_MYSQL_REPLICA_HOST: "{{ EDXAPP_MYSQL_HOST }}"
EDXAPP_MYSQL_REPLICA_PORT: "{{ EDXAPP_MYSQL_PORT }}"
EDXAPP_MYSQL_HOST: 'localhost'
EDXAPP_MYSQL_PORT: '3306'
......@@ -61,7 +64,7 @@ EDXAPP_ZENDESK_API_KEY: ''
EDXAPP_CELERY_USER: 'celery'
EDXAPP_CELERY_PASSWORD: 'celery'
EDXAPP_PLATFORM_NAME: 'edX'
EDXAPP_PLATFORM_NAME: 'Your Platform Name Here'
EDXAPP_CAS_SERVER_URL: ''
EDXAPP_CAS_EXTRA_LOGIN_PARAMS: ''
......@@ -88,6 +91,8 @@ EDXAPP_BOOK_URL: ''
# if xqueue is run on the same server
# as the lms (it's sent in the request)
EDXAPP_SITE_NAME: 'localhost'
EDXAPP_LMS_SITE_NAME: "{{ EDXAPP_SITE_NAME }}"
EDXAPP_CMS_SITE_NAME: 'localhost'
EDXAPP_MEDIA_URL: ''
EDXAPP_ANALYTICS_SERVER_URL: ''
EDXAPP_FEEDBACK_SUBMISSION_EMAIL: ''
......@@ -106,6 +111,7 @@ EDXAPP_CMS_NGINX_PORT: 18010
EDXAPP_CMS_SSL_NGINX_PORT: 48010
EDXAPP_LANG: 'en_US.UTF-8'
EDXAPP_LANGUAGE_CODE : 'en'
EDXAPP_TIME_ZONE: 'America/New_York'
EDXAPP_TECH_SUPPORT_EMAIL: 'technical@example.com'
......@@ -115,6 +121,10 @@ EDXAPP_DEFAULT_FROM_EMAIL: 'registration@example.com'
EDXAPP_DEFAULT_FEEDBACK_EMAIL: 'feedback@example.com'
EDXAPP_DEFAULT_SERVER_EMAIL: 'devops@example.com'
EDXAPP_BULK_EMAIL_DEFAULT_FROM_EMAIL: 'no-reply@example.com'
EDXAPP_UNIVERSITY_EMAIL: 'university@example.com'
EDXAPP_PRESS_EMAIL: 'press@example.com'
EDXAPP_PLATFORM_TWITTER_ACCOUNT: '@YourPlatformTwitterAccount'
EDXAPP_PLATFORM_FACEBOOK_ACCOUNT: 'http://www.facebook.com/YourPlatformFacebookAccount'
EDXAPP_ENV_EXTRA: {}
EDXAPP_AUTH_EXTRA: {}
......@@ -146,7 +156,7 @@ EDXAPP_PAID_COURSE_REGISTRATION_CURRENCY: ['usd', '$']
EDXAPP_NO_PREREQ_INSTALL: 1
# whether to setup the python codejail or not
EDXAPP_PYTHON_SANDBOX: false
EDXAPP_PYTHON_SANDBOX: true
# this next setting, if true, turns on actual sandbox enforcement. If not true,
# it puts the sandbox in 'complain' mode, for reporting but not enforcement
EDXAPP_SANDBOX_ENFORCE: true
......@@ -173,9 +183,6 @@ EDXAPP_USE_GIT_IDENTITY: false
# into this var
EDXAPP_GIT_IDENTITY: !!null
# Configuration for database migration
EDXAPP_TEST_MIGRATE_DB_NAME: "{{ COMMON_ENVIRONMENT }}_{{ COMMON_DEPLOYMENT }}_test_{{ EDXAPP_MYSQL_DB_NAME }}"
EDXAPP_UPDATE_STATIC_FILES_KEY: false
# Set this to true if you want to install the private pip
# requirements in the edx-platform repo.
......@@ -184,6 +191,33 @@ EDXAPP_UPDATE_STATIC_FILES_KEY: false
# set to true
EDXAPP_INSTALL_PRIVATE_REQUIREMENTS: false
EDXAPP_GOOGLE_ANALYTICS_ACCOUNT: "UA-DUMMY"
EDXAPP_PEARSON_TEST_PASSWORD: ""
EDXAPP_SEGMENT_IO_LMS_KEY: ""
EDXAPP_EDX_API_KEY: ""
# This is the default set in common.py
EDXAPP_VERIFY_STUDENT:
DAYS_GOOD_FOR: 365
EDXAPP_GOOGLE_ANALYTICS_LINKEDIN: ""
EDXAPP_CONTENTSTORE_ADDITIONAL_OPTS: {}
EDXAPP_BULK_EMAIL_EMAILS_PER_TASK: 500
# If using microsites this should point to the microsite repo
EDXAPP_MICROSITE_ROOT_DIR: "{{ edxapp_app_dir }}/edx-microsite"
# this dictionary defines what microsites are configured
EDXAPP_MICROSITE_CONFIGRATION: {}
# Instructor code that will not be run in the code sandbox
EDXAPP_COURSES_WITH_UNSAFE_CODE: []
EDXAPP_SESSION_COOKIE_DOMAIN: ""
# XML Course related flags
EDXAPP_XML_FROM_GIT: false
EDXAPP_XML_S3_BUCKET: !!null
EDXAPP_XML_S3_KEY: !!null
EDXAPP_NEWRELIC_LMS_APPNAME: "edX-LMS"
EDXAPP_NEWRELIC_CMS_APPNAME: "edX-CMS"
#-------- Everything below this line is internal to the role ------------
# Use YAML references (& and *) and hash merge <<: to factor out shared settings
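# For example, a minimal sketch of the pattern (values illustrative):
#
#   default: &default_generic_cache
#     BACKEND: 'django.core.cache.backends.memcached.MemcachedCache'
#   celery:
#     <<: *default_generic_cache
#     KEY_PREFIX: 'integration_celery'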
......@@ -204,6 +238,7 @@ edxapp_gem_bin: "{{ edxapp_gem_root }}/bin"
edxapp_user: edxapp
edxapp_deploy_path: "{{ edxapp_venv_bin }}:{{ edxapp_code_dir }}/bin:{{ edxapp_rbenv_bin }}:{{ edxapp_rbenv_shims }}:{{ edxapp_gem_bin }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
edxapp_staticfile_dir: "{{ edxapp_data_dir }}/staticfiles"
edxapp_course_static_dir: "{{ edxapp_data_dir }}/course_static"
edxapp_course_data_dir: "{{ edxapp_data_dir }}/data"
edxapp_upload_dir: "{{ edxapp_data_dir }}/uploads"
edxapp_theme_dir: "{{ edxapp_data_dir }}/themes"
......@@ -246,6 +281,7 @@ edxapp_chksum_req_files:
- "{{ pre_requirements_file }}"
- "{{ post_requirements_file }}"
- "{{ base_requirements_file }}"
- "{{ custom_requirements_file }}"
- "{{ paver_requirements_file }}"
- "{{ sandbox_post_requirements }}"
- "{{ sandbox_base_requirements }}"
......@@ -302,6 +338,7 @@ edxapp_generic_auth_config: &edxapp_generic_auth
password: $EDXAPP_MONGO_PASSWORD
port: $EDXAPP_MONGO_PORT
user: $EDXAPP_MONGO_USER
ADDITIONAL_OPTIONS: $EDXAPP_CONTENTSTORE_ADDITIONAL_OPTS
DOC_STORE_CONFIG: *edxapp_generic_default_docstore
MODULESTORE:
default: &edxapp_generic_default_modulestore
......@@ -323,6 +360,13 @@ edxapp_generic_auth_config: &edxapp_generic_auth
OPTIONS: *generic_modulestore_default_options
DOC_STORE_CONFIG: *edxapp_generic_default_docstore
DATABASES:
read_replica:
ENGINE: 'django.db.backends.mysql'
NAME: $EDXAPP_MYSQL_REPLICA_DB_NAME
USER: $EDXAPP_MYSQL_REPLICA_USER
PASSWORD: $EDXAPP_MYSQL_REPLICA_PASSWORD
HOST: $EDXAPP_MYSQL_REPLICA_HOST
PORT: $EDXAPP_MYSQL_REPLICA_PORT
default:
ENGINE: 'django.db.backends.mysql'
NAME: $EDXAPP_MYSQL_DB_NAME
......@@ -342,8 +386,13 @@ edxapp_generic_auth_config: &edxapp_generic_auth
ZENDESK_API_KEY: $EDXAPP_ZENDESK_API_KEY
CELERY_BROKER_USER: $EDXAPP_CELERY_USER
CELERY_BROKER_PASSWORD: $EDXAPP_CELERY_PASSWORD
GOOGLE_ANALYTICS_ACCOUNT: $EDXAPP_GOOGLE_ANALYTICS_ACCOUNT
generic_env_config: &edxapp_generic_env
COURSES_WITH_UNSAFE_CODE: $EDXAPP_COURSES_WITH_UNSAFE_CODE
BULK_EMAIL_EMAILS_PER_TASK: $EDXAPP_BULK_EMAIL_EMAILS_PER_TASK
MICROSITE_ROOT_DIR: $EDXAPP_MICROSITE_ROOT_DIR
MICROSITE_CONFIGURATION: $EDXAPP_MICROSITE_CONFIGRATION
GRADES_DOWNLOAD:
STORAGE_TYPE: $EDXAPP_GRADE_STORAGE_TYPE
BUCKET: $EDXAPP_GRADE_BUCKET
......@@ -361,12 +410,12 @@ generic_env_config: &edxapp_generic_env
FEATURES: $EDXAPP_FEATURES
WIKI_ENABLED: true
SYSLOG_SERVER: $EDXAPP_SYSLOG_SERVER
SITE_NAME: $EDXAPP_SITE_NAME
LOG_DIR: "{{ COMMON_DATA_DIR }}/logs/edx"
MEDIA_URL: $EDXAPP_MEDIA_URL
ANALYTICS_SERVER_URL: $EDXAPP_ANALYTICS_SERVER_URL
FEEDBACK_SUBMISSION_EMAIL: $EDXAPP_FEEDBACK_SUBMISSION_EMAIL
TIME_ZONE: $EDXAPP_TIME_ZONE
LANGUAGE_CODE : $EDXAPP_LANGUAGE_CODE
MKTG_URL_LINK_MAP: $EDXAPP_MKTG_URL_LINK_MAP
MKTG_URLS: $EDXAPP_MKTG_URLS
# repo root for courses
......@@ -383,27 +432,25 @@ generic_env_config: &edxapp_generic_env
mongo_metadata_inheritance:
<<: *default_generic_cache
KEY_PREFIX: 'integration_mongo_metadata_inheritance'
TIMEOUT: 300
staticfiles:
<<: *default_generic_cache
KEY_PREFIX: 'integration_static_files'
celery:
<<: *default_generic_cache
KEY_PREFIX: 'integration_celery'
TIMEOUT: "7200"
CELERY_BROKER_TRANSPORT: 'amqp'
CELERY_BROKER_HOSTNAME: $EDXAPP_RABBIT_HOSTNAME
COMMENTS_SERVICE_URL: $EDXAPP_COMMENTS_SERVICE_URL
LOGGING_ENV: $EDXAPP_LOGGING_ENV
SESSION_COOKIE_DOMAIN: !!null
SESSION_COOKIE_DOMAIN: $EDXAPP_SESSION_COOKIE_DOMAIN
COMMENTS_SERVICE_KEY: $EDXAPP_COMMENTS_SERVICE_KEY
SEGMENT_IO_LMS: true
THEME_NAME: $edxapp_theme_name
TECH_SUPPORT_EMAIL: $EDXAPP_TECH_SUPPORT_EMAIL
CONTACT_EMAIL: $EDXAPP_CONTACT_EMAIL
BUGS_EMAIL: $EDXAPP_BUGS_EMAIL
CODE_JAIL:
limits:
VMEM: 0
REALTIME: 3
DEFAULT_FROM_EMAIL: $EDXAPP_DEFAULT_FROM_EMAIL
DEFAULT_FEEDBACK_EMAIL: $EDXAPP_DEFAULT_FEEDBACK_EMAIL
SERVER_EMAIL: $EDXAPP_DEFAULT_SERVER_EMAIL
......@@ -413,9 +460,19 @@ generic_env_config: &edxapp_generic_env
CAS_ATTRIBUTE_CALLBACK: $EDXAPP_CAS_ATTRIBUTE_CALLBACK
HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS:
'preview\.': 'draft'
UNIVERSITY_EMAIL: $EDXAPP_UNIVERSITY_EMAIL
PRESS_EMAIL: $EDXAPP_PRESS_EMAIL
PLATFORM_TWITTER_ACCOUNT: $EDXAPP_PLATFORM_TWITTER_ACCOUNT
PLATFORM_FACEBOOK_ACCOUNT: $EDXAPP_PLATFORM_FACEBOOK_ACCOUNT
lms_auth_config:
<<: *edxapp_generic_auth
PEARSON_TEST_PASSWORD: $EDXAPP_PEARSON_TEST_PASSWORD
SEGMENT_IO_LMS_KEY: $EDXAPP_SEGMENT_IO_LMS_KEY
EDX_API_KEY: $EDXAPP_EDX_API_KEY
VERIFY_STUDENT: $EDXAPP_VERIFY_STUDENT
GOOGLE_ANALYTICS_LINKEDIN: $EDXAPP_GOOGLE_ANALYTICS_LINKEDIN
CC_PROCESSOR: $EDXAPP_CC_PROCESSOR
MODULESTORE:
default: &lms_default_modulestore
......@@ -447,18 +504,25 @@ lms_auth_config:
lms_env_config:
<<: *edxapp_generic_env
PAID_COURSE_REGISTRATION_CURRENCY: $EDXAPP_PAID_COURSE_REGISTRATION_CURRENCY
'CODE_JAIL':
SITE_NAME: $EDXAPP_LMS_SITE_NAME
CODE_JAIL:
# from https://github.com/edx/codejail/blob/master/codejail/django_integration.py#L24, '' should be same as None
'python_bin': '{% if EDXAPP_PYTHON_SANDBOX %}{{ edxapp_sandbox_venv_dir }}/bin/python{% endif %}'
'limits':
'VMEM': 0
'REALTIME': 5
'user': '{{ edxapp_sandbox_user }}'
python_bin: '{% if EDXAPP_PYTHON_SANDBOX %}{{ edxapp_sandbox_venv_dir }}/bin/python{% endif %}'
limits:
# Limit the memory of the jailed process to something high but not
# infinite (128MiB in bytes)
VMEM: 134217728
# Time in seconds that the jailed process has to run.
REALTIME: 1
      # Needs to be non-zero so that jailed code can use it as its temp directory. (1 MiB in bytes)
FSIZE: 1048576
user: '{{ edxapp_sandbox_user }}'
cms_auth_config:
<<: *edxapp_generic_auth
cms_env_config:
<<: *edxapp_generic_env
SITE_NAME: $EDXAPP_CMS_SITE_NAME
# install dir for the edx-platform repo
edxapp_code_dir: "{{ edxapp_app_dir }}/edx-platform"
......@@ -497,7 +561,7 @@ worker_core_mult:
edxapp_use_custom_theme: false
edxapp_theme_name: ""
edxapp_theme_source_repo: 'https://{{ COMMON_GIT_MIRROR }}/Stanford-Online/edx-theme.git'
edxapp_theme_version: 'HEAD'
edxapp_theme_version: 'master'
# make this the public URL instead of writable
edx_platform_repo: "https://{{ COMMON_GIT_MIRROR }}/edx/edx-platform.git"
......@@ -508,6 +572,7 @@ local_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/local.txt"
pre_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/pre.txt"
post_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/post.txt"
base_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/base.txt"
custom_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/custom.txt"
paver_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/paver.txt"
github_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/github.txt"
private_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/edx-private.txt"
......
......@@ -6,6 +6,7 @@ dependencies:
rbenv_dir: "{{ edxapp_app_dir }}"
rbenv_ruby_version: "{{ edxapp_ruby_version }}"
- devpi
- nltk
- role: user
user_info:
- name: "{{ EDXAPP_AUTOMATOR_NAME }}"
......
......@@ -108,7 +108,7 @@
# Set the npm registry
- name: Set the npm registry
shell:
npm config set registry 'http://registry.npmjs.org'
npm config set registry '{{ COMMON_NPM_MIRROR_URL }}'
creates="{{ edxapp_app_dir }}/.npmrc"
sudo_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}"
......@@ -218,7 +218,6 @@
- "restart edxapp"
- "restart edxapp_workers"
# If using CAS and you have a function for mapping attributes, install
# the module here. The next few tasks set up the python code sandbox
- name: install CAS attribute module
......@@ -249,6 +248,33 @@
- "restart edxapp"
- "restart edxapp_workers"
# The next few tasks install xml courses.
# Install the xml courses from an s3 bucket
- name: get s3 one time url
s3: >
bucket="{{ EDXAPP_XML_S3_BUCKET }}"
object="{{ EDXAPP_XML_S3_KEY }}"
mode="geturl"
expiration=300
when: not EDXAPP_XML_FROM_GIT and EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
register: s3_one_time_url
- name: download from one time url
get_url: url="{{ s3_one_time_url.url }}" dest="/tmp/{{ EDXAPP_XML_S3_KEY|basename }}"
when: not EDXAPP_XML_FROM_GIT and EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
register: download_xml_s3
- name: unzip the data to the data dir
shell: >
tar xzf /tmp/{{ EDXAPP_XML_S3_KEY|basename }}
chdir="{{ edxapp_data_dir }}"
when: download_xml_s3.changed
- include: xml.yml
tags: deploy
when: EDXAPP_XML_FROM_GIT
# The next few tasks set up the python code sandbox
# need to disable this profile, otherwise the pip inside the sandbox venv has no permissions
......@@ -283,7 +309,7 @@
sudo_user: "{{ edxapp_sandbox_user }}"
when: EDXAPP_PYTHON_SANDBOX
register: sandbox_install_output
changed_when: "'installed' in sandbox_install_output"
changed_when: sandbox_install_output.stdout is defined and 'installed' in sandbox_install_output.stdout
notify:
- "restart edxapp"
- "restart edxapp_workers"
......@@ -333,6 +359,9 @@
# service variants configured, runs
# gather_assets and db migrations
- include: service_variant_config.yml
tags:
- service_variant_config
- deploy
# call supervisorctl update. this reloads
# the supervisorctl config and restarts
......@@ -343,7 +372,7 @@
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
sudo_user: "{{ supervisor_service_user }}"
changed_when: supervisor_update.stdout != ""
changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != ""
when: not disable_edx_services
- name: ensure edxapp has started
......
......@@ -25,6 +25,7 @@
- "{{ edxapp_venvs_dir }}"
- "{{ edxapp_theme_dir }}"
- "{{ edxapp_staticfile_dir }}"
- "{{ edxapp_course_static_dir }}"
- name: create edxapp log dir
file: >
......
......@@ -75,24 +75,6 @@
when: celery_worker is defined and not disable_edx_services
sudo_user: "{{ supervisor_user }}"
# Fake syncdb with migrate, only when fake_migrations is defined
# This overrides the database name to be the test database which
# the default application user has full write access to.
#
# This is run in cases where you want to test to see if migrations
# work without actually running them (when creating AMIs for example).
- name: syncdb and migrate
shell: >
chdir={{ edxapp_code_dir }}
{{ edxapp_venv_bin}}/python manage.py lms syncdb --migrate --noinput --settings=aws_migrate
when: fake_migrations is defined and migrate_db is defined and migrate_db|lower == "yes"
sudo_user: "{{ edxapp_user }}"
environment:
DB_MIGRATION_NAME: "{{ EDXAPP_TEST_MIGRATE_DB_NAME }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
# Syncdb with migrate when the migrate user is overridden in extra vars
- name: syncdb and migrate
shell: >
......@@ -107,54 +89,6 @@
- "restart edxapp"
- "restart edxapp_workers"
# Syncdb with migrate when the default migrate user is not set,
# in this case use the EDXAPP_MYSQL_USER_MIGRATE user to run migrations
- name: syncdb and migrate
shell: >
chdir={{ edxapp_code_dir }}
{{ edxapp_venv_bin}}/python manage.py lms syncdb --migrate --noinput --settings=aws_migrate
when: fake_migrations is not defined and migrate_db is defined and migrate_db|lower == "yes" and not COMMON_MYSQL_MIGRATE_PASS
environment:
DB_MIGRATION_USER: "{{ EDXAPP_MYSQL_USER_MIGRATE }}"
DB_MIGRATION_PASS: "{{ EDXAPP_MYSQL_PASSWORD_MIGRATE }}"
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
# Fake migrate, only when fake_migrations is defined
# This overrides the database name to be the test database which
# the default application user has full write access to
- name: db migrate
shell: >
chdir={{ edxapp_code_dir }}
{{ edxapp_venv_bin}}/python manage.py lms migrate --noinput --settings=aws_migrate
when: fake_migrations is defined and migrate_only is defined and migrate_only|lower == "yes"
sudo_user: "{{ edxapp_user }}"
environment:
DB_MIGRATION_NAME: "{{ EDXAPP_TEST_MIGRATE_DB_NAME }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
# Regular migrations
- name: db migrate
shell: >
chdir={{ edxapp_code_dir }}
{{ edxapp_venv_bin}}/python manage.py lms migrate --noinput --settings=aws_migrate
when: fake_migrations is not defined and migrate_only is defined and migrate_only|lower == "yes"
environment:
DB_MIGRATION_USER: "{{ EDXAPP_MYSQL_USER_MIGRATE }}"
DB_MIGRATION_PASS: "{{ EDXAPP_MYSQL_PASSWORD_MIGRATE }}"
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
# Gather assets using rake if possible
- name: gather {{ item }} static assets with rake
......
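# A hypothetical EDXAPP_XML_COURSES entry, showing the fields the tasks
# below expect (all values are illustrative):
#
# EDXAPP_XML_COURSES:
#   - repo_name: "content-example-course"
#     repo_url: "git@github.com:example/content-example-course.git"
#     version: "master"
#     org: "ExampleX"
#     course: "EX101"
#     run: "2014_Spring"
#     disposition: "on disk"  # one of "on disk", "no static import", "import"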
- name: clone the xml course repo
git: >
repo="{{ item.repo_url }}"
dest="{{ edxapp_course_data_dir }}/{{ item.repo_name }}"
version="{{ item.version }}"
accept_hostkey=True
sudo_user: "{{ edxapp_user }}"
environment:
GIT_SSH: "{{ edxapp_git_ssh }}"
with_items: EDXAPP_XML_COURSES
- name: update course.xml
template: >
src=course.xml.j2
dest="{{ edxapp_course_data_dir }}/{{ item.repo_name }}/course.xml"
sudo_user: "{{ edxapp_user }}"
with_items: EDXAPP_XML_COURSES
- name: make symlinks for the static data
shell: >
executable=/bin/bash
if [[ -d {{ edxapp_course_data_dir }}/{{ item.repo_name }}/static ]]; then
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }}/static {{ edxapp_course_static_dir }}/{{ item.repo_name }}
else
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }} {{ edxapp_course_static_dir }}/{{ item.repo_name }}
fi
with_items: EDXAPP_XML_COURSES
when: item.disposition == "on disk" or item.disposition == "no static import"
- name: make symlinks so code works
file: >
src="{{ edxapp_course_data_dir }}/{{ item.repo_name }}"
dest="{{ edxapp_course_data_dir }}/{{ item.course }}"
state=link
with_items: EDXAPP_XML_COURSES
when: item.disposition == "on disk" or item.disposition == "no static import"
- name: import courses with nostatic flag
shell: >
{{ edxapp_venv_bin }}/python manage.py cms --settings=aws import --nostatic {{ edxapp_course_data_dir }} {{ item.repo_name }}
chdir="{{ edxapp_code_dir }}"
sudo_user: "{{ edxapp_user }}"
with_items: EDXAPP_XML_COURSES
when: item.disposition == "no static import"
- name: import courses including static data
shell: >
{{ edxapp_venv_bin }}/python manage.py cms --settings=aws import {{ edxapp_course_data_dir }} {{ item.repo_name }}
chdir="{{ edxapp_code_dir }}"
sudo_user: "{{ edxapp_user }}"
with_items: EDXAPP_XML_COURSES
when: item.disposition == "import"
- name: delete courses that were fully imported
file: path="{{ edxapp_course_data_dir }}/{{ item.repo_name }}" state=absent
with_items: EDXAPP_XML_COURSES
when: item.disposition == "import"
- name: delete .git repos
file: path="{{ edxapp_course_data_dir }}/{{ item.repo_name }}/.git" state=absent
with_items: EDXAPP_XML_COURSES
when: item.disposition == "on disk" or item.disposition == "no static import"
- name: create an archive of course data and course static dirs
shell: tar czf /tmp/static_course_content.tar.gz -C {{ edxapp_data_dir }} {{ edxapp_course_data_dir|basename }} {{ edxapp_course_static_dir|basename }}
- name: upload archive to s3
s3: >
bucket="{{ EDXAPP_XML_S3_BUCKET }}"
object="{{ EDXAPP_XML_S3_KEY }}"
mode=put
overwrite=True
src="/tmp/static_course_content.tar.gz"
when: EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
- name: remove archive from disk
file: path="/tmp/static_course_content.tar.gz" state=absent
when: EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
{{ edxapp_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:{{ edxapp_sandbox_venv_dir }}/bin/python
{{ edxapp_user }} ALL=(ALL) NOPASSWD:/bin/kill
{{ edxapp_user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill
{{ common_web_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:{{ edxapp_sandbox_venv_dir }}/bin/python
{{ common_web_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:/bin/rm /tmp/codejail-*/tmp
{{ common_web_user }} ALL=(ALL) NOPASSWD:/bin/kill
{{ common_web_user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill
[program:cms]
{% if COMMON_ENABLE_NEWRELIC %}
{% set executable = edxapp_venv_dir + '/bin/newrelic-admin run-program ' + edxapp_venv_dir + '/bin/gunicorn' %}
{% else %}
{% set executable = edxapp_venv_dir + '/bin/gunicorn' %}
{% endif %}
{% if ansible_processor|length > 0 %}
command={{ edxapp_venv_dir }}/bin/gunicorn --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
command={{ executable }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
{% else %}
command={{ edxapp_venv_dir }}/bin/gunicorn --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
command={{ executable }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
{% endif %}
user={{ common_web_user }}
directory={{ edxapp_code_dir }}
environment=PORT={{edxapp_cms_gunicorn_port}},ADDRESS={{edxapp_cms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_cms_env }},SERVICE_VARIANT="cms"
environment={% if COMMON_ENABLE_NEWRELIC %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_CMS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}PORT={{edxapp_cms_gunicorn_port}},ADDRESS={{edxapp_cms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_cms_env }},SERVICE_VARIANT="cms"
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
killasgroup=true
......
#include <tunables/global>
{{ edxapp_sandbox_venv_dir }}/bin/python flags=(complain) {
{{ edxapp_sandbox_venv_dir }}/bin/python {
#include <abstractions/base>
{{ edxapp_sandbox_venv_dir }}/** mr,
{{ edxapp_code_dir }}/common/lib/sandbox-packages/** r,
/tmp/codejail-*/ rix,
/tmp/codejail-*/** rix,
/tmp/codejail-*/** wrix,
#
# Whitelist particular shared objects from the system
......@@ -19,6 +19,7 @@
/usr/lib/python2.7/lib-dynload/_csv.so mr,
/usr/lib/python2.7/lib-dynload/datetime.so mr,
/usr/lib/python2.7/lib-dynload/_elementtree.so mr,
/usr/lib/python2.7/lib-dynload/pyexpat.so mr,
#
# Allow access to selections from /proc
......
<course org="{{ item.org }}" course="{{ item.course }}" url_name="{{ item.run }}" />
[program:lms]
{% if COMMON_ENABLE_NEWRELIC %}
{% set executable = edxapp_venv_dir + '/bin/newrelic-admin run-program ' + edxapp_venv_dir + '/bin/gunicorn' %}
{% else %}
{% set executable = edxapp_venv_dir + '/bin/gunicorn' %}
{% endif %}
{% if ansible_processor|length > 0 %}
command={{ edxapp_venv_dir }}/bin/gunicorn --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
command={{ executable }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% else %}
command={{ edxapp_venv_dir }}/bin/gunicorn --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
command={{ executable }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% endif %}
user={{ common_web_user }}
directory={{ edxapp_code_dir }}
environment=PORT={{edxapp_lms_gunicorn_port}},ADDRESS={{edxapp_lms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_lms_env }},SERVICE_VARIANT="lms"
environment={% if COMMON_ENABLE_NEWRELIC %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_LMS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%} PORT={{edxapp_lms_gunicorn_port}},ADDRESS={{edxapp_lms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_lms_env }},SERVICE_VARIANT="lms"
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
killasgroup=true
......
......@@ -13,17 +13,12 @@
- name: install packages needed for single server
apt: pkg={{','.join(edxlocal_debian_pkgs)}} install_recommends=yes state=present
- name: setup the migration db user
mysql_user: >
name={{ EDXAPP_MYSQL_USER_MIGRATE }}
password={{ EDXAPP_MYSQL_PASSWORD_MIGRATE}}
priv='{{EDXAPP_MYSQL_DB_NAME}}.*:ALL'
- name: setup the edxapp db user
mysql_user: >
name={{ EDXAPP_MYSQL_USER }}
password={{ EDXAPP_MYSQL_PASSWORD }}
priv='{{EDXAPP_MYSQL_DB_NAME}}.*:ALL'
when: EDXAPP_MYSQL_USER is defined
- name: create a database for edxapp
mysql_db: >
......@@ -37,20 +32,21 @@
name={{ XQUEUE_MYSQL_USER }}
password={{ XQUEUE_MYSQL_PASSWORD }}
priv='{{XQUEUE_MYSQL_DB_NAME}}.*:ALL'
when: XQUEUE_MYSQL_USER is defined and not disable_edx_services
when: XQUEUE_MYSQL_USER is defined
- name: create a database for xqueue
mysql_db: >
db=xqueue
state=present
encoding=utf8
when: XQUEUE_MYSQL_USER is defined and not disable_edx_services
when: XQUEUE_MYSQL_USER is defined
- name: setup the ora db user
mysql_user: >
name={{ ORA_MYSQL_USER }}
password={{ ORA_MYSQL_PASSWORD }}
priv='{{ORA_MYSQL_DB_NAME}}.*:ALL'
when: ORA_MYSQL_USER is defined
- name: create a database for ora
mysql_db: >
......@@ -59,20 +55,56 @@
encoding=utf8
when: ORA_MYSQL_USER is defined
- name: setup the discern db user
mysql_user: >
name={{ DISCERN_MYSQL_USER }}
password={{ DISCERN_MYSQL_PASSWORD }}
priv='{{DISCERN_MYSQL_DB_NAME}}.*:ALL'
when: DISCERN_MYSQL_USER is defined and not disable_edx_services
- name: create a database for discern
- name: create databases for analytics api
mysql_db: >
db=discern
db={{ item }}
state=present
encoding=utf8
when: DISCERN_MYSQL_USER is defined and not disable_edx_services
when: ANALYTICS_API_CONFIG is defined
with_items:
- "{{ ANALYTICS_API_CONFIG['DATABASES']['default']['NAME'] }}"
- "{{ ANALYTICS_API_CONFIG['DATABASES']['reports']['NAME'] }}"
- name: create api user for the analytics api
mysql_user: >
name=api001
password=password
priv='{{ ANALYTICS_API_CONFIG['DATABASES']['default']['NAME'] }}.*:ALL/reports.*:SELECT'
when: ANALYTICS_API_CONFIG is defined
- name: create read-only reports user for the analytics-api
mysql_user: >
name=reports001
password=password
priv='{{ ANALYTICS_API_CONFIG['DATABASES']['reports']['NAME'] }}.*:SELECT'
when: ANALYTICS_API_CONFIG is defined
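# Grant the migration user ALL on each service database that is actually
# configured; unset names fall back to 'None' and are skipped by the when.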
- name: setup the migration db user
mysql_user: >
name={{ COMMON_MYSQL_MIGRATE_USER }}
password={{ COMMON_MYSQL_MIGRATE_PASS }}
priv='{{ item }}.*:ALL'
append_privs=yes
when: item != 'None'
with_items:
- "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
- "{{ XQUEUE_MYSQL_DB_NAME|default('None') }}"
- "{{ ORA_MYSQL_DB_NAME|default('None') }}"
- "{{ ANALYTICS_API_CONFIG['DATABASES']['default']['NAME']|default('None') }}"
- "{{ ANALYTICS_API_CONFIG['DATABASES']['reports']['NAME']|default('None') }}"
- name: setup the read-only db user
mysql_user: >
name={{ COMMON_MYSQL_READ_ONLY_USER }}
password={{ COMMON_MYSQL_READ_ONLY_PASS }}
priv='*.*:ALL'
- name: setup the admin db user
mysql_user: >
name={{ COMMON_MYSQL_ADMIN_USER }}
password={{ COMMON_MYSQL_ADMIN_PASS }}
priv='*.*:CREATE USER'
- name: install memcached
......
---
# By default, point to the RabbitMQ broker running locally
FLOWER_BROKER_USERNAME: "celery"
FLOWER_BROKER_PASSWORD: "celery"
FLOWER_BROKER_HOST: "127.0.0.1"
FLOWER_BROKER_PORT: 5672
FLOWER_ADDRESS: "0.0.0.0"
FLOWER_PORT: "5555"
flower_user: "flower"
flower_app_dir: "{{ COMMON_APP_DIR }}/flower"
flower_data_dir: "{{ COMMON_DATA_DIR }}/flower"
flower_log_dir: "{{ COMMON_LOG_DIR }}/flower"
flower_venv_dir: "{{ flower_app_dir }}/venvs/flower"
flower_venv_bin: "{{ flower_venv_dir }}/bin"
flower_python_reqs:
- "flower==0.7.0"
flower_deploy_path: "{{ flower_venv_bin }}:/usr/local/sbin:/usr/local/bin:/usr/bin:/sbin:/bin"
flower_broker: "amqp://{{ FLOWER_BROKER_USERNAME }}:{{ FLOWER_BROKER_PASSWORD }}@{{ FLOWER_BROKER_HOST }}:{{ FLOWER_BROKER_PORT }}"
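# With the defaults above this resolves to amqp://celery:celery@127.0.0.1:5672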
flower_environment:
PATH: $flower_deploy_path
---
- name: restart flower
supervisorctl_local: >
state=restarted
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
name="flower"
sudo_user: "{{supervisor_service_user }}"
---
dependencies:
- supervisor
- devpi
---
- name: create application user
user: >
name="{{ flower_user }}" home="{{ flower_app_dir }}"
createhome=no shell=/bin/false
notify:
- "restart flower"
- name: create flower user dirs
file: >
path="{{ item }}" state=directory
owner="{{ flower_user }}" group="{{ common_web_group }}"
notify:
- "restart flower"
with_items:
- "{{ flower_app_dir }}"
- "{{ flower_data_dir }}"
- "{{ flower_venv_dir }}"
- "{{ flower_log_dir }}"
- name: create flower environment script
template: >
src=flower_env.j2 dest={{ flower_app_dir }}/flower_env
owner={{ flower_user }} group={{ common_web_group }}
mode=0644
notify:
- "restart flower"
- name: create virtualenv and install Python requirements
pip: >
name="{{ item }}"
virtualenv="{{ flower_venv_dir }}"
state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ flower_user }}"
environment: "{{ flower_environment }}"
with_items: "flower_python_reqs"
notify:
- "restart flower"
- name: create supervisor configuration
template: >
src=flower.conf.j2 dest={{ supervisor_available_dir }}/flower.conf
owner={{ supervisor_user }}
group={{ supervisor_user }}
sudo_user: "{{ supervisor_user }}"
notify:
- "restart flower"
- name: enable supervisor configuration
file: >
src={{ supervisor_available_dir }}/flower.conf
dest={{ supervisor_cfg_dir }}/flower.conf
state=link
force=yes
sudo_user: "{{ supervisor_user }}"
notify:
- "restart flower"
- name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
sudo_user: "{{ supervisor_service_user }}"
changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != ""
notify:
- "restart flower"
[program:flower]
environment=PATH="{{ flower_deploy_path }}"
user={{ common_web_user }}
command={{ flower_venv_bin }}/celery flower --broker {{ flower_broker }} --address={{ FLOWER_ADDRESS }} --port={{ FLOWER_PORT }}
stdout_logfile={{ supervisor_log_dir }}/flower-stdout.log
stderr_logfile={{ supervisor_log_dir }}/flower-stderr.log
# {{ ansible_managed }}
{% for name,value in flower_environment.items() %}
{%- if value %}
export {{ name }}="{{ value }}"
{%- endif %}
{% endfor %}
......@@ -26,8 +26,12 @@ FORUM_API_KEY: "password"
FORUM_ELASTICSEARCH_HOST: "localhost"
FORUM_ELASTICSEARCH_PORT: "9200"
FORUM_ELASTICSEARCH_URL: "http://{{ FORUM_ELASTICSEARCH_HOST }}:{{ FORUM_ELASTICSEARCH_PORT }}"
# This needs to be a string; set it to 'false' to disable.
FORUM_NEW_RELIC_ENABLE: 'true'
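# e.g. FORUM_NEW_RELIC_ENABLE: 'false'   # quoted, so it stays a string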
FORUM_NEW_RELIC_LICENSE_KEY: "new-relic-license-key"
FORUM_NEW_RELIC_APP_NAME: "forum-newrelic-app"
FORUM_WORKER_PROCESSES: "4"
FORUM_LISTEN_HOST: "0.0.0.0"
FORUM_LISTEN_PORT: "4567"
......@@ -44,6 +48,7 @@ forum_environment:
SEARCH_SERVER: "{{ FORUM_ELASTICSEARCH_URL }}"
MONGOHQ_URL: "{{ FORUM_MONGO_URL }}"
HOME: "{{ forum_app_dir }}"
NEW_RELIC_ENABLE: "{{ FORUM_NEW_RELIC_ENABLE }}"
NEW_RELIC_APP_NAME: "{{ FORUM_NEW_RELIC_APP_NAME }}"
NEW_RELIC_LICENSE_KEY: "{{ FORUM_NEW_RELIC_LICENSE_KEY }}"
WORKER_PROCESSES: "{{ FORUM_WORKER_PROCESSES }}"
......
......@@ -53,7 +53,7 @@
- name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
changed_when: supervisor_update.stdout != ""
changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != ""
when: not disable_edx_services
- name: ensure forum is started
......
......@@ -17,17 +17,13 @@
JENKINS_ADMIN_NAME: 'default_jenkins_name'
# A dictionary of AWS credentials to use to make
# a boto file for jenkins.
JENKINS_ADMIN_AWS_CREDENTIALS: !!null
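# The expected shape (assumed from the boto.j2 template in this role, which
# reads creds.access_id and creds.secret_key for each deployment):
#
# JENKINS_ADMIN_AWS_CREDENTIALS:
#   edx:
#     access_id: AKIAEXAMPLEKEYID
#     secret_key: example-secret-access-key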
# jenkins_admin also requires other variables that are not defined by default.
# JENKINS_ADMIN_S3_PROFILE: !!null
# JENKINS_ADMIN_CONFIGURATION_REPO: !!null
# JENKINS_ADMIN_CONFIGURATION_SECURE_REPO: !!null
#
# # git key to use to checkout secure repos on jenkins and in abbey
# JENKINS_ADMIN_GIT_KEY: !!null
#
# # EC2 Key to use when bringing up the abbey instance in ec2 (aws key-pair)
# JENKINS_ADMIN_EC2_KEY: !!null
jenkins_admin_role_name: jenkins_admin
......@@ -89,7 +85,55 @@ jenkins_admin_plugins:
- { name: "rebuild", version: "1.21" }
- { name: "build-user-vars-plugin", version: "1.1" }
- { name: "build-token-root", version: "1.1" }
- { name: "matrix-auth", version: "1.0.2" }
- { name: "mailer", version: "1.5" }
- { name: "external-monitor-job", version: "1.1" }
- { name: "ldap", version: "1.2" }
- { name: "pam-auth", version: "1.0" }
- { name: "ant", version: "1.2" }
- { name: "build-user-vars-plugin", version: "1.1" }
- { name: "credentials", version: "1.8.3" }
- { name: "ssh-credentials", version: "1.5.1" }
- { name: "ssh-agent", version: "1.3" }
- { name: "token-macro", version: "1.8.1" }
- { name: "parameterized-trigger", version: "2.20" }
- { name: "multiple-scms", version: "0.3" }
- { name: "git", version: "1.5.0" }
- { name: "thinBackup", version: "1.7.4" }
- { name: "maven-plugin", version: "2.0" }
- { name: "build-token-root", version: "1.0" }
- { name: "copy-project-link", version: "1.2" }
- { name: "scriptler", version: "2.6.1" }
- { name: "rebuild", version: "1.20" }
- { name: "ssh-slaves", version: "1.4" }
- { name: "translation", version: "1.10" }
- { name: "dynamicparameter", version: "0.2.0" }
- { name: "hipchat", version: "0.1.5" }
- { name: "throttle-concurrents", version: "1.8.2" }
- { name: "mask-passwords", version: "2.7.2" }
- { name: "jquery", version: "1.7.2-1" }
- { name: "dashboard-view", version: "2.9.1" }
- { name: "build-pipeline-plugin", version: "1.4" }
- { name: "s3", version: "0.5" }
- { name: "tmpcleaner", version: "1.1" }
- { name: "jobConfigHistory", version: "2.4" }
- { name: "build-timeout", version: "1.11" }
- { name: "next-build-number", version: "1.0" }
- { name: "nested-view", version: "1.10" }
- { name: "timestamper", version: "1.5.7" }
- { name: "github-api", version: "1.44" }
- { name: "postbuild-task", version: "1.8" }
- { name: "cobertura", version: "1.9.2" }
- { name: "notification", version: "1.5" }
- { name: "violations", version: "0.7.11" }
- { name: "copy-to-slave", version: "1.4.3" }
- { name: "github", version: "1.8" }
- { name: "copyartifact", version: "1.28" }
- { name: "shiningpanda", version: "0.20" }
- { name: "htmlpublisher", version: "1.2" }
- { name: "github-oauth", version: "0.14" }
- { name: "build-name-setter", version: "1.3" }
- { name: "ec2", version: "1.19" }
jenkins_admin_jobs:
- 'backup-jenkins'
- 'build-ami'
......@@ -21,30 +21,18 @@
#
#
- fail: "JENKINS_ADMIN_S3_PROFILE is not defined."
- fail: msg="JENKINS_ADMIN_S3_PROFILE is not defined."
when: JENKINS_ADMIN_S3_PROFILE is not defined
- fail: "JENKINS_ADMIN_S3_PROFILE.name is not defined."
- fail: msg="JENKINS_ADMIN_S3_PROFILE.name is not defined."
when: JENKINS_ADMIN_S3_PROFILE.name is not defined
- fail: "JENKINS_ADMIN_S3_PROFILE.access_key is not defined."
- fail: msg="JENKINS_ADMIN_S3_PROFILE.access_key is not defined."
when: JENKINS_ADMIN_S3_PROFILE.access_key is not defined
- fail: "JENKINS_ADMIN_S3_PROFILE.secret_key is not defined."
- fail: msg="JENKINS_ADMIN_S3_PROFILE.secret_key is not defined."
when: JENKINS_ADMIN_S3_PROFILE.secret_key is not defined
- fail: "JENKINS_ADMIN_CONFIGURATION_REPO is not defined."
when: JENKINS_ADMIN_CONFIGURATION_REPO is not defined
- fail: "JENKINS_ADMIN_CONFIGURATION_SECURE_REPO is not defined."
when: JENKINS_ADMIN_CONFIGURATION_SECURE_REPO is not defined
- fail: "JENKINS_ADMIN_GIT_KEY is not defined."
when: JENKINS_ADMIN_GIT_KEY is not defined
- fail: "JENKINS_ADMIN_EC2_KEY is not defined."
when: JENKINS_ADMIN_EC2_KEY is not defined
# We first download the plugins to a temp directory and include
# the version in the file name. That way, if we increment
# the version, the plugin will be updated in Jenkins
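# An illustrative sketch of that step (URL pattern and task layout assumed;
# the actual task lies outside this hunk):
#
# - name: download a pinned version of each plugin
#   get_url: >
#     url=http://updates.jenkins-ci.org/download/plugins/{{ item.name }}/{{ item.version }}/{{ item.name }}.hpi
#     dest=/tmp/{{ item.name }}_{{ item.version }}.hpi
#   with_items: jenkins_admin_plugins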
......@@ -72,6 +60,14 @@
group={{ jenkins_group }}
mode=0644
- name: configure the boto profiles for jenkins
template: >
src="./{{ jenkins_home }}/boto.j2"
dest="{{ jenkins_home }}/.boto"
owner="{{ jenkins_user }}"
group="{{ jenkins_group }}"
mode="0600"
- name: create the ssh directory
file: >
path={{ jenkins_home }}/.ssh
......@@ -86,14 +82,6 @@
shell: >
ssh-keygen -f {{ jenkins_home }}/.ssh/known_hosts -H -F github.com | grep -q found || ssh-keyscan -H github.com > {{ jenkins_home }}/.ssh/known_hosts
- name: drop the secure credentials
copy: >
content="{{ JENKINS_ADMIN_GIT_KEY }}"
dest={{ jenkins_home }}/.ssh/id_rsa
owner={{ jenkins_user }}
group={{ jenkins_group }}
mode=0600
- name: create job directory
file: >
path="{{ jenkins_home }}/jobs"
......@@ -133,5 +121,9 @@
mode=0440 validate='visudo -cf %s'
- name: install global gem dependencies
gem: name={{ item.name }} state=present version={{ item.version }}
gem: >
name={{ item.name }}
state=present
version={{ item.version }}
user_install=no
with_items: jenkins_admin_gem_pkgs
{% for deployment, creds in JENKINS_ADMIN_AWS_CREDENTIALS.iteritems() %}
[profile {{deployment}}]
aws_access_key_id = {{ creds.access_id }}
aws_secret_access_key = {{ creds.secret_key }}
{% endfor %}
<?xml version='1.0' encoding='UTF-8'?>
<project>
<actions/>
<description></description>
<keepDependencies>false</keepDependencies>
<properties>
<hudson.model.ParametersDefinitionProperty>
<parameterDefinitions>
<hudson.model.StringParameterDefinition>
<name>play</name>
<description></description>
<defaultValue></defaultValue>
</hudson.model.StringParameterDefinition>
<hudson.model.StringParameterDefinition>
<name>deployment</name>
<description></description>
<defaultValue></defaultValue>
</hudson.model.StringParameterDefinition>
<hudson.model.StringParameterDefinition>
<name>environment</name>
<description></description>
<defaultValue></defaultValue>
</hudson.model.StringParameterDefinition>
<hudson.model.TextParameterDefinition>
<name>refs</name>
<description></description>
<defaultValue></defaultValue>
</hudson.model.TextParameterDefinition>
<hudson.model.TextParameterDefinition>
<name>vars</name>
<description></description>
<defaultValue></defaultValue>
</hudson.model.TextParameterDefinition>
<hudson.model.StringParameterDefinition>
<name>configuration</name>
<description>The GITREF of configuration to use. Leave blank to default to master.</description>
<defaultValue></defaultValue>
</hudson.model.StringParameterDefinition>
<hudson.model.StringParameterDefinition>
<name>configuration_secure</name>
<description>The GITREF of configuration-secure repository to use. Leave blank to default to master.</description>
<defaultValue></defaultValue>
</hudson.model.StringParameterDefinition>
<hudson.model.StringParameterDefinition>
<name>base_ami</name>
<description></description>
<defaultValue></defaultValue>
</hudson.model.StringParameterDefinition>
<hudson.model.BooleanParameterDefinition>
<name>use_blessed</name>
<description></description>
<defaultValue>true</defaultValue>
</hudson.model.BooleanParameterDefinition>
</parameterDefinitions>
</hudson.model.ParametersDefinitionProperty>
<com.sonyericsson.rebuild.RebuildSettings plugin="rebuild@1.20">
<autoRebuild>false</autoRebuild>
</com.sonyericsson.rebuild.RebuildSettings>
</properties>
<scm class="org.jenkinsci.plugins.multiplescms.MultiSCM" plugin="multiple-scms@0.2">
<scms>
<hudson.plugins.git.GitSCM plugin="git@1.5.0">
<configVersion>2</configVersion>
<userRemoteConfigs>
<hudson.plugins.git.UserRemoteConfig>
<name></name>
<refspec></refspec>
<url>{{ JENKINS_ADMIN_CONFIGURATION_REPO }}</url>
</hudson.plugins.git.UserRemoteConfig>
</userRemoteConfigs>
<branches>
<hudson.plugins.git.BranchSpec>
<name>*/master</name>
</hudson.plugins.git.BranchSpec>
</branches>
<disableSubmodules>false</disableSubmodules>
<recursiveSubmodules>false</recursiveSubmodules>
<doGenerateSubmoduleConfigurations>false</doGenerateSubmoduleConfigurations>
<authorOrCommitter>false</authorOrCommitter>
<clean>false</clean>
<wipeOutWorkspace>false</wipeOutWorkspace>
<pruneBranches>false</pruneBranches>
<remotePoll>false</remotePoll>
<ignoreNotifyCommit>false</ignoreNotifyCommit>
<useShallowClone>false</useShallowClone>
<abortIfNoNewRevs>false</abortIfNoNewRevs>
<cutoffHours></cutoffHours>
<buildChooser class="hudson.plugins.git.util.DefaultBuildChooser"/>
<gitTool>Default</gitTool>
<submoduleCfg class="list"/>
<relativeTargetDir>configuration</relativeTargetDir>
<reference></reference>
<excludedRegions></excludedRegions>
<excludedUsers></excludedUsers>
<gitConfigName></gitConfigName>
<gitConfigEmail></gitConfigEmail>
<skipTag>true</skipTag>
<includedRegions></includedRegions>
<scmName>configuration</scmName>
</hudson.plugins.git.GitSCM>
<hudson.plugins.git.GitSCM plugin="git@1.5.0">
<configVersion>2</configVersion>
<userRemoteConfigs>
<hudson.plugins.git.UserRemoteConfig>
<name></name>
<refspec></refspec>
<url>{{ JENKINS_ADMIN_CONFIGURATION_SECURE_REPO }}</url>
</hudson.plugins.git.UserRemoteConfig>
</userRemoteConfigs>
<branches>
<hudson.plugins.git.BranchSpec>
<name>*/master</name>
</hudson.plugins.git.BranchSpec>
</branches>
<disableSubmodules>false</disableSubmodules>
<recursiveSubmodules>false</recursiveSubmodules>
<doGenerateSubmoduleConfigurations>false</doGenerateSubmoduleConfigurations>
<authorOrCommitter>false</authorOrCommitter>
<clean>false</clean>
<wipeOutWorkspace>false</wipeOutWorkspace>
<pruneBranches>false</pruneBranches>
<remotePoll>false</remotePoll>
<ignoreNotifyCommit>false</ignoreNotifyCommit>
<useShallowClone>false</useShallowClone>
<abortIfNoNewRevs>false</abortIfNoNewRevs>
<cutoffHours></cutoffHours>
<buildChooser class="hudson.plugins.git.util.DefaultBuildChooser"/>
<gitTool>Default</gitTool>
<submoduleCfg class="list"/>
<relativeTargetDir>configuration-secure</relativeTargetDir>
<reference></reference>
<excludedRegions></excludedRegions>
<excludedUsers></excludedUsers>
<gitConfigName></gitConfigName>
<gitConfigEmail></gitConfigEmail>
<skipTag>true</skipTag>
<includedRegions></includedRegions>
<scmName>configuration-secure</scmName>
</hudson.plugins.git.GitSCM>
</scms>
</scm>
<canRoam>true</canRoam>
<disabled>false</disabled>
<blockBuildWhenDownstreamBuilding>false</blockBuildWhenDownstreamBuilding>
<blockBuildWhenUpstreamBuilding>false</blockBuildWhenUpstreamBuilding>
<authToken>MULTIPASS</authToken>
<triggers/>
<concurrentBuild>true</concurrentBuild>
<builders>
<jenkins.plugins.shiningpanda.builders.VirtualenvBuilder plugin="shiningpanda@0.20">
<pythonName>System-CPython-2.7</pythonName>
<home></home>
<clear>false</clear>
<useDistribute>true</useDistribute>
<systemSitePackages>false</systemSitePackages>
<nature>shell</nature>
<command>
#!/bin/bash -x
export jenkins_admin_ec2_key="{{ JENKINS_ADMIN_EC2_KEY }}"
export jenkins_admin_configuration_secure_repo="{{ JENKINS_ADMIN_CONFIGURATION_SECURE_REPO }}"
configuration/util/jenkins/build-ami.sh
</command>
<ignoreExitCode>false</ignoreExitCode>
</jenkins.plugins.shiningpanda.builders.VirtualenvBuilder>
<hudson.tasks.Shell>
<command>#!/bin/bash -x
if [[ (&quot;$play&quot; == &quot;&quot;) ]]; then
echo &quot;No Play Specified. Nothing to Do.&quot;
exit 0
fi
rm -f /var/tmp/$BUILD_ID-extra-vars.yml
rm -f /var/tmp/$BUILD_ID-refs.yml</command>
</hudson.tasks.Shell>
</builders>
<publishers/>
</project>
......@@ -44,6 +44,7 @@ jenkins_plugins:
- { name: "violations", version: "0.7.11" }
- { name: "multiple-scms", version: "0.2" }
- { name: "timestamper", version: "1.5.7" }
- { name: "thinBackup", version: "1.7.4"}
jenkins_bundled_plugins:
- "credentials"
......
- name: enable jenkins datadog
shell: cp /etc/dd-agent/conf.d/jenkins.yaml.example /etc/dd-agent/conf.d/jenkins.yaml creates=/etc/dd-agent/conf.d/jenkins.yaml
notify: restart the datadog service
......@@ -126,3 +126,6 @@
dest=/etc/nginx/sites-enabled/jenkins
state=link
notify: start nginx
- include: datadog.yml tags=datadog
when: COMMON_ENABLE_DATADOG
......@@ -126,4 +126,4 @@ jenkins_wheels:
- { pkg: "psutil==1.2.1", wheel: "psutil-1.2.1-cp27-none-linux_x86_64.whl" }
- { pkg: "lazy==1.1", wheel: "lazy-1.1-py27-none-any.whl" }
- { pkg: "path.py==3.0.1", wheel: "path.py-3.0.1-py27-none-any.whl" }
- { pkg: "MySQL-python==1.2.4", wheel: "MySQL_python-1.2.4-cp27-none-linux_x86_64.whl" }
- { pkg: "MySQL-python==1.2.5", wheel: "MySQL_python-1.2.5-cp27-none-linux_x86_64.whl" }
......@@ -10,4 +10,5 @@
- include: system.yml
- include: python.yml
- include: ruby.yml
- include: jscover.yml
......@@ -44,3 +44,27 @@
template:
src=wheel_venv.sh.j2 dest={{ jenkins_home }}/wheel_venv.sh
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
# Run the wheel_venv.sh script for the first time
# This was previously done in the Jenkins global
# configuration as part of the AMI Init script.
# Moving here so that we can archive a clean snapshot
# of the virtualenv with only the defined packages
# from jenkins_wheels.
- name: Run the wheel_venv.sh script
command: >
./wheel_venv.sh edx-venv
chdir={{ jenkins_home }}
creates={{ jenkins_home }}/edx-venv
sudo_user: "{{ jenkins_user }}"
# Archive the current state of the virtualenv
# as a starting point for new builds.
# The edx-venv directory is deleted and then recreated
# cleanly from the archive by the jenkins build scripts.
- name: Create a clean virtualenv archive
command: >
tar -cpzf edx-venv_clean.tar.gz edx-venv
chdir={{ jenkins_home }}
creates={{ jenkins_home }}/edx-venv_clean.tar.gz
sudo_user: "{{ jenkins_user }}"
---
# Archive the current state of the rbenv
# as a starting point for new builds.
# The edx-rbenv directory is deleted and then recreated
# cleanly from the archive by the jenkins build scripts.
- name: Create a clean rbenv archive
command: >
tar -cpzf edx-rbenv_clean.tar.gz .rbenv
chdir={{ jenkins_home }}
creates={{ jenkins_home }}/edx-rbenv_clean.tar.gz
sudo_user: "{{ jenkins_user }}"
---
KIBANA_SERVER_NAME: "192.168.33.10"
KIBANA_NGINX_PORT: 80
KIBANA_SSL_NGINX_PORT: 443
kibana_app_dir: /edx/app/kibana
kibana_file: kibana-3.0.0.tar.gz
kibana_url: "https://download.elasticsearch.org/kibana/kibana/{{ kibana_file }}"
{
"title": "edX Log Analysis",
"services": {
"query": {
"idQueue": [],
"list": {
"0": {
"query": "@message: WARNING",
"alias": "",
"color": "#EAB839",
"id": 0,
"pin": false,
"type": "lucene",
"enable": true
},
"1": {
"id": 1,
"color": "#7EB26D",
"query": "@message: INFO",
"alias": "",
"pin": false,
"type": "lucene",
"enable": true
},
"2": {
"id": 2,
"color": "#BF1B00",
"query": "@message: ERROR",
"alias": "",
"pin": false,
"type": "lucene",
"enable": true
},
"3": {
"id": 3,
"color": "#F9D9F9",
"query": "*",
"alias": "",
"pin": false,
"type": "lucene",
"enable": true
}
},
"ids": [
0,
1,
2,
3
]
},
"filter": {
"idQueue": [
1,
2,
3
],
"list": {
"0": {
"type": "time",
"field": "@timestamp",
"from": "now-1h",
"to": "now",
"mandate": "must",
"active": true,
"alias": "",
"id": 0
},
"1": {
"type": "querystring",
"query": "*pika*",
"mandate": "mustNot",
"active": true,
"alias": "",
"id": 1
},
"2": {
"type": "querystring",
"query": "*connectionpool*",
"mandate": "mustNot",
"active": true,
"alias": "",
"id": 3
}
},
"ids": [
0,
1,
2
]
}
},
"rows": [
{
"title": "Graph",
"height": "350px",
"editable": true,
"collapse": false,
"collapsable": true,
"panels": [
{
"span": 12,
"editable": true,
"group": [
"default"
],
"type": "histogram",
"mode": "count",
"time_field": "@timestamp",
"value_field": null,
"auto_int": true,
"resolution": 100,
"interval": "30s",
"fill": 3,
"linewidth": 3,
"timezone": "browser",
"spyable": true,
"zoomlinks": true,
"bars": false,
"stack": true,
"points": false,
"lines": true,
"legend": true,
"x-axis": true,
"y-axis": true,
"percentage": false,
"interactive": true,
"queries": {
"mode": "all",
"ids": [
0,
1,
2,
3
]
},
"title": "Events over time",
"intervals": [
"auto",
"1s",
"1m",
"5m",
"10m",
"30m",
"1h",
"3h",
"12h",
"1d",
"1w",
"1M",
"1y"
],
"options": true,
"tooltip": {
"value_type": "cumulative",
"query_as_alias": true
},
"scale": 1,
"y_format": "none",
"grid": {
"max": null,
"min": 0
},
"annotate": {
"enable": false,
"query": "*",
"size": 20,
"field": "_type",
"sort": [
"_score",
"desc"
]
},
"pointradius": 5,
"show_query": true,
"legend_counts": true,
"zerofill": true,
"derivative": false
}
],
"notice": false
},
{
"title": "Charts",
"height": "250px",
"editable": true,
"collapse": false,
"collapsable": true,
"panels": [
{
"span": 4,
"editable": true,
"type": "hits",
"loadingEditor": false,
"query": {
"field": "syslog_severity",
"goal": 100
},
"queries": {
"mode": "all",
"ids": [
0,
1,
2,
3
]
},
"size": 10,
"exclude": [],
"donut": true,
"tilt": true,
"legend": "above",
"labels": true,
"mode": "terms",
"default_field": "DEFAULT",
"spyable": true,
"title": "Log Severity",
"style": {
"font-size": "10pt"
},
"arrangement": "horizontal",
"chart": "pie",
"counter_pos": "above"
},
{
"span": 4,
"editable": true,
"type": "hits",
"loadingEditor": false,
"query": {
"field": "@source_host",
"goal": 100
},
"queries": {
"mode": "all",
"ids": [
0,
1,
2,
3
]
},
"size": 10,
"exclude": [],
"donut": true,
"tilt": true,
"legend": "above",
"labels": true,
"mode": "terms",
"default_field": "DEFAULT",
"spyable": true,
"title": "Logs by Host",
"style": {
"font-size": "10pt"
},
"arrangement": "horizontal",
"chart": "pie",
"counter_pos": "above"
},
{
"span": 4,
"editable": true,
"type": "hits",
"loadingEditor": false,
"style": {
"font-size": "10pt"
},
"arrangement": "horizontal",
"chart": "pie",
"counter_pos": "above",
"donut": true,
"tilt": true,
"labels": true,
"spyable": true,
"queries": {
"mode": "selected",
"ids": [
0,
1,
2
]
},
"title": "Percent by Python Severity"
}
],
"notice": false
},
{
"title": "Trends",
"height": "50px",
"editable": true,
"collapse": false,
"collapsable": true,
"panels": [
{
"span": 4,
"editable": true,
"type": "trends",
"loadingEditor": false,
"queries": {
"mode": "all",
"ids": [
0,
1,
2,
3
]
},
"style": {
"font-size": "14pt"
},
"ago": "1h",
"arrangement": "vertical",
"spyable": true,
"title": "Hourly"
},
{
"span": 4,
"editable": true,
"type": "trends",
"loadingEditor": false,
"queries": {
"mode": "all",
"ids": [
0,
1,
2,
3
]
},
"style": {
"font-size": "14pt"
},
"ago": "1d",
"arrangement": "vertical",
"spyable": true,
"title": "Daily"
},
{
"span": 4,
"editable": true,
"type": "trends",
"loadingEditor": false,
"queries": {
"mode": "all",
"ids": [
0,
1,
2,
3
]
},
"style": {
"font-size": "14pt"
},
"ago": "1w",
"arrangement": "vertical",
"spyable": true,
"title": "Weekly"
}
],
"notice": false
},
{
"title": "Error Events",
"height": "550px",
"editable": true,
"collapse": false,
"collapsable": true,
"panels": [
{
"error": false,
"span": 12,
"editable": true,
"type": "table",
"loadingEditor": false,
"status": "Stable",
"queries": {
"mode": "selected",
"ids": [
2
]
},
"size": 100,
"pages": 5,
"offset": 0,
"sort": [
"@timestamp",
"desc"
],
"group": "default",
"style": {
"font-size": "9pt"
},
"overflow": "min-height",
"fields": [
"@timestamp",
"@source_host",
"message"
],
"highlight": [],
"sortable": true,
"header": true,
"paging": true,
"field_list": true,
"all_fields": false,
"trimFactor": 300,
"normTimes": true,
"spyable": true,
"title": "Errors",
"localTime": false,
"timeField": "@timestamp"
}
],
"notice": false
},
{
"title": "Events",
"height": "350px",
"editable": true,
"collapse": false,
"collapsable": true,
"panels": [
{
"title": "All events",
"error": false,
"span": 12,
"editable": true,
"group": [
"default"
],
"type": "table",
"size": 100,
"pages": 5,
"offset": 0,
"sort": [
"@timestamp",
"desc"
],
"style": {
"font-size": "9pt"
},
"overflow": "min-height",
"fields": [
"@source_host",
"message"
],
"highlight": [],
"sortable": true,
"header": true,
"paging": true,
"spyable": true,
"queries": {
"mode": "all",
"ids": [
0,
1,
2,
3
]
},
"field_list": true,
"status": "Stable",
"trimFactor": 300,
"normTimes": true,
"all_fields": false,
"localTime": false,
"timeField": "@timestamp"
}
],
"notice": false
}
],
"editable": true,
"failover": false,
"index": {
"interval": "day",
"pattern": "[logstash-]YYYY.MM.DD",
"default": "NO_TIME_FILTER_OR_INDEX_PATTERN_NOT_MATCHED",
"warm_fields": true
},
"style": "dark",
"panel_hints": true,
"pulldowns": [
{
"type": "query",
"collapse": false,
"notice": false,
"query": "*",
"pinned": true,
"history": [
"*",
"@message: ERROR",
"@message: INFO",
"@message: WARNING",
"@message: WARN",
"*corresponding*",
"@message: INFO OR syslog_severity: info",
"@message: INFO OR @log_severity: info",
"ERROR",
"WARNING"
],
"remember": 10,
"enable": true
},
{
"type": "filtering",
"collapse": true,
"notice": false,
"enable": true
}
],
"nav": [
{
"type": "timepicker",
"collapse": false,
"notice": false,
"status": "Stable",
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
],
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"timefield": "@timestamp",
"now": true,
"filter_id": 0,
"enable": true
}
],
"loader": {
"save_gist": false,
"save_elasticsearch": true,
"save_local": true,
"save_default": true,
"save_temp": true,
"save_temp_ttl_enable": true,
"save_temp_ttl": "30d",
"load_gist": true,
"load_elasticsearch": true,
"load_elasticsearch_size": 20,
"load_local": true,
"hide": false
},
"refresh": "1m"
}
\ No newline at end of file
---
- name: restart nginx
service: name=nginx state=restarted
- name: reload nginx
service: name=nginx state=reloaded
---
dependencies:
- common
- nginx
# requires:
# - oraclejdk
# - elasticsearch
# - nginx
---
- name: Ensure app apt dependencies are installed
apt: pkg={{ item }} state=installed
with_items:
- python-software-properties
- git
- nginx
- name: Ensure {{ kibana_app_dir }} exists
file: path={{ kibana_app_dir }} state=directory owner=root group=root mode=0755
- name: Ensure subdirectories exist
file: path={{ kibana_app_dir }}/{{ item }} owner=root group=root mode=0755 state=directory
with_items:
- htdocs
- share
- name: ensure we have the specified kibana release
get_url: url={{ kibana_url }} dest={{ kibana_app_dir }}/share/{{ kibana_file }}
- name: extract
shell: >
chdir={{ kibana_app_dir }}/share
tar -xzvf {{ kibana_app_dir }}/share/{{ kibana_file }}
creates={{ kibana_app_dir }}/share/{{ kibana_file|replace('.tar.gz','') }}
- name: install
shell: >
chdir={{ kibana_app_dir }}/share/{{ kibana_file|replace('.tar.gz','') }}
cp -R * {{ kibana_app_dir }}/htdocs/
- name: copy config
template: src=config.js.j2 dest={{ kibana_app_dir }}/htdocs/config.js
/**
* This is the app's configuration. If you need to configure
* the default dashboard, please see dashboards/default
*/
define(['settings'],
function (Settings) {
return new Settings({
/**
* URL to your elasticsearch server. You almost certainly don't
* want 'http://localhost:9200' here, even if Kibana and ES are on
* the same host.
*
* By default this will attempt to reach ES at the same host you have
* elasticsearch installed on. You probably want to set it to the FQDN of your
* elasticsearch host
* @type {String}
*/
//elasticsearch: "http://"+window.location.hostname+":9200",
{% if NGINX_ENABLE_SSL %}
elasticsearch: "https://{{ KIBANA_SERVER_NAME }}/e",
{% else %}
elasticsearch: "http://{{ KIBANA_SERVER_NAME }}/e",
{% endif %}
/**
* The default ES index to use for storing Kibana-specific objects
* such as stored dashboards
* @type {String}
*/
kibana_index: "kibana-int",
/**
* Panel modules available. Panels will only be loaded when they are defined in the
* dashboard, but this list is used in the "add panel" interface.
* @type {Array}
*/
panel_names: [
'histogram',
'map',
'table',
'filtering',
'timepicker',
'text',
'hits',
'column',
'trends',
'bettermap',
'query',
'terms',
'stats',
'sparklines',
'goal',
]
});
});
......@@ -18,7 +18,7 @@
- name: terminating single instance
local_action:
module: ec2_local
module: ec2
state: 'absent'
region: "{{ region }}"
instance_ids: ${tag_lookup.instance_ids}
......@@ -36,7 +36,7 @@
- name: Launch ec2 instance
local_action:
module: ec2_local
module: ec2
keypair: "{{ keypair }}"
group: "{{ security_group }}"
instance_type: "{{ instance_type }}"
......@@ -44,7 +44,9 @@
wait: true
region: "{{ region }}"
instance_tags: "{{instance_tags}}"
root_ebs_size: "{{ root_ebs_size }}"
volumes:
- device_name: /dev/sda1
volume_size: "{{ root_ebs_size }}"
zone: "{{ zone }}"
instance_profile_name: "{{ instance_profile_name }}"
register: ec2
......
......@@ -16,7 +16,7 @@
- name: set forum rbenv and gem permissions
file:
path={{ item }} state=directory mode=770
path={{ item }} state=directory recurse=yes mode=770
with_items:
- "{{ forum_app_dir }}/.gem"
- "{{ forum_app_dir }}/.rbenv"
......
---
LOGSTASH_DAYS_TO_KEEP: 30
LOGSTASH_ROTATE: true
logstash_app_dir: /edx/app/logstash
logstash_log_dir: /edx/var/log/logstash
logstash_data_dir: /edx/var/logstash/file_logs
logstash_syslog_port: 514
logstash_file: logstash-1.3.3-flatjar.jar
logstash_url: "https://download.elasticsearch.org/logstash/logstash/{{ logstash_file }}"
logstash_python_requirements:
- pyes==0.19.0
logstash_scripts_repo: https://github.com/crashdump/logstash-elasticsearch-scripts
logstash_rotate_cron:
hour: 5
minute: 42
logstash_optimize_cron:
hour: 6
minute: 15
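# With the defaults above, the rotation job runs daily at 05:42 and the
# optimize job at 06:15 (cron local time).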
{
"template": "logstash-*",
"settings" : {
"number_of_shards" : 1,
"number_of_replicas" : 0,
"index" : {
"query" : { "default_field" : "message" },
"store" : { "compress" : { "stored" : true, "tv": true } }
}
},
"mappings": {
"_default_": {
"_all": { "enabled": false },
"_source": { "compress": true },
"dynamic_templates": [
{
"string_template" : {
"match" : "*",
"mapping": { "type": "string", "index": "not_analyzed" },
"match_mapping_type" : "string"
}
}
],
"properties" : {
"@fields": { "type": "object", "dynamic": true, "path": "full" },
"@message" : { "type" : "string", "index" : "analyzed" },
"@source" : { "type" : "string", "index" : "not_analyzed" },
"@source_host" : { "type" : "string", "index" : "not_analyzed" },
"@source_path" : { "type" : "string", "index" : "not_analyzed" },
"@tags": { "type": "string", "index" : "not_analyzed" },
"@timestamp" : { "type" : "date", "index" : "not_analyzed" },
"@type" : { "type" : "string", "index" : "not_analyzed" }
}
}
}
}
---
- name: restart logstash
service: name=logstash state=restarted
---
dependencies:
- common
- elasticsearch
# requires:
# - oraclejdk
# - elasticsearch
---
- name: Ensure app apt dependencies are installed
apt: pkg={{ item }} state=installed
with_items:
- redis-server
- name: Ensure {{ logstash_app_dir }} exists
file: path={{ logstash_app_dir }} state=directory owner=root group=root mode=0755
- name: Ensure subdirectories exist
file: path={{ logstash_app_dir }}/{{ item }} owner=root group=root mode=0755 state=directory
with_items:
- bin
- etc
- share
- name: ensure logstash config is in place
template: src=logstash.conf.j2 dest={{ logstash_app_dir }}/etc/logstash.conf owner=root group=root mode=0644
notify: restart logstash
- name: ensure logstash upstart job is in place
template: src=logstash.upstart.conf.j2 dest=/etc/init/logstash.conf owner=root group=root mode=0755
- name: ensure logstash has a logging dir at {{ logstash_log_dir }}
file: path={{ logstash_log_dir }} owner=root group=root mode=0755 state=directory
- name: ensure we have the specified logstash release
get_url: url={{ logstash_url }} dest={{ logstash_app_dir }}/share/{{ logstash_file }}
- name: ensure symlink with no version exists at {{ logstash_app_dir }}/share/logstash.jar
file: src={{ logstash_app_dir }}/share/{{ logstash_file }} dest={{ logstash_app_dir }}/share/logstash.jar state=link
- name: start logstash
action: service name=logstash state=started enabled=yes
- name: Ensure we are running
wait_for: port={{ logstash_syslog_port }} host=localhost timeout=60
- name: Copy logstash es index template
copy: src=template_logstash.json dest=/etc/elasticsearch/template_logstash.json
- name: Enable logstash es index template
shell: chdir=/etc/elasticsearch executable=/bin/bash curl -XPUT 'http://localhost:9200/_template/template_logstash' -d @template_logstash.json
- name: Install python requirements
pip: name={{ item }} state=present
with_items: logstash_python_requirements
- name: Checkout logstash rotation scripts
git: repo={{ logstash_scripts_repo }} dest={{ logstash_app_dir }}/share/logstash-elasticsearch-scripts
when: LOGSTASH_ROTATE|bool
- name: Setup cron to run rotation
cron: >
user=root
name="Elasticsearch logstash index rotation"
hour={{ logstash_rotate_cron.hour }}
minute={{ logstash_rotate_cron.minute }}
job="/usr/bin/python {{ logstash_app_dir }}/share/logstash-elasticsearch-scripts/logstash_index_cleaner.py -d {{ LOGSTASH_DAYS_TO_KEEP }} > {{ logstash_log_dir }}/rotation_cron"
when: LOGSTASH_ROTATE|bool
- name: Setup cron to run optimization
cron: >
user=root
name="Elasticsearch logstash index optimization"
hour={{ logstash_optimize_cron.hour }}
minute={{ logstash_optimize_cron.minute }}
job="/usr/bin/python {{ logstash_app_dir }}/share/logstash-elasticsearch-scripts/logstash_index_optimize.py -d {{ LOGSTASH_DAYS_TO_KEEP }} > {{ logstash_log_dir }}/optimize_cron"
when: LOGSTASH_ROTATE|bool
input {
tcp {
port => {{ logstash_syslog_port }}
type => syslog
}
udp {
port => {{ logstash_syslog_port }}
type => syslog
}
}
filter {
if [type] == "syslog" {
grok {
match => { "message" => "<%{POSINT:syslog_pri}>%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{GREEDYDATA:syslog_message}" }
add_field => [ "received_at", "%{@timestamp}" ]
add_field => [ "received_from", "%{@source_host}" ]
}
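    # Example: "<13>Feb  5 12:00:00 app01 run[123]: ready" parses to
    # syslog_pri=13, syslog_timestamp="Feb  5 12:00:00",
    # syslog_hostname="app01", syslog_message="run[123]: ready".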
syslog_pri { }
date {
match => [ "syslog_timestamp", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ]
}
if !("_grokparsefailure" in [tags]) {
mutate {
replace => [ "@source_host", "%{syslog_hostname}" ]
replace => [ "@message", "%{syslog_message}" ]
}
}
mutate {
remove_field => [ "syslog_hostname", "syslog_message", "syslog_timestamp" ]
}
}
}
output {
# Example just to output to elasticsearch
elasticsearch { }
# And gzip for each host and program
file {
path => '{{ logstash_data_dir }}/%{@source_host}/all.%{+yyyyMMdd}.gz'
gzip => true
}
# Should add option for S3 as well.
}
# logstash-indexer.conf
# logstash - indexer instance
#
description "logstash indexer instance"
start on virtual-filesystems
stop on runlevel [06]
respawn
respawn limit 5 30
limit nofile 65550 65550
env HOME={{ logstash_app_dir }}
env JAVA_OPTS='-Xms512m -Xmx512m'
env PATH=$PATH:/usr/lib/jvm/{{ oraclejdk_base }}/bin
chdir {{ logstash_app_dir }}
setuid root
console log
# for versions 1.1.1 - 1.1.4 the internal web service crashes when touched
# and the current workaround is to just not run it and run Kibana instead
script
exec java -jar {{ logstash_app_dir }}/share/logstash.jar agent -f {{ logstash_app_dir }}/etc/logstash.conf --log {{ logstash_log_dir }}/logstash-indexer.out
end script
......@@ -10,12 +10,15 @@ mongo_data_dir: "{{ COMMON_DATA_DIR }}/mongo"
mongo_log_dir: "{{ COMMON_LOG_DIR }}/mongo"
mongo_user: mongodb
MONGODB_APT_KEY: "http://docs.mongodb.org/10gen-gpg-key.asc"
MONGODB_REPO: "deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen"
# Vars Meant to be overridden
MONGO_USERS:
- user: cs_comments_service
password: password
database: cs_comments_service
- user: exdapp
- user: edxapp
password: password
database: edxapp
......
......@@ -8,12 +8,12 @@
- name: add the mongodb signing key
apt_key: >
id=7F0CEB10
url=http://docs.mongodb.org/10gen-gpg-key.asc
url={{MONGODB_APT_KEY}}
state=present
- name: add the mongodb repo to the sources list
apt_repository: >
repo='deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen'
repo='{{ MONGODB_REPO }}'
state=present
- name: install mongo server and recommends
......
mms_agent_version: "2.2.0.70-1"
mms_agent_url: "https://mms.mongodb.com/download/agent/monitoring/mongodb-mms-monitoring-agent_{{ mms_agent_version }}_amd64.deb"
---
- name: restart mms
service: name=mongodb-mms-monitoring-agent state=restarted
---
# mongo_mms
#
# Example play:
#
# roles:
# - mongo_mms
- fail: msg="MMSAPIKEY is required"
when: MMSAPIKEY is not defined
# This cruft can be removed with Ansible 1.6, whose apt module can install local .deb files directly.
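# With Ansible >= 1.6 the download + gdebi pair below could collapse to
# something like:
#   apt: deb="/tmp/mongodb-mms-monitoring-agent-{{ mms_agent_version }}.deb"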
- name: download mongo mms agent
get_url: >
url="{{ mms_agent_url }}"
dest="/tmp/mongodb-mms-monitoring-agent-{{ mms_agent_version }}.deb"
register: download_mms_deb
- name: install mongo mms agent
shell: "gdebi -nq /tmp/mongodb-mms-monitoring-agent-{{ mms_agent_version }}.deb"
when: download_mms_deb.changed
notify: restart mms
- name: add key to monitoring-agent.config
lineinfile: >
dest=/etc/mongodb-mms/monitoring-agent.config
regexp="^mmsApiKey="
line="mmsApiKey={{ MMSAPIKEY }}"
notify: restart mms
- name: start mms service
service: name=mongodb-mms-monitoring-agent state=started
......@@ -19,7 +19,7 @@ newrelic_role_name: newrelic
NEWRELIC_REPO: 'deb http://apt.newrelic.com/debian/ newrelic non-free'
NEWRELIC_KEY_ID: '548C16BF'
NEWRELIC_KEY_URL: 'https://download.newrelic.com/{{ NEWRELIC_KEY_ID }}.gpg'
NEWRELIC_LICENSE_KEY: 'NEW-RELIC-KEY'
NEWRELIC_LICENSE_KEY: "SPECIFY_KEY_HERE"
#
# OS packages
......
# Variables for nginx role
---
# Set global htaccess for nginx
NGINX_HTPASSWD_USER: !!null
NGINX_HTPASSWD_PASS: !!null
NGINX_ENABLE_SSL: False
# Set these to real paths on your
# filesystem, otherwise nginx will
......@@ -16,6 +13,15 @@ NGINX_ENABLE_SSL: False
NGINX_SSL_CERTIFICATE: 'ssl-cert-snakeoil.pem'
NGINX_SSL_KEY: 'ssl-cert-snakeoil.key'
# When set to False, nginx will pass X-Forwarded-For, X-Forwarded-Port,
# and X-Forwarded-Proto headers through to the backend unmodified.
# This is desired when nginx is deployed behind another load balancer
# which takes care of properly setting the X-Forwarded-* headers.
# When there is no other load balancer in front of nginx, set this
# variable to True to force nginx to set the values of the X-Forwarded-*
# headers to reflect the properties of the incoming request.
NGINX_SET_X_FORWARDED_HEADERS: False
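# e.g. when nginx terminates client connections directly, with no load
# balancer in front of it:
# NGINX_SET_X_FORWARDED_HEADERS: True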
nginx_app_dir: "{{ COMMON_APP_DIR }}/nginx"
nginx_data_dir: "{{ COMMON_DATA_DIR }}/nginx"
nginx_conf_dir: "{{ nginx_app_dir }}/conf.d"
......@@ -29,6 +35,10 @@ nginx_debian_pkgs:
- nginx
- python-passlib
CMS_HOSTNAME: '~^((stage|prod)-)?studio.*'
nginx_template_dir: "edx/app/nginx/sites-available"
nginx_xserver_gunicorn_hosts:
- 127.0.0.1
nginx_xqueue_gunicorn_hosts:
......@@ -41,6 +51,8 @@ nginx_lms_preview_gunicorn_hosts:
- 127.0.0.1
nginx_cms_gunicorn_hosts:
- 127.0.0.1
nginx_analytics_api_gunicorn_hosts:
- 127.0.0.1
nginx_cfg:
# - link - turn on
......
......@@ -58,7 +58,7 @@
- name: Copying nginx configs for {{ nginx_sites }}
template: >
src=edx/app/nginx/sites-available/{{ item }}.j2
src={{ nginx_template_dir }}/{{ item }}.j2
dest={{ nginx_sites_available_dir }}/{{ item }}
owner=root group={{ common_web_user }} mode=0640
notify: reload nginx
......@@ -74,10 +74,10 @@
- name: Write out htpasswd file
htpasswd: >
name={{ NGINX_HTPASSWD_USER }}
password={{ NGINX_HTPASSWD_PASS }}
name={{ COMMON_HTPASSWD_USER }}
password={{ COMMON_HTPASSWD_PASS }}
path={{ nginx_htpasswd_file }}
when: NGINX_HTPASSWD_USER and NGINX_HTPASSWD_PASS
when: COMMON_ENABLE_BASIC_AUTH
- name: Create nginx log file location (just in case)
file: >
......
upstream analytics_api_app_server {
{% for host in nginx_analytics_api_gunicorn_hosts %}
server {{ host }}:{{ analytics_api_gunicorn_port }} fail_timeout=0;
{% endfor %}
}
server {
listen {{ ANALYTICS_API_NGINX_PORT }} default_server;
location ~ ^/static/(?P<file>.*) {
root {{ COMMON_DATA_DIR }}/{{ analytics_api_service_name }};
try_files /staticfiles/$file =404;
}
location / {
{% include "basic-auth.j2" %}
try_files $uri @proxy_to_app;
}
# No basic auth security on the heartbeat url, so that ELB can use it
location /api/v0/status {
try_files $uri @proxy_to_app;
}
{% include "robots.j2" %}
location @proxy_to_app {
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header X-Forwarded-Port $http_x_forwarded_port;
proxy_set_header X-Forwarded-For $http_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_pass http://analytics_api_app_server;
}
}
{% if NGINX_HTPASSWD_USER and NGINX_HTPASSWD_PASS %}
{% if COMMON_ENABLE_BASIC_AUTH %}
satisfy any;
allow 127.0.0.1;
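# 'satisfy any' means passing either check is enough: localhost gets in via
# the allow rule; everyone else must present basic-auth credentials.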
......
......@@ -27,8 +27,7 @@ server {
listen {{EDXAPP_CMS_NGINX_PORT}} {{default_site}};
{% endif %}
server_name ~^((stage|prod)-)?studio\..*;
server_name {{ CMS_HOSTNAME }};
access_log {{ nginx_log_dir }}/access.log;
error_log {{ nginx_log_dir }}/error.log error;
......@@ -42,9 +41,15 @@ server {
location @proxy_to_cms_app {
{% if NGINX_SET_X_FORWARDED_HEADERS %}
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-For $remote_addr;
{% else %}
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header X-Forwarded-Port $http_x_forwarded_port;
proxy_set_header X-Forwarded-For $http_x_forwarded_for;
{% endif %}
proxy_set_header Host $http_host;
proxy_redirect off;
......
......@@ -43,9 +43,15 @@ server {
{% include "robots.j2" %}
location @proxy_to_app {
{% if NGINX_SET_X_FORWARDED_HEADERS %}
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-For $remote_addr;
{% else %}
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header X-Forwarded-Port $http_x_forwarded_port;
proxy_set_header X-Forwarded-For $http_x_forwarded_for;
{% endif %}
proxy_set_header Host $http_host;
proxy_redirect off;
......
......@@ -21,9 +21,15 @@ server {
rewrite ^(.*)/favicon.ico$ /static/images/favicon.ico last;
location @proxy_to_lms-preview_app {
{% if NGINX_SET_X_FORWARDED_HEADERS %}
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-For $remote_addr;
{% else %}
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header X-Forwarded-Port $http_x_forwarded_port;
proxy_set_header X-Forwarded-For $http_x_forwarded_for;
{% endif %}
proxy_set_header Host $http_host;
proxy_redirect off;
......
......@@ -38,9 +38,15 @@ server {
rewrite ^(.*)/favicon.ico$ /static/images/favicon.ico last;
location @proxy_to_lms_app {
{% if NGINX_SET_X_FORWARDED_HEADERS %}
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-For $remote_addr;
{% else %}
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header X-Forwarded-Port $http_x_forwarded_port;
proxy_set_header X-Forwarded-For $http_x_forwarded_for;
{% endif %}
proxy_set_header Host $http_host;
proxy_redirect off;
......
{%- if "kibana" in nginx_default_sites -%}
{%- set default_site = "default" -%}
{%- else -%}
{%- set default_site = "" -%}
{%- endif -%}
upstream elasticsearch_server {
server 127.0.0.1:9200;
}
server {
# Kibana server, templated by ansible
{% if NGINX_ENABLE_SSL %}
listen {{KIBANA_NGINX_PORT}} {{default_site}};
listen {{KIBANA_SSL_NGINX_PORT}} {{default_site}} ssl;
ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }};
ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }};
{% else %}
listen {{KIBANA_NGINX_PORT}} {{default_site}};
{% endif %}
server_name {{ KIBANA_SERVER_NAME }};
root {{ kibana_app_dir }}/htdocs;
access_log {{ nginx_log_dir }}/kibana.access.log;
error_log {{ nginx_log_dir }}/kibana.error.log error;
# Access restriction
{% include "basic-auth.j2" %}
# Set image format types to expire in a very long time
location ~* ^.+\.(jpg|jpeg|gif|png|ico)$ {
access_log off;
expires max;
}
# Set css and js to expire in a very long time
location ~* ^.+\.(css|js)$ {
access_log off;
expires max;
}
# Elastic Search
location /e {
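    # config.js points the browser at /e; the rewrite below strips that
    # prefix before proxying to Elasticsearch.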
rewrite /e/(.*) /$1 break;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $http_x_forwarded_for;
proxy_redirect off;
proxy_pass http://elasticsearch_server;
}
# Kibana
location / {
root {{ kibana_app_dir }}/htdocs;
index index.html;
expires 1d;
try_files $uri/ $uri;
if (-f $request_filename) {
break;
}
}
}
---
NLTK_DATA_DIR: "/usr/local/share/nltk_data"
# Once the file is downloaded, it won't be downloaded again,
# so if you need to version the data files, you should upload
# your own version of the files with the version appended to the filename.
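# e.g. (bucket and filename purely illustrative):
#   url: "http://example-bucket.s3.amazonaws.com/nltk/stopwords-20140401.zip"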
NLTK_DATA:
- { path: "taggers/maxent_treebank_pos_tagger",
url: "http://nltk.github.com/nltk_data/packages/taggers/maxent_treebank_pos_tagger.zip" }
- { path: "corpora/stopwords",
url: "http://nltk.github.com/nltk_data/packages/corpora/stopwords.zip" }
- { path: "corpora/wordnet",
url: "http://nltk.github.com/nltk_data/packages/corpora/wordnet.zip" }
---
- name: Install unzip
apt: pkg=unzip state=present
- name: create the nltk data directory and subdirectories
file: path={{ NLTK_DATA_DIR }}/{{ item.path|dirname }} state=directory
with_items: NLTK_DATA
tags:
- deploy
- name: download nltk data
get_url: >
dest={{ NLTK_DATA_DIR }}/{{ item.url|basename }}
url={{ item.url }}
with_items: NLTK_DATA
register: nltk_download
tags:
- deploy
- name: unarchive nltk data
shell: >
unzip {{ NLTK_DATA_DIR }}/{{ item.url|basename }} chdir="{{ NLTK_DATA_DIR }}/{{ item.path|dirname }}"
with_items: NLTK_DATA
when: nltk_download|changed
tags:
- deploy
......@@ -11,10 +11,6 @@ ora_venv_dir: "{{ ora_venvs_dir }}/ora"
ora_venv_bin: "{{ ora_venv_dir }}/bin"
ora_user: "ora"
ora_deploy_path: "{{ ora_venv_bin }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
ora_nltk_data_dir: "{{ ora_data_dir}}/nltk_data"
ora_nltk_download_url: http://edx-static.s3.amazonaws.com/nltk/nltk-data-20131113.tar.gz
ora_nltk_tmp_file: "{{ ora_data_dir }}/nltk.tmp.tar.tz"
ora_source_repo: https://github.com/edx/edx-ora.git
ora_version: 'master'
......@@ -143,7 +139,6 @@ ora_auth_config:
ora_environment:
SERVICE_VARIANT: ora
NLTK_DATA: $ora_nltk_data_dir
LANG: $ORA_LANG
PATH: $ora_deploy_path
......
---
dependencies:
- supervisor
- nltk
......@@ -104,7 +104,7 @@
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
when: not disable_edx_services
changed_when: supervisor_update.stdout != ""
changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != ""
- name: ensure ora is started
supervisorctl_local: >
......
......@@ -43,17 +43,3 @@
notify:
- restart ora
- restart ora_celery
- name: download and install nltk
shell: |
set -e
curl -o {{ ora_nltk_tmp_file }} {{ ora_nltk_download_url }}
tar zxf {{ ora_nltk_tmp_file }}
rm -f {{ ora_nltk_tmp_file }}
touch {{ ora_nltk_download_url|basename }}-installed
creates={{ ora_data_dir }}/{{ ora_nltk_download_url|basename }}-installed
chdir={{ ora_data_dir }}
sudo_user: "{{ common_web_user }}"
notify:
- restart ora
- restart ora_celery
......@@ -5,7 +5,7 @@ command={{ ora_venv_bin }}/gunicorn --preload -b {{ ora_gunicorn_host }}:{{ ora_
user={{ common_web_user }}
directory={{ ora_code_dir }}
environment=PID=/var/run/gunicorn/edx-ora.pid,WORKERS={{ ora_gunicorn_workers }},PORT={{ ora_gunicorn_port }},ADDRESS={{ ora_gunicorn_host }},LANG={{ ORA_LANG }},DJANGO_SETTINGS_MODULE=edx_ora.aws,SERVICE_VARIANT=ora,NLTK_DATA={{ ora_nltk_data_dir }}
environment=PID=/var/run/gunicorn/edx-ora.pid,WORKERS={{ ora_gunicorn_workers }},PORT={{ ora_gunicorn_port }},ADDRESS={{ ora_gunicorn_host }},LANG={{ ORA_LANG }},DJANGO_SETTINGS_MODULE=edx_ora.aws,SERVICE_VARIANT=ora
stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log
......
......@@ -5,7 +5,7 @@ command={{ ora_venv_bin }}/python {{ ora_code_dir }}/manage.py celeryd --logleve
user={{ common_web_user }}
directory={{ ora_code_dir }}
environment=DJANGO_SETTINGS_MODULE=edx_ora.aws,SERVICE_VARIANT=ora,NLTK_DATA={{ ora_nltk_data_dir }}
environment=DJANGO_SETTINGS_MODULE=edx_ora.aws,SERVICE_VARIANT=ora
stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log
......
......@@ -26,7 +26,7 @@
shell: >
gdebi --n {{ rabbitmq_pkg_url|basename }}
chdir=/var/tmp
when: is_installed.stdout == "not installed"
when: is_installed.stdout is defined and is_installed.stdout == "not installed"
- name: stop rabbit cluster
service: name=rabbitmq-server state=stopped
......
......@@ -83,16 +83,16 @@
git: >
repo=https://github.com/sstephenson/ruby-build.git dest={{ tempdir.stdout }}/ruby-build
accept_hostkey=yes
when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)
when: tempdir.stdout is defined and (rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers))
sudo_user: "{{ rbenv_user }}"
- name: install ruby-build
command: ./install.sh chdir={{ tempdir.stdout }}/ruby-build
when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)
when: tempdir.stdout is defined and (rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers))
- name: remove temporary directory
file: path={{ tempdir.stdout }} state=absent
when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)
when: tempdir.stdout is defined and (rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers))
- name: check ruby {{ rbenv_ruby_version }} installed
shell: "rbenv versions | grep {{ rbenv_ruby_version }}"
......
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role redis
#
REDIS_PASSWORD: !!null
REDIS_BIND_IP: 127.0.0.1
REDIS_PERSISTENCE_DIR: "/var/lib/redis"
REDIS_MEMORY_LIMIT: "512mb"
REDIS_MAX_MEMORY_POLICY: "noeviction"
#
# vars are namespace with the module name.
#
redis_role_name: redis
redis_ppa: "ppa:chris-lea/redis-server"
redis_user: redis
redis_group: redis
#
# OS packages
#
redis_debian_pkgs:
- "redis-server=2:2.8.11-1chl1~precise1"
redis_redhat_pkgs: []
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role redis
#
# Overview:
#
#
- name: reload redis
service: name=redis-server state=restarted
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Role includes for role redis
#
# Example:
#
# dependencies:
# - {
# role: my_role
# my_role_var0: "foo"
# my_role_var1: "bar"
# }
dependencies:
- common
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role redis
#
# Overview:
#
#
# Dependencies:
#
#
# Example play:
#
#
- name: add the redis ppa
apt_repository: repo="{{ redis_ppa }}"
- name: install redis system packages
apt: pkg={{ item }} install_recommends=yes state=present
with_items: redis_debian_pkgs
notify: reload redis
- name: update redis configuration
template: >
src=etc/redis/redis.conf.j2
dest=/etc/redis/redis.conf
owner=root
group={{ redis_group }}
mode=0640
notify: reload redis
# Redis configuration file example
# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
#
# 1k => 1000 bytes
# 1kb => 1024 bytes
# 1m => 1000000 bytes
# 1mb => 1024*1024 bytes
# 1g => 1000000000 bytes
# 1gb => 1024*1024*1024 bytes
#
# units are case insensitive so 1GB 1Gb 1gB are all the same.
# By default Redis does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
daemonize yes
# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
# default. You can specify a custom pid file location here.
pidfile /var/run/redis.pid
# Accept connections on the specified port, default is 6379.
# If port 0 is specified Redis will not listen on a TCP socket.
port 6379
# If you want, you can bind a single interface; if the bind option is not
# specified, all interfaces will listen for incoming connections.
#
bind {{ REDIS_BIND_IP }}
# Specify the path for the unix socket that will be used to listen for
# incoming connections. There is no default, so Redis will not listen
# on a unix socket when not specified.
#
# unixsocket /tmp/redis.sock
# unixsocketperm 755
# Close the connection after a client is idle for N seconds (0 to disable)
timeout 0
# TCP keepalive.
#
# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
# of communication. This is useful for two reasons:
#
# 1) Detect dead peers.
# 2) Keep the connection alive from the point of view of network
# equipment in the middle.
#
# On Linux, the specified value (in seconds) is the period used to send ACKs.
# Note that to close the connection the double of the time is needed.
# On other kernels the period depends on the kernel configuration.
#
# A reasonable value for this option is 60 seconds.
tcp-keepalive 0
# Specify the server verbosity level.
# This can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (many rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
loglevel notice
# Specify the log file name. Also 'stdout' can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile /dev/null
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
# and optionally update the other syslog parameters to suit your needs.
syslog-enabled yes
# Specify the syslog identity.
# syslog-ident redis
# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
# syslog-facility local0
# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
databases 16
################################ SNAPSHOTTING #################################
#
# Save the DB on disk:
#
# save <seconds> <changes>
#
# Will save the DB if both the given number of seconds and the given
# number of write operations against the DB occurred.
#
# In the example below the behaviour will be to save:
# after 900 sec (15 min) if at least 1 key changed
# after 300 sec (5 min) if at least 10 keys changed
# after 60 sec if at least 10000 keys changed
#
# Note: you can disable saving at all commenting all the "save" lines.
#
# It is also possible to remove all the previously configured save
# points by adding a save directive with a single empty string argument
# like in the following example:
#
# save ""
save 900 1
save 300 10
save 60 10000
# By default Redis will stop accepting writes if RDB snapshots are enabled
# (at least one save point) and the latest background save failed.
# This will make the user aware (in a hard way) that data is not persisting
# on disk properly; otherwise chances are that no one will notice and some
# disaster will happen.
#
# If the background saving process will start working again Redis will
# automatically allow writes again.
#
# However, if you have set up proper monitoring of the Redis server
# and persistence, you may want to disable this feature so that Redis will
# continue to work as usual even if there are problems with disk,
# permissions, and so forth.
stop-writes-on-bgsave-error yes
# Compress string objects using LZF when dump .rdb databases?
# For default that's set to 'yes' as it's almost always a win.
# If you want to save some CPU in the saving child set it to 'no' but
# the dataset will likely be bigger if you have compressible values or keys.
rdbcompression yes
# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
# This makes the format more resistant to corruption but there is a performance
# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
# for maximum performance.
#
# RDB files created with checksum disabled have a checksum of zero that will
# tell the loading code to skip the check.
rdbchecksum yes
# The filename where to dump the DB
dbfilename redis.rdb
# The working directory.
#
# The DB will be written inside this directory, with the filename specified
# above using the 'dbfilename' configuration directive.
#
# The Append Only File will also be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
dir {{ REDIS_PERSISTENCE_DIR }}
################################# REPLICATION #################################
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
# another Redis server. Note that the configuration is local to the slave
# so for example it is possible to configure the slave to save the DB with a
# different interval, or to listen to another port, and so on.
#
# slaveof <masterip> <masterport>
# If the master is password protected (using the "requirepass" configuration
# directive below) it is possible to tell the slave to authenticate before
# starting the replication synchronization process, otherwise the master will
# refuse the slave request.
#
# masterauth <master-password>
# When a slave loses its connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
# still reply to client requests, possibly with out of date data, or the
# data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
# an error "SYNC with master in progress" to all the kind of commands
# but to INFO and SLAVEOF.
#
slave-serve-stale-data yes
# You can configure a slave instance to accept writes or not. Writing against
# a slave instance may be useful to store some ephemeral data (because data
# written on a slave will be easily deleted after resync with the master) but
# may also cause problems if clients are writing to it because of a
# misconfiguration.
#
# Since Redis 2.6 by default slaves are read-only.
#
# Note: read only slaves are not designed to be exposed to untrusted clients
# on the internet. It's just a protection layer against misuse of the instance.
# Still a read only slave exports by default all the administrative commands
# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
# security of read only slaves using 'rename-command' to shadow all the
# administrative / dangerous commands.
slave-read-only yes
# Slaves send PINGs to the server at a predefined interval. It's possible to change
# this interval with the repl_ping_slave_period option. The default value is 10
# seconds.
#
# repl-ping-slave-period 10
# The following option sets a timeout covering both bulk transfer I/O and
# master data or ping response delays. The default value is 60 seconds.
#
# It is important to make sure that this value is greater than the value
# specified for repl-ping-slave-period otherwise a timeout will be detected
# every time there is low traffic between the master and the slave.
#
# repl-timeout 60
# Disable TCP_NODELAY on the slave socket after SYNC?
#
# If you select "yes" Redis will use a smaller number of TCP packets and
# less bandwidth to send data to slaves. But this can add a delay for
# the data to appear on the slave side, up to 40 milliseconds with
# Linux kernels using a default configuration.
#
# If you select "no" the delay for data to appear on the slave side will
# be reduced but more bandwidth will be used for replication.
#
# By default we optimize for low latency, but in very high traffic conditions
# or when the master and slaves are many hops away, turning this to "yes" may
# be a good idea.
repl-disable-tcp-nodelay no
# The slave priority is an integer number published by Redis in the INFO output.
# It is used by Redis Sentinel in order to select a slave to promote into a
# master if the master is no longer working correctly.
#
# A slave with a low priority number is considered better for promotion, so
# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
# pick the one with priority 10, that is the lowest.
#
# However a special priority of 0 marks the slave as not able to perform the
# role of master, so a slave with priority of 0 will never be selected by
# Redis Sentinel for promotion.
#
# By default the priority is 100.
slave-priority 100
################################## SECURITY ###################################
# Require clients to issue AUTH <PASSWORD> before processing any other
# commands. This might be useful in environments in which you do not trust
# others with access to the host running redis-server.
#
# This should stay commented out for backward compatibility and because most
# people do not need auth (e.g. they run their own servers).
#
# Warning: since Redis is pretty fast an outside user can try up to
# 150k passwords per second against a good box. This means that you should
# use a very strong password otherwise it will be very easy to break.
#
{% if REDIS_PASSWORD %}
{# Quote the password in case it contains spaces. #}
requirepass "{{ REDIS_PASSWORD }}"
{% else %}
# requirepass foobared
{% endif %}
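# When requirepass is set, clients must authenticate before issuing other
# commands, for example:
#
#   redis-cli -a "$REDIS_PASSWORD" PING    (or send AUTH <password> manually)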
# Command renaming.
#
# It is possible to change the name of dangerous commands in a shared
# environment. For instance the CONFIG command may be renamed into something
# hard to guess so that it will still be available for internal-use tools
# but not available for general clients.
#
# Example:
#
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
#
# It is also possible to completely kill a command by renaming it into
# an empty string:
#
# rename-command CONFIG ""
#
# Please note that changing the name of commands that are logged into the
# AOF file or transmitted to slaves may cause problems.
################################### LIMITS ####################################
# Set the max number of connected clients at the same time. By default
# this limit is set to 10000 clients, however if the Redis server is not
# able to configure the process file limit to allow for the specified limit
# the max number of allowed clients is set to the current file limit
# minus 32 (as Redis reserves a few file descriptors for internal uses).
#
# Once the limit is reached Redis will close all new connections, sending
# an error 'max number of clients reached'.
#
# maxclients 10000
# Don't use more memory than the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys
# according to the eviction policy selected (see maxmemory-policy).
#
# If Redis can't remove keys according to the policy, or if the policy is
# set to 'noeviction', Redis will start to reply with errors to commands
# that would use more memory, like SET, LPUSH, and so on, and will continue
# to reply to read-only commands like GET.
#
# This option is usually useful when using Redis as an LRU cache, or to set
# a hard memory limit for an instance (using the 'noeviction' policy).
#
# WARNING: If you have slaves attached to an instance with maxmemory on,
# the size of the output buffers needed to feed the slaves is subtracted
# from the used memory count, so that network problems / resyncs will
# not trigger a loop where keys are evicted, and in turn the output
# buffer of slaves is full with DELs of keys evicted triggering the deletion
# of more keys, and so forth until the database is completely emptied.
#
# In short... if you have slaves attached it is suggested that you set a lower
# limit for maxmemory so that there is some free RAM on the system for slave
# output buffers (but this is not needed if the policy is 'noeviction').
#
maxmemory {{ REDIS_MEMORY_LIMIT }}
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
# is reached. You can select among five behaviors:
#
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key according to the LRU algorithm
# volatile-random -> remove a random key with an expire set
# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't expire at all, just return an error on write operations
#
# Note: with any of the above policies, Redis will return an error on write
# operations when there are no suitable keys for eviction.
#
# As of this writing these commands are: set setnx setex append
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
# getset mset msetnx exec sort
#
# The default is: volatile-lru
#
maxmemory-policy {{ REDIS_MAX_MEMORY_POLICY }}
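# For example, with REDIS_MAX_MEMORY_POLICY set to 'allkeys-lru' (a common
# choice when Redis serves purely as a cache) the directive above renders as:
#
#   maxmemory-policy allkeys-lru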
# LRU and minimal TTL algorithms are not precise algorithms but approximated
# algorithms (in order to save memory), so you can also tune the sample
# size to check. By default Redis will check three keys and
# pick the one that was used least recently; you can change the sample size
# using the following configuration directive.
#
# maxmemory-samples 3
############################## APPEND ONLY MODE ###############################
# By default Redis asynchronously dumps the dataset on disk. This mode is
# good enough in many applications, but an issue with the Redis process or
# a power outage may result in a few minutes of writes lost (depending on
# the configured save points).
#
# The Append Only File is an alternative persistence mode that provides
# much better durability. For instance using the default data fsync policy
# (see later in the config file) Redis can lose just one second of writes in a
# dramatic event like a server power outage, or a single write if something
# goes wrong with the Redis process itself while the operating system is
# still running correctly.
#
# AOF and RDB persistence can be enabled at the same time without problems.
# If the AOF is enabled on startup Redis will load the AOF, that is the file
# with the better durability guarantees.
#
# Please check http://redis.io/topics/persistence for more information.
appendonly no
# The name of the append only file (default: "appendonly.aof")
# appendfilename appendonly.aof
# The fsync() call tells the Operating System to actually write data on disk
# instead of waiting for more data in the output buffer. Some OSes will really
# flush data to disk, while others will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log. Slow, safest.
# everysec: fsync only one time every second. Compromise.
#
# The default is "everysec", as that's usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this to
# "no" that will let the operating system flush the output buffer when
# it wants, for better performance (but if you can live with the idea of
# some data loss consider the default persistence mode that's snapshotting),
# or on the contrary, use "always" that's very slow but a bit safer than
# everysec.
#
# For more details please check the following article:
# http://antirez.com/post/redis-persistence-demystified.html
#
# If unsure, use "everysec".
# appendfsync always
appendfsync everysec
# appendfsync no
# When the AOF fsync policy is set to always or everysec, and a background
# saving process (a background save or AOF log background rewriting) is
# performing a lot of I/O against the disk, in some Linux configurations
# Redis may block too long on the fsync() call. Note that there is no fix for
# this currently, as even performing fsync in a different thread will block
# our synchronous write(2) call.
#
# In order to mitigate this problem it's possible to use the following option
# that will prevent fsync() from being called in the main process while a
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving, the durability of Redis is
# the same as "appendfsync none". In practical terms, this means that it is
# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
# If you have latency problems turn this to "yes". Otherwise leave it as
# "no" that is the safest pick from the point of view of durability.
no-appendfsync-on-rewrite no
# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file implicitly calling
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
#
# This is how it works: Redis remembers the size of the AOF file after the
# latest rewrite (if no rewrite has happened since the restart, the size of
# the AOF at startup is used).
#
# This base size is compared to the current size. If the current size exceeds
# the base size by the specified percentage, the rewrite is triggered. Also
# you need to specify a minimal size for the AOF file to be rewritten, this
# is useful to avoid rewriting the AOF file even if the percentage increase
# is reached but it is still pretty small.
#
# Specify a percentage of zero in order to disable the automatic AOF
# rewrite feature.
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
################################ LUA SCRIPTING ###############################
# Max execution time of a Lua script in milliseconds.
#
# If the maximum execution time is reached Redis will log that a script is
# still in execution after the maximum allowed time and will start to
# reply to queries with an error.
#
# When a long running script exceeds the maximum execution time only the
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
# used to stop a script that has not yet called write commands. The second
# is the only way to shut down the server in case a write command was
# already issued by the script but the user doesn't want to wait for the
# natural termination of the script.
#
# Set it to 0 or a negative value for unlimited execution without warnings.
lua-time-limit 5000
################################## SLOW LOG ###################################
# The Redis Slow Log is a system to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and can not serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis
# what is the execution time, in microseconds, to exceed in order for the
# command to get logged, and the other parameter is the length of the
# slow log. When a new command is logged the oldest one is removed from the
# queue of logged commands.
# The following time is expressed in microseconds, so 1000000 is equivalent
# to one second. Note that a negative number disables the slow log, while
# a value of zero forces the logging of every command.
slowlog-log-slower-than 10000
# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 128
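# The slow log can be inspected and cleared at runtime, for example:
#
#   redis-cli SLOWLOG GET 10    (show the ten most recent slow entries)
#   redis-cli SLOWLOG RESET     (discard them and reclaim the memory)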
############################### ADVANCED CONFIG ###############################
# Hashes are encoded using a memory efficient data structure when they have a
# small number of entries, and the biggest entry does not exceed a given
# threshold. These thresholds can be configured using the following directives.
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
# Similarly to hashes, small lists are also encoded in a special way in order
# to save a lot of space. The special representation is only used when
# you are under the following limits:
list-max-ziplist-entries 512
list-max-ziplist-value 64
# Sets have a special encoding in just one case: when a set is composed
# of just strings that happen to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit on the size of the
# set in order to use this special memory saving encoding.
set-max-intset-entries 512
# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length and
# elements of a sorted set are below the following limits:
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# performs lazy rehashing: the more operations you run against a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# to queries with a 2 millisecond delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes
# The client output buffer limits can be used to force disconnection of clients
# that are not reading data from the server fast enough for some reason (a
# common reason is that a Pub/Sub client can't consume messages as fast as the
# publisher can produce them).
#
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients
# slave -> slave clients and MONITOR clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
#
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
#
# A client is immediately disconnected once the hard limit is reached, or if
# the soft limit is reached and remains reached for the specified number of
# seconds (continuously).
# So for instance if the hard limit is 32 megabytes and the soft limit is
# 16 megabytes / 10 seconds, the client will get disconnected immediately
# if the size of the output buffers reaches 32 megabytes, but will also get
# disconnected if the client reaches 16 megabytes and continuously overcomes
# the limit for 10 seconds.
#
# By default normal clients are not limited because they don't receive data
# without asking (in a push way), but just after a request, so only
# asynchronous clients may create a scenario where data is requested faster
# than it can be read.
#
# Instead there is a default limit for pubsub and slave clients, since
# subscribers and slaves receive data in a push fashion.
#
# Both the hard or the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
# Redis calls an internal function to perform many background tasks, like
# closing connections of clients in timeout, purging expired keys that are
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
# tasks to perform according to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
# The range is between 1 and 500, however a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
hz 10
# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes
################################## INCLUDES ###################################
# Include one or more other config files here. This is useful if you
# have a standard template that goes to all Redis servers but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
# include /path/to/local.conf
# include /path/to/other.conf
---
SNORT_OINKCODE: 'oinkcode'
SNORT_RULES_URL: [ 'http://www.snort.org/pub-bin/oinkmaster.cgi/{{ SNORT_OINKCODE }}/snortrules-snapshot-2931.tar.gz',
'http://rules.emergingthreats.net/open/snort-2.9.0/emerging.rules.tar.gz' ]
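# Both values above can be overridden at runtime with extra vars, e.g.
# (the playbook name here is illustrative):
#
#   ansible-playbook snort.yml -e 'SNORT_OINKCODE=your-real-oinkcode'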
---
# install and configure snort IDS
#
- name: install snort
apt: pkg={{ item }} state="present"
with_items:
- snort
- oinkmaster
- name: configure snort
template: >
src=etc/snort/snort.conf.j2 dest=/etc/snort/snort.conf
owner=root group=root mode=0644
- name: configure snort (debian)
template: >
src=etc/snort/snort.debian.conf.j2 dest=/etc/snort/snort.debian.conf
owner=root group=root mode=0644
- name: configure oinkmaster
template: >
src=etc/oinkmaster.conf.j2 dest=/etc/oinkmaster.conf
owner=root group=root mode=0644
- name: update snort
shell: oinkmaster -C /etc/oinkmaster.conf -o /etc/snort/rules/
sudo: yes
- name: snort service
service: >
name="snort"
state="started"
- name: open read permissions on snort logs
file: >
name="/var/log/snort"
state="directory"
mode="755"
- name: install oinkmaster cronjob
template: >
src=etc/cron.daily/oinkmaster.j2 dest=/etc/cron.daily/oinkmaster
owner=root group=root mode=0755
#! /bin/bash
oinkmaster -C /etc/oinkmaster.conf -o /etc/snort/rules/ > /dev/null
service snort restart
{% for url in SNORT_RULES_URL %}
url = {{ url }}
{% endfor %}
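# With the default SNORT_RULES_URL list the loop above renders one "url ="
# line per entry, e.g.:
#
#   url = http://rules.emergingthreats.net/open/snort-2.9.0/emerging.rules.tar.gz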
# Ignore local.rules from the rules archive by default since we might
# have put some local rules in our own local.rules and we don't want it
# to get overwritten by the empty one from the archive after each
# update.
skipfile local.rules
# The file deleted.rules contains rules that have been deleted from
# other files, so there is usually no point in updating it.
skipfile deleted.rules
# Also skip snort.conf by default since we don't want to overwrite our
# own snort.conf if we have it in the same directory as the rules. If
# you have your own production copy of snort.conf in another directory,
# it may be really nice to check for changes in this file though,
# especially since variables are sometimes added or modified and
# new/old files are included/excluded.
skipfile snort.conf
#--------------------------------------------------
# VRT Rule Packages Snort.conf
#
# For more information visit us at:
# http://www.snort.org Snort Website
# http://vrt-sourcefire.blogspot.com/ Sourcefire VRT Blog
#
# Mailing list Contact: snort-sigs@lists.sourceforge.net
# False Positive reports: fp@sourcefire.com
# Snort bugs: bugs@snort.org
#
# Compatible with Snort Versions:
# VERSIONS : 2.9.2.0
#
# Snort build options:
# OPTIONS : --enable-ipv6 --enable-gre --enable-mpls --enable-targetbased --enable-decoder-preprocessor-rules --enable-ppm --enable-perfprofiling --enable-zlib --enable-active-response --enable-normalizer --enable-reload --enable-react --enable-flexresp3
#
# Additional information:
# This configuration file enables active response, to run snort in
# test mode -T you are required to supply an interface -i <interface>
# or test mode will fail to fully validate the configuration and
# exit with a FATAL error
#--------------------------------------------------
###################################################
# This file contains a sample snort configuration.
# You should take the following steps to create your own custom configuration:
#
# 1) Set the network variables.
# 2) Configure the decoder
# 3) Configure the base detection engine
# 4) Configure dynamic loaded libraries
# 5) Configure preprocessors
# 6) Configure output plugins
# 7) Customize your rule set
# 8) Customize preprocessor and decoder rule set
# 9) Customize shared object rule set
###################################################
###################################################
# Step #1: Set the network variables. For more information, see README.variables
###################################################
# Setup the network addresses you are protecting
ipvar HOME_NET any
# Set up the external network addresses. Leave as "any" in most situations
ipvar EXTERNAL_NET any
#ipvar EXTERNAL_NET !$HOME_NET
# List of DNS servers on your network
ipvar DNS_SERVERS $HOME_NET
# List of SMTP servers on your network
ipvar SMTP_SERVERS $HOME_NET
# List of web servers on your network
ipvar HTTP_SERVERS $HOME_NET
# List of sql servers on your network
ipvar SQL_SERVERS $HOME_NET
# List of telnet servers on your network
ipvar TELNET_SERVERS $HOME_NET
# List of ssh servers on your network
ipvar SSH_SERVERS $HOME_NET
# List of ftp servers on your network
ipvar FTP_SERVERS $HOME_NET
# List of sip servers on your network
ipvar SIP_SERVERS $HOME_NET
# List of ports you run web servers on
portvar HTTP_PORTS [80,8000,18000,18010,18020,18030,18040,18050,18060,18070,18080,18090,18100]
# List of ports you want to look for SHELLCODE on.
portvar SHELLCODE_PORTS !80
# List of ports you might see oracle attacks on
portvar ORACLE_PORTS 1024:
# List of ports you want to look for SSH connections on:
portvar SSH_PORTS 22
# List of ports you run ftp servers on
portvar FTP_PORTS [21,2100,3535]
# List of ports you run SIP servers on
portvar SIP_PORTS [5060,5061,5600]
# other variables, these should not be modified
ipvar AIM_SERVERS [64.12.24.0/23,64.12.28.0/23,64.12.161.0/24,64.12.163.0/24,64.12.200.0/24,205.188.3.0/24,205.188.5.0/24,205.188.7.0/24,205.188.9.0/24,205.188.153.0/24,205.188.179.0/24,205.188.248.0/24]
# Path to your rules files (this can be a relative path)
# Note for Windows users: You are advised to make this an absolute path,
# such as: c:\snort\rules
var RULE_PATH /etc/snort/rules
var SO_RULE_PATH /etc/snort/so_rules
var PREPROC_RULE_PATH /etc/snort/preproc_rules
###################################################
# Step #2: Configure the decoder. For more information, see README.decode
###################################################
# Stop generic decode events:
config disable_decode_alerts
# Stop Alerts on experimental TCP options
config disable_tcpopt_experimental_alerts
# Stop Alerts on obsolete TCP options
config disable_tcpopt_obsolete_alerts
# Stop Alerts on T/TCP alerts
config disable_tcpopt_ttcp_alerts
# Stop Alerts on all other TCPOption type events:
config disable_tcpopt_alerts
# Stop Alerts on invalid ip options
config disable_ipopt_alerts
# Alert if value in length field (IP, TCP, UDP) is greater than the length of the packet
# config enable_decode_oversized_alerts
# Same as above, but drop packet if in Inline mode (requires enable_decode_oversized_alerts)
# config enable_decode_oversized_drops
# Configure IP / TCP checksum mode
config checksum_mode: all
# Configure maximum number of flowbit references. For more information, see README.flowbits
# config flowbits_size: 64
# Configure ports to ignore
# config ignore_ports: tcp 21 6667:6671 1356
# config ignore_ports: udp 1:17 53
# Configure active response for non inline operation. For more information, see README.active
# config response: eth0 attempts 2
# Configure DAQ related options for inline operation. For more information, see README.daq
#
# config daq: <type>
# config daq_dir: <dir>
# config daq_mode: <mode>
# config daq_var: <var>
#
# <type> ::= pcap | afpacket | dump | nfq | ipq | ipfw
# <mode> ::= read-file | passive | inline
# <var> ::= arbitrary <name>=<value> passed to DAQ
# <dir> ::= path to where to look for DAQ module so's
# Configure specific UID and GID to run snort as after dropping privs. For more information see snort -h command line options
#
# config set_gid:
# config set_uid:
# Configure default snaplen. Snort defaults to MTU of in use interface. For more information see README
#
# config snaplen:
#
# Configure default bpf_file to use for filtering what traffic reaches snort. For more information see snort -h command line options (-F)
#
# config bpf_file:
#
# Configure default log directory for snort to log to. For more information see snort -h command line options (-l)
#
# config logdir:
###################################################
# Step #3: Configure the base detection engine. For more information, see README.decode
###################################################
# Configure PCRE match limitations
config pcre_match_limit: 3500
config pcre_match_limit_recursion: 1500
# Configure the detection engine See the Snort Manual, Configuring Snort - Includes - Config
config detection: search-method ac-split search-optimize max-pattern-len 20
# Configure the event queue. For more information, see README.event_queue
config event_queue: max_queue 8 log 3 order_events content_length
###################################################
# Per packet and rule latency enforcement
# For more information see README.ppm
###################################################
# Per Packet latency configuration
#config ppm: max-pkt-time 250, \
# fastpath-expensive-packets, \
# pkt-log
# Per Rule latency configuration
#config ppm: max-rule-time 200, \
# threshold 3, \
# suspend-expensive-rules, \
# suspend-timeout 20, \
# rule-log alert
###################################################
# Configure Perf Profiling for debugging
# For more information see README.PerfProfiling
###################################################
#config profile_rules: print all, sort avg_ticks
#config profile_preprocs: print all, sort avg_ticks
###################################################
# Step #4: Configure dynamic loaded libraries.
# For more information, see Snort Manual, Configuring Snort - Dynamic Modules
###################################################
# path to dynamic preprocessor libraries
dynamicpreprocessor directory /usr/lib/snort_dynamicpreprocessor/
# path to base preprocessor engine
dynamicengine /usr/lib/snort_dynamicengine/libsf_engine.so
# path to dynamic rules libraries
# dynamicdetection directory /usr/lib/snort_dynamicrules
###################################################
# Step #5: Configure preprocessors
# For more information, see the Snort Manual, Configuring Snort - Preprocessors
###################################################
# Inline packet normalization. For more information, see README.normalize
# Does nothing in IDS mode
preprocessor normalize_ip4
preprocessor normalize_tcp: ips ecn stream
preprocessor normalize_icmp4
preprocessor normalize_ip6
preprocessor normalize_icmp6
# Target-based IP defragmentation. For more information, see README.frag3
preprocessor frag3_global: max_frags 65536
preprocessor frag3_engine: policy windows detect_anomalies overlap_limit 10 min_fragment_length 100 timeout 180
# Target-Based stateful inspection/stream reassembly. For more information, see README.stream5
preprocessor stream5_global: track_tcp yes, \
track_udp yes, \
track_icmp no, \
max_tcp 262144, \
max_udp 131072, \
max_active_responses 2, \
min_response_seconds 5
preprocessor stream5_tcp: policy windows, detect_anomalies, require_3whs 180, \
overlap_limit 10, small_segments 3 bytes 150, timeout 180, \
ports client 21 22 23 25 42 53 79 109 110 111 113 119 135 136 137 139 143 \
161 445 513 514 587 593 691 1433 1521 2100 3306 6070 6665 6666 6667 6668 6669 \
7000 8181 32770 32771 32772 32773 32774 32775 32776 32777 32778 32779, \
ports both 80 81 311 443 465 563 591 593 636 901 989 992 993 994 995 1220 1414 1830 2301 2381 2809 3128 3702 5250 7907 7001 7802 7777 7779 \
7801 7900 7901 7902 7903 7904 7905 7906 7908 7909 7910 7911 7912 7913 7914 7915 7916 \
7917 7918 7919 7920 8000 8008 8028 8080 8088 8118 8123 8180 8243 8280 8888 9090 9091 9443 9999 11371 \
18000 18010 18020 18030 18040 18050 18060 18070 18080 18090 18100
preprocessor stream5_udp: timeout 180
# performance statistics. For more information, see the Snort Manual, Configuring Snort - Preprocessors - Performance Monitor
# preprocessor perfmonitor: time 300 file /var/snort/snort.stats pktcnt 10000
# HTTP normalization and anomaly detection. For more information, see README.http_inspect
preprocessor http_inspect: global iis_unicode_map unicode.map 1252 compress_depth 65535 decompress_depth 65535
preprocessor http_inspect_server: server default \
chunk_length 500000 \
server_flow_depth 0 \
client_flow_depth 0 \
post_depth 65495 \
oversize_dir_length 500 \
max_header_length 750 \
max_headers 100 \
ports { 80 8000 18000 18010 18020 18030 18040 18050 18060 18070 18080 18090 18100 } \
non_rfc_char { 0x00 0x01 0x02 0x03 0x04 0x05 0x06 0x07 } \
enable_cookie \
extended_response_inspection \
inspect_gzip \
normalize_utf \
unlimited_decompress \
apache_whitespace no \
ascii no \
bare_byte no \
directory no \
double_decode no \
iis_backslash no \
iis_delimiter no \
iis_unicode no \
multi_slash no \
utf_8 no \
u_encode yes \
webroot no
# ONC-RPC normalization and anomaly detection. For more information, see the Snort Manual, Configuring Snort - Preprocessors - RPC Decode
preprocessor rpc_decode: 111 32770 32771 32772 32773 32774 32775 32776 32777 32778 32779 no_alert_multiple_requests no_alert_large_fragments no_alert_incomplete
# Back Orifice detection.
preprocessor bo
# FTP / Telnet normalization and anomaly detection. For more information, see README.ftptelnet
preprocessor ftp_telnet: global inspection_type stateful encrypted_traffic no
preprocessor ftp_telnet_protocol: telnet \
ayt_attack_thresh 20 \
normalize ports { 23 } \
detect_anomalies
preprocessor ftp_telnet_protocol: ftp server default \
def_max_param_len 100 \
ports { 21 2100 3535 } \
telnet_cmds yes \
ignore_telnet_erase_cmds yes \
ftp_cmds { ABOR ACCT ADAT ALLO APPE AUTH CCC CDUP } \
ftp_cmds { CEL CLNT CMD CONF CWD DELE ENC EPRT } \
ftp_cmds { EPSV ESTA ESTP FEAT HELP LANG LIST LPRT } \
ftp_cmds { LPSV MACB MAIL MDTM MIC MKD MLSD MLST } \
ftp_cmds { MODE NLST NOOP OPTS PASS PASV PBSZ PORT } \
ftp_cmds { PROT PWD QUIT REIN REST RETR RMD RNFR } \
ftp_cmds { RNTO SDUP SITE SIZE SMNT STAT STOR STOU } \
ftp_cmds { STRU SYST TEST TYPE USER XCUP XCRC XCWD } \
ftp_cmds { XMAS XMD5 XMKD XPWD XRCP XRMD XRSQ XSEM } \
ftp_cmds { XSEN XSHA1 XSHA256 } \
alt_max_param_len 0 { ABOR CCC CDUP ESTA FEAT LPSV NOOP PASV PWD QUIT REIN STOU SYST XCUP XPWD } \
alt_max_param_len 200 { ALLO APPE CMD HELP NLST RETR RNFR STOR STOU XMKD } \
alt_max_param_len 256 { CWD RNTO } \
alt_max_param_len 400 { PORT } \
alt_max_param_len 512 { SIZE } \
chk_str_fmt { ACCT ADAT ALLO APPE AUTH CEL CLNT CMD } \
chk_str_fmt { CONF CWD DELE ENC EPRT EPSV ESTP HELP } \
chk_str_fmt { LANG LIST LPRT MACB MAIL MDTM MIC MKD } \
chk_str_fmt { MLSD MLST MODE NLST OPTS PASS PBSZ PORT } \
chk_str_fmt { PROT REST RETR RMD RNFR RNTO SDUP SITE } \
chk_str_fmt { SIZE SMNT STAT STOR STRU TEST TYPE USER } \
chk_str_fmt { XCRC XCWD XMAS XMD5 XMKD XRCP XRMD XRSQ } \
chk_str_fmt { XSEM XSEN XSHA1 XSHA256 } \
cmd_validity ALLO < int [ char R int ] > \
cmd_validity EPSV < [ { char 12 | char A char L char L } ] > \
cmd_validity MACB < string > \
cmd_validity MDTM < [ date nnnnnnnnnnnnnn[.n[n[n]]] ] string > \
cmd_validity MODE < char ASBCZ > \
cmd_validity PORT < host_port > \
cmd_validity PROT < char CSEP > \
cmd_validity STRU < char FRPO [ string ] > \
cmd_validity TYPE < { char AE [ char NTC ] | char I | char L [ number ] } >
preprocessor ftp_telnet_protocol: ftp client default \
max_resp_len 256 \
bounce yes \
ignore_telnet_erase_cmds yes \
telnet_cmds yes
# SMTP normalization and anomaly detection. For more information, see README.SMTP
preprocessor smtp: ports { 25 465 587 691 } \
inspection_type stateful \
b64_decode_depth 0 \
qp_decode_depth 0 \
bitenc_decode_depth 0 \
uu_decode_depth 0 \
log_mailfrom \
log_rcptto \
log_filename \
log_email_hdrs \
normalize cmds \
normalize_cmds { ATRN AUTH BDAT CHUNKING DATA DEBUG EHLO EMAL ESAM ESND ESOM ETRN EVFY } \
normalize_cmds { EXPN HELO HELP IDENT MAIL NOOP ONEX QUEU QUIT RCPT RSET SAML SEND SOML } \
normalize_cmds { STARTTLS TICK TIME TURN TURNME VERB VRFY X-ADAT X-DRCP X-ERCP X-EXCH50 } \
normalize_cmds { X-EXPS X-LINK2STATE XADR XAUTH XCIR XEXCH50 XGEN XLICENSE XQUE XSTA XTRN XUSR } \
max_command_line_len 512 \
max_header_line_len 1000 \
max_response_line_len 512 \
alt_max_command_line_len 260 { MAIL } \
alt_max_command_line_len 300 { RCPT } \
alt_max_command_line_len 500 { HELP HELO ETRN EHLO } \
alt_max_command_line_len 255 { EXPN VRFY ATRN SIZE BDAT DEBUG EMAL ESAM ESND ESOM EVFY IDENT NOOP RSET } \
alt_max_command_line_len 246 { SEND SAML SOML AUTH TURN ETRN DATA RSET QUIT ONEX QUEU STARTTLS TICK TIME TURNME VERB X-EXPS X-LINK2STATE XADR XAUTH XCIR XEXCH50 XGEN XLICENSE XQUE XSTA XTRN XUSR } \
valid_cmds { ATRN AUTH BDAT CHUNKING DATA DEBUG EHLO EMAL ESAM ESND ESOM ETRN EVFY } \
valid_cmds { EXPN HELO HELP IDENT MAIL NOOP ONEX QUEU QUIT RCPT RSET SAML SEND SOML } \
valid_cmds { STARTTLS TICK TIME TURN TURNME VERB VRFY X-ADAT X-DRCP X-ERCP X-EXCH50 } \
valid_cmds { X-EXPS X-LINK2STATE XADR XAUTH XCIR XEXCH50 XGEN XLICENSE XQUE XSTA XTRN XUSR } \
xlink2state { enabled }
# Portscan detection. For more information, see README.sfportscan
# preprocessor sfportscan: proto { all } memcap { 10000000 } sense_level { low }
# ARP spoof detection. For more information, see the Snort Manual - Configuring Snort - Preprocessors - ARP Spoof Preprocessor
# preprocessor arpspoof
# preprocessor arpspoof_detect_host: 192.168.40.1 f0:0f:00:f0:0f:00
# SSH anomaly detection. For more information, see README.ssh
preprocessor ssh: server_ports { 22 } \
autodetect \
max_client_bytes 19600 \
max_encrypted_packets 20 \
max_server_version_len 100 \
enable_respoverflow enable_ssh1crc32 \
enable_srvoverflow enable_protomismatch
# SMB / DCE-RPC normalization and anomaly detection. For more information, see README.dcerpc2
preprocessor dcerpc2: memcap 102400, events [co ]
preprocessor dcerpc2_server: default, policy WinXP, \
detect [smb [139,445], tcp 135, udp 135, rpc-over-http-server 593], \
autodetect [tcp 1025:, udp 1025:, rpc-over-http-server 1025:], \
smb_max_chain 3, smb_invalid_shares ["C$", "D$", "ADMIN$"]
# DNS anomaly detection. For more information, see README.dns
preprocessor dns: ports { 53 } enable_rdata_overflow
# SSL anomaly detection and traffic bypass. For more information, see README.ssl
preprocessor ssl: ports { 443 465 563 636 989 992 993 994 995 7801 7802 7900 7901 7902 7903 7904 7905 7906 7907 7908 7909 7910 7911 7912 7913 7914 7915 7916 7917 7918 7919 7920 }, trustservers, noinspect_encrypted
# SDF sensitive data preprocessor. For more information see README.sensitive_data
preprocessor sensitive_data: alert_threshold 25
# SIP Session Initiation Protocol preprocessor. For more information see README.sip
preprocessor sip: max_sessions 10000, \
ports { 5060 5061 5600 }, \
methods { invite \
cancel \
ack \
bye \
register \
options \
refer \
subscribe \
update \
join \
info \
message \
notify \
benotify \
do \
qauth \
sprack \
publish \
service \
unsubscribe \
prack }, \
max_uri_len 512, \
max_call_id_len 80, \
max_requestName_len 20, \
max_from_len 256, \
max_to_len 256, \
max_via_len 1024, \
max_contact_len 512, \
max_content_len 1024
# IMAP preprocessor. For more information see README.imap
preprocessor imap: \
ports { 143 } \
b64_decode_depth 0 \
qp_decode_depth 0 \
bitenc_decode_depth 0 \
uu_decode_depth 0
# POP preprocessor. For more information see README.pop
preprocessor pop: \
ports { 110 } \
b64_decode_depth 0 \
qp_decode_depth 0 \
bitenc_decode_depth 0 \
uu_decode_depth 0
###################################################
# Step #6: Configure output plugins
# For more information, see Snort Manual, Configuring Snort - Output Modules
###################################################
# unified2
# Recommended for most installs
# output unified2: filename merged.log, limit 128, nostamp, mpls_event_types, vlan_event_types
# Additional configuration for specific types of installs
# output alert_unified2: filename snort.alert, limit 128, nostamp
# output log_unified2: filename snort.log, limit 128, nostamp
# syslog
output alert_syslog: LOG_AUTH LOG_ALERT
# pcap
output log_tcpdump: tcpdump.log
# database
# output database: alert, <db_type>, user=<username> password=<password> test dbname=<name> host=<hostname>
# output database: log, <db_type>, user=<username> password=<password> test dbname=<name> host=<hostname>
#
# On Debian Systems, the database configuration is kept in a separate file:
# /etc/snort/database.conf.
# This file can be empty, if you are not using any database information
# If you are using databases, please edit that file instead of this one, to
# ensure smoother upgrades to future versions of this package.
include database.conf
#
# prelude
# output alert_prelude
# metadata reference data. do not modify these lines
include classification.config
include reference.config
###################################################
# Step #7: Customize your rule set
# For more information, see Snort Manual, Writing Snort Rules
#
# NOTE: All categories are enabled in this conf file
###################################################
# site specific rules
include $RULE_PATH/local.rules
include $RULE_PATH/attack-responses.rules
include $RULE_PATH/backdoor.rules
include $RULE_PATH/bad-traffic.rules
# include $RULE_PATH/blacklist.rules
# include $RULE_PATH/botnet-cnc.rules
include $RULE_PATH/chat.rules
# include $RULE_PATH/content-replace.rules
include $RULE_PATH/ddos.rules
include $RULE_PATH/dns.rules
include $RULE_PATH/dos.rules
include $RULE_PATH/community-dos.rules
include $RULE_PATH/exploit.rules
include $RULE_PATH/community-exploit.rules
include $RULE_PATH/finger.rules
include $RULE_PATH/ftp.rules
include $RULE_PATH/community-ftp.rules
include $RULE_PATH/icmp.rules
include $RULE_PATH/icmp-info.rules
include $RULE_PATH/imap.rules
include $RULE_PATH/community-imap.rules
include $RULE_PATH/info.rules
include $RULE_PATH/misc.rules
include $RULE_PATH/multimedia.rules
include $RULE_PATH/mysql.rules
include $RULE_PATH/netbios.rules
include $RULE_PATH/nntp.rules
include $RULE_PATH/community-nntp.rules
include $RULE_PATH/oracle.rules
include $RULE_PATH/community-oracle.rules
include $RULE_PATH/other-ids.rules
include $RULE_PATH/p2p.rules
# include $RULE_PATH/phishing-spam.rules
include $RULE_PATH/policy.rules
# include $RULE_PATH/community-policy.rules
# include $RULE_PATH/community-inappropriate.rules
# include $RULE_PATH/community-game.rules
# include $RULE_PATH/community-misc.rules
include $RULE_PATH/pop2.rules
include $RULE_PATH/pop3.rules
include $RULE_PATH/rpc.rules
include $RULE_PATH/rservices.rules
# include $RULE_PATH/scada.rules
include $RULE_PATH/scan.rules
# Note: this rule is extremely chatty, enable with care
include $RULE_PATH/shellcode.rules
include $RULE_PATH/smtp.rules
include $RULE_PATH/community-smtp.rules
include $RULE_PATH/snmp.rules
# include $RULE_PATH/specific-threats.rules
# include $RULE_PATH/spyware-put.rules
include $RULE_PATH/sql.rules
include $RULE_PATH/telnet.rules
include $RULE_PATH/tftp.rules
include $RULE_PATH/virus.rules
include $RULE_PATH/community-virus.rules
include $RULE_PATH/community-bot.rules
# include $RULE_PATH/voip.rules
include $RULE_PATH/community-sip.rules
# Specific web server rules:
# include $RULE_PATH/web-activex.rules
include $RULE_PATH/web-attacks.rules
include $RULE_PATH/web-cgi.rules
include $RULE_PATH/web-client.rules
include $RULE_PATH/web-coldfusion.rules
include $RULE_PATH/web-frontpage.rules
include $RULE_PATH/web-iis.rules
include $RULE_PATH/web-misc.rules
include $RULE_PATH/web-php.rules
include $RULE_PATH/community-sql-injection.rules
include $RULE_PATH/community-web-client.rules
include $RULE_PATH/community-web-dos.rules
include $RULE_PATH/community-web-iis.rules
include $RULE_PATH/community-web-misc.rules
include $RULE_PATH/community-web-php.rules
include $RULE_PATH/x11.rules
###################################################
# Step #8: Customize your preprocessor and decoder alerts
# For more information, see README.decoder_preproc_rules
###################################################
# decoder and preprocessor event rules
# include $PREPROC_RULE_PATH/preprocessor.rules
# include $PREPROC_RULE_PATH/decoder.rules
# include $PREPROC_RULE_PATH/sensitive-data.rules
###################################################
# Step #9: Customize your Shared Object Snort Rules
# For more information, see http://vrt-sourcefire.blogspot.com/2009/01/using-vrt-certified-shared-object-rules.html
###################################################
# dynamic library rules
# include $SO_RULE_PATH/bad-traffic.rules
# include $SO_RULE_PATH/chat.rules
# include $SO_RULE_PATH/dos.rules
# include $SO_RULE_PATH/exploit.rules
# include $SO_RULE_PATH/icmp.rules
# include $SO_RULE_PATH/imap.rules
# include $SO_RULE_PATH/misc.rules
# include $SO_RULE_PATH/multimedia.rules
# include $SO_RULE_PATH/netbios.rules
# include $SO_RULE_PATH/nntp.rules
# include $SO_RULE_PATH/pop3.rules
# include $SO_RULE_PATH/p2p.rules
# include $SO_RULE_PATH/smtp.rules
# include $SO_RULE_PATH/snmp.rules
# include $SO_RULE_PATH/specific-threats.rules
# include $SO_RULE_PATH/sql.rules
# include $SO_RULE_PATH/web-activex.rules
# include $SO_RULE_PATH/web-client.rules
# include $SO_RULE_PATH/web-iis.rules
# include $SO_RULE_PATH/web-misc.rules
# Event thresholding or suppression commands. See threshold.conf
include threshold.conf
# snort.debian.config (Debian Snort configuration file)
#
# This file was generated by the post-installation script of the snort
# package using values from the debconf database.
#
# It is used for options that are changed by Debian to leave
# the original configuration files untouched.
#
# This file is automatically updated on upgrades of the snort package
# *only* if it has not been modified since the last upgrade of that package.
#
# If you have edited this file but would like it to be automatically updated
# again, run the following command as root:
# dpkg-reconfigure snort
DEBIAN_SNORT_STARTUP="boot"
DEBIAN_SNORT_HOME_NET=""
DEBIAN_SNORT_OPTIONS=""
DEBIAN_SNORT_INTERFACE="eth0"
DEBIAN_SNORT_SEND_STATS="true"
DEBIAN_SNORT_STATS_RCPT="root"
DEBIAN_SNORT_STATS_THRESHOLD="1"
......@@ -18,20 +18,28 @@ splunk_role_name: 'splunk'
SPLUNKFORWARDER_SERVER: 'localhost:9997'
SPLUNKFORWARDER_PACKAGE_LOCATION: !!null
SPLUNKFORWARDER_PACKAGE_URL: !!null
SPLUNKFORWARDER_DEB: !!null
SPLUNKFORWARDER_PASSWORD: !!null
SPLUNKFORWARDER_LOG_ITEMS:
- directory: '{{ COMMON_LOG_DIR }}'
- source: '{{ COMMON_LOG_DIR }}/lms'
recursive: true
index: '{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}'
sourcetype: 'edx'
- directory: '/var/log'
- source: '{{ COMMON_LOG_DIR }}/cms'
recursive: true
index: '{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}'
sourcetype: 'edx'
- source: '{{ COMMON_LOG_DIR }}'
recursive: true
index: '{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}'
sourcetype: 'syslog'
- source: '/var/log'
recursive: true
index: '{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}'
sourcetype: 'syslog'
- directory: '{{ COMMON_LOG_DIR }}/nginx'
- source: '{{ COMMON_LOG_DIR }}/nginx'
recursive: true
index: '{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}'
sourcetype: 'nginx'
......
......@@ -32,7 +32,7 @@
- name: download the splunk deb
get_url: >
dest="/tmp/{{SPLUNKFORWARDER_DEB}}"
url="{{SPLUNKFORWARDER_PACKAGE_LOCATION}}{{SPLUNKFORWARDER_DEB}}"
url="{{SPLUNKFORWARDER_PACKAGE_URL}}"
register: download_deb
- name: install splunk forwarder
......
......@@ -2,7 +2,7 @@
host = {{ansible_hostname}}
{% for loggable in SPLUNKFORWARDER_LOG_ITEMS%}
[monitor://{{loggable.directory}}]
[monitor://{{loggable.source}}]
recursive = {{loggable.recursive|default(false)}}
{% if loggable.sourcetype is defined %}
sourcetype = {{loggable.sourcetype}}
......
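# For a log item like the nginx entry above, the rendered inputs.conf stanza
# would look roughly like this (paths and index value illustrative):
#
#   [monitor:///edx/var/log/nginx]
#   recursive = true
#   sourcetype = nginx
#   index = stage-edx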
......@@ -25,37 +25,37 @@
- name: stop supervisor
stat: path=/etc/init/supervisor.conf
register: stat_out
changed_when: stat_out.stat.exists
changed_when: stat_out is defined and stat_out.stat.exists
notify: stop supervisor
- name: stop supervisor.devpi
stat: path=/etc/init/supervisor.devpi.conf
register: stat_out
changed_when: stat_out.stat.exists
changed_when: stat_out is defined and stat_out.stat.exists
notify: stop supervisor
- name: stop nginx
stat: path=/etc/init.d/nginx
register: stat_out
changed_when: stat_out.stat.exists
changed_when: stat_out is defined and stat_out.stat.exists
notify: stop nginx
- name: stop rabbitmq-server
stat: path=/etc/init.d/rabbitmq-server
register: stat_out
changed_when: stat_out.stat.exists
changed_when: stat_out is defined and stat_out.stat.exists
notify: stop rabbitmq-server
- name: stop memcached
stat: path=/etc/init.d/memcached
register: stat_out
changed_when: stat_out.stat.exists
changed_when: stat_out is defined and stat_out.stat.exists
notify: stop memcached
- name: stop mongodb
stat: path=/etc/init.d/mongodb
register: stat_out
changed_when: stat_out.stat.exists
changed_when: stat_out is defined and stat_out.stat.exists
notify: stop mongodb
- shell: "true"
......
......@@ -13,6 +13,11 @@
---
SUPERVISOR_HTTP_BIND_IP: '127.0.0.1'
# Used by the pre-supervisor script if you want to
# notify a hipchat room with the output.
SUPERVISOR_HIPCHAT_API_KEY: !!null
SUPERVISOR_HIPCHAT_ROOM: default
# do not override the bind_port since
# all supervisors will then try to listen
# on the same one
......@@ -35,3 +40,7 @@ supervisor_cfg: "{{ supervisor_app_dir }}/supervisord.conf"
# upstart service name and user
supervisor_service: supervisor
supervisor_service_user: "{{ common_web_user }}"
supervisor_pip_pkgs:
- boto
- python-simple-hipchat
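# boto is needed by pre_supervisor_checks.py to read EC2 instance tags;
# python-simple-hipchat provides the 'hipchat' module used for the optional
# room notifications.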
# Get the tags for this instance
import argparse
import boto
import boto.utils
from boto.utils import get_instance_metadata
from boto.exception import AWSConnectionError
import hipchat
import os
import subprocess
import traceback
# Services that should be checked for migrations.
MIGRATION_COMMANDS = {
'lms': "{python} {code_dir}/manage.py lms migrate --noinput --settings=aws --db-dry-run --merge",
'cms': "{python} {code_dir}/manage.py cms migrate --noinput --settings=aws --db-dry-run --merge",
'xqueue': "{python} {code_dir}/manage.py xqueue migrate --noinput --settings=aws --db-dry-run --merge",
}
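# Each command above runs Django's migrate in --db-dry-run mode, so it makes
# no changes; its output contains the word 'Migrating' when unapplied
# migrations exist, which is what the checks later in this script look for.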
HIPCHAT_USER = "PreSupervisor"
def services_for_instance(instance_id):
"""
Get the list of all services named by the services tag in this
instance's tags.
"""
ec2 = boto.connect_ec2()
reservations = ec2.get_all_instances(instance_ids=[instance_id])
for reservation in reservations:
for instance in reservation.instances:
if instance.id == instance_id:
try:
services = instance.tags['services'].split(',')
except KeyError as ke:
msg = "Tag named 'services' not found on this instance({})".format(instance_id)
raise Exception(msg)
for service in services:
yield service
def edp_for_instance(instance_id):
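    """
    Return the environment, deployment and play tags of this instance
    as an (environment, deployment, play) tuple.
    """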
ec2 = boto.connect_ec2()
reservations = ec2.get_all_instances(instance_ids=[instance_id])
for reservation in reservations:
for instance in reservation.instances:
if instance.id == instance_id:
try:
environment = instance.tags['environment']
deployment = instance.tags['deployment']
play = instance.tags['play']
except KeyError as ke:
msg = "{} tag not found on this instance({})".format(ke.message, instance_id)
raise Exception(msg)
return (environment, deployment, play)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
......@@ -14,24 +57,95 @@ if __name__ == '__main__':
parser.add_argument("-e","--enabled",
help="The location of the enabled services.")
migration_args = parser.add_argument_group("edxapp_migrations",
"Args for running edxapp migration checks.")
migration_args.add_argument("--edxapp-code-dir",
help="Location of the edx-platform code.")
migration_args.add_argument("--edxapp-python",
help="Path to python to use for executing migration check.")
xq_migration_args = parser.add_argument_group("xqueue_migrations",
"Args for running xqueue migration checks.")
xq_migration_args.add_argument("--xqueue-code-dir",
help="Location of the edx-platform code.")
xq_migration_args.add_argument("--xqueue-python",
help="Path to python to use for executing migration check.")
hipchat_args = parser.add_argument_group("hipchat",
"Args for hipchat notification.")
hipchat_args.add_argument("-c","--hipchat-api-key",
help="Hipchat token if you want to receive notifications via hipchat.")
hipchat_args.add_argument("-r","--hipchat-room",
help="Room to send messages to.")
args = parser.parse_args()
ec2 = boto.connect_ec2()
instance_id = boto.utils.get_instance_metadata()['instance-id']
reservations = ec2.get_all_instances(instance_ids=[instance_id])
report = []
for reservation in reservations:
for instance in reservation.instances:
if instance.id == instance_id:
services = instance.tags['services'].split(',')
for service in services:
prefix = None
notify = None
try:
if args.hipchat_api_key:
hc = hipchat.HipChat(token=args.hipchat_api_key)
notify = lambda message: hc.message_room(room_id=args.hipchat_room,
message_from=HIPCHAT_USER, message=message)
except Exception as e:
print("Failed to initialize hipchat, {}".format(e))
traceback.print_exc()
instance_id = get_instance_metadata()['instance-id']
prefix = instance_id
try:
environment, deployment, play = edp_for_instance(instance_id)
prefix = "{environment}-{deployment}-{play}-{instance_id}".format(
environment=environment,
deployment=deployment,
play=play,
instance_id=instance_id)
for service in services_for_instance(instance_id):
if service in MIGRATION_COMMANDS:
# Do extra migration related stuff.
if (service == 'lms' or service == 'cms') and args.edxapp_code_dir:
cmd = MIGRATION_COMMANDS[service].format(python=args.edxapp_python,
code_dir=args.edxapp_code_dir)
if os.path.exists(args.edxapp_code_dir):
os.chdir(args.edxapp_code_dir)
# Run migration check command.
output = subprocess.check_output(cmd, shell=True)
if 'Migrating' in output:
raise Exception("Migrations have not been run for {}".format(service))
elif service == 'xqueue' and args.xqueue_code_dir:
cmd = MIGRATION_COMMANDS[service].format(python=args.xqueue_python,
code_dir=args.xqueue_code_dir)
if os.path.exists(args.xqueue_code_dir):
os.chdir(args.xqueue_code_dir)
# Run migration check command.
output = subprocess.check_output(cmd, shell=True)
if 'Migrating' in output:
raise Exception("Migrations have not been run for {}".format(service))
# Link to available service.
available_file = "{}/{}.conf".format(args.available, service)
link_location = "{}/{}.conf".format(args.enabled, service)
available_file = os.path.join(args.available, "{}.conf".format(service))
link_location = os.path.join(args.enabled, "{}.conf".format(service))
if os.path.exists(available_file):
subprocess.call("ln -sf {} {}".format(available_file, link_location), shell=True)
report.append("Linking service: {}".format(service))
else:
report.append("No conf available for service: {}".format(link_location))
print("\n".join(report))
raise Exception("No conf available for service: {}".format(link_location))
except AWSConnectionError as ae:
msg = "{}: ERROR : {}".format(prefix, ae)
if notify:
notify(msg)
notify(traceback.format_exc())
raise ae
except Exception as e:
msg = "{}: ERROR : {}".format(prefix, e)
print(msg)
if notify:
notify(msg)
else:
msg = "{}: {}".format(prefix, " | ".join(report))
print(msg)
if notify:
notify(msg)
......@@ -94,10 +94,10 @@
- name: install supervisor in its venv
pip: >
name=boto virtualenv="{{supervisor_venv_dir}}" state=present
name={{ item }} virtualenv="{{supervisor_venv_dir}}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ supervisor_user }}"
when: supervisor_service == "supervisor" and disable_edx_services and not devstack
with_items: supervisor_pip_pkgs
- name: create supervisor upstart job
template: >
......@@ -166,4 +166,4 @@
- name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
changed_when: supervisor_update.stdout != ""
changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != ""
......@@ -4,4 +4,5 @@ start on runlevel [2345]
task
setuid {{ supervisor_user }}
exec {{ supervisor_venv_dir }}/bin/python {{ supervisor_app_dir }}/pre_supervisor_checks.py --available={{supervisor_available_dir}} --enabled={{supervisor_cfg_dir}}
exec {{ supervisor_venv_dir }}/bin/python {{ supervisor_app_dir }}/pre_supervisor_checks.py --available={{supervisor_available_dir}} --enabled={{supervisor_cfg_dir}} {% if SUPERVISOR_HIPCHAT_API_KEY is defined %}--hipchat-api-key {{ SUPERVISOR_HIPCHAT_API_KEY }} --hipchat-room {{ SUPERVISOR_HIPCHAT_ROOM }} {% endif %} {% if edxapp_code_dir is defined %}--edxapp-python {{ COMMON_BIN_DIR }}/python.edxapp --edxapp-code-dir {{ edxapp_code_dir }}{% endif %} {% if xqueue_code_dir is defined %}--xqueue-code-dir {{ xqueue_code_dir }} --xqueue-python {{ COMMON_BIN_DIR }}/python.xqueue {% endif %}
......@@ -16,8 +16,8 @@ XQUEUE_S3_PATH_PREFIX: 'sandbox-xqueue'
XQUEUE_LOCAL_LOGLEVEL: 'INFO'
XQUEUE_AWS_ACCESS_KEY_ID : ''
XQUEUE_AWS_SECRET_ACCESS_KEY : ''
XQUEUE_BASIC_AUTH_USER: 'edx'
XQUEUE_BASIC_AUTH_PASSWORD: 'edx'
XQUEUE_BASIC_AUTH_USER: "{{ COMMON_HTPASSWD_USER }}"
XQUEUE_BASIC_AUTH_PASSWORD: "{{ COMMON_HTPASSWD_PASS }}"
XQUEUE_DJANGO_USERS:
lms: 'password'
XQUEUE_RABBITMQ_USER: 'edx'
......@@ -30,6 +30,7 @@ XQUEUE_MYSQL_USER: 'xqueue001'
XQUEUE_MYSQL_PASSWORD: 'password'
XQUEUE_MYSQL_HOST: 'localhost'
XQUEUE_MYSQL_PORT: '3306'
XQUEUE_NEWRELIC_APPNAME: "edX-xqueue"
# Internal vars below this line
#############################################
......
......@@ -91,7 +91,7 @@
- name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
changed_when: supervisor_update.stdout != ""
changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != ""
when: not disable_edx_services
- name: ensure xqueue, consumer is running
......
[program:xqueue]
{% if COMMON_ENABLE_NEWRELIC %}
{% set executable = xqueue_venv_bin + '/newrelic-admin run-program ' + xqueue_venv_bin + '/gunicorn' %}
{% else %}
{% set executable = xqueue_venv_bin + '/gunicorn' %}
{% endif %}
{% if ansible_processor|length > 0 %}
command={{ xqueue_venv_bin }}/gunicorn --preload -b {{ xqueue_gunicorn_host }}:{{ xqueue_gunicorn_port }} -w {{ ansible_processor|length * 2 }} --timeout=300 --pythonpath={{ xqueue_code_dir }} xqueue.wsgi
command={{ executable }} --preload -b {{ xqueue_gunicorn_host }}:{{ xqueue_gunicorn_port }} -w {{ ansible_processor|length * 2 }} --timeout=300 --pythonpath={{ xqueue_code_dir }} xqueue.wsgi
{% else %}
command={{ xqueue_venv_bin }}/gunicorn --preload -b {{ xqueue_gunicorn_host }}:{{ xqueue_gunicorn_port }} -w 2 --timeout=300 --pythonpath={{ xqueue_code_dir }} xqueue.wsgi
command={{ executable }} --preload -b {{ xqueue_gunicorn_host }}:{{ xqueue_gunicorn_port }} -w 2 --timeout=300 --pythonpath={{ xqueue_code_dir }} xqueue.wsgi
{% endif %}
user={{ common_web_user }}
directory={{ xqueue_code_dir }}
environment=PID=/var/tmp/xqueue.pid,PORT={{ xqueue_gunicorn_port }},ADDRESS={{ xqueue_gunicorn_host }},LANG={{ XQUEUE_LANG }},DJANGO_SETTINGS_MODULE=xqueue.aws_settings,SERVICE_VARIANT="xqueue"
environment={% if COMMON_ENABLE_NEWRELIC %}NEW_RELIC_APP_NAME={{ XQUEUE_NEWRELIC_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}PID=/var/tmp/xqueue.pid,PORT={{ xqueue_gunicorn_port }},ADDRESS={{ xqueue_gunicorn_host }},LANG={{ XQUEUE_LANG }},DJANGO_SETTINGS_MODULE=xqueue.aws_settings,SERVICE_VARIANT="xqueue"
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
......
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role xqwatcher
#
XQWATCHER_CONFIG:
HTTP_BASIC_AUTH: ["{{ COMMON_HTPASSWD_USER }}","{{ COMMON_HTPASSWD_PASS }}"]
POLL_TIME: 10
XQWATCHER_COURSES:
- COURSE: "exampleX-101x"
GIT_REPO: "git@github.com:foo/graders-exampleX-101x.git"
GIT_REF: "master"
QUEUE_NAME: "exampleX-101x"
QUEUE_CONFIG:
SERVER: "https://xqueue.example.com"
CONNECTIONS: 5
AUTH: ["user", "password"]
HANDLERS:
- HANDLER: "xqueue_watcher.jailedgrader.JailedGrader"
CODEJAIL:
name: "exampleX-101x"
python_bin: "{{ xqwatcher_venv_base }}/exampleX-101x/bin/python"
user: "exampleX-101x"
KWARGS:
grader_root: "../data/exampleX-101x/graders/"
- COURSE: "exampleX-202x"
GIT_REPO: "git@github.com:foo/graders-exampleX-202x.git"
GIT_REF: "master"
QUEUE_NAME: "exampleX-202x"
QUEUE_CONFIG:
SERVER: "https://xqueue.example.com"
CONNECTIONS: 5
AUTH: ["user", "password"]
HANDLERS:
- HANDLER: "xqueue_watcher.jailedgrader.JailedGrader"
CODEJAIL:
name: "exampleX-202x"
python_bin: "{{ xqwatcher_venv_base }}/exampleX-202x/bin/python"
user: "exampleX-202x"
KWARGS:
grader_root: "../data/exampleX-202x/graders/"
XQWATCHER_GIT_IDENTITY: |
-----BEGIN RSA PRIVATE KEY-----
Your key if you need to access any private repositories
-----END RSA PRIVATE KEY-----
# depends upon Newrelic being enabled via COMMON_ENABLE_NEWRELIC
# and a key being provided via NEWRELIC_LICENSE_KEY
XQWATCHER_NEWRELIC_APPNAME: "your Newrelic appname"
XQWATCHER_PIP_EXTRA_ARGS: "-i {{ COMMON_PYPI_MIRROR_URL }}"
#
#
# vars are namespaced with the module name.
#
xqwatcher_role_name: "xqwatcher"
xqwatcher_service_name: "xqwatcher"
xqwatcher_user: "xqwatcher"
xqwatcher_module: "xqueue_watcher"
xqwatcher_app_dir: "{{ COMMON_APP_DIR }}/{{ xqwatcher_service_name }}"
xqwatcher_home: "{{ COMMON_APP_DIR }}/{{ xqwatcher_service_name }}"
xqwatcher_venv_base: "{{ xqwatcher_home }}/venvs"
xqwatcher_venv_dir: "{{ xqwatcher_venv_base }}/{{ xqwatcher_service_name }}"
xqwatcher_code_dir: "{{ xqwatcher_app_dir }}/src"
xqwatcher_conf_dir: "{{ xqwatcher_home }}"
xqwatcher_data_dir: "{{ xqwatcher_home }}/data"
xqwatcher_source_repo: "git@{{ COMMON_GIT_MIRROR }}:edx/xqueue-watcher.git"
xqwatcher_git_ssh_opts: "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i {{ xqwatcher_git_identity }}"
XQWATCHER_VERSION: "master"
xqwatcher_git_identity: "{{ xqwatcher_app_dir }}/git-identity"
xqwatcher_requirements_file: "{{ xqwatcher_code_dir }}/requirements.txt"
xqwatcher_log_dir: "{{ COMMON_LOG_DIR }}/{{ xqwatcher_service_name }}"
#
# supervisor related config
#
xqwatcher_supervisor_app_dir: "{{ xqwatcher_app_dir }}/supervisor"
xqwatcher_supervisor_data_dir: "{{ COMMON_DATA_DIR }}/{{ xqwatcher_service_name }}"
xqwatcher_supervisor_log_dir: "{{ xqwatcher_log_dir }}"
xqwatcher_supervisor_venv_dir: "{{ xqwatcher_venv_base }}/supervisor"
xqwatcher_supervisor_user: "{{ xqwatcher_user }}"
xqwatcher_supervisor_venv_bin: "{{ xqwatcher_supervisor_venv_dir }}/bin"
xqwatcher_supervisor_ctl: "{{ xqwatcher_supervisor_venv_bin }}/supervisorctl"
xqwatcher_supervisor_cfg_dir: "{{ xqwatcher_supervisor_app_dir }}/conf.d"
xqwatcher_supervisor_available_dir: "{{ xqwatcher_supervisor_app_dir }}/conf.available.d"
#
# OS packages
#
xqwatcher_debian_pkgs:
- apparmor-utils
xqwatcher_redhat_pkgs: []
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role xqwatcher
#
# Overview:
#
#
- name: restart xqwatcher
supervisorctl_local: >
state=restarted
supervisorctl_path={{ xqwatcher_supervisor_ctl }}
config={{ xqwatcher_supervisor_app_dir }}/supervisord.conf
name={{ xqwatcher_service_name }}
when: not disable_edx_services
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Role includes for role xqwatcher
#
# The role name and the service name differ by _ vs -; the latter isn't safe in
# random corners of ansible/jinja/python variable expansion.
dependencies:
- role: edx_service
edx_role_name: "{{ xqwatcher_role_name }}"
edx_service_name: "{{ xqwatcher_service_name }}"
- role: supervisor
supervisor_app_dir: "{{ xqwatcher_supervisor_app_dir }}"
supervisor_data_dir: "{{ xqwatcher_supervisor_data_dir }}"
supervisor_log_dir: "{{ xqwatcher_supervisor_log_dir }}"
supervisor_venv_dir: "{{ xqwatcher_supervisor_venv_dir }}"
supervisor_service_user: "{{ xqwatcher_supervisor_user }}"
supervisor_available_dir: "{{ xqwatcher_supervisor_available_dir }}"
supervisor_service: "supervisor.xqwatcher"
supervisor_http_bind_port: '9003'
---
#
# Tasks related to deploying the code jail for the XQWatcher
#
- name: Create sandboxed user
user: >
name="{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}"
shell=/bin/false
home="/dev/null"
with_items: XQWATCHER_COURSES
#
# Need to disable aa to update the virtualenv
- name: write out apparmor config
template: >
src=etc/apparmor.d/code.jail.j2
dest="/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
mode=0644 owner=root group=root
with_items: XQWATCHER_COURSES
- name: write out sudoers config jail user
template: >
src=etc/sudoers.d/95-jailed-user.j2
dest=/etc/sudoers.d/95-{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}
mode=0440 owner=root group=root validate='visudo -c -f %s'
with_items: XQWATCHER_COURSES
- name: write out sudoers for watcher
template: >
src=etc/sudoers.d/95-xqwatcher.j2
dest=/etc/sudoers.d/95-xqwatcher-{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}
mode=0440 owner=root group=root validate='visudo -c -f %s'
with_items: XQWATCHER_COURSES
- name: put code jail into aa-complain
command: /usr/sbin/aa-complain "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
with_items: XQWATCHER_COURSES
- name: create jail virtualenv
shell: >
/usr/local/bin/virtualenv --no-site-packages {{ xqwatcher_venv_base }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}
with_items: XQWATCHER_COURSES
- name: write out requirements.txt
template: >
src=edx/app/xqwatcher/data/requirements.txt.j2
dest={{ xqwatcher_data_dir }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}-requirements.txt
mode=0440 owner=root group=root
with_items: XQWATCHER_COURSES
- name: install course-specific python requirements
pip: >
requirements="{{ xqwatcher_data_dir }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}-requirements.txt"
virtualenv="{{ xqwatcher_venv_base }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
state=present
extra_args="{{ XQWATCHER_PIP_EXTRA_ARGS }}"
with_items: XQWATCHER_COURSES
- name: change virtualenv ownership to the sandboxed user
shell: >
chown -R {{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }} {{ xqwatcher_venv_base }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}
with_items: XQWATCHER_COURSES
- name: start apparmor service
service: name=apparmor state=started
- name: load code sandbox profile
command: apparmor_parser -r "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
with_items: XQWATCHER_COURSES
#
# Leaves aa in either complain or enforce depending upon the value of the
# CODE_JAIL_COMPLAIN var. Complain mode should never be run in an
# environment where untrusted users can submit code
- name: put code jail into aa-complain
command: /usr/sbin/aa-complain "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
when: CODE_JAIL_COMPLAIN
with_items: XQWATCHER_COURSES
- name: put code sandbox into aa-enforce
command: /usr/sbin/aa-enforce "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
when: CODE_JAIL_COMPLAIN is not defined or not CODE_JAIL_COMPLAIN
\ No newline at end of file
- name: install read-only ssh key
copy: >
content="{{ XQWATCHER_GIT_IDENTITY }}" dest={{ xqwatcher_git_identity }}
owner={{ xqwatcher_user }} group={{ xqwatcher_user }} mode=0600
- name: write out watcher config file
template: >
src=edx/app/xqwatcher/xqwatcher.json.j2
dest={{ xqwatcher_conf_dir }}/xqwatcher.json
mode=0644 owner={{ xqwatcher_user }} group={{ xqwatcher_user }}
- include: deploy_watcher.yml
tags:
- deploy-watcher
- include: deploy_courses.yml
tags:
- deploy-courses
- name: remove read-only ssh key for the content repo
file: path={{ xqwatcher_git_identity }} state=absent
# Iterates over the data structure documented in tasks/main.yml
# checking out the grader code from the repository specified on
# a per queue basis.
- name: checkout grader code
git: >
dest={{ xqwatcher_data_dir }}/{{ item.COURSE }} repo={{ item.GIT_REPO }}
version={{ item.GIT_REF }}
ssh_opts="{{ xqwatcher_git_ssh_opts }}"
with_items: XQWATCHER_COURSES
# Installs the xqueue watcher code and supervisor scripts.
# The watcher can watch one or many queues and dispatch submissions
# to the appropriate grader which lives in a separate SCM repository.
- name: checkout watcher code
git: >
dest={{ xqwatcher_code_dir }} repo={{ xqwatcher_source_repo }} version={{ XQWATCHER_VERSION }}
accept_hostkey=yes
ssh_opts="{{ xqwatcher_git_ssh_opts }}"
- name: install application requirements
pip: >
requirements="{{ xqwatcher_requirements_file }}"
virtualenv="{{ xqwatcher_venv_dir }}" state=present
sudo: true
sudo_user: "{{ xqwatcher_user }}"
- name: write out course config files
template: >
src=edx/app/xqwatcher/conf.d/course.json.j2
dest={{ xqwatcher_conf_dir }}/conf.d/{{ item.COURSE }}.json
mode=0644 owner={{ xqwatcher_user }} group={{ xqwatcher_user }}
with_items: XQWATCHER_COURSES
- name: write supervisord config
template: >
src=edx/app/supervisor/conf.d/xqwatcher.conf.j2
dest="{{ xqwatcher_supervisor_available_dir }}/xqwatcher.conf"
group={{ xqwatcher_user }} mode=0650
- name: enable supervisor script
file: >
src={{ xqwatcher_supervisor_available_dir }}/xqwatcher.conf
dest={{ xqwatcher_supervisor_cfg_dir }}/xqwatcher.conf
state=link
force=yes
when: not disable_edx_services
- name: update supervisor configuration
shell: "{{ xqwatcher_supervisor_ctl }} -c {{ xqwatcher_supervisor_app_dir }}/supervisord.conf update"
when: not disable_edx_services
notify: restart xqwatcher
\ No newline at end of file
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role xqwatcher
#
# Overview:
#
# This play installs a sandboxed pull grader that integrates with XQueue. The
# code for the XQWatcher lives here: https://github.com/edx/xqueue-watcher
#
# Multiple courses can be installed on a single server with distinct sandboxes.
#
# Example play:
#
# A play to install the XQWatcher would look like this:
#
# - name: Deploy xqueue-watcher
# hosts: all
# sudo: True
# gather_facts: True
# vars:
# COMMON_APP_DIR: "/edx/app"
# common_web_group: "www-data"
# roles:
# - aws
# - datadog
# - splunkforwarder
# - newrelic
# - xqwatcher
#
# You would use a command like the following to run the play.
#
# ansible-playbook -i ec2.py ./xqwatcher.yml -e@./example-config.yml
#
# The contents of the example-config.yml would include the queue
# meta data and details related to the repository including the
# grader code.
#
# XQWATCHER_COURSES:
# - COURSE: "exampleX-101x"
# GIT_REPO: "git@github.com:foo/graders-exampleX-101x.git"
# GIT_REF: "master"
# QUEUE_NAME: "exampleX-101x"
# QUEUE_CONFIG:
# SERVER: "https://xqueue.example.com"
# CONNECTIONS: 5
# AUTH: ["user", "password"]
# HANDLERS:
# - HANDLER: "xqueue_watcher.jailedgrader.JailedGrader"
# CODEJAIL:
# name: "exampleX-101x"
# python_bin: "{{ xqwatcher_venv_base }}/exampleX-101x/bin/python"
# user: "exampleX-101x"
# KWARGS:
# grader_root: "../data/exampleX-101x/graders/"
# - COURSE: "exampleX-202x"
# GIT_REPO: "git@github.com:foo/graders-exampleX-202x.git"
# GIT_REF: "master"
# QUEUE_NAME: "exampleX-202x"
# QUEUE_CONFIG:
# SERVER: "https://xqueue.example.com"
# CONNECTIONS: 5
# AUTH: ["user", "password"]
# HANDLERS:
# - HANDLER: "xqueue_watcher.jailedgrader.JailedGrader"
# CODEJAIL:
# name: "exampleX-202x"
# python_bin: "{{ xqwatcher_venv_base }}/exampleX-202x/bin/python"
# user: "exampleX-202x"
# KWARGS:
# grader_root: "../data/exampleX-202x/graders/"
# XQWATCHER_GIT_IDENTITY: |
# -----BEGIN RSA PRIVATE KEY-----
# Your key if you need to access any private repositories
# -----END RSA PRIVATE KEY-----
#
- include: code_jail.yml CODE_JAIL_COMPLAIN=false
- name: create conf dir
file: >
path="{{ xqwatcher_conf_dir }}"
state=directory
owner="{{ xqwatcher_user }}"
group="{{ xqwatcher_user }}"
- name: create conf.d dir
file: >
path="{{ xqwatcher_conf_dir }}/conf.d"
state=directory
owner="{{ xqwatcher_user }}"
group="{{ xqwatcher_user }}"
- include: deploy.yml tags=deploy
; {{ ansible_managed }}
;
{% if COMMON_ENABLE_NEWRELIC %}
{% set executable = xqwatcher_venv_dir + '/bin/newrelic-admin run-program ' + xqwatcher_venv_dir + '/bin/python' %}
{% else %}
{% set executable = xqwatcher_venv_dir + '/bin/python' %}
{% endif %}
[program:{{ xqwatcher_service_name }}]
command={{ executable }} -m {{ xqwatcher_module }} -d {{ xqwatcher_conf_dir }}
process_name=%(program_name)s
user={{ xqwatcher_user }}
directory={{ xqwatcher_code_dir }}
stdout_logfile={{ xqwatcher_supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ xqwatcher_supervisor_log_dir }}/%(program_name)s-stderr.log
environment={% if COMMON_ENABLE_NEWRELIC %}NEW_RELIC_APP_NAME={{ XQWATCHER_NEWRELIC_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}
killasgroup=true
stopasgroup=true
{
"{{ item.QUEUE_NAME }}":
{{ item.QUEUE_CONFIG | to_nice_json }}
}
\ No newline at end of file
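Rendered against the exampleX-101x entry from the defaults earlier in this diff, the template above yields a one-key JSON document mapping the queue name to its config. A sketch of the equivalent in plain Python (QUEUE_CONFIG trimmed for brevity):

import json

course = {
    "QUEUE_NAME": "exampleX-101x",
    "QUEUE_CONFIG": {
        "SERVER": "https://xqueue.example.com",
        "CONNECTIONS": 5,
        "AUTH": ["user", "password"],
    },
}
# One key per course file, exactly as the Jinja template lays it out.
print(json.dumps({course["QUEUE_NAME"]: course["QUEUE_CONFIG"]}, indent=4))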
# {{ ansible_managed }}
{% for requirement in item.PYTHON_REQUIREMENTS %}
{{ requirement.name }}=={{ requirement.version }}
{% endfor %}
{{ XQWATCHER_CONFIG | to_nice_json }}
\ No newline at end of file
#include <tunables/global>
{{ xqwatcher_venv_base }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}/bin/python {
#include <abstractions/base>
{{ xqwatcher_venv_base }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}/** mr,
# TODO: need a way of providing the sandbox packages, e.g.:
# edxapp_code_dir /common/lib/sandbox-packages/** r,
/tmp/codejail-*/ rix,
/tmp/codejail-*/** wrix,
#
# Whitelist particular shared objects from the system
# python installation
#
/usr/lib/python2.7/lib-dynload/_json.so mr,
/usr/lib/python2.7/lib-dynload/_ctypes.so mr,
/usr/lib/python2.7/lib-dynload/_heapq.so mr,
/usr/lib/python2.7/lib-dynload/_io.so mr,
/usr/lib/python2.7/lib-dynload/_csv.so mr,
/usr/lib/python2.7/lib-dynload/datetime.so mr,
/usr/lib/python2.7/lib-dynload/_elementtree.so mr,
/usr/lib/python2.7/lib-dynload/pyexpat.so mr,
#
# Allow access to selections from /proc
#
/proc/*/mounts r,
}
{{ item.QUEUE.HANDLERS[0].CODEJAIL.user }} ALL=({{ item.QUEUE.HANDLERS[0].CODEJAIL.user }}) SETENV:NOPASSWD:{{ xqwatcher_venv_base }}/{{ item.QUEUE.HANDLERS[0].CODEJAIL.name }}/bin/python
{{ item.QUEUE.HANDLERS[0].CODEJAIL.user }} ALL=(ALL) NOPASSWD:/bin/kill
{{ item.QUEUE.HANDLERS[0].CODEJAIL.user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill
{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }} ALL=({{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}) SETENV:NOPASSWD:{{ xqwatcher_venv_base }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}/bin/python
{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }} ALL=(ALL) NOPASSWD:/bin/kill
{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill
{{ xqwatcher_user }} ALL=({{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}) SETENV:NOPASSWD:{{ xqwatcher_venv_base }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}/bin/python
{{ xqwatcher_user }} ALL=(ALL) NOPASSWD:/bin/kill
{{ xqwatcher_user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill
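For the exampleX-101x course from the defaults earlier in this diff, the watcher sudoers template above renders three rules: the xqwatcher user may run the course's jailed python as the sandbox user, and may kill runaway graders. A quick sketch of the rendered output (paths are illustrative):

# Stand-in values; the real ones come from the role defaults.
tmpl = ("{watcher} ALL=({user}) SETENV:NOPASSWD:{venv_base}/{name}/bin/python\n"
        "{watcher} ALL=(ALL) NOPASSWD:/bin/kill\n"
        "{watcher} ALL=(ALL) NOPASSWD:/usr/bin/pkill")
print(tmpl.format(watcher="xqwatcher", user="exampleX-101x",
                  venv_base="/edx/app/xqwatcher/venvs", name="exampleX-101x"))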
......@@ -70,7 +70,7 @@
- name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
changed_when: supervisor_update.stdout != ""
changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != ""
when: not disable_edx_services
- name: ensure xserver is started
......
ansible==1.5.4
ansible==1.5.5
PyYAML==3.11
Jinja2==2.7.2
MarkupSafe==0.21
MarkupSafe==0.23
argparse==1.2.1
boto==2.20.1
boto==2.28.0
ecdsa==0.11
paramiko==1.13.0
paramiko==1.14.0
pycrypto==2.6.1
wsgiref==0.1.2
docopt==0.6.1
python-simple-hipchat==0.2
prettytable==0.7.2
# Import XML Courses from git repos into the CMS.
# Run with sudo and make sure the user can clone
# the course repos.
# Outputs, per course:
#{
# repo_url:
# repo_name:
# org:
# course:
# run:
# disposition:
# version:
#}
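#
# A hypothetical input row, matching the column layout the parser below
# assumes (slug, authoring format, disposition, <unused>, repo url, version):
#
#   org/course/run,xml,on disk,,git@github.com:foo/graders-course.git,master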
import argparse
import re
from os.path import basename
import yaml
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Import XML courses from git repos.")
parser.add_argument("-c", "--courses-csv", required=True,
help="A CSV of xml courses to import.")
args = parser.parse_args()
courses = open(args.courses_csv, 'r')
all_course_data = []
all_xml_mappings = {}
for line in courses:
cols = line.strip().split(',')
slug = cols[0]
author_format = cols[1]
disposition = cols[2]
repo_url = cols[4]
version = cols[5]
if author_format.lower() != 'xml' \
or disposition.lower() == "don't import":
continue
# Checkout w/tilde
org, course, run = slug.split("/")
repo_name = "{}~{}".format(basename(repo_url).rstrip('.git'), run)
course_info = {
"repo_url": repo_url,
"repo_name": repo_name,
"org": org,
"course": course,
"run": run,
"disposition": disposition.lower(),
"version": version,
}
all_course_data.append(course_info)
if disposition.lower() == "on disk":
all_xml_mappings[slug] = 'xml'
edxapp_xml_courses = { "EDXAPP_XML_COURSES": all_course_data, "EDXAPP_XML_MAPPINGS": all_xml_mappings }
print yaml.safe_dump(edxapp_xml_courses, default_flow_style=False)
......@@ -20,7 +20,6 @@ fi
## Install system pre-requisites
##
sudo apt-get install -y build-essential software-properties-common python-software-properties curl git-core libxml2-dev libxslt1-dev python-pip python-apt python-dev
wget https://bitbucket.org/pypa/setuptools/raw/0.8/ez_setup.py -O - | sudo python
sudo pip install --upgrade pip
sudo pip install --upgrade virtualenv
......
......@@ -17,10 +17,19 @@
# - dns_name
# - environment
# - name_tag
env
export PYTHONUNBUFFERED=1
export BOTO_CONFIG=/var/lib/jenkins/${aws_account}.boto
if [[ -z $BUILD_USER ]]; then
BUILD_USER=jenkins
fi
if [[ -z $BUILD_USER_ID ]]; then
BUILD_USER_ID=edx-sandbox
fi
if [[ -z $WORKSPACE ]]; then
dir=$(dirname $0)
source "$dir/ascii-convert.sh"
......@@ -75,11 +84,17 @@ if [[ -z $ami ]]; then
ami="ami-97dbc3fe"
elif [[ $server_type == "ubuntu_12.04" || $server_type == "full_edx_installation_from_scratch" ]]; then
ami="ami-59a4a230"
elif [[ $server_type == "ubuntu_14.04(experimental)" ]]; then
ami="ami-408c7f28"
fi
fi
if [[ -z $instance_type ]]; then
instance_type="m3.medium"
instance_type="m1.medium"
fi
if [[ -z $enable_monitoring ]]; then
enable_monitoring="false"
fi
deploy_host="${dns_name}.${dns_zone}"
......@@ -106,22 +121,50 @@ migrate_db: "yes"
openid_workaround: True
rabbitmq_ip: "127.0.0.1"
rabbitmq_refresh: True
COMMON_HOSTNAME: edx-server
COMMON_HOSTNAME: $dns_name
COMMON_DEPLOYMENT: edx
COMMON_ENVIRONMENT: sandbox
# User provided extra vars
$extra_vars
EOF
if [[ $basic_auth == "true" ]]; then
# vars specific to provisioning added to $extra_vars_file
cat << EOF_AUTH >> $extra_vars_file
NGINX_HTPASSWD_USER: $auth_user
NGINX_HTPASSWD_PASS: $auth_pass
COMMON_HTPASSWD_USER: $auth_user
COMMON_HTPASSWD_PASS: $auth_pass
XQUEUE_BASIC_AUTH_USER: $auth_user
XQUEUE_BASIC_AUTH_PASSWORD: $auth_pass
EOF_AUTH
fi
if [[ $edx_internal == "true" ]]; then
# if this isn't a public server add the github
# user and set edx_internal to True so that
# xserver is installed
cat << EOF >> $extra_vars_file
EDXAPP_PREVIEW_LMS_BASE: preview.${deploy_host}
EDXAPP_LMS_BASE: ${deploy_host}
EDXAPP_CMS_BASE: studio.${deploy_host}
EDXAPP_SITE_NAME: ${deploy_host}
CERTS_DOWNLOAD_URL: "http://${deploy_host}:18090"
CERTS_VERIFY_URL: "http://${deploy_host}:18090"
edx_internal: True
COMMON_USER_INFO:
- name: ${github_username}
github: true
type: admin
USER_CMD_PROMPT: '[$name_tag] '
COMMON_ENABLE_NEWRELIC: $enable_monitoring
COMMON_ENABLE_DATADOG: $enable_monitoring
FORUM_NEW_RELIC_ENABLE: $enable_monitoring
EDXAPP_NEWRELIC_LMS_APPNAME: sandbox-${dns_name}-edxapp-lms
EDXAPP_NEWRELIC_CMS_APPNAME: sandbox-${dns_name}-edxapp-cms
XQUEUE_NEWRELIC_APPNAME: sandbox-${dns_name}-xqueue
FORUM_NEW_RELIC_APP_NAME: sandbox-${dns_name}-forums
EOF
fi
if [[ $recreate == "true" ]]; then
# vars specific to provisioning added to $extra_vars_file
......@@ -147,25 +190,6 @@ rabbitmq_refresh: True
elb: $elb
EOF
if [[ $edx_internal == "true" ]]; then
# if this isn't a public server add the github
# user and set edx_internal to True so that
# xserver is installed
cat << EOF >> $extra_vars_file
EDXAPP_PREVIEW_LMS_BASE: preview.${deploy_host}
EDXAPP_LMS_BASE: ${deploy_host}
EDXAPP_CMS_BASE: studio.${deploy_host}
EDXAPP_SITE_NAME: ${deploy_host}
CERTS_DOWNLOAD_URL: "http://${deploy_host}:18090"
CERTS_VERIFY_URL: "http://${deploy_host}:18090"
edx_internal: True
COMMON_USER_INFO:
- name: ${github_username}
github: true
type: admin
USER_CMD_PROMPT: '[$name_tag] '
EOF
fi
# run the tasks to launch an ec2 instance from AMI
......@@ -193,8 +217,7 @@ if [[ $reconfigure == "true" || $server_type == "full_edx_installation_from_scra
ansible-playbook edx_continuous_integration.yml -i "${deploy_host}," $extra_var_arg --user ubuntu
fi
if [[ $server_type == "full_edx_installation" ]]; then
if [[ $reconfigure != "true" && $server_type == "full_edx_installation" ]]; then
# Run deploy tasks for the roles selected
for i in $roles; do
if [[ ${deploy[$i]} == "true" ]]; then
......
......@@ -9,5 +9,3 @@ BUILD_USER_LAST_NAME=$(ascii_convert $BUILD_USER_LAST_NAME)
BUILD_USER_FIRST_NAME=$(ascii_convert $BUILD_USER_FIRST_NAME)
BUILD_USER_ID=$(ascii_convert $BUILD_USER_ID)
BUILD_USER=$(ascii_convert $BUILD_USER)
......@@ -83,6 +83,34 @@ if [[ "$use_blessed" == "true" ]]; then
blessed_params="--blessed"
fi
playbookdir_params=""
if [[ ! -z "$playbook_dir" ]]; then
playbookdir_params="--playbook-dir $playbook_dir"
fi
configurationprivate_params=""
if [[ ! -z "$configurationprivaterepo" ]]; then
configurationprivate_params="--configuration-private-repo $configurationprivaterepo"
if [[ ! -z "$configurationprivateversion" ]]; then
configurationprivate_params="$configurationprivate_params --configuration-private-version $configurationprivateversion"
fi
fi
stackname_params=""
if [[ ! -z "$playbook_dir" ]]; then
stackname_params="--playbook-dir $playbook_dir"
fi
hipchat_params=""
if [[ ! -z "$hipchat_room_id" ]] && [[ ! -z "$hipchat_api_token" ]]; then
hipchat_params="--hipchat-room-id $hipchat_room_id --hipchat-api-token $hipchat_api_token"
fi
cleanup_params=""
if [[ "$cleanup" == "false" ]]; then
cleanup_params="--no-cleanup"
fi
cd configuration
pip install -r requirements.txt
......@@ -94,4 +122,4 @@ cat /var/tmp/$BUILD_ID-refs.yml
echo "$vars" > /var/tmp/$BUILD_ID-extra-vars.yml
cat /var/tmp/$BUILD_ID-extra-vars.yml
python -u abbey.py -p $play -t c1.medium -d $deployment -e $environment -i /edx/var/jenkins/.ssh/id_rsa $base_params $blessed_params --vars /var/tmp/$BUILD_ID-extra-vars.yml --refs /var/tmp/$BUILD_ID-refs.yml -c $BUILD_NUMBER --configuration-version $configuration --configuration-secure-version $configuration_secure -k $jenkins_admin_ec2_key --configuration-secure-repo $jenkins_admin_configuration_secure_repo
python -u abbey.py -p $play -t c3.large -d $deployment -e $environment -i /edx/var/jenkins/.ssh/id_rsa $base_params $blessed_params $playbookdir_params --vars /var/tmp/$BUILD_ID-extra-vars.yml --refs /var/tmp/$BUILD_ID-refs.yml -c $BUILD_NUMBER --configuration-version $configuration --configuration-secure-version $configuration_secure -k $jenkins_admin_ec2_key --configuration-secure-repo $jenkins_admin_configuration_secure_repo $configurationprivate_params $hipchat_params $cleanup_params
#!/usr/bin/env bash
set -x
if [[
-z $WORKSPACE ||
-z $environment ||
-z $deployment
]]; then
echo "Environment incorrect for this wrapper script"
env
exit 1
fi
env
cd $WORKSPACE/edx-platform
# install requirements
# These requirements will be installed into the shiningpanda
# virtualenv on the jenkins server and are necessary to
# run migrations locally
pip install --exists-action w -r requirements/edx/pre.txt
pip install --exists-action w -r requirements/edx/base.txt
pip install --exists-action w -r requirements/edx/post.txt
pip install --exists-action w -r requirements/edx/repo.txt
pip install --exists-action w -r requirements/edx/github.txt
pip install --exists-action w -r requirements/edx/local.txt
cd $WORKSPACE/configuration/playbooks/edx-east
if [[ -f ${WORKSPACE}/configuration-secure/ansible/vars/${deployment}.yml ]]; then
extra_var_args+=" -e@${WORKSPACE}/configuration-secure/ansible/vars/${deployment}.yml"
fi
if [[ $db_dry_run=="false" ]]; then
# Set this to an empty string if db_dry_run is
# not set. By default the db_dry_run var is
# set to --db-dry-run
extra_var_args+=" -e db_dry_run=''"
fi
extra_var_args+=" -e@${WORKSPACE}/configuration-secure/ansible/vars/${environment}-${deployment}.yml"
extra_var_args+=" -e edxapp_app_dir=${WORKSPACE}"
extra_var_args+=" -e edxapp_code_dir=${WORKSPACE}/edx-platform"
extra_var_args+=" -e edxapp_user=jenkins"
# Generate the json configuration files
ansible-playbook -c local $extra_var_args --tags edxapp_cfg -i localhost, -s -U jenkins edxapp.yml
# Run migrations and replace literal '\n' with actual newlines to make the output
# easier to read
ansible-playbook -v -c local $extra_var_args -i localhost, -s -U jenkins edxapp_migrate.yml | sed 's/\\n/\n/g'
#!/usr/bin/env bash
# Ansible provisioning wrapper script that
# assumes the following parameters set
# as environment variables
#
# - github_username
# - server_type
# - instance_type
# - region
# - aws_account
# - keypair
# - ami
# - root_ebs_size
# - security_group
# - dns_zone
# - dns_name
# - environment
# - name_tag
export PYTHONUNBUFFERED=1
export BOTO_CONFIG=/var/lib/jenkins/${aws_account}.boto
if [[ -z $WORKSPACE ]]; then
dir=$(dirname $0)
source "$dir/ascii-convert.sh"
else
source "$WORKSPACE/configuration/util/jenkins/ascii-convert.sh"
fi
if [[ ! -f $BOTO_CONFIG ]]; then
echo "AWS credentials not found for $aws_account"
exit 1
fi
if [[ -z $sandbox_to_update ]]; then
sandbox_to_update="${BUILD_USER_ID}.m.sandbox.edx.org"
fi
cd $WORKSPACE/configuration/playbooks/edx-east
ansible-playbook connect_sandbox.yml -i $sandbox_to_update, -e@${WORKSPACE}/configuration-secure/ansible/vars/clone-db.yml -e EDXAPP_MYSQL_HOST=$EDXAPP_MYSQL_HOST --user ubuntu -v
#!/usr/bin/env bash
# A simple wrapper to run ansible from Jenkins.
# This assumes that you will be running on one or more servers
# that are tagged with Name: <environment>-<deployment>-<play>
if [[
-z $WORKSPACE ||
-z $environment_tag ||
-z $deployment_tag ||
-z $play_tag ||
-z $ansible_play ||
-z $elb_pre_post ||
-z $first_in ||
-z $serial_count
]]; then
echo "Environment incorrect for this wrapper script"
env
exit 1
fi
cd $WORKSPACE/configuration/playbooks/edx-east
ansible_extra_vars+=" -e serial_count=$serial_count -e elb_pre_post=$elb_pre_post"
if [ ! -z "$extra_vars" ]; then
ansible_extra_vars+=" -e $extra_vars"
fi
if [[ $run_migrations == "true" ]]; then
ansible_extra_vars+=" -e migrate_db=yes"
fi
if [[ $check_mode == "true" ]]; then
ansible_extra_vars+=" --check"
fi
if [[ ! -z "$run_on_single_ip" ]]; then
ansible_limit+="$run_on_single_ip"
else
if [[ $first_in == "true" ]]; then
ansible_limit+="first_in_"
fi
ansible_limit+="tag_Name_${environment_tag}-${deployment_tag}-${play_tag}"
fi
if [[ ! -z "$task_tags" ]]; then
ansible_task_tags+="--tags $task_tags"
fi
export PYTHONUNBUFFERED=1
env
ansible-playbook -v -D -u ubuntu $ansible_play -i ./ec2.py $ansible_task_tags --limit $ansible_limit -e@"$WORKSPACE/configuration-secure/ansible/vars/${deployment_tag}.yml" -e@"$WORKSPACE/configuration-secure/ansible/vars/${environment_tag}-${deployment_tag}.yml" $ansible_extra_vars
#!/bin/bash
# A very simple check that every json file in the project at least parses.
# If one does not, a cryptic but possibly helpful message is produced.
# Save current directory so we can come back; change to repo root
STARTED_FROM=`pwd`
cd $(git rev-parse --show-toplevel)
# Do very basic syntax check of every json file to make sure it's valid format
for file in `find . -iname '*.json'`; do
cat $file | python -m json.tool 1>/dev/null 2>json_complaint.err;
retval=$?
if [ $retval != 0 ]; then
echo "JSON errors in $file"
cat json_complaint.err
rm -f json_complaint.err
cd $STARTED_FROM
exit $retval;
fi
done
# Everything went ok!
rm -f json_complaint.err
cd $STARTED_FROM
exit 0
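For comparison, a minimal Python equivalent of the shell loop above (a sketch, not part of the repo):

import json
import os
import sys

# Walk the tree and fail on the first json file that doesn't parse.
for dirpath, _, files in os.walk('.'):
    for name in files:
        if name.lower().endswith('.json'):
            path = os.path.join(dirpath, name)
            try:
                json.load(open(path))
            except ValueError as exc:
                sys.exit("JSON errors in {}: {}".format(path, exc))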
......@@ -11,6 +11,7 @@ try:
from boto.vpc import VPCConnection
from boto.exception import NoAuthHandlerFound, EC2ResponseError
from boto.sqs.message import RawMessage
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
except ImportError:
print "boto required for script"
sys.exit(1)
......@@ -47,11 +48,13 @@ def parse_args():
parser.add_argument('--noop', action='store_true',
help="don't actually run the cmds",
default=False)
parser.add_argument('--secure-vars', required=False,
metavar="SECURE_VAR_FILE",
parser.add_argument('--secure-vars-file', required=False,
metavar="SECURE_VAR_FILE", default=None,
help="path to secure-vars from the root of "
"the secure repo (defaults to ansible/"
"vars/ENVIRONMENT-DEPLOYMENT.yml)")
"the secure repo. By default <deployment>.yml and "
"<environment>-<deployment>.yml will be used if they "
"exist in <secure-repo>/ansible/vars/. This secure file "
"will be used in addition to these if they exist.")
parser.add_argument('--stack-name',
help="defaults to ENVIRONMENT-DEPLOYMENT",
metavar="STACK_NAME",
......@@ -59,6 +62,10 @@ def parse_args():
parser.add_argument('-p', '--play',
help='play name without the yml extension',
metavar="PLAY", required=True)
parser.add_argument('--playbook-dir',
help='directory to find playbooks in',
default='configuration/playbooks/edx-east',
metavar="PLAYBOOKDIR", required=False)
parser.add_argument('-d', '--deployment', metavar="DEPLOYMENT",
required=True)
parser.add_argument('-e', '--environment', metavar="ENVIRONMENT",
......@@ -80,6 +87,12 @@ def parse_args():
parser.add_argument('--configuration-secure-repo', required=False,
default="git@github.com:edx-ops/prod-secure",
help="repo to use for the secure files")
parser.add_argument('--configuration-private-version', required=False,
help="configuration-private repo branch(no hashes)",
default="master")
parser.add_argument('--configuration-private-repo', required=False,
default="git@github.com:edx-ops/ansible-private",
help="repo to use for private playbooks")
parser.add_argument('-c', '--cache-id', required=True,
help="unique id to use as part of cache prefix")
parser.add_argument('-i', '--identity', required=False,
......@@ -109,6 +122,10 @@ def parse_args():
parser.add_argument("--hipchat-api-token", required=False,
default=None,
help="The API token for Hipchat integration")
parser.add_argument("--root-vol-size", required=False,
default=50,
help="The size of the root volume to use for the "
"abbey instance.")
group = parser.add_mutually_exclusive_group()
group.add_argument('-b', '--base-ami', required=False,
......@@ -136,6 +153,7 @@ def get_instance_sec_group(vpc_id):
return grp_details[0].id
def get_blessed_ami():
images = ec2.get_all_images(
filters={
......@@ -152,6 +170,7 @@ def get_blessed_ami():
return images[0].id
def create_instance_args():
"""
Looks up security group, subnet
......@@ -193,6 +212,7 @@ secure_identity="$base_dir/secure-identity"
git_ssh="$base_dir/git_ssh.sh"
configuration_version="{configuration_version}"
configuration_secure_version="{configuration_secure_version}"
configuration_private_version="{configuration_private_version}"
environment="{environment}"
deployment="{deployment}"
play="{play}"
......@@ -201,14 +221,18 @@ git_repo_name="configuration"
git_repo="https://github.com/edx/$git_repo_name"
git_repo_secure="{configuration_secure_repo}"
git_repo_secure_name="{configuration_secure_repo_basename}"
secure_vars_file="$base_dir/$git_repo_secure_name/{secure_vars}"
git_repo_private="{configuration_private_repo}"
git_repo_private_name=$(basename $git_repo_private .git)
secure_vars_file={secure_vars_file}
environment_deployment_secure_vars="$base_dir/$git_repo_secure_name/ansible/vars/{environment}-{deployment}.yml"
deployment_secure_vars="$base_dir/$git_repo_secure_name/ansible/vars/{deployment}.yml"
instance_id=\\
$(curl http://169.254.169.254/latest/meta-data/instance-id 2>/dev/null)
instance_ip=\\
$(curl http://169.254.169.254/latest/meta-data/local-ipv4 2>/dev/null)
instance_type=\\
$(curl http://169.254.169.254/latest/meta-data/instance-type 2>/dev/null)
playbook_dir="$base_dir/configuration/playbooks/edx-east"
playbook_dir="$base_dir/{playbook_dir}"
if $config_secure; then
git_cmd="env GIT_SSH=$git_ssh git"
......@@ -221,9 +245,14 @@ SQS_NAME={queue_name}
SQS_REGION=us-east-1
SQS_MSG_PREFIX="[ $instance_id $instance_ip $environment-$deployment $play ]"
PYTHONUNBUFFERED=1
HIPCHAT_TOKEN={hipchat_token}
HIPCHAT_ROOM={hipchat_room}
HIPCHAT_MSG_PREFIX="$environment-$deployment-$play: "
HIPCHAT_FROM="ansible-$instance_id"
HIPCHAT_MSG_COLOR=$(echo -e "yellow\\ngreen\\npurple\\ngray" | shuf | head -1)
# environment for ansible
export ANSIBLE_ENABLE_SQS SQS_NAME SQS_REGION SQS_MSG_PREFIX PYTHONUNBUFFERED
export HIPCHAT_TOKEN HIPCHAT_ROOM HIPCHAT_MSG_PREFIX HIPCHAT_FROM HIPCHAT_MSG_COLOR
if [[ ! -x /usr/bin/git || ! -x /usr/bin/pip ]]; then
echo "Installing pkg dependencies"
......@@ -270,6 +299,10 @@ EDXAPP_UPDATE_STATIC_FILES_KEY: true
edxapp_dynamic_cache_key: {deployment}-{environment}-{play}-{cache_id}
disable_edx_services: true
# abbey should never take instances in
# and out of elbs
elb_pre_post: false
EOF
chmod 400 $secure_identity
......@@ -286,33 +319,64 @@ if $config_secure; then
cd $base_dir
fi
if [[ ! -z $git_repo_private ]]; then
$git_cmd clone $git_repo_private $git_repo_private_name
cd $git_repo_private_name
$git_cmd checkout $configuration_private_version
cd $base_dir
fi
cd $base_dir/$git_repo_name
sudo pip install -r requirements.txt
cd $playbook_dir
ansible-playbook -vvvv -c local -i "localhost," $play.yml -e@$secure_vars_file -e@$extra_vars
ansible-playbook -vvvv -c local -i "localhost," stop_all_edx_services.yml -e@$secure_vars_file -e@$extra_vars
if [[ -r "$deployment_secure_vars" ]]; then
extra_args_opts+=" -e@$deployment_secure_vars"
fi
if [[ -r "$environment_deployment_secure_vars" ]]; then
extra_args_opts+=" -e@$environment_deployment_secure_vars"
fi
if [[ "$secure_vars_file" != "false" ]]; then
extra_args_opts+=" -e@$secure_vars_file"
fi
extra_args_opts+=" -e@$extra_vars"
ansible-playbook -vvvv -c local -i "localhost," $play.yml $extra_args_opts
ansible-playbook -vvvv -c local -i "localhost," stop_all_edx_services.yml $extra_args_opts
rm -rf $base_dir
""".format(
hipchat_token=args.hipchat_api_token,
hipchat_room=args.hipchat_room_id,
configuration_version=args.configuration_version,
configuration_secure_version=args.configuration_secure_version,
configuration_secure_repo=args.configuration_secure_repo,
configuration_secure_repo_basename=os.path.basename(
args.configuration_secure_repo),
configuration_private_version=args.configuration_private_version,
configuration_private_repo=args.configuration_private_repo,
environment=args.environment,
deployment=args.deployment,
play=args.play,
playbook_dir=args.playbook_dir,
config_secure=config_secure,
identity_contents=identity_contents,
queue_name=run_id,
extra_vars_yml=extra_vars_yml,
git_refs_yml=git_refs_yml,
secure_vars=secure_vars,
secure_vars_file=secure_vars_file,
cache_id=args.cache_id)
mapping = BlockDeviceMapping()
root_vol = BlockDeviceType(size=args.root_vol_size)
mapping['/dev/sda1'] = root_vol
ec2_args = {
'security_group_ids': [security_group_id],
'subnet_id': subnet_id,
......@@ -321,7 +385,7 @@ rm -rf $base_dir
'instance_type': args.instance_type,
'instance_profile_name': args.role_name,
'user_data': user_data,
'block_device_map': mapping,
}
return ec2_args
......@@ -376,7 +440,7 @@ def poll_sqs_ansible():
now = int(time.time())
if buf:
try:
if (now - max([msg['recv_ts'] for msg in buf])) > args.msg_delay:
if (now - min([msg['recv_ts'] for msg in buf])) > args.msg_delay:
# sort by TS instead of recv_ts
# because the sqs timestamp is not as
# accurate
......@@ -491,6 +555,7 @@ def create_ami(instance_id, name, description):
return image_id
def launch_and_configure(ec2_args):
"""
Creates an sqs queue, launches an ec2 instance,
......@@ -580,13 +645,14 @@ def launch_and_configure(ec2_args):
return run_summary, ami
def send_hipchat_message(message):
# If hipchat is configured, send the details to the specified room
if args.hipchat_api_token and args.hipchat_room_id:
import hipchat
try:
hipchat = hipchat.HipChat(token=args.hipchat_api_token)
hipchat.message_room(args.hipchat_room_id,'AbbeyNormal',
hipchat.message_room(args.hipchat_room_id, 'AbbeyNormal',
message)
except Exception as e:
print("Hipchat messaging resulted in an error: %s." % e)
......@@ -615,21 +681,28 @@ if __name__ == '__main__':
git_refs_yml = ""
git_refs = {}
if args.secure_vars:
secure_vars = args.secure_vars
if args.secure_vars_file:
# explicit path to a single
# secure var file
secure_vars_file = args.secure_vars_file
else:
secure_vars = "ansible/vars/{}-{}.yml".format(
args.environment, args.deployment)
secure_vars_file = 'false'
if args.stack_name:
stack_name = args.stack_name
else:
stack_name = "{}-{}".format(args.environment, args.deployment)
try:
sqs = boto.sqs.connect_to_region(args.region)
ec2 = boto.ec2.connect_to_region(args.region)
except NoAuthHandlerFound:
print 'You must be able to connect to sqs and ec2 to use this script'
print 'Unable to connect to ec2 in region :{}'.format(args.region)
sys.exit(1)
try:
sqs = boto.sqs.connect_to_region(args.region)
except NoAuthHandlerFound:
print 'Unable to connect to sqs in region :{}'.format(args.region)
sys.exit(1)
if args.blessed:
......@@ -661,8 +734,7 @@ if __name__ == '__main__':
run[0], run[1] / 60, run[1] % 60)
print "AMI: {}".format(ami)
message = 'Finished baking AMI {image_id} for {environment} ' \
'{deployment} {play}.'.format(
message = 'Finished baking AMI {image_id} for {environment} {deployment} {play}.'.format(
image_id=ami,
environment=args.environment,
deployment=args.deployment,
......
#!/usr/bin/env python
import boto
import boto.route53
import boto.route53.record
import boto.ec2.elb
import boto.rds2
import time
from argparse import ArgumentParser, RawTextHelpFormatter
import datetime
import sys
from vpcutil import rds_subnet_group_name_for_stack_name, all_stack_names
import os
description = """
Creates a new RDS instance by restoring the source db to a
point in time, using the latest available backup.
The new db will be the same size as the original.
The db name will remain the same; the master db password
will be changed to the value given on the command line.
If stack-name is provided the RDS instance will be launched
in the VPC that corresponds to that name.
The new instance id defaults to "from-<source db name>-<human date>-<ts>".
A new DNS entry will be created for the RDS instance when one is
provided on the command line.
"""
RDS_SIZES = [
'db.m1.small',
'db.m1.large',
'db.m1.xlarge',
'db.m2.xlarge',
'db.m2.2xlarge',
'db.m2.4xlarge',
]
# These are the groups for the different
# stack names that will be assigned once
# the corresponding db is cloned
SG_GROUPS = {
'stage-edx': 'sg-d2f623b7',
}
# This group must already be created
# and allows for full access to port
# 3306 from within the vpc.
# This group is assigned temporarily
# for cleaning the db
SG_GROUPS_FULL = {
'stage-edx': 'sg-0abf396f',
}
def parse_args(args=sys.argv[1:]):
stack_names = all_stack_names()
rds = boto.rds2.connect_to_region('us-east-1')
dbs = [db['DBInstanceIdentifier']
for db in rds.describe_db_instances()['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']]
parser = ArgumentParser(description=description, formatter_class=RawTextHelpFormatter)
parser.add_argument('-s', '--stack-name', choices=stack_names,
default=None,
help='Stack name for where you want this RDS instance launched')
parser.add_argument('-t', '--type', choices=RDS_SIZES,
default='db.m1.small', help='RDS size to create instances of')
parser.add_argument('-d', '--db-source', choices=dbs,
default=u'stage-edx', help="source db to clone")
parser.add_argument('-p', '--password',
help="password for the new database", metavar="NEW PASSWORD")
parser.add_argument('-r', '--region', default='us-east-1',
help="region to connect to")
parser.add_argument('--dns',
help="dns entry for the new rds instance")
parser.add_argument('--clean-wwc', action="store_true",
default=False,
help="clean the wwc db after launching it into the vpc, removing sensitive data")
parser.add_argument('--clean-prod-grader', action="store_true",
default=False,
help="clean the prod_grader db after launching it into the vpc, removing sensitive data")
parser.add_argument('--dump', action="store_true",
default=False,
help="create a sql dump after launching it into the vpc")
parser.add_argument('--secret-var-file',
help="using a secret var file run ansible against the host to update db users")
return parser.parse_args(args)
def wait_on_db_status(db_name, region='us-east-1', wait_on='available', aws_id=None, aws_secret=None):
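"""
Poll the RDS API until the named instance reaches the wait_on
status, printing a dot every couple of seconds while waiting.
"""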
rds = boto.rds2.connect_to_region(region)
while True:
statuses = rds.describe_db_instances(db_name)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']
if len(statuses) > 1:
raise Exception("More than one instance returned for {0}".format(db_name))
if statuses[0]['DBInstanceStatus'] == wait_on:
break
sys.stdout.write(".")
sys.stdout.flush()
time.sleep(2)
return
if __name__ == '__main__':
args = parse_args()
sanitize_wwc_sql_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sanitize-db-wwc.sql")
sanitize_prod_grader_sql_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sanitize-db-prod_grader.sql")
play_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../playbooks/edx-east")
rds = boto.rds2.connect_to_region(args.region)
restore_dbid = 'from-{0}-{1}-{2}'.format(args.db_source, datetime.date.today(), int(time.time()))
restore_args = dict(
source_db_instance_identifier=args.db_source,
target_db_instance_identifier=restore_dbid,
use_latest_restorable_time=True,
db_instance_class=args.type,
)
if args.stack_name:
subnet_name = rds_subnet_group_name_for_stack_name(args.stack_name)
restore_args['db_subnet_group_name'] = subnet_name
rds.restore_db_instance_to_point_in_time(**restore_args)
wait_on_db_status(restore_dbid)
db_host = rds.describe_db_instances(restore_dbid)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances'][0]['Endpoint']['Address']
if args.password or args.stack_name:
modify_args = dict(
apply_immediately=True
)
if args.password:
modify_args['master_user_password'] = args.password
if args.stack_name:
modify_args['vpc_security_group_ids'] = [SG_GROUPS[args.stack_name], SG_GROUPS_FULL[args.stack_name]]
else:
# dev-edx is the default security group for dbs that
# are not in the vpc, it allows connections from the various
# NAT boxes and from sandboxes
modify_args['db_security_groups'] = ['dev-edx']
# Update the db immediately
rds.modify_db_instance(restore_dbid, **modify_args)
if args.clean_wwc:
# Run the mysql clean sql file
sanitize_cmd = """mysql -u root -p{root_pass} -h{db_host} wwc < {sanitize_wwc_sql_file} """.format(
root_pass=args.password,
db_host=db_host,
sanitize_wwc_sql_file=sanitize_wwc_sql_file)
print("Running {}".format(sanitize_cmd))
os.system(sanitize_cmd)
if args.clean_prod_grader:
# Run the mysql clean sql file
sanitize_cmd = """mysql -u root -p{root_pass} -h{db_host} prod_grader < {sanitize_prod_grader_sql_file} """.format(
root_pass=args.password,
db_host=db_host,
sanitize_prod_grader_sql_file=sanitize_prod_grader_sql_file)
print("Running {}".format(sanitize_cmd))
os.system(sanitize_cmd)
if args.secret_var_file:
db_cmd = """cd {play_path} && ansible-playbook -c local -i 127.0.0.1, update_edxapp_db_users.yml """ \
"""-e @{secret_var_file} -e "edxapp_db_root_user=root edxapp_db_root_pass={root_pass} """ \
"""EDXAPP_MYSQL_HOST={db_host}" """.format(
root_pass=args.password,
secret_var_file=args.secret_var_file,
db_host=db_host,
play_path=play_path)
print("Running {}".format(db_cmd))
os.system(db_cmd)
if args.dns:
dns_cmd = """cd {play_path} && ansible-playbook -c local -i 127.0.0.1, create_cname.yml """ \
"""-e "dns_zone=edx.org dns_name={dns} sandbox={db_host}" """.format(
play_path=play_path,
dns=args.dns,
db_host=db_host)
print("Running {}".format(dns_cmd))
os.system(dns_cmd)
if args.stack_name:
rds.modify_db_instance(restore_dbid, vpc_security_group_ids=[SG_GROUPS[args.stack_name]])
boto
docopt
python-simple-hipchat
python-simple-hipchat==0.2
SET FOREIGN_KEY_CHECKS=0;
/*
Grader has its own django core tables.
*/
UPDATE auth_user
set
email = concat('success+',cast(id AS CHAR),'@simulator.amazonses.com'),
username = concat('user-',cast(id AS CHAR)),
first_name = concat('user-',cast(id AS CHAR)),
last_name = concat('user-',cast(id AS CHAR)),
password = null,
last_login = null,
date_joined = null
where email not like ('%@edx.org');
SET FOREIGN_KEY_CHECKS=1;
SET FOREIGN_KEY_CHECKS=0;
/*
Remove all password hashes, even for edx employees
*/
UPDATE auth_user
set
password = null;
UPDATE student_passwordhistory
set
password = null;
/*
Rewrite all emails to use the SES simulator, simulating success.
Anonymize other user information. Skip @edx.org accounts
*/
UPDATE auth_user
set
email = concat('success+',cast(id AS CHAR),'@simulator.amazonses.com'),
username = concat('user-',cast(id AS CHAR)),
first_name = concat('user-',cast(id AS CHAR)),
last_name = concat('user-',cast(id AS CHAR)),
last_login = null,
date_joined = null
where email not like ('%@edx.org');
/*
There are a handful of email changes requests captured in flight.
*/
UPDATE student_pendingemailchange
set new_email = concat('success+',cast(user_id AS CHAR),'@simulator.amazonses.com');
/*
Differs slightly to prevent creating duplicate email records.
User id isn't stored here and this email is probably not used for
sending email, but rewriting it cannot hurt.
*/
UPDATE student_courseenrollmentallowed
set email = concat('success+','courseenrollmentallowed_',cast(id AS CHAR),'@simulator.amazonses.com');
/*
Set the name to the userid and empty the other fields
This will also empty user profile data for edx employees
*/
UPDATE auth_userprofile
set
name = concat('user-',cast(id as CHAR)),
language = "",
location = "",
meta = "",
gender = null,
mailing_address = null,
year_of_birth = null,
level_of_education = null,
goals = null,
country = "",
city = null;
SET FOREIGN_KEY_CHECKS=1;
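All rewritten addresses land in Amazon's SES mailbox simulator, which accepts and discards mail, so nothing sent to a sanitized user can escape. A worked example of the UPDATE above for the auth_user row with id 42:

uid = 42
print('success+{}@simulator.amazonses.com'.format(uid))  # rewritten email
print('user-{}'.format(uid))                             # username / names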
......@@ -27,12 +27,27 @@ import boto
import datetime
from vpcutil import vpc_for_stack_name
import xml.dom.minidom
import re
import sys
r53 = boto.connect_route53()
# These are ELBs that we do not want to create dns entries
# for because the instances attached to them are also in
# other ELBs and we want the env-deploy-play tuple which makes
# up the dns name to be unique
ELB_BAN_LIST = [
'Apros',
]
# If the ELB name has the key in its name these plays
# will be used for the DNS CNAME tuple. This is used for
# commoncluster.
ELB_PLAY_MAPPINGS = {
'RabbitMQ': 'rabbitmq',
'Xqueue': 'xqueue',
'Elastic': 'elasticsearch',
}
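# Plays that need extra CNAMEs beyond the standard
# <env>-<deployment>-<play> entry; e.g. the edxapp ELB also answers
# on "courses" and "studio" names.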
extra_play_dns = {"edxapp":["courses","studio"]}
class DNSRecord():
......@@ -44,12 +59,14 @@ class DNSRecord():
self.record_ttl = record_ttl
self.record_values = record_values
def add_or_update_record(dns_records):
"""
Creates or updates a DNS record in a hosted route53
zone
"""
change_set = boto.route53.record.ResourceRecordSets()
record_names = set()
for record in dns_records:
......@@ -60,9 +77,16 @@ def add_or_update_record(dns_records):
record_values: {}
""".format(record.record_name, record.record_type,
record.record_ttl, record.record_values)
if args.noop:
print("Would have updated DNS record:\n{}".format(status_msg))
else:
print("Updating DNS record:\n{}".format(status_msg))
if record.record_name in record_names:
print("Unable to create record for {} with value {} because one already exists!".format(
record.record_values, record.record_name))
sys.exit(1)
record_names.add(record.record_name)
zone_id = record.zone.Id.replace("/hostedzone/", "")
......@@ -71,8 +95,15 @@ def add_or_update_record(dns_records):
old_records = {r.name[:-1]: r for r in records}
# If the record name already points to something.
# Delete the existing connection.
# Delete the existing connection. If the record has
# the same type and name skip it.
if record.record_name in old_records.keys():
if record.record_name + "." == old_records[record.record_name].name and \
record.record_type == old_records[record.record_name].type:
print("Record for {} already exists and is identical, skipping.\n".format(
record.record_name))
continue
if args.force:
print("Deleting record:\n{}".format(status_msg))
change = change_set.add_change(
......@@ -99,11 +130,12 @@ def add_or_update_record(dns_records):
if args.noop:
print("Would have submitted the following change set:\n")
xml_doc = xml.dom.minidom.parseString(change_set.to_xml())
print xml_doc.toprettyxml()
else:
print("Submitting the following change set:\n")
xml_doc = xml.dom.minidom.parseString(change_set.to_xml())
print(xml_doc.toprettyxml(newl='')) # newl='' to remove extra newlines
if not args.noop:
r53.change_rrsets(zone_id, change_set.to_xml())
print("Updated DNS record:\n{}".format(status_msg))
def get_or_create_hosted_zone(zone_name):
......@@ -137,39 +169,42 @@ def get_or_create_hosted_zone(zone_name):
print("Updating parent zone {}".format(parent_zone_name))
dns_records = set()
dns_records.add(DNSRecord(parent_zone,zone_name,'NS',900,zone.NameServers))
dns_records.add(DNSRecord(parent_zone, zone_name, 'NS', 900, zone.NameServers))
add_or_update_record(dns_records)
return zone
def get_security_group_dns(group_name):
# stage-edx-RabbitMQELBSecurityGroup-YB8ZKIZYN1EN
environment,deployment,sec_group,salt = group_name.split('-')
play = sec_group.replace("ELBSecurityGroup","").lower()
environment, deployment, sec_group, salt = group_name.split('-')
play = sec_group.replace("ELBSecurityGroup", "").lower()
return environment, deployment, play
def get_dns_from_instances(elb):
ec2_con = boto.connect_ec2()
def get_dns_from_instances(elb):
for inst in elb.instances:
try:
instance = ec2_con.get_all_instances(
instance_ids=[inst.id])[0].instances[0]
except IndexError:
print("instance {} attached to elb {}".format(inst, elb))
sys.exit(1)
try:
env_tag = instance.tags['environment']
deployment_tag = instance.tags['deployment']
if 'play' in instance.tags:
play_tag = instance.tags['play']
else:
# deprecated, for backwards compatibility
play_tag = instance.tags['role']
break # only need the first instance for tag info
except KeyError:
print("Instance {}, attached to elb {} does not "
"have tags for environment and play".format(elb, inst))
raise
"have a tag for environment, play or deployment".format(inst, elb))
sys.exit(1)
return env_tag, play_tag
return env_tag, deployment_tag, play_tag
def update_elb_rds_dns(zone):
......@@ -182,10 +217,7 @@ def update_elb_rds_dns(zone):
dns_records = set()
elb_con = boto.connect_elb()
rds_con = boto.connect_rds()
vpc_id = vpc_for_stack_name(args.stack_name)
vpc_id = vpc_for_stack_name(args.stack_name, args.aws_id, args.aws_secret)
if not zone and args.noop:
# use a placeholder for zone name
......@@ -196,23 +228,26 @@ def update_elb_rds_dns(zone):
stack_elbs = [elb for elb in elb_con.get_all_load_balancers()
if elb.vpc_id == vpc_id]
for elb in stack_elbs:
env_tag, deployment_tag, play_tag = get_dns_from_instances(elb)
if "RabbitMQ" in elb.source_security_group.name or "ElasticSearch" in elb.source_security_group.name:
env_tag,deployment,play_tag = get_security_group_dns(elb.source_security_group.name)
fqdn = "{}-{}.{}".format(env_tag, play_tag, zone_name)
dns_records.add(DNSRecord(zone,fqdn,'CNAME',600,[elb.dns_name]))
else:
env_tag,play_tag = get_dns_from_instances(elb)
fqdn = "{}-{}.{}".format(env_tag, play_tag, zone_name)
dns_records.add(DNSRecord(zone,fqdn,'CNAME',600,[elb.dns_name]))
# Override the play tag if a substring of the elb name
# is in ELB_PLAY_MAPPINGS
if extra_play_dns.has_key(play_tag):
for name in extra_play_dns.get(play_tag):
fqdn = "{}-{}.{}".format(env_tag, name, zone_name)
dns_records.add(DNSRecord(zone,fqdn,'CNAME',600,[elb.dns_name]))
for key in ELB_PLAY_MAPPINGS.keys():
if key in elb.name:
play_tag = ELB_PLAY_MAPPINGS[key]
break
fqdn = "{}-{}-{}.{}".format(env_tag, deployment_tag, play_tag, zone_name)
# Skip over ELBs if a substring of the ELB name is in
# the ELB_BAN_LIST
if any(name in elb.name for name in ELB_BAN_LIST):
print("Skipping {} because it is on the ELB ban list".format(elb.name))
continue
dns_records.add(DNSRecord(zone, fqdn, 'CNAME', 600, [elb.dns_name]))
stack_rdss = [rds for rds in rds_con.get_all_dbinstances()
if hasattr(rds.subnet_group, 'vpc_id') and
......@@ -220,31 +255,57 @@ def update_elb_rds_dns(zone):
# TODO the current version of the RDS API doesn't support
# looking up RDS instance tags. Hence, we are using the
    # env_tag and deployment_tag that were set via the loop over instances above.
    rds_endpoints = set()
    for rds in stack_rdss:
        endpoint = rds.endpoint[0]
        fqdn = "{}-{}-{}.{}".format(env_tag, deployment_tag, 'rds', zone_name)
        # filter out rds instances that share an endpoint (multi-AZ pairs)
        if endpoint not in rds_endpoints:
            dns_records.add(DNSRecord(zone, fqdn, 'CNAME', 600, [endpoint]))
        rds_endpoints.add(endpoint)
add_or_update_record(dns_records)
if __name__ == "__main__":
    description = """
        Given a cloudformation stack name for an edX stack, set up
        DNS names for the ELBs in the stack.

        DNS entries will be created with the following format:

            <environment>-<deployment>-<play>.edx.org
        """
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-s', '--stack-name', required=True,
help="The name of the cloudformation stack.")
parser.add_argument('-n', '--noop',
help="Don't make any changes.", action="store_true",
default=False)
parser.add_argument('-z', '--zone-name', default="vpc.edx.org",
parser.add_argument('-z', '--zone-name', default="edx.org",
help="The name of the zone under which to "
"create the dns entries.")
parser.add_argument('-f', '--force',
help="Force reuse of an existing name in a zone",
action="store_true",default=False)
action="store_true", default=False)
    parser.add_argument('--aws-id', default=None,
                        help="Read-only AWS access key id used to fetch instance "
                             "information for the account you wish to add entries for.")
    parser.add_argument('--aws-secret', default=None,
                        help="Read-only AWS secret key used to fetch instance "
                             "information for the account you wish to add entries for.")
args = parser.parse_args()
    # Connect to ec2 using the credentials provided on the command line
ec2_con = boto.connect_ec2(args.aws_id, args.aws_secret)
elb_con = boto.connect_elb(args.aws_id, args.aws_secret)
rds_con = boto.connect_rds(args.aws_id, args.aws_secret)
# Connect to route53 using the user's .boto file
r53 = boto.connect_route53()
zone = get_or_create_hosted_zone(args.zone_name)
update_elb_rds_dns(zone)
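# Example invocation (script and stack names are illustrative; --noop prints
# the DNS records that would be created without modifying Route53):
#
#   python vpc_dns.py -s edx-stage-vpc --noop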
import boto
import boto.rds2
import boto.rds

CFN_TAG_KEY = 'aws:cloudformation:stack-name'


def vpc_for_stack_name(stack_name, aws_id=None, aws_secret=None):
    cfn = boto.connect_cloudformation(aws_id, aws_secret)
    resources = cfn.list_stack_resources(stack_name)
    for resource in resources:
        if resource.resource_type == 'AWS::EC2::VPC':
            return resource.physical_resource_id


def stack_name_for_vpc(vpc_name, aws_id, aws_secret):
    vpc = boto.connect_vpc(aws_id, aws_secret)
    resource = vpc.get_all_vpcs(vpc_ids=[vpc_name])[0]
    if CFN_TAG_KEY in resource.tags:
        return resource.tags[CFN_TAG_KEY]
    else:
        msg = "VPC({}) is not part of a cloudformation stack.".format(vpc_name)
        raise Exception(msg)
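# A doctest-style sketch of the two lookups (stack name and VPC id are
# hypothetical):
#
#   >>> vpc_for_stack_name("edx-stage")
#   u'vpc-1234abcd'
#   >>> stack_name_for_vpc("vpc-1234abcd", None, None)
#   u'edx-stage'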
def rds_subnet_group_name_for_stack_name(stack_name, region='us-east-1', aws_id=None, aws_secret=None):
    # Helper function to look up a subnet group name by stack name
    rds = boto.rds2.connect_to_region(region,
                                      aws_access_key_id=aws_id,
                                      aws_secret_access_key=aws_secret)
    vpc = vpc_for_stack_name(stack_name, aws_id, aws_secret)
    for group in rds.describe_db_subnet_groups()['DescribeDBSubnetGroupsResponse']['DescribeDBSubnetGroupsResult']['DBSubnetGroups']:
        if group['VpcId'] == vpc:
            return group['DBSubnetGroupName']
    return None
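# Hypothetical usage (stack and group names are illustrative):
#
#   >>> rds_subnet_group_name_for_stack_name("edx-stage")
#   u'edx-stage-db-subnet-group'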
def all_stack_names(region='us-east-1', aws_id=None, aws_secret=None):
vpc_conn = boto.connect_vpc(aws_id, aws_secret)
    return [vpc.tags[CFN_TAG_KEY] for vpc in vpc_conn.get_all_vpcs()
            if CFN_TAG_KEY in vpc.tags]
......@@ -12,3 +12,4 @@ There are two versions of the stack:
- ``fullstack`` is a production-like configuration running all the services on a single server. https://github.com/edx/configuration/wiki/edX-Production-Stack
- ``devstack`` is designed for local development. Although it uses the same system requirements as in production, it simplifies certain settings to make development more convenient. https://github.com/edx/configuration/wiki/edX-Developer-Stack
- ``test_role`` (under ``base`` directory) is not used for creating test edx instances. Instead, it is used for testing the configuration scripts themselves.
Vagrant.require_version ">= 1.5.3"
unless Vagrant.has_plugin?("vagrant-vbguest")
raise "Please install the vagrant-vbguest plugin by running `vagrant plugin install vagrant-vbguest`"
end
VAGRANTFILE_API_VERSION = "2"
......@@ -6,40 +9,61 @@ MEMORY = 2048
CPU_COUNT = 2
edx_platform_mount_dir = "edx-platform"
themes_mount_dir = "themes"
forum_mount_dir = "cs_comments_service"
ora_mount_dir = "ora"
if ENV['VAGRANT_MOUNT_BASE']
edx_platform_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + edx_platform_mount_dir
themes_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + themes_mount_dir
forum_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + forum_mount_dir
ora_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + ora_mount_dir
end
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  # Creates a devstack from a base Ubuntu 12.04 image for virtualbox
config.vm.box = "precise64"
config.vm.box_url = "http://files.vagrantup.com/precise64.box"
config.vm.network :private_network, ip: "192.168.33.10"
config.vm.network :forwarded_port, guest: 8000, host: 8000
config.vm.network :forwarded_port, guest: 8001, host: 8001
config.vm.network :forwarded_port, guest: 4567, host: 4567
config.vm.network :forwarded_port, guest: 18080, host: 18080
config.vm.network :forwarded_port, guest: 8765, host: 8765
config.vm.network :forwarded_port, guest: 9200, host: 9200
config.ssh.insert_key = true
config.vm.synced_folder ".", "/vagrant", disabled: true
config.vm.synced_folder "#{edx_platform_mount_dir}", "/edx/app/edxapp/edx-platform", :create => true, nfs: true
config.vm.synced_folder "#{forum_mount_dir}", "/edx/app/forum/cs_comments_service", :create => true, nfs: true
config.vm.synced_folder "#{ora_mount_dir}", "/edx/app/ora/ora", :create => true, nfs: true
# Enable X11 forwarding so we can interact with GUI applications
if ENV['VAGRANT_X11']
config.ssh.forward_x11 = true
end
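  # Pick the synced-folder backend: setting VAGRANT_USE_VBOXFS=true uses
  # VirtualBox shared folders, with owner/group set so the edX services can
  # write to the mounts; anything else falls back to NFS.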
if ENV['VAGRANT_USE_VBOXFS'] == 'true'
config.vm.synced_folder "#{edx_platform_mount_dir}", "/edx/app/edxapp/edx-platform",
create: true, owner: "edxapp", group: "www-data"
config.vm.synced_folder "#{themes_mount_dir}", "/edx/app/edxapp/themes",
create: true, owner: "edxapp", group: "www-data"
config.vm.synced_folder "#{forum_mount_dir}", "/edx/app/forum/cs_comments_service",
create: true, owner: "forum", group: "www-data"
config.vm.synced_folder "#{ora_mount_dir}", "/edx/app/ora/ora",
create: true, owner: "ora", group: "www-data"
else
config.vm.synced_folder "#{edx_platform_mount_dir}", "/edx/app/edxapp/edx-platform",
create: true, nfs: true
config.vm.synced_folder "#{themes_mount_dir}", "/edx/app/edxapp/themes",
create: true, nfs: true
config.vm.synced_folder "#{forum_mount_dir}", "/edx/app/forum/cs_comments_service",
create: true, nfs: true
config.vm.synced_folder "#{ora_mount_dir}", "/edx/app/ora/ora",
create: true, nfs: true
end
config.vm.provider :virtualbox do |vb|
vb.customize ["modifyvm", :id, "--memory", MEMORY.to_s]
vb.customize ["modifyvm", :id, "--cpus", CPU_COUNT.to_s]
......@@ -49,13 +73,27 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
end
["vmware_fusion", "vmware_workstation"].each do |vmware_provider|
config.vm.provider vmware_provider do |v, override|
# Override box url to get vmware one
override.vm.box = "precise64_vmware"
override.vm.box_url = "http://files.vagrantup.com/precise64_vmware.box"
v.vmx["memsize"] = MEMORY.to_s
v.vmx["numvcpus"] = CPU_COUNT.to_s
end
end
# Make LC_ALL default to en_US.UTF-8 instead of en_US.
# See: https://github.com/mitchellh/vagrant/issues/1188
config.vm.provision "shell", inline: 'echo \'LC_ALL="en_US.UTF-8"\' > /etc/default/locale'
# Use vagrant-vbguest plugin to make sure Guest Additions are in sync
config.vbguest.auto_reboot = true
config.vbguest.auto_update = true
config.vm.provision :ansible do |ansible|
ansible.playbook = "../../../playbooks/vagrant-devstack.yml"
ansible.verbose = "vvvv"
end
end
......@@ -23,6 +23,15 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
end
["vmware_fusion", "vmware_workstation"].each do |vmware_provider|
config.vm.provider vmware_provider do |v, override|
override.vm.box = "precise64_vmware"
override.vm.box_url = "http://files.vagrantup.com/precise64_vmware.box"
v.vmx["memsize"] = MEMORY.to_s
v.vmx["numvcpus"] = CPU_COUNT.to_s
end
end
# Make LC_ALL default to en_US.UTF-8 instead of en_US.
# See: https://github.com/mitchellh/vagrant/issues/1188
config.vm.provision "shell", inline: 'echo \'LC_ALL="en_US.UTF-8"\' > /etc/default/locale'
......
MEMORY = 2048
CPU_COUNT = 2
Vagrant.configure("2") do |config|
config.vm.box = "precise64"
config.vm.box_url = "http://files.vagrantup.com/precise64.box"
config.vm.network :private_network, ip: "192.168.33.20"
config.vm.network :forwarded_port, guest: 8080, host: 8080
config.vm.provider :virtualbox do |vb|
vb.customize ["modifyvm", :id, "--memory", MEMORY.to_s]
# You can adjust this to the amount of CPUs your system has available
vb.customize ["modifyvm", :id, "--cpus", CPU_COUNT.to_s]
# Allow DNS to work for Ubuntu 12.10 host
# http://askubuntu.com/questions/238040/how-do-i-fix-name-service-for-vagrant-client
vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
end
["vmware_fusion", "vmware_workstation"].each do |vmware_provider|
config.vm.provider vmware_provider do |v, override|
override.vm.box = "precise64_vmware"
override.vm.box_url = "http://files.vagrantup.com/precise64_vmware.box"
v.vmx["memsize"] = MEMORY.to_s
v.vmx["numvcpus"] = CPU_COUNT.to_s
end
end
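  # The role under test is taken from the VAGRANT_ANSIBLE_ROLE environment
  # variable, and VAGRANT_ANSIBLE_VARS_FILE can name an extra-vars file that
  # is handed to ansible-playbook via -e@<file>.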
config.vm.provision :ansible do |ansible|
    # point Vagrant at the playbook you want to run
ansible.playbook = "../../../playbooks/run_role.yml"
ansible.verbose = "extra"
ansible.extra_vars = {
role: ENV['VAGRANT_ANSIBLE_ROLE']
}
if ENV['VAGRANT_ANSIBLE_VARS_FILE']
ansible.raw_arguments = [ '-e@' + ENV['VAGRANT_ANSIBLE_VARS_FILE']]
end
end
end
../../../playbooks/ansible.cfg
\ No newline at end of file
Vagrant.require_version ">= 1.5.3"
unless Vagrant.has_plugin?("vagrant-vbguest")
raise "Please install the vagrant-vbguest plugin by running `vagrant plugin install vagrant-vbguest`"
end
VAGRANTFILE_API_VERSION = "2"
......@@ -20,16 +23,18 @@ cd /edx/app/edx_ansible/edx_ansible/playbooks
# this can cause problems (e.g. looking for templates that no longer exist).
/edx/bin/update configuration release
ansible-playbook -i localhost, -c local vagrant-devstack.yml --tags=deploy -e configuration_version=release
SCRIPT
edx_platform_mount_dir = "edx-platform"
themes_mount_dir = "themes"
forum_mount_dir = "cs_comments_service"
ora_mount_dir = "ora"
if ENV['VAGRANT_MOUNT_BASE']
edx_platform_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + edx_platform_mount_dir
themes_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + themes_mount_dir
forum_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + forum_mount_dir
ora_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + ora_mount_dir
......@@ -38,25 +43,43 @@ end
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# Creates an edX devstack VM from an official release
config.vm.box = "injera-devstack"
config.vm.synced_folder ".", "/vagrant", disabled: true
config.vm.box_url = "http://files.edx.org/vagrant-images/20140418-injera-devstack.box"
config.vm.box = "johnnycake-devstack"
config.vm.box_url = "http://files.edx.org/vagrant-images/20140625-johnnycake-devstack.box"
config.vm.network :private_network, ip: "192.168.33.10"
config.vm.network :forwarded_port, guest: 8000, host: 8000
config.vm.network :forwarded_port, guest: 8001, host: 8001
config.vm.network :forwarded_port, guest: 4567, host: 4567
config.vm.network :forwarded_port, guest: 18080, host: 18080
config.vm.network :forwarded_port, guest: 8765, host: 8765
config.vm.network :forwarded_port, guest: 9200, host: 9200
config.ssh.insert_key = true
config.vm.synced_folder ".", "/vagrant", disabled: true
# Enable X11 forwarding so we can interact with GUI applications
if ENV['VAGRANT_X11']
config.ssh.forward_x11 = true
end
config.vm.synced_folder "#{edx_platform_mount_dir}", "/edx/app/edxapp/edx-platform", :create => true, nfs: true
config.vm.synced_folder "#{forum_mount_dir}", "/edx/app/forum/cs_comments_service", :create => true, nfs: true
config.vm.synced_folder "#{ora_mount_dir}", "/edx/app/ora/ora", :create => true, nfs: true
if ENV['VAGRANT_USE_VBOXFS'] == 'true'
config.vm.synced_folder "#{edx_platform_mount_dir}", "/edx/app/edxapp/edx-platform",
create: true, owner: "edxapp", group: "www-data"
config.vm.synced_folder "#{themes_mount_dir}", "/edx/app/edxapp/themes",
create: true, owner: "edxapp", group: "www-data"
config.vm.synced_folder "#{forum_mount_dir}", "/edx/app/forum/cs_comments_service",
create: true, owner: "forum", group: "www-data"
config.vm.synced_folder "#{ora_mount_dir}", "/edx/app/ora/ora",
create: true, owner: "ora", group: "www-data"
else
config.vm.synced_folder "#{edx_platform_mount_dir}", "/edx/app/edxapp/edx-platform",
create: true, nfs: true
config.vm.synced_folder "#{themes_mount_dir}", "/edx/app/edxapp/themes",
create: true, nfs: true
config.vm.synced_folder "#{forum_mount_dir}", "/edx/app/forum/cs_comments_service",
create: true, nfs: true
config.vm.synced_folder "#{ora_mount_dir}", "/edx/app/ora/ora",
create: true, nfs: true
end
config.vm.provider :virtualbox do |vb|
vb.customize ["modifyvm", :id, "--memory", MEMORY.to_s]
......@@ -67,6 +90,15 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
end
["vmware_fusion", "vmware_workstation"].each do |vmware_provider|
config.vm.provider vmware_provider do |v, override|
override.vm.box = "injera-devstack-vmware"
override.vm.box_url = "http://files.edx.org/vagrant-images/20140418-injera-devstack-vmware.box"
v.vmx["memsize"] = MEMORY.to_s
v.vmx["numvcpus"] = CPU_COUNT.to_s
end
end
# Use vagrant-vbguest plugin to make sure Guest Additions are in sync
config.vbguest.auto_reboot = true
config.vbguest.auto_update = true
......@@ -74,5 +106,4 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# Assume that the base box has the edx_ansible role installed
# We can then tell the Vagrant instance to update itself.
config.vm.provision "shell", inline: $script
end
......@@ -8,8 +8,9 @@ CPU_COUNT = 2
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# Creates an edX fullstack VM from an official release
config.vm.box = "injera-fullstack"
config.vm.box_url = "http://files.edx.org/vagrant-images/20140418-injera-fullstack.box"
config.vm.box = "johnnycake-fullstack"
config.vm.box_url = "http://files.edx.org/vagrant-images/20140625-johnnycake-fullstack.box"
config.vm.synced_folder ".", "/vagrant", disabled: true
config.ssh.insert_key = true
......@@ -25,4 +26,12 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
end
["vmware_fusion", "vmware_workstation"].each do |vmware_provider|
config.vm.provider vmware_provider do |v, override|
override.vm.box = "injera-fullstack-vmware"
override.vm.box_url = "http://files.edx.org/vagrant-images/20140418-injera-fullstack-vmware.box"
v.vmx["memsize"] = MEMORY.to_s
v.vmx["numvcpus"] = CPU_COUNT.to_s
end
end
end