Commit e8d218c1 by John Jarvis

fixing merge conflicts

parents 1280542e bc23df7c
......@@ -29,3 +29,5 @@ Ker Ruben Ramos <xdiscent@gmail.com>
Fred Smith <derf@edx.org>
Wang Peifeng <pku9104038@hotmail.com>
Ray Hooker <ray.hooker@gmail.com>
David Pollack <david@sologourmand.com>
Rodolphe Quiedeville <rodolphe@quiedeville.org>
- Role: Edxapp
- Turn on code sandboxing by default and allow the jailed code to write
  files to the tmp directory created for it by codejail.
- Role: Edxapp
- The repo.txt requirements file is no longer processed in any way. This file was removed from edx-platform
  via pull #3487 (https://github.com/edx/edx-platform/pull/3487).
- Update the CMS_HOSTNAME default to allow any hostname that starts with `studio`, along with `prod-studio` or `stage-studio` (see the illustrative regex after this list).
- Start a change log to keep track of backwards incompatible changes and deprecations.
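
For example, a single nginx `server_name` regex that satisfies the CMS_HOSTNAME rule above (illustrative value; the exact new default may differ):

    CMS_HOSTNAME: '~^((stage|prod)-)?studio.*'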
......@@ -26,9 +26,9 @@ Private Cloud with hosts for the core edX services. This template
will build quite a number of AWS resources that cost money, so please
consider this before you start.
The configuration phase is manged by [Ansible](http://ansible.cc/).
The configuration phase is managed by [Ansible](http://ansible.com/).
We have provided a number of playbooks that will configure each of
the edX service.
the edX services.
This project is a re-write of the current edX provisioning and
configuration tools; we will be migrating features to this project
......@@ -36,3 +36,5 @@ over time, so expect frequent changes.
For more information, including installation instructions, please see the [Configuration Wiki](https://github.com/edx/configuration/wiki).
For info on any large recent changes please see the [change log](https://github.com/edx/configuration/blob/master/CHANGELOG.md).
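
For example, each service playbook can typically be applied on its own; an illustrative invocation (inventory, playbook name, and extra vars depend on your deployment):

    ansible-playbook -c local -i 'localhost,' edxapp.yml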
#
# Overview:
# This play needs to be run per environment-deployment and you will need to
# provide the boto environment and vpc_id as arguments
#
# ansible-playbook -i 'localhost,' ./vpc-migrate-xqwatcher-edge-stage.yml \
# -e 'profile=edge vpc_id=vpc-416f9b24'
#
# Caveats
#
# - This requires ansible 1.6
# - Requires the following branch of Ansible: /e0d/add-instance-profile from
# https://github.com/e0d/ansible.git
# - This play isn't fully idempotent because of an ec2 module update issue
# with ASGs. This can be worked around by deleting the ASG and re-running
# the play
# - The instance_profile_name will need to be created in advance as there
# isn't a way to do so from ansible.
#
# Prerequisites:
# Create an IAM EC2 role
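#
# For example, using the AWS CLI (names are illustrative; trust.json must
# allow ec2.amazonaws.com to assume the role):
#
#   aws iam create-role --role-name xqwatcher \
#       --assume-role-policy-document file://trust.json
#   aws iam create-instance-profile --instance-profile-name xqwatcher
#   aws iam add-role-to-instance-profile \
#       --instance-profile-name xqwatcher --role-name xqwatcher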
#
- name: Add resources for the XQWatcher
hosts: localhost
connection: local
gather_facts: False
tasks:
# ignore_errors is used here because this module is not idempotent
# If tags already exist, the task will fail with the following message
# Tags already exists in subnet
- name: Update subnet tags
ec2_tag:
resource: "{{ item }}"
region: "{{ ec2_region }}"
state: present
tags:
Name: "{{ edp }}-subnet"
play: xqwatcher
immutable_metadata: "{'purpose':'{{ environment }}-{{ deployment }}-internal-{{ play }}','target':'ec2'}"
with_items: subnets
ignore_errors: True
# Fails intermittently with the following error:
# The specified rule does not exist in this security group
- name: Create security group
ec2_group:
profile: "{{ profile }}"
description: "Open up SSH access"
name: "{{ security_group }}"
vpc_id: "{{ vpc_id }}"
region: "{{ ec2_region }}"
rules:
- proto: tcp
from_port: "{{ sec_group_ingress_from_port }}"
to_port: "{{ sec_group_ingress_to_port }}"
cidr_ip: "{{ item }}"
with_items: sec_group_ingress_cidrs
register: created_sec_group
ignore_errors: True
- name: debug
debug:
msg: "Registered created_sec_group: {{ created_sec_group }}"
# instance_profile_name was added by me in my fork
- name: Create the launch configuration
ec2_lc:
profile: "{{ profile }}"
region: "{{ ec2_region }}"
name: "{{ lc_name }}"
image_id: "{{ lc_ami }}"
key_name: "{{ key_name }}"
security_groups: "{{ created_sec_group.results[0].group_id }}"
instance_type: "{{ instance_type }}"
instance_profile_name: "{{ instance_profile_name }}"
volumes:
- device_name: "/dev/sda1"
volume_size: "{{ instance_volume_size }}"
- name: Create ASG
ec2_asg:
profile: "{{ profile }}"
region: "{{ ec2_region }}"
name: "{{ asg_name }}"
launch_config_name: "{{ lc_name }}"
min_size: 0
max_size: 0
desired_capacity: 0
vpc_zone_identifier: "{{ subnets|join(',') }}"
instance_tags:
Name: "{{ env }}-{{ deployment }}-{{ play }}"
autostack: "true"
environment: "{{ env }}"
deployment: "{{ deployment }}"
play: "{{ play }}"
services: "{{ play }}"
register: asg
- name: debug
debug:
msg: "DEBUG: {{ asg }}"
- name: Create scale up policy
ec2_scaling_policy:
state: present
profile: "{{ profile }}"
region: "{{ ec2_region }}"
name: "{{ edp }}-ScaleUpPolicy"
adjustment_type: "ChangeInCapacity"
asg_name: "{{ asg_name }}"
scaling_adjustment: 1
min_adjustment_step: 1
cooldown: 60
register: scale_up_policy
tags:
- foo
- name: debug
debug:
msg: "Registered scale_up_policy: {{ scale_up_policy }}"
- name: Create scale down policy
ec2_scaling_policy:
state: present
profile: "{{ profile }}"
region: "{{ ec2_region }}"
name: "{{ edp }}-ScaleDownPolicy"
adjustment_type: "ChangeInCapacity"
asg_name: "{{ asg_name }}"
scaling_adjustment: -1
min_adjustment_step: 1
cooldown: 60
register: scale_down_policy
- name: debug
debug:
msg: "Registered scale_down_policy: {{ scale_down_policy }}"
#
# Sometimes the scaling policy reports itself changed, but
# does not return data about the policy. It's bad enough
# that consistent data isn't returned when things
# have and have not changed; this makes writing idempotent
# tasks difficult.
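# As a workaround, the alarm tasks below are guarded with
# `when: ..._policy.arn is defined` so they only run when the policy
# call actually returned an ARN.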
- name: create high-cpu alarm
ec2_metric_alarm:
state: present
region: "{{ ec2_region }}"
name: "cpu-high"
metric: "CPUUtilization"
namespace: "AWS/EC2"
statistic: Average
comparison: ">="
threshold: 90.0
period: 300
evaluation_periods: 2
unit: "Percent"
description: "Scale-up if CPU > 90% for 10 minutes"
dimensions: {"AutoScalingGroupName":"{{ asg_name }}"}
alarm_actions: ["{{ scale_up_policy.arn }}"]
when: scale_up_policy.arn is defined
- name: create low-cpu alarm
ec2_metric_alarm:
state: present
region: "{{ ec2_region }}"
name: "cpu-low"
metric: "CPUUtilization"
namespace: "AWS/EC2"
statistic: Average
comparison: "<="
threshold: 50.0
period: 300
evaluation_periods: 2
unit: "Percent"
description: "Scale-down if CPU < 50% for 10 minutes"
dimensions: {"AutoScalingGroupName":"{{ asg_name }}"}
alarm_actions: ["{{ scale_down_policy.arn }}"]
when: scale_down_policy.arn is defined
\ No newline at end of file
......@@ -24,13 +24,24 @@
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
......@@ -56,13 +67,24 @@
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
......@@ -79,13 +101,24 @@
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
......@@ -117,25 +150,39 @@
},
"Mappings":{
"AWSInstanceType2Arch":{
"t1.micro": { "Arch":"64" },
"m1.small": { "Arch":"64" },
"m1.medium": { "Arch":"64" },
"m1.large": { "Arch":"64" },
"m1.xlarge": { "Arch":"64" },
"m2.xlarge": { "Arch":"64" },
"m2.2xlarge": { "Arch":"64" },
"m2.4xlarge": { "Arch":"64" },
"m3.xlarge": { "Arch":"64" },
"m3.2xlarge": { "Arch":"64" },
"c1.medium": { "Arch":"64" },
"c1.xlarge": { "Arch":"64" },
"cg1.4xlarge": { "Arch":"64HVM" }
"t1.micro" : { "Arch" : "64" },
"m1.small" : { "Arch" : "64" },
"m1.medium" : { "Arch" : "64" },
"m1.large" : { "Arch" : "64" },
"m1.xlarge" : { "Arch" : "64" },
"m2.xlarge" : { "Arch" : "64" },
"m2.2xlarge" : { "Arch" : "64" },
"m2.4xlarge" : { "Arch" : "64" },
"cr1.8xlarge" : { "Arch" : "64" },
"cc2.8xlarge" : { "Arch" : "64" },
"c1.medium" : { "Arch" : "64" },
"c1.xlarge" : { "Arch" : "64" },
"m3.medium" : { "Arch" : "64" },
"m3.large" : { "Arch" : "64" },
"m3.xlarge" : { "Arch" : "64" },
"m3.2xlarge" : { "Arch" : "64" },
"m3.4xlarge" : { "Arch" : "64" },
"c3.large" : { "Arch" : "64" },
"c3.xlarge" : { "Arch" : "64" },
"c3.2xlarge" : { "Arch" : "64" },
"c3.4xlarge" : { "Arch" : "64" },
"c3.8xlarge" : { "Arch" : "64" },
"r3.large" : { "Arch" : "64" },
"r3.xlarge" : { "Arch" : "64" },
"r3.2xlarge" : { "Arch" : "64" },
"r3.4xlarge" : { "Arch" : "64" },
"r3.8xlarge" : { "Arch" : "64" }
},
"AWSRegionArch2AMI":{
"us-east-1": { "32":"ami-def89fb7", "64":"ami-d0f89fb9", "64HVM":"ami-b93264d0" },
"us-east-1": { "32":"ami-def89fb7", "64":"ami-d0f89fb9" },
"us-west-1": { "32":"ami-fc002cb9", "64":"ami-fe002cbb" },
"us-west-2": { "32":"ami-0ef96e3e", "64":"ami-70f96e40", "64HVM":"ami-6cad335c" },
"eu-west-1": { "32":"ami-c27b6fb6", "64":"ami-ce7b6fba", "64HVM":"ami-8c987efb" },
"us-west-2": { "32":"ami-0ef96e3e", "64":"ami-70f96e40" },
"eu-west-1": { "32":"ami-c27b6fb6", "64":"ami-ce7b6fba" },
"sa-east-1": { "32":"ami-a1da00bc", "64":"ami-a3da00be" },
"ap-southeast-1": { "32":"ami-66084734", "64":"ami-64084736" },
"ap-southeast-2": { "32":"ami-06ea7a3c", "64":"ami-04ea7a3e" },
......@@ -183,7 +230,7 @@
"Fn::FindInMap":[
"MapRegionsToAvailZones",
{ "Ref":"AWS::Region" },
"AZone0"
"AZone1"
]
},
"Tags":[
......@@ -205,7 +252,7 @@
"Fn::FindInMap":[
"MapRegionsToAvailZones",
{ "Ref":"AWS::Region" },
"AZone0"
"AZone1"
]
},
"Tags":[
......@@ -364,7 +411,7 @@
}
}
},
"InboundEmphemeralPublicNetworkAclEntry":{
"InboundSMTPPublicNetworkAclEntry":{
"Type":"AWS::EC2::NetworkAclEntry",
"Properties":{
"NetworkAclId":{
......@@ -376,6 +423,23 @@
"Egress":"false",
"CidrBlock":"0.0.0.0/0",
"PortRange":{
"From":"587",
"To":"587"
}
}
},
"InboundEmphemeralPublicNetworkAclEntry":{
"Type":"AWS::EC2::NetworkAclEntry",
"Properties":{
"NetworkAclId":{
"Ref":"PublicNetworkAcl"
},
"RuleNumber":"104",
"Protocol":"6",
"RuleAction":"allow",
"Egress":"false",
"CidrBlock":"0.0.0.0/0",
"PortRange":{
"From":"1024",
"To":"65535"
}
......@@ -582,6 +646,12 @@
"FromPort":"443",
"ToPort":"443",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":"587",
"ToPort":"587",
"CidrIp":"0.0.0.0/0"
}
],
"SecurityGroupEgress":[
......@@ -604,6 +674,12 @@
"FromPort":"443",
"ToPort":"443",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":"587",
"ToPort":"587",
"CidrIp":"0.0.0.0/0"
}
]
}
......@@ -688,10 +764,11 @@
"IpProtocol":"tcp",
"FromPort":"22",
"ToPort":"22",
"CidrIp":"10.0.0.0/16"
"CidrIp":"10.254.0.0/16"
},
{
"IpProtocol":"tcp",
"FromPort":"80",
"ToPort":"80",
"CidrIp":"0.0.0.0/0"
},
......@@ -827,7 +904,23 @@
]
]
}
},
"BlockDeviceMappings": [
{
"DeviceName": "/dev/sda1",
"Ebs":{
"VolumeSize": 100
}
},
{
"DeviceName": "/dev/sdb",
"VirtualName": "ephemeral0"
},
{
"DeviceName": "/dev/sdc",
"VirtualName": "ephemeral1"
}
]
}
},
"AdminSecurityGroup":{
......
......@@ -29,13 +29,24 @@
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
......@@ -52,13 +63,24 @@
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
......@@ -75,13 +97,24 @@
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
......@@ -98,13 +131,24 @@
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
......@@ -121,13 +165,24 @@
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
......@@ -144,13 +199,24 @@
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
......@@ -167,13 +233,58 @@
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"c1.medium",
"c1.xlarge",
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
"XQWatcherInstanceType":{
"Description":"Xserver server EC2 instance type",
"Type":"String",
"Default":"m1.small",
"AllowedValues":[
"t1.micro",
"m1.small",
"m1.medium",
"m1.large",
"m1.xlarge",
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
......@@ -229,13 +340,24 @@
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
......@@ -252,13 +374,24 @@
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
......@@ -272,6 +405,11 @@
"Type":"Number",
"Default":"2"
},
"XQWatcherDesiredCapacity":{
"Description":"The Auto-scaling group desired capacity for the xqueue watcher hosts",
"Type":"Number",
"Default":"2"
},
"CommonClusterDesiredCapacity":{
"Description":"The Auto-scaling group desired capacity for the CommonCluster hosts",
"Type":"Number",
......@@ -309,7 +447,22 @@
"cache.m2.xlarge",
"cache.m2.2xlarge",
"cache.m2.4xlarge",
"cache.c1.xlarge"
"cache.c1.medium",
"cache.c1.xlarge",
"cache.m3.medium",
"cache.m3.large",
"cache.m3.xlarge",
"cache.m3.2xlarge",
"cache.c3.large",
"cache.c3.xlarge",
"cache.c3.2xlarge",
"cache.c3.4xlarge",
"cache.c3.8xlarge",
"cache.r3.large",
"cache.r3.xlarge",
"cache.r3.2xlarge",
"cache.r3.4xlarge",
"cache.r3.8xlarge"
],
"ConstraintDescription":"must select a valid Cache Node type."
},
......@@ -354,13 +507,29 @@
"Description":"Database instance class",
"Type":"String",
"AllowedValues":[
"db.m1.micro",
"db.t1.micro",
"db.m1.small",
"db.m1.large",
"db.m1.xlarge",
"db.m2.xlarge",
"db.m2.2xlarge",
"db.m2.4xlarge"
"db.m2.4xlarge",
"db.c1.medium",
"db.c1.xlarge",
"db.m3.medium",
"db.m3.large",
"db.m3.xlarge",
"db.m3.2xlarge",
"db.c3.large",
"db.c3.xlarge",
"db.c3.2xlarge",
"db.c3.4xlarge",
"db.c3.8xlarge",
"db.r3.large",
"db.r3.xlarge",
"db.r3.2xlarge",
"db.r3.4xlarge",
"db.r3.8xlarge"
],
"ConstraintDescription":"must select a valid database instance type."
},
......@@ -401,25 +570,39 @@
},
"Mappings":{
"AWSInstanceType2Arch":{
"t1.micro": { "Arch":"64" },
"m1.small": { "Arch":"64" },
"m1.medium": { "Arch":"64" },
"m1.large": { "Arch":"64" },
"m1.xlarge": { "Arch":"64" },
"m2.xlarge": { "Arch":"64" },
"m2.2xlarge": { "Arch":"64" },
"m2.4xlarge": { "Arch":"64" },
"m3.xlarge": { "Arch":"64" },
"m3.2xlarge": { "Arch":"64" },
"c1.medium": { "Arch":"64" },
"c1.xlarge": { "Arch":"64" },
"cg1.4xlarge": { "Arch":"64HVM" }
"t1.micro" : { "Arch" : "64" },
"m1.small" : { "Arch" : "64" },
"m1.medium" : { "Arch" : "64" },
"m1.large" : { "Arch" : "64" },
"m1.xlarge" : { "Arch" : "64" },
"m2.xlarge" : { "Arch" : "64" },
"m2.2xlarge" : { "Arch" : "64" },
"m2.4xlarge" : { "Arch" : "64" },
"cr1.8xlarge" : { "Arch" : "64" },
"cc2.8xlarge" : { "Arch" : "64" },
"c1.medium" : { "Arch" : "64" },
"c1.xlarge" : { "Arch" : "64" },
"m3.medium" : { "Arch" : "64" },
"m3.large" : { "Arch" : "64" },
"m3.xlarge" : { "Arch" : "64" },
"m3.2xlarge" : { "Arch" : "64" },
"m3.4xlarge" : { "Arch" : "64" },
"c3.large" : { "Arch" : "64" },
"c3.xlarge" : { "Arch" : "64" },
"c3.2xlarge" : { "Arch" : "64" },
"c3.4xlarge" : { "Arch" : "64" },
"c3.8xlarge" : { "Arch" : "64" },
"r3.large" : { "Arch" : "64" },
"r3.xlarge" : { "Arch" : "64" },
"r3.2xlarge" : { "Arch" : "64" },
"r3.4xlarge" : { "Arch" : "64" },
"r3.8xlarge" : { "Arch" : "64" }
},
"AWSRegionArch2AMI":{
"us-east-1": { "32":"ami-def89fb7", "64":"ami-d0f89fb9", "64HVM":"ami-b93264d0" },
"us-east-1": { "32":"ami-def89fb7", "64":"ami-d0f89fb9" },
"us-west-1": { "32":"ami-fc002cb9", "64":"ami-fe002cbb" },
"us-west-2": { "32":"ami-0ef96e3e", "64":"ami-70f96e40", "64HVM":"ami-6cad335c" },
"eu-west-1": { "32":"ami-c27b6fb6", "64":"ami-ce7b6fba", "64HVM":"ami-8c987efb" },
"us-west-2": { "32":"ami-0ef96e3e", "64":"ami-70f96e40" },
"eu-west-1": { "32":"ami-c27b6fb6", "64":"ami-ce7b6fba" },
"sa-east-1": { "32":"ami-a1da00bc", "64":"ami-a3da00be" },
"ap-southeast-1": { "32":"ami-66084734", "64":"ami-64084736" },
"ap-southeast-2": { "32":"ami-06ea7a3c", "64":"ami-04ea7a3e" },
......@@ -443,9 +626,11 @@
"Edxapp02": { "CIDR":".11.0/24" },
"XServerJail01": { "CIDR":".20.0/24" },
"XServerJail02": { "CIDR":".21.0/24" },
"CommonCluster01": { "CIDR":".46.0/24"},
"CommonCluster02": { "CIDR":".47.0/24"},
"CommonCluster03": { "CIDR":".48.0/24"},
"XQWatcherJail01": { "CIDR":".30.0/24" },
"XQWatcherJail02": { "CIDR":".31.0/24" },
"CommonCluster01": { "CIDR":".46.0/24" },
"CommonCluster02": { "CIDR":".47.0/24" },
"CommonCluster03": { "CIDR":".48.0/24" },
"Data01": { "CIDR":".50.0/24" },
"Data02": { "CIDR":".51.0/24" },
"Cache01": { "CIDR":".60.0/24" },
......@@ -457,8 +642,8 @@
"Mongo01": { "CIDR":".90.0/24" },
"Mongo02": { "CIDR":".91.0/24" },
"Mongo03": { "CIDR":".92.0/24" },
"Notifier01": { "CIDR":".100.0/24" },
"Admin": { "CIDR":".200.0/24" }
"Notifier01": { "CIDR":".100.0/24"},
"Admin": { "CIDR":".200.0/24"}
},
"MapRegionsToAvailZones":{
"us-east-1": { "AZone2":"us-east-1d", "AZone0":"us-east-1b", "AZone1":"us-east-1c" },
......@@ -932,6 +1117,102 @@
]
}
},
"XQWatcherSubnet01":{
"Type":"AWS::EC2::Subnet",
"Properties":{
"VpcId":{
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"XQWatcherJail01",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
"MapRegionsToAvailZones",
{ "Ref":"AWS::Region" },
"AZone0"
]
},
"Tags":[
{
"Key":"play",
"Value":"xqwatcher"
},
{
"Key":"Network",
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
"-",
{"Ref":"DeploymentTag"},
"-",
"internal-xqwatcher','target':'ec2'}"
]
]
}
}
]
}
},
"XQWatcherSubnet02":{
"Type":"AWS::EC2::Subnet",
"Properties":{
"VpcId":{
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"XQWatcherJail02",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
"MapRegionsToAvailZones",
{ "Ref":"AWS::Region" },
"AZone1"
]
},
"Tags":[
{
"Key":"play",
"Value":"xqwatcher"
},
{
"Key":"Network",
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
"-",
{"Ref":"DeploymentTag"},
"-",
"internal-xqwatcher','target':'ec2'}"
]
]
}
}
]
}
},
"Data01":{
"Type":"AWS::EC2::Subnet",
"Properties":{
......@@ -1855,6 +2136,28 @@
}
}
},
"PrivateSubnetRouteTableAssociationXQWatcher01":{
"Type":"AWS::EC2::SubnetRouteTableAssociation",
"Properties":{
"SubnetId":{
"Ref":"XQWatcherSubnet01"
},
"RouteTableId":{
"Ref":"PrivateRouteTable"
}
}
},
"PrivateSubnetRouteTableAssociationXQWatcher02":{
"Type":"AWS::EC2::SubnetRouteTableAssociation",
"Properties":{
"SubnetId":{
"Ref":"XQWatcherSubnet02"
},
"RouteTableId":{
"Ref":"PrivateRouteTable"
}
}
},
"PrivateSubnetRouteTableAssociationData01":{
"Type":"AWS::EC2::SubnetRouteTableAssociation",
"Properties":{
......@@ -2129,6 +2432,28 @@
}
}
},
"PrivateSubnetNetworkAclAssociationXQWatcher01":{
"Type":"AWS::EC2::SubnetNetworkAclAssociation",
"Properties":{
"SubnetId":{
"Ref":"XQWatcherSubnet01"
},
"NetworkAclId":{
"Ref":"PrivateNetworkAcl"
}
}
},
"PrivateSubnetNetworkAclAssociationXQWatcher02":{
"Type":"AWS::EC2::SubnetNetworkAclAssociation",
"Properties":{
"SubnetId":{
"Ref":"XQWatcherSubnet02"
},
"NetworkAclId":{
"Ref":"PrivateNetworkAcl"
}
}
},
"PrivateSubnetNetworkAclAssociationData01":{
"Type":"AWS::EC2::SubnetNetworkAclAssociation",
"Properties":{
......@@ -2817,6 +3142,44 @@
} ]
}
},
"XQWatcherRole": {
"Type": "AWS::IAM::Role",
"Properties": {
"AssumeRolePolicyDocument": {
"Statement": [ {
"Effect": "Allow",
"Principal": {
"Service": [ "ec2.amazonaws.com" ]
},
"Action": [ "sts:AssumeRole" ]
} ]
},
"Path": "/",
"Policies": [ {
"PolicyName": "XQWatcherBasePolicy",
"PolicyDocument": {
"Statement":[
{
"Effect":"Allow",
"Action":[
"ec2:DescribeTags"
],
"Resource":"*"
}
]
}
} ]
}
},
"XQWatcherInstanceProfile": {
"Type": "AWS::IAM::InstanceProfile",
"Properties": {
"Path": "/",
"Roles": [ {
"Ref": "XQWatcherRole"
} ]
}
},
"ForumRole": {
"Type": "AWS::IAM::Role",
"Properties": {
......@@ -4216,6 +4579,229 @@
]
}
},
"XQWatcherServer":{
"Type":"AWS::AutoScaling::LaunchConfiguration",
"Properties":{
"IamInstanceProfile":{ "Ref":"XQWatcherInstanceProfile" },
"SecurityGroups":[
{
"Ref":"XQWatcherServerSecurityGroup"
}
],
"ImageId":{
"Fn::FindInMap":[
"AWSRegionArch2AMI",
{
"Ref":"AWS::Region"
},
{
"Fn::FindInMap":[
"AWSInstanceType2Arch",
{
"Ref":"XQWatcherInstanceType"
},
"Arch"
]
}
]
},
"KeyName":{
"Ref":"KeyName"
},
"InstanceType":{
"Ref":"XQWatcherInstanceType"
},
"BlockDeviceMappings":[
{
"DeviceName":"/dev/sda1",
"Ebs":{
"VolumeSize":"100"
}
}
]
}
},
"XQWatcherServerAsGroup":{
"Type":"AWS::AutoScaling::AutoScalingGroup",
"Properties":{
"AvailabilityZones":[
{
"Fn::GetAtt":[
"XQWatcherSubnet01",
"AvailabilityZone"
]
},
{
"Fn::GetAtt":[
"XQWatcherSubnet02",
"AvailabilityZone"
]
}
],
"VPCZoneIdentifier":[
{
"Ref":"XQWatcherSubnet01"
},
{
"Ref":"XQWatcherSubnet02"
}
],
"Tags":[
{
"Key":"Name",
"Value": {"Fn::Join": ["-",[{"Ref": "EnvironmentTag"},{"Ref": "DeploymentTag"},"xqwatcher"]]},
"PropagateAtLaunch":true
},
{
"Key":"play",
"Value":"xqwatcher",
"PropagateAtLaunch":true
},
{
"Key":"services",
"Value":"xqwatcher",
"PropagateAtLaunch":true
},
{
"Key":"environment",
"Value":{
"Ref":"EnvironmentTag"
},
"PropagateAtLaunch":true
},
{
"Key":"deployment",
"Value":{
"Ref":"DeploymentTag"
},
"PropagateAtLaunch":true
}
],
"LaunchConfigurationName":{
"Ref":"XQWatcherServer"
},
"MinSize":{
"Ref":"XQWatcherDesiredCapacity"
},
"MaxSize":{
"Ref":"XQWatcherDesiredCapacity"
},
"DesiredCapacity":{
"Ref":"XQWatcherDesiredCapacity"
}
}
},
"XQWatcherServerScaleUpPolicy":{
"Type":"AWS::AutoScaling::ScalingPolicy",
"Properties":{
"AdjustmentType":"ChangeInCapacity",
"AutoScalingGroupName":{
"Ref":"XQWatcherServerAsGroup"
},
"Cooldown":"60",
"ScalingAdjustment":"1"
}
},
"XQWatcherServerScaleDownPolicy":{
"Type":"AWS::AutoScaling::ScalingPolicy",
"Properties":{
"AdjustmentType":"ChangeInCapacity",
"AutoScalingGroupName":{
"Ref":"XQWatcherServerAsGroup"
},
"Cooldown":"60",
"ScalingAdjustment":"-1"
}
},
"XQWatcherCPUAlarmHigh":{
"Type":"AWS::CloudWatch::Alarm",
"Properties":{
"AlarmDescription":"Scale-up if CPU > 90% for 10 minutes",
"MetricName":"CPUUtilization",
"Namespace":"AWS/EC2",
"Statistic":"Average",
"Period":"300",
"EvaluationPeriods":"2",
"Threshold":"90",
"AlarmActions":[
{
"Ref":"XQWatcherServerScaleUpPolicy"
}
],
"Dimensions":[
{
"Name":"AutoScalingGroupName",
"Value":{
"Ref":"XQWatcherServerAsGroup"
}
}
],
"ComparisonOperator":"GreaterThanThreshold"
}
},
"XQWatcherCPUAlarmLow":{
"Type":"AWS::CloudWatch::Alarm",
"Properties":{
"AlarmDescription":"Scale-down if CPU < 70% for 10 minutes",
"MetricName":"CPUUtilization",
"Namespace":"AWS/EC2",
"Statistic":"Average",
"Period":"300",
"EvaluationPeriods":"2",
"Threshold":"70",
"AlarmActions":[
{
"Ref":"XQWatcherServerScaleDownPolicy"
}
],
"Dimensions":[
{
"Name":"AutoScalingGroupName",
"Value":{
"Ref":"XQWatcherServerAsGroup"
}
}
],
"ComparisonOperator":"LessThanThreshold"
}
},
"XQWatcherServerSecurityGroup":{
"Type":"AWS::EC2::SecurityGroup",
"Properties":{
"GroupDescription":"Open up SSH access.",
"VpcId":{
"Ref":"EdxVPC"
},
"SecurityGroupIngress":[
{
"IpProtocol":"tcp",
"FromPort":"22",
"ToPort":"22",
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "VPC", "CIDR"]}]]}
}
],
"Tags":[
{
"Key":"play",
"Value":"xqwatcher"
},
{
"Key":"environment",
"Value":{
"Ref":"EnvironmentTag"
}
},
{
"Key":"deployment",
"Value":{
"Ref":"DeploymentTag"
}
}
]
}
},
"EdxDataSecurityGroup":{
"Type":"AWS::EC2::SecurityGroup",
"Properties":{
......
......@@ -47,10 +47,25 @@
"m2.xlarge" : { "Arch" : "64" },
"m2.2xlarge" : { "Arch" : "64" },
"m2.4xlarge" : { "Arch" : "64" },
"cr1.8xlarge" : { "Arch" : "64" },
"cc2.8xlarge" : { "Arch" : "64" },
"c1.medium" : { "Arch" : "64" },
"c1.xlarge" : { "Arch" : "64" },
"m3.medium" : { "Arch" : "64" },
"m3.large" : { "Arch" : "64" },
"m3.xlarge" : { "Arch" : "64" },
"m3.2xlarge" : { "Arch" : "64" },
"c1.medium" : { "Arch" : "64" },
"c1.xlarge" : { "Arch" : "64" }
"m3.4xlarge" : { "Arch" : "64" },
"c3.large" : { "Arch" : "64" },
"c3.xlarge" : { "Arch" : "64" },
"c3.2xlarge" : { "Arch" : "64" },
"c3.4xlarge" : { "Arch" : "64" },
"c3.8xlarge" : { "Arch" : "64" },
"r3.large" : { "Arch" : "64" },
"r3.xlarge" : { "Arch" : "64" },
"r3.2xlarge" : { "Arch" : "64" },
"r3.4xlarge" : { "Arch" : "64" },
"r3.8xlarge" : { "Arch" : "64" }
},
"AWSRegionArch2AMI" : {
......
......@@ -46,10 +46,25 @@
"m2.xlarge" : { "Arch" : "64" },
"m2.2xlarge" : { "Arch" : "64" },
"m2.4xlarge" : { "Arch" : "64" },
"cr1.8xlarge" : { "Arch" : "64" },
"cc2.8xlarge" : { "Arch" : "64" },
"c1.medium" : { "Arch" : "64" },
"c1.xlarge" : { "Arch" : "64" },
"m3.medium" : { "Arch" : "64" },
"m3.large" : { "Arch" : "64" },
"m3.xlarge" : { "Arch" : "64" },
"m3.2xlarge" : { "Arch" : "64" },
"c1.medium" : { "Arch" : "64" },
"c1.xlarge" : { "Arch" : "64" }
"m3.4xlarge" : { "Arch" : "64" },
"c3.large" : { "Arch" : "64" },
"c3.xlarge" : { "Arch" : "64" },
"c3.2xlarge" : { "Arch" : "64" },
"c3.4xlarge" : { "Arch" : "64" },
"c3.8xlarge" : { "Arch" : "64" },
"r3.large" : { "Arch" : "64" },
"r3.xlarge" : { "Arch" : "64" },
"r3.2xlarge" : { "Arch" : "64" },
"r3.4xlarge" : { "Arch" : "64" },
"r3.8xlarge" : { "Arch" : "64" }
},
"AWSRegionArch2AMI" : {
......
......@@ -11,7 +11,7 @@
# AWS regions to make calls to. Set this to 'all' to make request to all regions
# in AWS and merge the results together. Alternatively, set this to a comma
# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2'
regions = all
regions = us-east-1
regions_exclude = us-gov-west-1
# When generating inventory, Ansible needs to know how to address a server.
......
......@@ -217,7 +217,14 @@ class Ec2Inventory(object):
config.get('ec2', 'route53_excluded_zones', '').split(','))
# Cache related
if 'EC2_CACHE_PATH' in os.environ:
cache_path = os.environ['EC2_CACHE_PATH']
elif self.args.cache_path:
cache_path = self.args.cache_path
else:
cache_path = config.get('ec2', 'cache_path')
if not os.path.exists(cache_path):
os.makedirs(cache_path)
self.cache_path_cache = cache_path + "/ansible-ec2.cache"
self.cache_path_tags = cache_path + "/ansible-ec2.tags.cache"
self.cache_path_index = cache_path + "/ansible-ec2.index"
......@@ -241,6 +248,10 @@ class Ec2Inventory(object):
default_inifile = os.environ.get("ANSIBLE_EC2_INI", os.path.dirname(os.path.realpath(__file__))+'/ec2.ini')
parser.add_argument('--inifile', dest='inifile', help='Path to init script to use', default=default_inifile)
parser.add_argument(
'--cache-path',
help='Override the cache path set in ini file',
required=False)
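# Example (path is illustrative): ./ec2.py --cache-path ec2_cache/prod --list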
self.args = parser.parse_args()
......
......@@ -2,16 +2,12 @@
hosts: all
sudo: True
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- aws
- certs
- role: datadog
when: ENABLE_DATADOG
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
when: COMMON_ENABLE_NEWRELIC
......@@ -2,15 +2,11 @@
hosts: all
sudo: True
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- common
- role: datadog
when: ENABLE_DATADOG
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
when: COMMON_ENABLE_NEWRELIC
......@@ -4,9 +4,26 @@
sudo: True
serial: 1
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
# By default take instances in and out of the elb(s) they
# are attached to
# To skip elb operations use "-e elb_pre_post=false"
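# e.g. (playbook name illustrative): ansible-playbook xqueue.yml -e "elb_pre_post=false"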
elb_pre_post: true
# Number of instances to operate on at a time
serial_count: 1
serial: "{{ serial_count }}"
pre_tasks:
- action: ec2_facts
when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
sudo: False
when: elb_pre_post
roles:
- aws
- role: nginx
......@@ -14,17 +31,29 @@
- xqueue
- role: xqueue
- role: datadog
when: ENABLE_DATADOG
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
when: COMMON_ENABLE_NEWRELIC
- oraclejdk
- elasticsearch
- rabbitmq
- datadog
- splunkforwarder
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
with_items: ec2_elbs
sudo: False
when: elb_pre_post
#
# In order to reconfigure the host resolution we are issuing a
# reboot.
......
- name: connect a sandbox to production data
hosts: all
gather_facts: False
sudo: True
tasks:
- name: Switch the mongo db to use ephemeral
file: >
name=/mnt/mongodb
state=directory
owner=mongodb
group=mongodb
tags: update_mongo_data
- name: update the mongo config to use the new mongo dir
shell: >
sed -i 's#^dbpath=.*#dbpath=/mnt/mongodb#' /etc/mongodb.conf
tags: update_mongo_data
- name: restart mongodb
service: >
name=mongodb
state=restarted
tags: update_mongo_data
- name: grab the most recent backup from s3 for forums
shell: >
/edx/bin/s3cmd ls s3://edx-mongohq/mongohq_backups/ | grep comment | sort | tail -1 | awk '{ print $4 }'
register: s3cmd_out_forum
tags: update_mongo_data
- name: grab the most recent backup from s3 for forums
shell: >
/edx/bin/s3cmd get {{ s3cmd_out_forum.stdout }} --skip-existing
chdir=/mnt
tags: update_mongo_data
- name: untar the s3 backup
shell: >
tar zxf {{ s3cmd_out_forum.stdout|basename }}
chdir=/mnt
tags: update_mongo_data
- name: grab the most recent backup from s3 for prod-edx
shell: >
/edx/bin/s3cmd ls s3://edx-mongohq/mongohq_backups/ | grep prod-edx | sort | tail -1 | awk '{ print $4 }'
register: s3cmd_out_modulestore
tags: update_mongo_data
- name: grab the most recent backup from s3 for prod-edx
shell: >
/edx/bin/s3cmd get {{ s3cmd_out_modulestore.stdout }} --skip-existing
chdir=/mnt
tags: update_mongo_data
- name: untar the s3 backup
shell: >
tar zxf {{ s3cmd_out_modulestore.stdout|basename }}
chdir=/mnt
tags: update_mongo_data
- name: Restore the mongo data for the forums
shell: >
mongorestore --drop -d cs_comments_service /mnt/comments-prod
tags: update_mongo_data
- name: Restore the mongo data for the modulestore
shell: >
mongorestore --drop -d edxapp /mnt/prod-edx
tags: update_mongo_data
# recreate users after the restore
- name: create mongodb users
mongodb_user: >
database={{ item.database }}
name={{ item.user }}
password={{ item.password }}
state=present
with_items:
- user: cs_comments_service
password: password
database: cs_comments_service
- user: edxapp
password: password
database: edxapp
# WARNING - calling lineinfile on a symlink
# will convert the symlink to a file!
# don't use /edx/etc/server-vars.yml here
#
# What we are doing here is updating the sandbox
# server-vars config file so that when update
# is called it will use the new MYSQL connection
# info.
- name: Update RDS to point to the sandbox clone
lineinfile: >
dest=/edx/app/edx_ansible/server-vars.yml
line="{{ item }}"
with_items:
- "EDXAPP_MYSQL_HOST: {{ EDXAPP_MYSQL_HOST }}"
- "EDXAPP_MYSQL_DB_NAME: {{ EDXAPP_MYSQL_DB_NAME }}"
- "EDXAPP_MYSQL_USER: {{ EDXAPP_MYSQL_USER }}"
- "EDXAPP_MYSQL_PASSWORD: {{ EDXAPP_MYSQL_PASSWORD }}"
tags: update_edxapp_mysql_host
- name: call update on edx-platform
shell: >
/edx/bin/update edx-platform master
tags: update_edxapp_mysql_host
......@@ -2,15 +2,11 @@
hosts: all
sudo: True
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- demo
- role: datadog
when: ENABLE_DATADOG
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
when: COMMON_ENABLE_NEWRELIC
......@@ -2,10 +2,6 @@
hosts: all
sudo: True
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- aws
- role: nginx
......@@ -13,8 +9,8 @@
- discern
- discern
- role: datadog
when: ENABLE_DATADOG
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
when: COMMON_ENABLE_NEWRELIC
......@@ -6,10 +6,6 @@
vars:
migrate_db: "yes"
openid_workaround: True
ENABLE_DATADOG: True
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
edx_internal: False
roles:
- aws
- role: nginx
......@@ -33,14 +29,14 @@
- elasticsearch
- forum
- { role: "xqueue", update_users: True }
- { role: xserver, when: edx_internal }
- xserver
- ora
- discern
- certs
- edx_ansible
- role: datadog
when: ENABLE_DATADOG
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
when: COMMON_ENABLE_NEWRELIC
- flower
......@@ -2,10 +2,6 @@
hosts: all
sudo: True
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- aws
- role: nginx
......@@ -16,8 +12,8 @@
- lms
- edxapp
- role: datadog
when: ENABLE_DATADOG
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
when: COMMON_ENABLE_NEWRELIC
......@@ -3,7 +3,41 @@
vars_files:
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/stage/stage-edx.yml"
vars:
# By default take instances in and out of the elb(s) they
# are attached to
# To skip elb operations use "-e elb_pre_post=false"
elb_pre_post: true
# Number of instances to operate on at a time
serial_count: 1
serial: "{{ serial_count }}"
pre_tasks:
- action: ec2_facts
when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
sudo: False
when: elb_pre_post
roles:
- common
- oraclejdk
- elasticsearch
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
with_items: ec2_elbs
sudo: False
when: elb_pre_post
- name: Deploy celery flower (monitoring tool)
hosts: all
sudo: True
gather_facts: True
roles:
- flower
......@@ -2,10 +2,6 @@
hosts: all
sudo: True
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- aws
- role: nginx
......@@ -13,10 +9,8 @@
- forum
- forum
- role: datadog
when: ENABLE_DATADOG
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
- role: newrelic
when: ENABLE_NEWRELIC
when: COMMON_ENABLE_NEWRELIC
......@@ -3,19 +3,16 @@
hosts: all
sudo: True
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- aws
- edx_ansible
- user
- jenkins_admin
- hotg
- alton
- role: datadog
when: ENABLE_DATADOG
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
when: COMMON_ENABLE_NEWRELIC
......@@ -28,6 +28,7 @@
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
- "restart workers"
- name: syncdb and migrate
shell: >
......@@ -38,10 +39,11 @@
DB_MIGRATION_PASS: "{{ edxapp_mysql_password }}"
notify:
- "restart edxapp"
- "restart workers"
handlers:
- name: restart edxapp
shell: "{{ supervisorctl_path }} restart edxapp:{{ item }}"
with_items:
- lms
- cms
shell: "{{ supervisorctl_path }} restart edxapp:"
- name: restart workers
shell: "{{ supervisorctl_path }} restart edxapp_worker:"
......@@ -5,6 +5,40 @@
# ansible_default_ipv4 so
# gather_facts must be set to True
gather_facts: True
vars:
# By default take instances in and out of the elb(s) they
# are attached to
# To skip elb operations use "-e elb_pre_post=false"
elb_pre_post: true
# Number of instances to operate on at a time
serial_count: 1
serial: "{{ serial_count }}"
pre_tasks:
- action: ec2_facts
when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
sudo: False
when: elb_pre_post
roles:
- aws
- rabbitmq
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
with_items: ec2_elbs
sudo: False
when: elb_pre_post
- hosts: all
sudo: true
vars:
# By default take instances in and out of the elb(s) they
# are attached to
# To skip elb operations use "-e elb_pre_post=false"
elb_pre_post: true
# Number of instances to operate on at a time
serial_count: 1
serial: "{{ serial_count }}"
pre_tasks:
- action: ec2_facts
when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
sudo: False
when: elb_pre_post
tasks:
- shell: echo "test"
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
with_items: ec2_elbs
sudo: False
when: elb_pre_post
# This is a utility play to setup the db users on the edxapp db
#
# The mysql root user MUST be passed in as an extra var
#
# the environment and deployment must be passed in as COMMON_ENVIRONMENT
# and COMMON_DEPLOYMENT. These two vars should be set in the secret
# var file for the corresponding vpc stack
#
# Example invocation:
#
# Create the databases for edxapp and xqueue:
#
# ansible-playbook -i localhost, create_db_users.yml -e@/path/to/secrets.yml -e "edxapp_db_root_user=root edxapp_db_root_pass=password"
#
- name: Update db users on the edxapp db
hosts: all
gather_facts: False
vars:
edxapp_db_root_user: 'None'
edxapp_db_root_pass: 'None'
tasks:
- fail: msg="COMMON_ENVIRONMENT and COMMON_DEPLOYMENT need to be defined to use this play"
when: COMMON_ENVIRONMENT is not defined or COMMON_DEPLOYMENT is not defined
- name: assign mysql user permissions for read_only user
mysql_user:
name: "{{ COMMON_MYSQL_READ_ONLY_USER }}"
priv: "*.*:SELECT"
password: "{{ COMMON_MYSQL_READ_ONLY_PASS }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
append_privs: yes
host: '%'
when: item.db_user != 'None'
with_items:
- db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ edxapp_db_root_pass }}"
- name: assign mysql user permissions for migrate user
mysql_user:
name: "{{ COMMON_MYSQL_MIGRATE_USER }}"
priv: "{{ item.db_name }}.*:SELECT,INSERT,UPDATE,DELETE,ALTER,CREATE,DROP,INDEX"
password: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
append_privs: yes
host: '%'
when: item.db_user != 'None'
with_items:
- db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ edxapp_db_root_pass }}"
- name: assign mysql user permissions for admin user
mysql_user:
name: "{{ COMMON_MYSQL_ADMIN_USER }}"
priv: "*.*:CREATE USER"
password: "{{ COMMON_MYSQL_ADMIN_PASS }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
append_privs: yes
host: '%'
when: item.db_user != 'None'
with_items:
- db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ edxapp_db_root_pass }}"
- name: assign mysql user permissions for db users
mysql_user:
name: "{{ item.db_user_to_modify }}"
priv: "{{ item.db_name }}.*:SELECT,INSERT,UPDATE,DELETE"
password: "{{ item.db_user_to_modify_pass }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
host: '%'
when: item.db_user != 'None'
with_items:
# These defaults are needed, otherwise ansible will throw
# variable undefined errors when they are not defined
# in secret vars
- db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user|default('None') }}"
db_pass: "{{ edxapp_db_root_pass|default('None') }}"
db_user_to_modify: "{{ EDXAPP_MYSQL_USER }}"
db_user_to_modify_pass: "{{ EDXAPP_MYSQL_PASSWORD }}"
# The second call to mysql_user needs to have append_privs set to
# yes otherwise it will overwrite the previous run.
# This means that both tasks will report changed on every ansible
# run
- name: assign mysql user permissions for db test user
mysql_user:
append_privs: yes
name: "{{ item.db_user_to_modify }}"
priv: "{{ COMMON_ENVIRONMENT }}_{{ COMMON_DEPLOYMENT }}_test_{{ item.db_name }}.*:ALL"
password: "{{ item.db_user_to_modify_pass }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
host: '%'
when: item.db_user != 'None'
with_items:
# These defaults are needed, otherwise ansible will throw
# variable undefined errors when they are not defined
# in secret vars
- db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user|default('None') }}"
db_pass: "{{ edxapp_db_root_pass|default('None') }}"
db_user_to_modify: "{{ EDXAPP_MYSQL_USER }}"
db_user_to_modify_pass: "{{ EDXAPP_MYSQL_PASSWORD }}"
......@@ -2,17 +2,13 @@
hosts: all
sudo: True
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- aws
- role: edxapp
celery_worker: True
- role: datadog
when: ENABLE_DATADOG
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
when: COMMON_ENABLE_NEWRELIC
......@@ -3,9 +3,26 @@
sudo: True
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
# By default take instances in and out of the elb(s) they
# are attached to
# To skip elb operations use "-e elb_pre_post=false"
elb_pre_post: true
# Number of instances to operate on at a time
serial_count: 1
serial: "{{ serial_count }}"
pre_tasks:
- action: ec2_facts
when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
sudo: False
when: elb_pre_post
roles:
- aws
- role: nginx
......@@ -13,8 +30,21 @@
- xqueue
- role: xqueue
- role: datadog
when: ENABLE_DATADOG
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
when: COMMON_ENABLE_NEWRELIC
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
with_items: ec2_elbs
sudo: False
when: elb_pre_post
- name: Deploy xqueue-watcher
hosts: all
sudo: True
gather_facts: True
vars:
COMMON_APP_DIR: "/edx/app"
common_web_group: "www-data"
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- aws
- xqwatcher
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
\ No newline at end of file
......@@ -2,10 +2,6 @@
hosts: all
sudo: True
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- aws
- role: nginx
......@@ -13,8 +9,8 @@
- xserver
- role: xserver
- role: datadog
when: ENABLE_DATADOG
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
when: COMMON_ENABLE_NEWRELIC
../ansible.cfg
\ No newline at end of file
*
!prod
!stage
!data
!.gitignore
This temp directory is created here so that we can make sure it doesn't
collide with other users doing ansible operations on the same machine,
or with concurrent installs to different environments, say to prod and stage.
# config file for ansible -- http://ansible.github.com
# nearly all parameters can be overridden in ansible-playbook or with command line flags
# ansible will read ~/.ansible.cfg or /etc/ansible/ansible.cfg, whichever it finds first
[defaults]
jinja2_extensions=jinja2.ext.do
hash_behaviour=merge
host_key_checking = False
# These are environment-specific defaults
forks=10
transport=ssh
hostfile=./ec2.py
extra_vars='key=deployment region=us-west-1'
user=ubuntu
[ssh_connection]
# example from https://github.com/ansible/ansible/blob/devel/examples/ansible.cfg
ssh_args= -o ControlMaster=auto -o ControlPersist=60s -o ControlPath=/tmp/ansible-ssh-%h-%p-%r
scp_if_ssh=True
......@@ -3,6 +3,6 @@ regions=us-west-1
regions_exclude = us-gov-west-1
destination_variable=public_dns_name
vpc_destination_variable=private_dns_name
cache_path=/tmp
cache_path=ec2_cache/prod
cache_max_age=300
route53=False
[ec2]
regions=us-west-1
regions_exclude = us-gov-west-1
destination_variable=public_dns_name
vpc_destination_variable=private_dns_name
cache_path=ec2_cache/stage
cache_max_age=300
route53=False
......@@ -14,9 +14,8 @@
EDXAPP_LMS_NGINX_PORT: '80'
edx_platform_version: 'master'
# These should stay false for the public AMI
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
COMMON_ENABLE_DATADOG: False
COMMON_ENABLE_SPLUNKFORWARDER: False
roles:
- role: nginx
nginx_sites:
......@@ -41,8 +40,8 @@
- certs
- edx_ansible
- role: datadog
when: ENABLE_DATADOG
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
when: COMMON_ENABLE_NEWRELIC
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: ec2_elb
short_description: De-registers or registers instances from EC2 ELBs
description:
- This module de-registers or registers an AWS EC2 instance from the ELBs
that it belongs to.
- Returns fact "ec2_elbs" which is a list of elbs attached to the instance
if state=absent is passed as an argument.
- Will be marked changed when called only if there are ELBs found to operate on.
version_added: "1.2"
author: John Jarvis
options:
state:
description:
- register or deregister the instance
required: true
choices: ['present', 'absent']
instance_id:
description:
- EC2 Instance ID
required: true
ec2_elbs:
description:
- List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register.
required: false
default: None
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
enable_availability_zone:
description:
- Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already
been enabled. If set to no, the task will fail if the availability zone is not enabled on the ELB.
required: false
default: yes
choices: [ "yes", "no" ]
wait:
description:
- Wait for instance registration or deregistration to complete successfully before returning.
required: false
default: yes
choices: [ "yes", "no" ]
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
aliases: []
version_added: "1.5"
wait_timeout:
description:
- Number of seconds to wait for an instance to change state. If 0 then this module may return an error if a transient error occurs. If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no.
required: false
default: 0
version_added: "1.6"
extends_documentation_fragment: aws
"""
EXAMPLES = """
# basic pre_task and post_task example
pre_tasks:
- name: Gathering ec2 facts
ec2_facts:
- name: Instance De-register
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
state: 'absent'
roles:
- myrole
post_tasks:
- name: Instance Register
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
state: 'present'
with_items: ec2_elbs
"""
import time
import sys
import os
try:
import boto
import boto.ec2
import boto.ec2.elb
from boto.regioninfo import RegionInfo
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def connect_to_aws(aws_module, region, **params):
conn = aws_module.connect_to_region(region, **params)
if params.get('profile_name'):
conn = boto_fix_security_token_in_profile(conn, params['profile_name'])
return conn
def get_aws_connection_info(module):
# Check module args for credentials, then check environment vars
# access_key
ec2_url = module.params.get('ec2_url')
access_key = module.params.get('aws_access_key')
secret_key = module.params.get('aws_secret_key')
security_token = module.params.get('security_token')
region = module.params.get('region')
profile_name = module.params.get('profile')
validate_certs = module.params.get('validate_certs')
if not ec2_url:
if 'EC2_URL' in os.environ:
ec2_url = os.environ['EC2_URL']
elif 'AWS_URL' in os.environ:
ec2_url = os.environ['AWS_URL']
if not access_key:
if 'EC2_ACCESS_KEY' in os.environ:
access_key = os.environ['EC2_ACCESS_KEY']
elif 'AWS_ACCESS_KEY_ID' in os.environ:
access_key = os.environ['AWS_ACCESS_KEY_ID']
elif 'AWS_ACCESS_KEY' in os.environ:
access_key = os.environ['AWS_ACCESS_KEY']
else:
# in case access_key came in as empty string
access_key = None
if not secret_key:
if 'EC2_SECRET_KEY' in os.environ:
secret_key = os.environ['EC2_SECRET_KEY']
elif 'AWS_SECRET_ACCESS_KEY' in os.environ:
secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
elif 'AWS_SECRET_KEY' in os.environ:
secret_key = os.environ['AWS_SECRET_KEY']
else:
# in case secret_key came in as empty string
secret_key = None
if not region:
if 'EC2_REGION' in os.environ:
region = os.environ['EC2_REGION']
elif 'AWS_REGION' in os.environ:
region = os.environ['AWS_REGION']
else:
# boto.config.get returns None if config not found
region = boto.config.get('Boto', 'aws_region')
if not region:
region = boto.config.get('Boto', 'ec2_region')
if not security_token:
if 'AWS_SECURITY_TOKEN' in os.environ:
security_token = os.environ['AWS_SECURITY_TOKEN']
else:
# in case security_token came in as empty string
security_token = None
boto_params = dict(aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
security_token=security_token)
# profile_name only works as a key in boto >= 2.24
# so only set profile_name if passed as an argument
if profile_name:
if not boto_supports_profile_name():
module.fail_json("boto does not support profile_name before 2.24")
boto_params['profile_name'] = profile_name
if validate_certs and HAS_LOOSE_VERSION and LooseVersion(boto.Version) >= LooseVersion("2.6.0"):
boto_params['validate_certs'] = validate_certs
return region, ec2_url, boto_params
class ElbManager:
"""Handles EC2 instance ELB registration and de-registration"""
def __init__(self, module, instance_id=None, ec2_elbs=None,
region=None, **aws_connect_params):
self.module = module
self.instance_id = instance_id
self.region = region
self.aws_connect_params = aws_connect_params
self.lbs = self._get_instance_lbs(ec2_elbs)
self.changed = False
def deregister(self, wait, timeout):
"""De-register the instance from all ELBs and wait for the ELB
to report it out-of-service"""
for lb in self.lbs:
initial_state = self._get_instance_health(lb)
if initial_state is None:
# The instance isn't registered with this ELB so just
# return unchanged
return
lb.deregister_instances([self.instance_id])
# The ELB is changing state in some way. Either an instance that's
# InService is moving to OutOfService, or an instance that's
# already OutOfService is being deregistered.
self.changed = True
if wait:
self._await_elb_instance_state(lb, 'OutOfService', initial_state, timeout)
def register(self, wait, enable_availability_zone, timeout):
"""Register the instance for all ELBs and wait for the ELB
to report the instance in-service"""
for lb in self.lbs:
initial_state = self._get_instance_health(lb)
if enable_availability_zone:
self._enable_availability_zone(lb)
lb.register_instances([self.instance_id])
if wait:
self._await_elb_instance_state(lb, 'InService', initial_state, timeout)
else:
# We cannot assume no change was made if we don't wait
# to find out
self.changed = True
def exists(self, lbtest):
""" Verify that the named ELB actually exists """
found = False
for lb in self.lbs:
if lb.name == lbtest:
found = True
break
return found
def _enable_availability_zone(self, lb):
"""Enable the current instance's availability zone in the provided lb.
Returns True if the zone was enabled or False if no change was made.
lb: load balancer"""
instance = self._get_instance()
if instance.placement in lb.availability_zones:
return False
lb.enable_zones(zones=instance.placement)
# If successful, the new zone will have been added to
# lb.availability_zones
return instance.placement in lb.availability_zones
def _await_elb_instance_state(self, lb, awaited_state, initial_state, timeout):
"""Wait for an ELB to change state
lb: load balancer
awaited_state : state to poll for (string)"""
wait_timeout = time.time() + timeout
while True:
instance_state = self._get_instance_health(lb)
if not instance_state:
msg = ("The instance %s could not be put in service on %s."
" Reason: Invalid Instance")
self.module.fail_json(msg=msg % (self.instance_id, lb))
if instance_state.state == awaited_state:
# Check the current state against the initial state, and only set
# changed if they are different.
if (initial_state is None) or (instance_state.state != initial_state.state):
self.changed = True
break
elif self._is_instance_state_pending(instance_state):
# If it's pending, we'll skip further checks and continue waiting
pass
elif (awaited_state == 'InService'
and instance_state.reason_code == "Instance"
and time.time() >= wait_timeout):
# If the reason_code for the instance being out of service is
# "Instance" this indicates a failure state, e.g. the instance
# has failed a health check or the ELB does not have the
# instance's availability zone enabled. The exact reason why is
# described in InstanceState.description.
msg = ("The instance %s could not be put in service on %s."
" Reason: %s")
self.module.fail_json(msg=msg % (self.instance_id,
lb,
instance_state.description))
time.sleep(1)
def _is_instance_state_pending(self, instance_state):
"""
Determines whether the instance_state is "pending", meaning there is
an operation under way to bring it in service.
"""
# This is messy, because AWS provides no way to distinguish between
# an instance that is OutOfService because it's pending vs. OutOfService
# because it's failing health checks. So we're forced to analyze the
# description, which is likely to be brittle.
return (instance_state and 'pending' in instance_state.description)
def _get_instance_health(self, lb):
"""
Check instance health, should return status object or None under
certain error conditions.
"""
try:
status = lb.get_instance_health([self.instance_id])[0]
except boto.exception.BotoServerError, e:
if e.error_code == 'InvalidInstance':
return None
else:
raise
return status
def _get_instance_lbs(self, ec2_elbs=None):
"""Returns a list of ELBs attached to self.instance_id
ec2_elbs: an optional list of elb names that will be used
for elb lookup instead of returning what elbs
are attached to self.instance_id"""
try:
elb = connect_to_aws(boto.ec2.elb, self.region,
**self.aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
self.module.fail_json(msg="{} {} {}".format(e, self.region, self.aws_connect_params))
elbs = elb.get_all_load_balancers()
if ec2_elbs:
lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs)
else:
lbs = []
for lb in elbs:
for info in lb.instances:
if self.instance_id == info.id:
lbs.append(lb)
return lbs
def _get_instance(self):
"""Returns a boto.ec2.InstanceObject for self.instance_id"""
try:
ec2 = connect_to_aws(boto.ec2, self.region,
**self.aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
self.module.fail_json(msg=str(e))
return ec2.get_only_instances(instance_ids=[self.instance_id])[0]
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state={'required': True},
instance_id={'required': True},
ec2_elbs={'default': None, 'required': False, 'type':'list'},
enable_availability_zone={'default': True, 'required': False, 'type': 'bool'},
wait={'required': False, 'default': True, 'type': 'bool'},
wait_timeout={'required': False, 'default': 0, 'type': 'int'}
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
ec2_elbs = module.params['ec2_elbs']
wait = module.params['wait']
enable_availability_zone = module.params['enable_availability_zone']
timeout = module.params['wait_timeout']
if module.params['state'] == 'present' and ec2_elbs is None:
module.fail_json(msg="ELBs are required for registration")
instance_id = module.params['instance_id']
elb_man = ElbManager(module, instance_id, ec2_elbs,
region=region, **aws_connect_params)
if ec2_elbs is not None:
for elb in ec2_elbs:
if not elb_man.exists(elb):
msg="ELB %s does not exist" % elb
module.fail_json(msg=msg)
if module.params['state'] == 'present':
elb_man.register(wait, enable_availability_zone, timeout)
elif module.params['state'] == 'absent':
elb_man.deregister(wait, timeout)
ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]}
ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts)
module.exit_json(**ec2_facts_result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
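# A hedged usage sketch (not from the source): registering an instance with
# its ELBs from a playbook, mirroring the EXAMPLES fragment above. The
# variable names are illustrative, not defined by this module.
#
#   - name: register instance with its load balancers
#     ec2_elb:
#       instance_id: "{{ ansible_ec2_instance_id }}"
#       ec2_elbs: "{{ ec2_elbs }}"
#       region: "{{ ec2_region }}"
#       state: present
#       wait: yes
#       wait_timeout: 300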
---
# Build a kibana/logstash/elasticsearch server for capturing and
# analyzing logs.
- name: Configure syslog server
hosts: all
sudo: yes
roles:
- common
- oraclejdk
- elasticsearch
- logstash
- kibana
- role: nginx
nginx_sites:
- kibana
---
AIDE_REPORT_EMAIL: 'root'
---
# install and configure aide IDS
#
- name: install aide
apt: pkg="aide" state="present"
- name: configure aide defaults
template: >
src=etc/default/aide.j2 dest=/etc/default/aide
owner=root group=root mode=0644
- name: aide initial scan (this can take a long time)
command: >
aideinit -y -f
creates=/var/lib/aide/aide.db
sudo: yes
# These settings are mainly for the wrapper scripts around aide,
# such as aideinit and /etc/cron.daily/aide
# This is used as the host name in the AIDE reports that are sent out
# via e-mail. It defaults to the output of $(hostname --fqdn), but can
# be set to arbitrary values.
# FQDN=
# This is used as the subject for the e-mail reports.
# If your mail system only threads by subject, you might want to add
# some variable content here (for example $(date +%Y-%m-%d)).
MAILSUBJ="Daily AIDE report for $FQDN"
# This is the email address reports get mailed to
# default is root
# This variable is expanded before it is used, so you can use variables
# here. For example, MAILTO=$FQDN-aide@domain.example will send the
# report to host.name.example-aide@domain.example if the local FQDN is
# host.name.example.
MAILTO={{ AIDE_REPORT_EMAIL }}
# Set this to yes to suppress mailings when no changes have been
# detected during the AIDE run and no error output was given.
#QUIETREPORTS=no
# This parameter defines which AIDE command to run from the cron script.
# Sensible values are "update" and "check".
# Default is "check", ensuring backwards compatibility.
# Since "update" does not take any longer, it is recommended to use "update",
# so that a new database is created every day. The new database needs to be
# manually copied over the current one, though.
COMMAND=update
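# A hedged illustration (not part of the stock file): with COMMAND=update,
# promoting the freshly built database is a manual copy, assuming the
# default Debian paths:
#   cp /var/lib/aide/aide.db.new /var/lib/aide/aide.db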
# This parameter defines what to do with a new database created by
# COMMAND=update. It is ignored if COMMAND!=update.
# no: Do not copy new database to old database. This is the default.
# yes: Copy new database to old database. This means that changes to the
# file system are only reported once. Possibly dangerous.
# ifnochange: Copy new database to old database if no changes have
# been reported. This is needed for ANF/ARF to work reliably.
COPYNEWDB=no
# Set this to yes to truncate the detailed changes part in the mail. The full
# output will still be listed in the log file.
TRUNCATEDETAILS=yes
# Set this to yes to suppress file changes by package and security
# updates from appearing in the e-mail report. Filtered file changes will
# still be listed in the log file. This option parses the /var/log/dpkg.log
# file and implies TRUNCATEDETAILS=yes
FILTERUPDATES=yes
# Set this to yes to suppress file changes by package installations
# from appearing in the e-mail report. Filtered file changes will still
# be listed in the log file. This option parses the /var/log/dpkg.log file and
# implies TRUNCATEDETAILS=yes.
FILTERINSTALLATIONS=yes
# This parameter defines how many lines to return per e-mail. Output longer
# than this value will be truncated in the e-mail sent out.
# Set value to "0" to disable this option.
LINES=1000
# This parameter gives a grep regular expression. If given, all output lines
# that _don't_ match the regexp are listed first in the script's output. This
# allows to easily remove noise from the AIDE report.
NOISE=""
# This parameter defines which options are given to aide in the daily
# cron job. The default is "-V4".
AIDEARGS=""
# These parameters control update-aide.conf and give the defaults for
# the --confdir, --confd and --settingsd options
# UPAC_CONFDIR="/etc/aide"
# UPAC_CONFD="$UPAC_CONFDIR/aide.conf.d"
# UPAC_SETTINGSD="$UPAC_CONFDIR/aide.settings.d"
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role alton
#
#
# vars are namespaced with the module name.
#
ALTON_USERNAME: '1234_1234@chat.hipchat.com'
ALTON_PASSWORD: 'password'
ALTON_V2_TOKEN: 'HIPCHAT_V2_TOKEN'
ALTON_ROOMS: 'Hammer'
ALTON_NAME: 'Alton W. Daemon'
ALTON_HANDLE: 'alton'
ALTON_REDIS_URL: 'redis://fakeuser:redispassword@redis.url:port'
ALTON_HTTPSERVER_PORT: '8081'
alton_role_name: alton
alton_user: alton
alton_app_dir: "{{ COMMON_APP_DIR }}/alton"
alton_code_dir: "{{ alton_app_dir }}/alton"
alton_venvs_dir: "{{ alton_app_dir }}/venvs"
alton_venv_dir: "{{ alton_venvs_dir }}/alton"
alton_venv_bin: "{{ alton_venv_dir }}/bin"
alton_source_repo: "https://github.com/edx/alton.git"
alton_version: "HEAD"
alton_requirements_file: "{{ alton_code_dir }}/requirements.txt"
alton_supervisor_wrapper: "{{ alton_app_dir }}/alton-supervisor.sh"
alton_environment:
WILL_USERNAME: "{{ ALTON_USERNAME }}"
WILL_PASSWORD: "{{ ALTON_PASSWORD }}"
WILL_V2_TOKEN: "{{ ALTON_V2_TOKEN }}"
WILL_ROOMS: "{{ ALTON_ROOMS }}"
WILL_NAME: "{{ ALTON_NAME }}"
WILL_HANDLE: "{{ ALTON_HANDLE }}"
WILL_REDIS_URL: "{{ ALTON_REDIS_URL }}"
WILL_HTTPSERVER_PORT: "{{ ALTON_HTTPSERVER_PORT }}"
#
# OS packages
#
alton_debian_pkgs: []
alton_redhat_pkgs: []
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role alton
#
# Overview:
#
#
- name: restart alton
supervisorctl_local: >
name=alton
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
when: not disable_edx_services
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Role includes for role alton
#
# Example:
#
# dependencies:
# - {
# role: my_role
# my_role_var0: "foo"
# my_role_var1: "bar"
# }
dependencies:
- supervisor
- name: checkout the code
git: >
dest="{{ alton_code_dir }}" repo="{{ alton_source_repo }}"
version="{{ alton_version }}" accept_hostkey=yes
sudo_user: "{{ alton_user }}"
- name: install the requirements
pip: >
requirements="{{ alton_requirements_file }}"
virtualenv="{{ alton_venv_dir }}"
state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ alton_user }}"
- name: create the supervisor wrapper
template: >
src="{{ alton_supervisor_wrapper|basename }}.j2"
dest="{{ alton_supervisor_wrapper }}"
mode=0755
sudo_user: "{{ alton_user }}"
notify: restart alton
- name: create a supervisor config
template: >
src=alton.conf.j2 dest="{{ supervisor_available_dir }}/alton.conf"
owner="{{ supervisor_user }}"
group="{{ supervisor_user }}"
sudo_user: "{{ supervisor_user }}"
notify: restart alton
- name: enable the supervisor config
file: >
src="{{ supervisor_available_dir }}/alton.conf"
dest="{{ supervisor_cfg_dir }}/alton.conf"
state=link
force=yes
mode=0644
sudo_user: "{{ supervisor_user }}"
when: not disable_edx_services
notify: restart alton
- name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
changed_when: supervisor_update.stdout != ""
when: not disable_edx_services
- name: ensure alton is started
supervisorctl_local: >
name=alton
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=started
when: not disable_edx_services
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role alton
#
# Overview:
#
#
# Dependencies:
#
#
# Example play:
#
#
- name: create application user
user: >
name="{{ alton_user }}" home="{{ alton_app_dir }}"
createhome=no shell=/bin/false
- name: create alton user dirs
file: >
path="{{ item }}" state=directory
owner="{{ alton_user }}" group="{{ common_web_group }}"
with_items:
- "{{ alton_app_dir }}"
- "{{ alton_venvs_dir }}"
- name: setup the alton env
template: >
src="alton_env.j2" dest="{{ alton_app_dir }}/alton_env"
owner="{{ alton_user }}" group="{{ common_web_user }}"
mode=0644
notify: restart alton
- include: deploy.yml tags=deploy
#!/bin/bash
source {{ alton_app_dir }}/alton_env
cd {{ alton_code_dir }}
{{ alton_venv_bin }}/python run_alton.py
[program:alton]
command={{ alton_supervisor_wrapper }}
priority=999
user={{ common_web_user }}
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
killasgroup=true
stopasgroup=true
stopsignal=QUIT
# {{ ansible_managed }}
{% for name,value in alton_environment.items() -%}
{%- if value -%}
export {{ name }}="{{ value }}"
{% endif %}
{%- endfor %}
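# Rendered output sketch, assuming the default values above (illustrative):
#   export WILL_USERNAME="1234_1234@chat.hipchat.com"
#   export WILL_ROOMS="Hammer"
#   export WILL_HTTPSERVER_PORT="8081"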
......@@ -56,6 +56,7 @@ aws_debian_pkgs:
aws_pip_pkgs:
- https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz
- awscli
- boto==2.20.1
aws_redhat_pkgs: []
aws_s3cmd_version: s3cmd-1.5.0-beta1
......
......@@ -47,7 +47,7 @@
- db_host: "{{ ORA_MYSQL_HOST }}"
db_name: "{{ ORA_MYSQL_DB_NAME }}"
script_name: ora-rds.sh
when: COMMON_MYSQL_READ_ONLY_PASS
when: COMMON_MYSQL_READ_ONLY_PASS is defined
# These templates rely on there being a global
# read_only mongo user, you must override the default
......@@ -67,4 +67,4 @@
db_name: "{{ FORUM_MONGO_DATABASE }}"
db_port: "{{ FORUM_MONGO_PORT }}"
script_name: forum-mongo.sh
when: COMMON_MONGO_READ_ONLY_PASS
when: COMMON_MONGO_READ_ONLY_PASS is defined
......@@ -60,7 +60,9 @@
notify: restart certs
- name: install python requirements
pip: requirements="{{ certs_requirements_file }}" virtualenv="{{ certs_venv_dir }}" state=present
pip: >
requirements="{{ certs_requirements_file }}" virtualenv="{{ certs_venv_dir }}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ certs_user }}"
notify: restart certs
......
......@@ -18,6 +18,7 @@ COMMON_CFG_DIR: "{{ COMMON_BASE_DIR }}/etc"
COMMON_ENVIRONMENT: 'default_env'
COMMON_DEPLOYMENT: 'default_deployment'
COMMON_PYPI_MIRROR_URL: 'https://pypi.python.org/simple'
COMMON_NPM_MIRROR_URL: 'http://registry.npmjs.org'
# do not include http/https
COMMON_GIT_MIRROR: 'github.com'
# override this var to set a different hostname
......@@ -46,7 +47,11 @@ COMMON_MYSQL_MIGRATE_PASS: !!null
COMMON_MONGO_READ_ONLY_USER: 'read_only'
COMMON_MONGO_READ_ONLY_PASS: !!null
COMMON_ENABLE_DATADOG: False
COMMON_DATADOG_API_KEY: "PUT_YOUR_API_KEY_HERE"
COMMON_ENABLE_SPLUNKFORWARDER: False
COMMON_ENABLE_NEWRELIC: False
COMMON_NEWRELIC_LICENSE: "YOUR_NEWRELIC_LICENSE"
common_debian_pkgs:
- ntp
- ack-grep
......@@ -67,6 +72,7 @@ common_debian_pkgs:
common_pip_pkgs:
- pip==1.5.4
- setuptools==3.6
- virtualenv==1.11.4
- virtualenvwrapper
......
......@@ -109,7 +109,7 @@
path={{ item }}
with_items:
- "/etc/update-motd.d/10-help-text"
- "/usr/share/landscape/50-landscape-sysinfo"
- "/usr/share/landscape/landscape-sysinfo.wrapper"
- "/etc/update-motd.d/51-cloudguest"
- "/etc/update-motd.d/91-release-upgrade"
......
---
DATADOG_API_KEY: "PUT_YOUR_API_KEY_HERE"
datadog_apt_key: "http://keyserver.ubuntu.com/pks/lookup?op=get&search=0x226AE980C7A7DA52"
datadog_debian_pkgs:
......
......@@ -45,7 +45,7 @@
lineinfile: >
dest="/etc/dd-agent/datadog.conf"
regexp="^api_key:.*"
line="api_key:{{ DATADOG_API_KEY }}"
line="api_key:{{ COMMON_DATADOG_API_KEY }}"
notify:
- restart the datadog service
tags:
......
......@@ -88,6 +88,8 @@ EDXAPP_BOOK_URL: ''
# if xqueue is run on the same server
# as the lms (it's sent in the request)
EDXAPP_SITE_NAME: 'localhost'
EDXAPP_LMS_SITE_NAME: "{{ EDXAPP_SITE_NAME }}"
EDXAPP_CMS_SITE_NAME: 'localhost'
EDXAPP_MEDIA_URL: ''
EDXAPP_ANALYTICS_SERVER_URL: ''
EDXAPP_FEEDBACK_SUBMISSION_EMAIL: ''
......@@ -106,6 +108,7 @@ EDXAPP_CMS_NGINX_PORT: 18010
EDXAPP_CMS_SSL_NGINX_PORT: 48010
EDXAPP_LANG: 'en_US.UTF-8'
EDXAPP_LANGUAGE_CODE: 'en'
EDXAPP_TIME_ZONE: 'America/New_York'
EDXAPP_TECH_SUPPORT_EMAIL: 'technical@example.com'
......@@ -146,7 +149,7 @@ EDXAPP_PAID_COURSE_REGISTRATION_CURRENCY: ['usd', '$']
EDXAPP_NO_PREREQ_INSTALL: 1
# whether to setup the python codejail or not
EDXAPP_PYTHON_SANDBOX: false
EDXAPP_PYTHON_SANDBOX: true
# this next setting, if true, turns on actual sandbox enforcement. If not true,
# it puts the sandbox in 'complain' mode, for reporting but not enforcement
EDXAPP_SANDBOX_ENFORCE: true
......@@ -184,6 +187,33 @@ EDXAPP_UPDATE_STATIC_FILES_KEY: false
# set to true
EDXAPP_INSTALL_PRIVATE_REQUIREMENTS: false
EDXAPP_GOOGLE_ANALYTICS_ACCOUNT: "UA-DUMMY"
EDXAPP_PEARSON_TEST_PASSWORD: ""
EDXAPP_SEGMENT_IO_LMS_KEY: ""
EDXAPP_EDX_API_KEY: ""
# This is the default set in common.py
EDXAPP_VERIFY_STUDENT:
DAYS_GOOD_FOR: 365
EDXAPP_GOOGLE_ANALYTICS_LINKEDIN: ""
EDXAPP_CONTENTSTORE_ADDITIONAL_OPTS: {}
EDXAPP_BULK_EMAIL_EMAILS_PER_TASK: 500
# If using microsites this should point to the microsite repo
EDXAPP_MICROSITE_ROOT_DIR: "{{ edxapp_app_dir }}/edx-microsite"
# this dictionary defines what microsites are configured
EDXAPP_MICROSITE_CONFIGURATION: {}
# Instructor code that will not be run in the code sandbox
EDXAPP_COURSES_WITH_UNSAFE_CODE: []
EDXAPP_SESSION_COOKIE_DOMAIN: ""
# XML Course related flags
EDXAPP_XML_FROM_GIT: false
EDXAPP_XML_S3_BUCKET: !!null
EDXAPP_XML_S3_KEY: !!null
EDXAPP_NEWRELIC_LMS_APPNAME: "edX-LMS"
EDXAPP_NEWRELIC_CMS_APPNAME: "edX-CMS"
#-------- Everything below this line is internal to the role ------------
# Use YAML references (& and *) and hash merge <<: to factor out shared settings
......@@ -204,6 +234,7 @@ edxapp_gem_bin: "{{ edxapp_gem_root }}/bin"
edxapp_user: edxapp
edxapp_deploy_path: "{{ edxapp_venv_bin }}:{{ edxapp_code_dir }}/bin:{{ edxapp_rbenv_bin }}:{{ edxapp_rbenv_shims }}:{{ edxapp_gem_bin }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
edxapp_staticfile_dir: "{{ edxapp_data_dir }}/staticfiles"
edxapp_course_static_dir: "{{ edxapp_data_dir }}/course_static"
edxapp_course_data_dir: "{{ edxapp_data_dir }}/data"
edxapp_upload_dir: "{{ edxapp_data_dir }}/uploads"
edxapp_theme_dir: "{{ edxapp_data_dir }}/themes"
......@@ -246,6 +277,7 @@ edxapp_chksum_req_files:
- "{{ pre_requirements_file }}"
- "{{ post_requirements_file }}"
- "{{ base_requirements_file }}"
- "{{ custom_requirements_file }}"
- "{{ paver_requirements_file }}"
- "{{ sandbox_post_requirements }}"
- "{{ sandbox_base_requirements }}"
......@@ -256,7 +288,6 @@ edxapp_all_req_files:
- "{{ post_requirements_file }}"
- "{{ base_requirements_file }}"
- "{{ paver_requirements_file }}"
- "{{ repo_requirements_file }}"
- "{{ github_requirements_file }}"
- "{{ sandbox_post_requirements }}"
- "{{ sandbox_local_requirements }}"
......@@ -303,6 +334,7 @@ edxapp_generic_auth_config: &edxapp_generic_auth
password: $EDXAPP_MONGO_PASSWORD
port: $EDXAPP_MONGO_PORT
user: $EDXAPP_MONGO_USER
ADDITIONAL_OPTIONS: $EDXAPP_CONTENTSTORE_ADDITIONAL_OPTS
DOC_STORE_CONFIG: *edxapp_generic_default_docstore
MODULESTORE:
default: &edxapp_generic_default_modulestore
......@@ -343,8 +375,13 @@ edxapp_generic_auth_config: &edxapp_generic_auth
ZENDESK_API_KEY: $EDXAPP_ZENDESK_API_KEY
CELERY_BROKER_USER: $EDXAPP_CELERY_USER
CELERY_BROKER_PASSWORD: $EDXAPP_CELERY_PASSWORD
GOOGLE_ANALYTICS_ACCOUNT: $EDXAPP_GOOGLE_ANALYTICS_ACCOUNT
generic_env_config: &edxapp_generic_env
COURSES_WITH_UNSAFE_CODE: $EDXAPP_COURSES_WITH_UNSAFE_CODE
BULK_EMAIL_EMAILS_PER_TASK: $EDXAPP_BULK_EMAIL_EMAILS_PER_TASK
MICROSITE_ROOT_DIR: $EDXAPP_MICROSITE_ROOT_DIR
MICROSITE_CONFIGURATION: $EDXAPP_MICROSITE_CONFIGURATION
GRADES_DOWNLOAD:
STORAGE_TYPE: $EDXAPP_GRADE_STORAGE_TYPE
BUCKET: $EDXAPP_GRADE_BUCKET
......@@ -362,12 +399,12 @@ generic_env_config: &edxapp_generic_env
FEATURES: $EDXAPP_FEATURES
WIKI_ENABLED: true
SYSLOG_SERVER: $EDXAPP_SYSLOG_SERVER
SITE_NAME: $EDXAPP_SITE_NAME
LOG_DIR: "{{ COMMON_DATA_DIR }}/logs/edx"
MEDIA_URL: $EDXAPP_MEDIA_URL
ANALYTICS_SERVER_URL: $EDXAPP_ANALYTICS_SERVER_URL
FEEDBACK_SUBMISSION_EMAIL: $EDXAPP_FEEDBACK_SUBMISSION_EMAIL
TIME_ZONE: $EDXAPP_TIME_ZONE
LANGUAGE_CODE: $EDXAPP_LANGUAGE_CODE
MKTG_URL_LINK_MAP: $EDXAPP_MKTG_URL_LINK_MAP
MKTG_URLS: $EDXAPP_MKTG_URLS
# repo root for courses
......@@ -384,27 +421,25 @@ generic_env_config: &edxapp_generic_env
mongo_metadata_inheritance:
<<: *default_generic_cache
KEY_PREFIX: 'integration_mongo_metadata_inheritance'
TIMEOUT: 300
staticfiles:
<<: *default_generic_cache
KEY_PREFIX: 'integration_static_files'
celery:
<<: *default_generic_cache
KEY_PREFIX: 'integration_celery'
TIMEOUT: "7200"
CELERY_BROKER_TRANSPORT: 'amqp'
CELERY_BROKER_HOSTNAME: $EDXAPP_RABBIT_HOSTNAME
COMMENTS_SERVICE_URL: $EDXAPP_COMMENTS_SERVICE_URL
LOGGING_ENV: $EDXAPP_LOGGING_ENV
SESSION_COOKIE_DOMAIN: !!null
SESSION_COOKIE_DOMAIN: $EDXAPP_SESSION_COOKIE_DOMAIN
COMMENTS_SERVICE_KEY: $EDXAPP_COMMENTS_SERVICE_KEY
SEGMENT_IO_LMS: true
THEME_NAME: $edxapp_theme_name
TECH_SUPPORT_EMAIL: $EDXAPP_TECH_SUPPORT_EMAIL
CONTACT_EMAIL: $EDXAPP_CONTACT_EMAIL
BUGS_EMAIL: $EDXAPP_BUGS_EMAIL
CODE_JAIL:
limits:
VMEM: 0
REALTIME: 3
DEFAULT_FROM_EMAIL: $EDXAPP_DEFAULT_FROM_EMAIL
DEFAULT_FEEDBACK_EMAIL: $EDXAPP_DEFAULT_FEEDBACK_EMAIL
SERVER_EMAIL: $EDXAPP_DEFAULT_SERVER_EMAIL
......@@ -412,12 +447,19 @@ generic_env_config: &edxapp_generic_env
CAS_SERVER_URL: $EDXAPP_CAS_SERVER_URL
CAS_EXTRA_LOGIN_PARAMS: $EDXAPP_CAS_EXTRA_LOGIN_PARAMS
CAS_ATTRIBUTE_CALLBACK: $EDXAPP_CAS_ATTRIBUTE_CALLBACK
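# Maps a hostname regex to the modulestore used to serve it; with the
# default mapping below, requests arriving via a "preview." hostname are
# served from the draft store.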
HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS:
'preview\.': 'draft'
lms_auth_config:
<<: *edxapp_generic_auth
PEARSON_TEST_PASSWORD: $EDXAPP_PEARSON_TEST_PASSWORD
SEGMENT_IO_LMS_KEY: $EDXAPP_SEGMENT_IO_LMS_KEY
EDX_API_KEY: $EDXAPP_EDX_API_KEY
VERIFY_STUDENT: $EDXAPP_VERIFY_STUDENT
GOOGLE_ANALYTICS_LINKEDIN: $EDXAPP_GOOGLE_ANALYTICS_LINKEDIN
CC_PROCESSOR: $EDXAPP_CC_PROCESSOR
MODULESTORE:
default:
default: &lms_default_modulestore
ENGINE: 'xmodule.modulestore.mixed.MixedModuleStore'
OPTIONS:
mappings: $EDXAPP_XML_MAPPINGS
......@@ -440,30 +482,31 @@ lms_auth_config:
fs_root: $edxapp_course_data_dir
ENGINE: 'xmodule.modulestore.mongo.MongoModuleStore'
DOC_STORE_CONFIG: *edxapp_generic_default_docstore
draft:
<<: *edxapp_generic_default_modulestore
ENGINE: 'xmodule.modulestore.mongo.DraftMongoModuleStore'
lms_env_config:
<<: *edxapp_generic_env
PAID_COURSE_REGISTRATION_CURRENCY: $EDXAPP_PAID_COURSE_REGISTRATION_CURRENCY
'CODE_JAIL':
SITE_NAME: $EDXAPP_LMS_SITE_NAME
CODE_JAIL:
# from https://github.com/edx/codejail/blob/master/codejail/django_integration.py#L24, '' should be same as None
'python_bin': '{% if EDXAPP_PYTHON_SANDBOX %}{{ edxapp_sandbox_venv_dir }}/bin/python{% endif %}'
'limits':
'VMEM': 0
'REALTIME': 5
'user': '{{ edxapp_sandbox_user }}'
python_bin: '{% if EDXAPP_PYTHON_SANDBOX %}{{ edxapp_sandbox_venv_dir }}/bin/python{% endif %}'
limits:
# Limit the memory of the jailed process to something high but not
# infinite (128MiB in bytes)
VMEM: 134217728
# Time in seconds that the jailed process has to run.
REALTIME: 1
# Needs to be non-zero so that jailed code can use it as its temp directory. (1 MiB in bytes)
FSIZE: 1048576
user: '{{ edxapp_sandbox_user }}'
cms_auth_config:
<<: *edxapp_generic_auth
cms_env_config:
<<: *edxapp_generic_env
lms_preview_auth_config:
<<: *edxapp_generic_auth
MODULESTORE:
default: *edxapp_generic_default_modulestore
lms_preview_env_config:
<<: *edxapp_generic_env
SITE_NAME: $EDXAPP_CMS_SITE_NAME
# install dir for the edx-platform repo
edxapp_code_dir: "{{ edxapp_app_dir }}/edx-platform"
......@@ -474,9 +517,6 @@ edxapp_cms_gunicorn_port: 8010
edxapp_cms_gunicorn_host: 127.0.0.1
edxapp_lms_gunicorn_port: 8000
edxapp_lms_gunicorn_host: 127.0.0.1
edxapp_lms_preview_gunicorn_port: 8020
edxapp_lms_preview_gunicorn_host: 127.0.0.1
# These vars are for creating the application json config
# files. There are two for each service that uses the
......@@ -494,7 +534,6 @@ edxapp_cms_env: 'cms.envs.aws'
# Number of gunicorn worker processes to spawn, as a multiplier on the number of virtual cores
worker_core_mult:
lms: 4
lms_preview: 2
cms: 2
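# Worked example (illustrative): on a host with 4 virtual cores and the
# multipliers above, gunicorn runs 4 * 4 = 16 LMS workers and
# 4 * 2 = 8 CMS workers.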
# Theming
......@@ -506,7 +545,7 @@ worker_core_mult:
edxapp_use_custom_theme: false
edxapp_theme_name: ""
edxapp_theme_source_repo: 'https://{{ COMMON_GIT_MIRROR }}/Stanford-Online/edx-theme.git'
edxapp_theme_version: 'HEAD'
edxapp_theme_version: 'master'
# make this the public URL instead of writable
edx_platform_repo: "https://{{ COMMON_GIT_MIRROR }}/edx/edx-platform.git"
......@@ -517,9 +556,9 @@ local_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/local.txt"
pre_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/pre.txt"
post_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/post.txt"
base_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/base.txt"
custom_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/custom.txt"
paver_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/paver.txt"
github_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/github.txt"
repo_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/repo.txt"
private_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/edx-private.txt"
sandbox_base_requirements: "{{ edxapp_code_dir }}/requirements/edx-sandbox/base.txt"
......
......@@ -6,6 +6,7 @@ dependencies:
rbenv_dir: "{{ edxapp_app_dir }}"
rbenv_ruby_version: "{{ edxapp_ruby_version }}"
- devpi
- nltk
- role: user
user_info:
- name: "{{ EDXAPP_AUTOMATOR_NAME }}"
......
......@@ -108,7 +108,7 @@
# Set the npm registry
- name: Set the npm registry
shell:
npm config set registry 'http://registry.npmjs.org'
npm config set registry '{{ COMMON_NPM_MIRROR_URL }}'
creates="{{ edxapp_app_dir }}/.npmrc"
sudo_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}"
......@@ -192,7 +192,6 @@
{{ edxapp_venv_dir }}/bin/pip install -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w --use-mirrors -r {{ item }}
chdir={{ edxapp_code_dir }}
with_items:
- "{{ repo_requirements_file }}"
- "{{ github_requirements_file }}"
- "{{ local_requirements_file }}"
sudo_user: "{{ edxapp_user }}"
......@@ -219,7 +218,6 @@
- "restart edxapp"
- "restart edxapp_workers"
# If using CAS and you have a function for mapping attributes, install
# the module here. The next few tasks set up the python code sandbox
- name: install CAS attribute module
......@@ -250,6 +248,33 @@
- "restart edxapp"
- "restart edxapp_workers"
# The next few tasks install xml courses.
# Install the xml courses from an s3 bucket
- name: get s3 one time url
s3: >
bucket="{{ EDXAPP_XML_S3_BUCKET }}"
object="{{ EDXAPP_XML_S3_KEY }}"
mode="geturl"
expiration=300
when: not EDXAPP_XML_FROM_GIT and EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
register: s3_one_time_url
- name: download from one time url
get_url: url="{{ s3_one_time_url.url }}" dest="/tmp/{{ EDXAPP_XML_S3_KEY|basename }}"
when: not EDXAPP_XML_FROM_GIT and EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
register: download_xml_s3
- name: unzip the data to the data dir
shell: >
tar xzf /tmp/{{ EDXAPP_XML_S3_KEY|basename }}
chdir="{{ edxapp_data_dir }}"
when: download_xml_s3.changed
- include: xml.yml
tags: deploy
when: EDXAPP_XML_FROM_GIT
# The next few tasks set up the python code sandbox
# need to disable this profile, otherwise the pip inside the sandbox venv has no permissions
......@@ -334,6 +359,9 @@
# service variants configured, runs
# gather_assets and db migrations
- include: service_variant_config.yml
tags:
- service_variant_config
- deploy
# call supervisorctl update. this reloads
# the supervisorctl config and restarts
......
......@@ -25,6 +25,7 @@
- "{{ edxapp_venvs_dir }}"
- "{{ edxapp_theme_dir }}"
- "{{ edxapp_staticfile_dir }}"
- "{{ edxapp_course_static_dir }}"
- name: create edxapp log dir
file: >
......
- name: clone the xml course repo
git: >
repo="{{ item.repo_url }}"
dest="{{ edxapp_course_data_dir }}/{{ item.repo_name }}"
version="{{ item.version }}"
accept_hostkey=True
sudo_user: "{{ edxapp_user }}"
environment:
GIT_SSH: "{{ edxapp_git_ssh }}"
with_items: EDXAPP_XML_COURSES
- name: update course.xml
template: >
src=course.xml.j2
dest="{{ edxapp_course_data_dir }}/{{ item.repo_name }}/course.xml"
sudo_user: "{{ edxapp_user }}"
with_items: EDXAPP_XML_COURSES
- name: make symlinks for the static data
shell: >
executable=/bin/bash
if [[ -d {{ edxapp_course_data_dir }}/{{ item.repo_name }}/static ]]; then
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }}/static {{ edxapp_course_static_dir }}/{{ item.repo_name }}
else
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }} {{ edxapp_course_static_dir }}/{{ item.repo_name }}
fi
with_items: EDXAPP_XML_COURSES
when: item.disposition == "on disk" or item.disposition == "no static import"
- name: make symlinks so code works
file: >
src="{{ edxapp_course_data_dir }}/{{ item.repo_name }}"
dest="{{ edxapp_course_data_dir }}/{{ item.course }}"
state=link
with_items: EDXAPP_XML_COURSES
when: item.disposition == "on disk" or item.disposition == "no static import"
- name: import courses with nostatic flag
shell: >
{{ edxapp_venv_bin }}/python manage.py cms --settings=aws import --nostatic {{ edxapp_course_data_dir }} {{ item.repo_name }}
chdir="{{ edxapp_code_dir }}"
sudo_user: "{{ edxapp_user }}"
with_items: EDXAPP_XML_COURSES
when: item.disposition == "no static import"
- name: import courses including static data
shell: >
{{ edxapp_venv_bin }}/python manage.py cms --settings=aws import {{ edxapp_course_data_dir }} {{ item.repo_name }}
chdir="{{ edxapp_code_dir }}"
sudo_user: "{{ edxapp_user }}"
with_items: EDXAPP_XML_COURSES
when: item.disposition == "import"
- name: delete courses that were fully imported
file: path="{{ edxapp_course_data_dir }}/{{ item.repo_name }}" state=absent
with_items: EDXAPP_XML_COURSES
when: item.disposition == "import"
- name: delete .git repos
file: path="{{ edxapp_course_data_dir }}/{{ item.repo_name }}/.git" state=absent
with_items: EDXAPP_XML_COURSES
when: item.disposition == "on disk" or item.disposition == "no static import"
- name: create an archive of course data and course static dirs
shell: tar czf /tmp/static_course_content.tar.gz -C {{ edxapp_data_dir }} {{ edxapp_course_data_dir|basename }} {{ edxapp_course_static_dir|basename }}
- name: upload archive to s3
s3: >
bucket="{{ EDXAPP_XML_S3_BUCKET }}"
object="{{ EDXAPP_XML_S3_KEY }}"
mode=put
overwrite=True
src="/tmp/static_course_content.tar.gz"
when: EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
- name: remove archive from disk
file: path="/tmp/static_course_content.tar.gz" state=absent
when: EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
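# A hedged sketch of the EDXAPP_XML_COURSES entries the tasks above iterate
# over; the field names are taken from those tasks, but the values are
# illustrative only:
#
#   EDXAPP_XML_COURSES:
#     - org: "ExampleX"
#       course: "Demo101"
#       run: "2014_Spring"
#       repo_url: "git@github.com:example/demo101-xml.git"
#       repo_name: "demo101-xml"
#       version: "master"
#       # one of "on disk", "no static import", or "import"
#       disposition: "on disk"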
{{ edxapp_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:{{ edxapp_sandbox_venv_dir }}/bin/python
{{ edxapp_user }} ALL=(ALL) NOPASSWD:/bin/kill
{{ edxapp_user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill
{{ common_web_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:{{ edxapp_sandbox_venv_dir }}/bin/python
{{ common_web_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:/bin/rm /tmp/codejail-*/tmp
{{ common_web_user }} ALL=(ALL) NOPASSWD:/bin/kill
{{ common_web_user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill
[program:cms]
{% if COMMON_ENABLE_NEWRELIC %}
{% set executable = edxapp_venv_dir + '/bin/newrelic-admin run-program ' + edxapp_venv_dir + '/bin/gunicorn' %}
{% else %}
{% set executable = edxapp_venv_dir + '/bin/gunicorn' %}
{% endif %}
{% if ansible_processor|length > 0 %}
command={{ edxapp_venv_dir }}/bin/gunicorn --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
command={{ executable }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
{% else %}
command={{ edxapp_venv_dir }}/bin/gunicorn --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
command={{ executable }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
{% endif %}
user={{ common_web_user }}
directory={{ edxapp_code_dir }}
environment=PORT={{edxapp_cms_gunicorn_port}},ADDRESS={{edxapp_cms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_cms_env }},SERVICE_VARIANT="cms"
environment={% if COMMON_ENABLE_NEWRELIC %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_CMS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ COMMON_NEWRELIC_LICENSE }},{% endif -%}PORT={{edxapp_cms_gunicorn_port}},ADDRESS={{edxapp_cms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_cms_env }},SERVICE_VARIANT="cms"
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
killasgroup=true
......
#include <tunables/global>
{{ edxapp_sandbox_venv_dir }}/bin/python flags=(complain) {
{{ edxapp_sandbox_venv_dir }}/bin/python {
#include <abstractions/base>
{{ edxapp_sandbox_venv_dir }}/** mr,
{{ edxapp_code_dir }}/common/lib/sandbox-packages/** r,
/tmp/codejail-*/ rix,
/tmp/codejail-*/** rix,
/tmp/codejail-*/** wrix,
#
# Whitelist particular shared objects from the system
......@@ -19,6 +19,7 @@
/usr/lib/python2.7/lib-dynload/_csv.so mr,
/usr/lib/python2.7/lib-dynload/datetime.so mr,
/usr/lib/python2.7/lib-dynload/_elementtree.so mr,
/usr/lib/python2.7/lib-dynload/pyexpat.so mr,
#
# Allow access to selections from /proc
......
<course org="{{ item.org }}" course="{{ item.course }}" url_name="{{ item.run }}" />
{% do lms_auth_config.update(EDXAPP_AUTH_EXTRA) %}
{{ lms_preview_auth_config | to_nice_json }}
[program:lms-preview]
{% if ansible_processor|length > 0 %}
command={{ edxapp_venv_dir }}/bin/gunicorn --preload -b {{ edxapp_lms_preview_gunicorn_host }}:{{ edxapp_lms_preview_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.lms_preview }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% else %}
command={{ edxapp_venv_dir }}/bin/gunicorn --preload -b {{ edxapp_lms_preview_gunicorn_host }}:{{ edxapp_lms_preview_gunicorn_port }} -w {{ worker_core_mult.lms_preview }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% endif %}
user={{ common_web_user }}
directory={{ edxapp_code_dir }}
environment=PORT={{edxapp_lms_gunicorn_port}},ADDRESS={{edxapp_lms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_lms_env }},SERVICE_VARIANT="lms-preview"
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
killasgroup=true
stopasgroup=true
{% do lms_preview_env_config.update(EDXAPP_ENV_EXTRA) %}
{% if EDXAPP_UPDATE_STATIC_FILES_KEY %}
{%- do lms_preview_env_config['CACHES']['staticfiles'].update({'KEY_PREFIX': edxapp_dynamic_cache_key}) %}
{% endif %}
{{ lms_preview_env_config | to_nice_json }}
[program:lms]
{% if COMMON_ENABLE_NEWRELIC %}
{% set executable = edxapp_venv_dir + '/bin/newrelic-admin run-program ' + edxapp_venv_dir + '/bin/gunicorn' %}
{% else %}
{% set executable = edxapp_venv_dir + '/bin/gunicorn' %}
{% endif %}
{% if ansible_processor|length > 0 %}
command={{ edxapp_venv_dir }}/bin/gunicorn --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
command={{ executable }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% else %}
command={{ edxapp_venv_dir }}/bin/gunicorn --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
command={{ executable }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% endif %}
user={{ common_web_user }}
directory={{ edxapp_code_dir }}
environment=PORT={{edxapp_lms_gunicorn_port}},ADDRESS={{edxapp_lms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_lms_env }},SERVICE_VARIANT="lms"
environment={% if COMMON_ENABLE_NEWRELIC %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_LMS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ COMMON_NEWRELIC_LICENSE }},{% endif -%} PORT={{edxapp_lms_gunicorn_port}},ADDRESS={{edxapp_lms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_lms_env }},SERVICE_VARIANT="lms"
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
killasgroup=true
......
---
# By default, point to the RabbitMQ broker running locally
FLOWER_BROKER_USERNAME: "celery"
FLOWER_BROKER_PASSWORD: "celery"
FLOWER_BROKER_HOST: "127.0.0.1"
FLOWER_BROKER_PORT: 5672
FLOWER_ADDRESS: "0.0.0.0"
FLOWER_PORT: "5555"
flower_user: "flower"
flower_app_dir: "{{ COMMON_APP_DIR }}/flower"
flower_data_dir: "{{ COMMON_DATA_DIR }}/flower"
flower_log_dir: "{{ COMMON_LOG_DIR }}/flower"
flower_venv_dir: "{{ flower_app_dir }}/venvs/flower"
flower_venv_bin: "{{ flower_venv_dir }}/bin"
flower_python_reqs:
- "flower==0.7.0"
flower_deploy_path: "{{ flower_venv_bin }}:/usr/local/sbin:/usr/local/bin:/usr/bin:/sbin:/bin"
flower_broker: "amqp://{{ FLOWER_BROKER_USERNAME }}:{{ FLOWER_BROKER_PASSWORD }}@{{ FLOWER_BROKER_HOST }}:{{ FLOWER_BROKER_PORT }}"
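# With the defaults above, flower_broker renders to (illustrative):
#   amqp://celery:celery@127.0.0.1:5672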
flower_environment:
PATH: $flower_deploy_path
---
- name: restart flower
supervisorctl_local: >
state=restarted
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
name="flower"
sudo_user: "{{supervisor_service_user }}"
---
dependencies:
- supervisor
- devpi
---
- name: create application user
user: >
name="{{ flower_user }}" home="{{ flower_app_dir }}"
createhome=no shell=/bin/false
notify:
- "restart flower"
- name: create flower user dirs
file: >
path="{{ item }}" state=directory
owner="{{ flower_user }}" group="{{ common_web_group }}"
notify:
- "restart flower"
with_items:
- "{{ flower_app_dir }}"
- "{{ flower_data_dir }}"
- "{{ flower_venv_dir }}"
- "{{ flower_log_dir }}"
- name: create flower environment script
template: >
src=flower_env.j2 dest={{ flower_app_dir }}/flower_env
owner={{ flower_user }} group={{ common_web_group }}
mode=0644
notify:
- "restart flower"
- name: create virtualenv and install Python requirements
pip: >
name="{{ item }}"
virtualenv="{{ flower_venv_dir }}"
state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ flower_user }}"
environment: "{{ flower_environment }}"
with_items: "flower_python_reqs"
notify:
- "restart flower"
- name: create supervisor configuration
template: >
src=flower.conf.j2 dest={{ supervisor_available_dir }}/flower.conf
owner={{ supervisor_user }}
group={{ supervisor_user }}
sudo_user: "{{ supervisor_user }}"
notify:
- "restart flower"
- name: enable supervisor configuration
file: >
src={{ supervisor_available_dir }}/flower.conf
dest={{ supervisor_cfg_dir }}/flower.conf
state=link
force=yes
sudo_user: "{{ supervisor_user }}"
notify:
- "restart flower"
- name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
sudo_user: "{{ supervisor_service_user }}"
changed_when: supervisor_update.stdout != ""
notify:
- "restart flower"
[program:flower]
environment=PATH="{{ flower_deploy_path }}"
user={{ common_web_user }}
command={{ flower_venv_bin }}/celery flower --broker {{ flower_broker }} --address={{ FLOWER_ADDRESS }} --port={{ FLOWER_PORT }}
stdout_logfile={{ supervisor_log_dir }}/flower-stdout.log
stderr_logfile={{ supervisor_log_dir }}/flower-stderr.log
# {{ ansible_managed }}
{% for name,value in flower_environment.items() %}
{%- if value %}
export {{ name }}="{{ value }}"
{%- endif %}
{% endfor %}
......@@ -26,8 +26,12 @@ FORUM_API_KEY: "password"
FORUM_ELASTICSEARCH_HOST: "localhost"
FORUM_ELASTICSEARCH_PORT: "9200"
FORUM_ELASTICSEARCH_URL: "http://{{ FORUM_ELASTICSEARCH_HOST }}:{{ FORUM_ELASTICSEARCH_PORT }}"
# This needs to be a string; set it to 'false' to disable
FORUM_NEW_RELIC_ENABLE: 'true'
FORUM_NEW_RELIC_LICENSE_KEY: "new-relic-license-key"
FORUM_NEW_RELIC_APP_NAME: "forum-newrelic-app"
FORUM_WORKER_PROCESSES: "4"
FORUM_LISTEN_HOST: "0.0.0.0"
FORUM_LISTEN_PORT: "4567"
......@@ -44,6 +48,7 @@ forum_environment:
SEARCH_SERVER: "{{ FORUM_ELASTICSEARCH_URL }}"
MONGOHQ_URL: "{{ FORUM_MONGO_URL }}"
HOME: "{{ forum_app_dir }}"
NEW_RELIC_ENABLE: "{{ FORUM_NEW_RELIC_ENABLE }}"
NEW_RELIC_APP_NAME: "{{ FORUM_NEW_RELIC_APP_NAME }}"
NEW_RELIC_LICENSE_KEY: "{{ FORUM_NEW_RELIC_LICENSE_KEY }}"
WORKER_PROCESSES: "{{ FORUM_WORKER_PROCESSES }}"
......
......@@ -133,5 +133,9 @@
mode=0440 validate='visudo -cf %s'
- name: install global gem dependencies
gem: name={{ item.name }} state=present version={{ item.version }}
gem: >
name={{ item.name }}
state=present
version={{ item.version }}
user_install=no
with_items: jenkins_admin_gem_pkgs
......@@ -44,6 +44,7 @@ jenkins_plugins:
- { name: "violations", version: "0.7.11" }
- { name: "multiple-scms", version: "0.2" }
- { name: "timestamper", version: "1.5.7" }
- { name: "thinBackup", version: "1.7.4"}
jenkins_bundled_plugins:
- "credentials"
......
......@@ -126,4 +126,4 @@ jenkins_wheels:
- { pkg: "psutil==1.2.1", wheel: "psutil-1.2.1-cp27-none-linux_x86_64.whl" }
- { pkg: "lazy==1.1", wheel: "lazy-1.1-py27-none-any.whl" }
- { pkg: "path.py==3.0.1", wheel: "path.py-3.0.1-py27-none-any.whl" }
- { pkg: "MySQL-python==1.2.4", wheel: "MySQL_python-1.2.4-cp27-none-linux_x86_64.whl" }
- { pkg: "MySQL-python==1.2.5", wheel: "MySQL_python-1.2.5-cp27-none-linux_x86_64.whl" }
......@@ -10,4 +10,5 @@
- include: system.yml
- include: python.yml
- include: ruby.yml
- include: jscover.yml
......@@ -44,3 +44,27 @@
template:
src=wheel_venv.sh.j2 dest={{ jenkins_home }}/wheel_venv.sh
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
# Run the wheel_venv.sh script for the first time
# This was previously done in the Jenkins global
# configuration as part of the AMI Init script.
# Moving here so that we can archive a clean snapshot
# of the virtualenv with only the defined packages
# from jenkins_wheels.
- name: Run the wheel_venv.sh script
command: >
./wheel_venv.sh edx-venv
chdir={{ jenkins_home }}
creates={{ jenkins_home }}/edx-venv
sudo_user: "{{ jenkins_user }}"
# Archive the current state of the virtualenv
# as a starting point for new builds.
# The edx-venv directory is deleted and then recreated
# cleanly from the archive by the jenkins build scripts.
- name: Create a clean virtualenv archive
command: >
tar -cpzf edx-venv_clean.tar.gz edx-venv
chdir={{ jenkins_home }}
creates={{ jenkins_home }}/edx-venv_clean.tar.gz
sudo_user: "{{ jenkins_user }}"
---
# Archive the current state of the rbenv
# as a starting point for new builds.
# The edx-rbenv directory is deleted and then recreated
# cleanly from the archive by the jenkins build scripts.
- name: Create a clean rbenv archive
command: >
tar -cpzf edx-rbenv_clean.tar.gz .rbenv
chdir={{ jenkins_home }}
creates={{ jenkins_home }}/edx-rbenv_clean.tar.gz
sudo_user: "{{ jenkins_user }}"
---
KIBANA_SERVER_NAME: "192.168.33.10"
KIBANA_NGINX_PORT: 80
KIBANA_SSL_NGINX_PORT: 443
kibana_app_dir: /edx/app/kibana
kibana_file: kibana-3.0.0.tar.gz
kibana_url: "https://download.elasticsearch.org/kibana/kibana/{{ kibana_file }}"
{
"title": "edX Log Analysis",
"services": {
"query": {
"idQueue": [],
"list": {
"0": {
"query": "@message: WARNING",
"alias": "",
"color": "#EAB839",
"id": 0,
"pin": false,
"type": "lucene",
"enable": true
},
"1": {
"id": 1,
"color": "#7EB26D",
"query": "@message: INFO",
"alias": "",
"pin": false,
"type": "lucene",
"enable": true
},
"2": {
"id": 2,
"color": "#BF1B00",
"query": "@message: ERROR",
"alias": "",
"pin": false,
"type": "lucene",
"enable": true
},
"3": {
"id": 3,
"color": "#F9D9F9",
"query": "*",
"alias": "",
"pin": false,
"type": "lucene",
"enable": true
}
},
"ids": [
0,
1,
2,
3
]
},
"filter": {
"idQueue": [
1,
2,
3
],
"list": {
"0": {
"type": "time",
"field": "@timestamp",
"from": "now-1h",
"to": "now",
"mandate": "must",
"active": true,
"alias": "",
"id": 0
},
"1": {
"type": "querystring",
"query": "*pika*",
"mandate": "mustNot",
"active": true,
"alias": "",
"id": 1
},
"2": {
"type": "querystring",
"query": "*connectionpool*",
"mandate": "mustNot",
"active": true,
"alias": "",
"id": 3
}
},
"ids": [
0,
1,
2
]
}
},
"rows": [
{
"title": "Graph",
"height": "350px",
"editable": true,
"collapse": false,
"collapsable": true,
"panels": [
{
"span": 12,
"editable": true,
"group": [
"default"
],
"type": "histogram",
"mode": "count",
"time_field": "@timestamp",
"value_field": null,
"auto_int": true,
"resolution": 100,
"interval": "30s",
"fill": 3,
"linewidth": 3,
"timezone": "browser",
"spyable": true,
"zoomlinks": true,
"bars": false,
"stack": true,
"points": false,
"lines": true,
"legend": true,
"x-axis": true,
"y-axis": true,
"percentage": false,
"interactive": true,
"queries": {
"mode": "all",
"ids": [
0,
1,
2,
3
]
},
"title": "Events over time",
"intervals": [
"auto",
"1s",
"1m",
"5m",
"10m",
"30m",
"1h",
"3h",
"12h",
"1d",
"1w",
"1M",
"1y"
],
"options": true,
"tooltip": {
"value_type": "cumulative",
"query_as_alias": true
},
"scale": 1,
"y_format": "none",
"grid": {
"max": null,
"min": 0
},
"annotate": {
"enable": false,
"query": "*",
"size": 20,
"field": "_type",
"sort": [
"_score",
"desc"
]
},
"pointradius": 5,
"show_query": true,
"legend_counts": true,
"zerofill": true,
"derivative": false
}
],
"notice": false
},
{
"title": "Charts",
"height": "250px",
"editable": true,
"collapse": false,
"collapsable": true,
"panels": [
{
"span": 4,
"editable": true,
"type": "hits",
"loadingEditor": false,
"query": {
"field": "syslog_severity",
"goal": 100
},
"queries": {
"mode": "all",
"ids": [
0,
1,
2,
3
]
},
"size": 10,
"exclude": [],
"donut": true,
"tilt": true,
"legend": "above",
"labels": true,
"mode": "terms",
"default_field": "DEFAULT",
"spyable": true,
"title": "Log Severity",
"style": {
"font-size": "10pt"
},
"arrangement": "horizontal",
"chart": "pie",
"counter_pos": "above"
},
{
"span": 4,
"editable": true,
"type": "hits",
"loadingEditor": false,
"query": {
"field": "@source_host",
"goal": 100
},
"queries": {
"mode": "all",
"ids": [
0,
1,
2,
3
]
},
"size": 10,
"exclude": [],
"donut": true,
"tilt": true,
"legend": "above",
"labels": true,
"mode": "terms",
"default_field": "DEFAULT",
"spyable": true,
"title": "Logs by Host",
"style": {
"font-size": "10pt"
},
"arrangement": "horizontal",
"chart": "pie",
"counter_pos": "above"
},
{
"span": 4,
"editable": true,
"type": "hits",
"loadingEditor": false,
"style": {
"font-size": "10pt"
},
"arrangement": "horizontal",
"chart": "pie",
"counter_pos": "above",
"donut": true,
"tilt": true,
"labels": true,
"spyable": true,
"queries": {
"mode": "selected",
"ids": [
0,
1,
2
]
},
"title": "Percent by Python Severity"
}
],
"notice": false
},
{
"title": "Trends",
"height": "50px",
"editable": true,
"collapse": false,
"collapsable": true,
"panels": [
{
"span": 4,
"editable": true,
"type": "trends",
"loadingEditor": false,
"queries": {
"mode": "all",
"ids": [
0,
1,
2,
3
]
},
"style": {
"font-size": "14pt"
},
"ago": "1h",
"arrangement": "vertical",
"spyable": true,
"title": "Hourly"
},
{
"span": 4,
"editable": true,
"type": "trends",
"loadingEditor": false,
"queries": {
"mode": "all",
"ids": [
0,
1,
2,
3
]
},
"style": {
"font-size": "14pt"
},
"ago": "1d",
"arrangement": "vertical",
"spyable": true,
"title": "Daily"
},
{
"span": 4,
"editable": true,
"type": "trends",
"loadingEditor": false,
"queries": {
"mode": "all",
"ids": [
0,
1,
2,
3
]
},
"style": {
"font-size": "14pt"
},
"ago": "1w",
"arrangement": "vertical",
"spyable": true,
"title": "Weekly"
}
],
"notice": false
},
{
"title": "Error Events",
"height": "550px",
"editable": true,
"collapse": false,
"collapsable": true,
"panels": [
{
"error": false,
"span": 12,
"editable": true,
"type": "table",
"loadingEditor": false,
"status": "Stable",
"queries": {
"mode": "selected",
"ids": [
2
]
},
"size": 100,
"pages": 5,
"offset": 0,
"sort": [
"@timestamp",
"desc"
],
"group": "default",
"style": {
"font-size": "9pt"
},
"overflow": "min-height",
"fields": [
"@timestamp",
"@source_host",
"message"
],
"highlight": [],
"sortable": true,
"header": true,
"paging": true,
"field_list": true,
"all_fields": false,
"trimFactor": 300,
"normTimes": true,
"spyable": true,
"title": "Errors",
"localTime": false,
"timeField": "@timestamp"
}
],
"notice": false
},
{
"title": "Events",
"height": "350px",
"editable": true,
"collapse": false,
"collapsable": true,
"panels": [
{
"title": "All events",
"error": false,
"span": 12,
"editable": true,
"group": [
"default"
],
"type": "table",
"size": 100,
"pages": 5,
"offset": 0,
"sort": [
"@timestamp",
"desc"
],
"style": {
"font-size": "9pt"
},
"overflow": "min-height",
"fields": [
"@source_host",
"message"
],
"highlight": [],
"sortable": true,
"header": true,
"paging": true,
"spyable": true,
"queries": {
"mode": "all",
"ids": [
0,
1,
2,
3
]
},
"field_list": true,
"status": "Stable",
"trimFactor": 300,
"normTimes": true,
"all_fields": false,
"localTime": false,
"timeField": "@timestamp"
}
],
"notice": false
}
],
"editable": true,
"failover": false,
"index": {
"interval": "day",
"pattern": "[logstash-]YYYY.MM.DD",
"default": "NO_TIME_FILTER_OR_INDEX_PATTERN_NOT_MATCHED",
"warm_fields": true
},
"style": "dark",
"panel_hints": true,
"pulldowns": [
{
"type": "query",
"collapse": false,
"notice": false,
"query": "*",
"pinned": true,
"history": [
"*",
"@message: ERROR",
"@message: INFO",
"@message: WARNING",
"@message: WARN",
"*corresponding*",
"@message: INFO OR syslog_severity: info",
"@message: INFO OR @log_severity: info",
"ERROR",
"WARNING"
],
"remember": 10,
"enable": true
},
{
"type": "filtering",
"collapse": true,
"notice": false,
"enable": true
}
],
"nav": [
{
"type": "timepicker",
"collapse": false,
"notice": false,
"status": "Stable",
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
],
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"timefield": "@timestamp",
"now": true,
"filter_id": 0,
"enable": true
}
],
"loader": {
"save_gist": false,
"save_elasticsearch": true,
"save_local": true,
"save_default": true,
"save_temp": true,
"save_temp_ttl_enable": true,
"save_temp_ttl": "30d",
"load_gist": true,
"load_elasticsearch": true,
"load_elasticsearch_size": 20,
"load_local": true,
"hide": false
},
"refresh": "1m"
}
\ No newline at end of file
---
- name: restart nginx
service: name=nginx state=restarted
- name: reload nginx
service: name=nginx state=reloaded
---
dependencies:
- common
- nginx
# requires:
# - oraclejdk
# - elasticsearch
# - nginx
---
- name: Ensure app apt dependencies are installed
apt: pkg={{ item }} state=installed
with_items:
- python-software-properties
- git
- nginx
- name: Ensure {{ kibana_app_dir }} exists
file: path={{ kibana_app_dir }} state=directory owner=root group=root mode=0755
- name: Ensure subdirectories exist
file: path={{ kibana_app_dir }}/{{ item }} owner=root group=root mode=0755 state=directory
with_items:
- htdocs
- share
- name: ensure we have the specified kibana release
get_url: url={{ kibana_url }} dest={{ kibana_app_dir }}/share/{{ kibana_file }}
- name: extract
shell: >
chdir={{ kibana_app_dir }}/share
tar -xzvf {{ kibana_app_dir }}/share/{{ kibana_file }}
creates={{ kibana_app_dir }}/share/{{ kibana_file|replace('.tar.gz','') }}
- name: install
shell: >
chdir={{ kibana_app_dir }}/share/{{ kibana_file|replace('.tar.gz','') }}
cp -R * {{ kibana_app_dir }}/htdocs/
- name: copy config
template: src=config.js.j2 dest={{ kibana_app_dir }}/htdocs/config.js
/**
* This is the app's configuration. If you need to configure
* the default dashboard, please see dashboards/default.
*/
define(['settings'],
function (Settings) {
return new Settings({
/**
* URL to your elasticsearch server. You almost certainly don't
* want 'http://localhost:9200' here, even if Kibana and ES are
* on the same host.
*
* By default this will attempt to reach ES at the same host you have
* elasticsearch installed on. You probably want to set it to the FQDN of your
* elasticsearch host
* @type {String}
*/
//elasticsearch: "http://"+window.location.hostname+":9200",
{% if NGINX_ENABLE_SSL %}
elasticsearch: "https://{{ KIBANA_SERVER_NAME }}/e",
{% else %}
elasticsearch: "http://{{ KIBANA_SERVER_NAME }}/e",
{% endif %}
/**
* The default ES index to use for storing Kibana specific object
* such as stored dashboards
* @type {String}
*/
kibana_index: "kibana-int",
/**
* Panel modules available. Panels will only be loaded when they are defined in the
* dashboard, but this list is used in the "add panel" interface.
* @type {Array}
*/
panel_names: [
'histogram',
'map',
'table',
'filtering',
'timepicker',
'text',
'hits',
'column',
'trends',
'bettermap',
'query',
'terms',
'stats',
'sparklines',
'goal',
]
});
});
......@@ -16,7 +16,7 @@
- name: set forum rbenv and gem permissions
file:
path={{ item }} state=directory mode=770
path={{ item }} state=directory recurse=yes mode=770
with_items:
- "{{ forum_app_dir }}/.gem"
- "{{ forum_app_dir }}/.rbenv"
......
---
LOGSTASH_DAYS_TO_KEEP: 30
LOGSTASH_ROTATE: true
logstash_app_dir: /edx/app/logstash
logstash_log_dir: /edx/var/log/logstash
logstash_data_dir: /edx/var/logstash/file_logs
logstash_syslog_port: 514
logstash_file: logstash-1.3.3-flatjar.jar
logstash_url: "https://download.elasticsearch.org/logstash/logstash/{{ logstash_file }}"
logstash_python_requirements:
- pyes==0.19.0
logstash_scripts_repo: https://github.com/crashdump/logstash-elasticsearch-scripts
logstash_rotate_cron:
hour: 5
minute: 42
logstash_optimize_cron:
hour: 6
minute: 15
{
"template": "logstash-*",
"settings" : {
"number_of_shards" : 1,
"number_of_replicas" : 0,
"index" : {
"query" : { "default_field" : "message" },
"store" : { "compress" : { "stored" : true, "tv": true } }
}
},
"mappings": {
"_default_": {
"_all": { "enabled": false },
"_source": { "compress": true },
"dynamic_templates": [
{
"string_template" : {
"match" : "*",
"mapping": { "type": "string", "index": "not_analyzed" },
"match_mapping_type" : "string"
}
}
],
"properties" : {
"@fields": { "type": "object", "dynamic": true, "path": "full" },
"@message" : { "type" : "string", "index" : "analyzed" },
"@source" : { "type" : "string", "index" : "not_analyzed" },
"@source_host" : { "type" : "string", "index" : "not_analyzed" },
"@source_path" : { "type" : "string", "index" : "not_analyzed" },
"@tags": { "type": "string", "index" : "not_analyzed" },
"@timestamp" : { "type" : "date", "index" : "not_analyzed" },
"@type" : { "type" : "string", "index" : "not_analyzed" }
}
}
}
}
---
- name: restart logstash
service: name=logstash state=restarted
---
dependencies:
- common
- elasticsearch
# requires:
# - oraclejdk
# - elasticsearch
---
- name: Ensure app apt dependencies are installed
apt: pkg={{ item }} state=installed
with_items:
- redis-server
- name: Ensure {{ logstash_app_dir }} exists
file: path={{ logstash_app_dir }} state=directory owner=root group=root mode=0755
- name: Ensure subdirectories exist
file: path={{ logstash_app_dir }}/{{ item }} owner=root group=root mode=0755 state=directory
with_items:
- bin
- etc
- share
- name: ensure logstash config is in place
template: src=logstash.conf.j2 dest={{ logstash_app_dir }}/etc/logstash.conf owner=root group=root mode=0644
notify: restart logstash
- name: ensure logstash upstart job is in place
template: src=logstash.upstart.conf.j2 dest=/etc/init/logstash.conf owner=root group=root mode=0755
- name: ensure logstash has a logging dir at {{ logstash_log_dir }}
file: path={{ logstash_log_dir }} owner=root group=root mode=0755 state=directory
- name: ensure we have the specified logstash release
get_url: url={{ logstash_url }} dest={{ logstash_app_dir }}/share/{{ logstash_file }}
- name: ensure symlink with no version exists at {{ logstash_app_dir }}/share/logstash.jar
  file: src={{ logstash_app_dir }}/share/{{ logstash_file }} dest={{ logstash_app_dir }}/share/logstash.jar state=link
- name: start logstash
action: service name=logstash state=started enabled=yes
- name: Ensure we are running
wait_for: port={{ logstash_syslog_port }} host=localhost timeout=60
- name: Copy logstash es index template
copy: src=template_logstash.json dest=/etc/elasticsearch/template_logstash.json
- name: Enable logstash es index template
shell: chdir=/etc/elasticsearch executable=/bin/bash curl -XPUT 'http://localhost:9200/_template/template_logstash' -d @template_logstash.json
- name: Install python requirements
pip: name={{ item }} state=present
with_items: logstash_python_requirements
- name: Checkout logstash rotation scripts
git: repo={{ logstash_scripts_repo }} dest={{ logstash_app_dir }}/share/logstash-elasticsearch-scripts
when: LOGSTASH_ROTATE|bool
- name: Setup cron to run rotation
cron: >
user=root
name="Elasticsearch logstash index rotation"
hour={{ logstash_rotate_cron.hour }}
minute={{ logstash_rotate_cron.minute }}
job="/usr/bin/python {{ logstash_app_dir }}/share/logstash-elasticsearch-scripts/logstash_index_cleaner.py -d {{ LOGSTASH_DAYS_TO_KEEP }} > {{ logstash_log_dir }}/rotation_cron"
when: LOGSTASH_ROTATE|bool
- name: Setup cron to run optimization
cron: >
user=root
name="Elasticsearch logstash index optimization"
hour={{ logstash_optimize_cron.hour }}
minute={{ logstash_optimize_cron.minute }}
job="/usr/bin/python {{ logstash_app_dir }}/share/logstash-elasticsearch-scripts/logstash_index_optimize.py -d {{ LOGSTASH_DAYS_TO_KEEP }} > {{ logstash_log_dir }}/optimize_cron"
when: LOGSTASH_ROTATE|bool
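# With the role defaults (LOGSTASH_DAYS_TO_KEEP: 30, rotation at 05:42), the
# rotation task above renders approximately this root crontab entry:
#   42 5 * * * /usr/bin/python /edx/app/logstash/share/logstash-elasticsearch-scripts/logstash_index_cleaner.py -d 30 > /edx/var/log/logstash/rotation_cron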
input {
tcp {
port => {{ logstash_syslog_port }}
type => syslog
}
udp {
port => {{ logstash_syslog_port }}
type => syslog
}
}
filter {
if [type] == "syslog" {
grok {
match => { "message" => "<%{POSINT:syslog_pri}>%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{GREEDYDATA:syslog_message}" }
add_field => [ "received_at", "%{@timestamp}" ]
add_field => [ "received_from", "%{@source_host}" ]
}
syslog_pri { }
date {
match => [ "syslog_timestamp", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ]
}
if !("_grokparsefailure" in [tags]) {
mutate {
replace => [ "@source_host", "%{syslog_hostname}" ]
replace => [ "@message", "%{syslog_message}" ]
}
}
mutate {
remove_field => [ "syslog_hostname", "syslog_message", "syslog_timestamp" ]
}
}
}
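# Example: a syslog line such as
#   <34>Feb  5 17:32:18 app-host su: 'su root' failed on /dev/pts/8
# parses to syslog_pri=34, syslog_hostname=app-host, with the remainder as
# syslog_message; the mutate block above then copies those values into
# @source_host and @message before the scratch fields are removed.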
output {
# Example just to output to elasticsearch
elasticsearch { }
# And gzip for each host and program
file {
path => '{{ logstash_data_dir }}/%{@source_host}/all.%{+yyyyMMdd}.gz'
gzip => true
}
# Should add option for S3 as well.
}
# logstash-indexer.conf
# logstash - indexer instance
#
description "logstash indexer instance"
start on virtual-filesystems
stop on runlevel [06]
respawn
respawn limit 5 30
limit nofile 65550 65550
env HOME={{ logstash_app_dir }}
env JAVA_OPTS='-Xms512m -Xmx512m'
env PATH=$PATH:/usr/lib/jvm/{{ oraclejdk_base }}/bin
chdir {{ logstash_app_dir }}
setuid root
console log
# for versions 1.1.1 - 1.1.4 the internal web service crashes when touched
# and the current workaround is to just not run it and run Kibana instead
script
exec java -jar {{ logstash_app_dir }}/share/logstash.jar agent -f {{ logstash_app_dir }}/etc/logstash.conf --log {{ logstash_log_dir }}/logstash-indexer.out
end script
......@@ -29,6 +29,10 @@ nginx_debian_pkgs:
- nginx
- python-passlib
CMS_HOSTNAME: '~^((stage|prod)-)?studio.*'
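# e.g. matches studio.example.com, stage-studio.example.com and prod-studio.example.com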
nginx_template_dir: "edx/app/nginx/sites-available"
nginx_xserver_gunicorn_hosts:
- 127.0.0.1
nginx_xqueue_gunicorn_hosts:
......
......@@ -58,7 +58,7 @@
- name: Copying nginx configs for {{ nginx_sites }}
template: >
src=edx/app/nginx/sites-available/{{ item }}.j2
src={{ nginx_template_dir }}/{{ item }}.j2
dest={{ nginx_sites_available_dir }}/{{ item }}
owner=root group={{ common_web_user }} mode=0640
notify: reload nginx
......
......@@ -27,8 +27,7 @@ server {
listen {{EDXAPP_CMS_NGINX_PORT}} {{default_site}};
{% endif %}
server_name ~^((stage|prod)-)?studio\..*;
server_name {{ CMS_HOSTNAME }};
access_log {{ nginx_log_dir }}/access.log;
error_log {{ nginx_log_dir }}/error.log error;
......
{%- if "kibana" in nginx_default_sites -%}
{%- set default_site = "default" -%}
{%- else -%}
{%- set default_site = "" -%}
{%- endif -%}
upstream elasticsearch_server {
server 127.0.0.1:9200;
}
server {
# Kibana server, templated by ansible
{% if NGINX_ENABLE_SSL %}
listen {{KIBANA_NGINX_PORT}} {{default_site}};
listen {{KIBANA_SSL_NGINX_PORT}} {{default_site}} ssl;
ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }};
ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }};
{% else %}
listen {{KIBANA_NGINX_PORT}} {{default_site}};
{% endif %}
server_name {{ KIBANA_SERVER_NAME }};
root {{ kibana_app_dir }}/htdocs;
access_log {{ nginx_log_dir }}/kibana.access.log;
error_log {{ nginx_log_dir }}/kibana.error.log error;
# Access restriction
{% include "basic-auth.j2" %}
# Set image format types to expire in a very long time
location ~* ^.+\.(jpg|jpeg|gif|png|ico)$ {
access_log off;
expires max;
}
# Set css and js to expire in a very long time
location ~* ^.+\.(css|js)$ {
access_log off;
expires max;
}
# Elastic Search
location /e {
rewrite /e/(.*) /$1 break;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $http_x_forwarded_for;
proxy_redirect off;
proxy_pass http://elasticsearch_server;
}
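  # e.g. a request for /e/logstash-2014.02.05/_search is rewritten to
  # /logstash-2014.02.05/_search and proxied to the upstream on 127.0.0.1:9200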
# Kibana
location / {
root {{ kibana_app_dir }}/htdocs;
index index.html;
expires 1d;
try_files $uri/ $uri;
if (-f $request_filename) {
break;
}
}
}
---
NLTK_DATA_DIR: "/usr/local/share/nltk_data"
# Once a file is downloaded, it won't be downloaded again,
# so if you need to version the data files, you should upload
# your own version of the files with the version appended to the
# filename (see the example below).
NLTK_DATA:
- { path: "taggers/maxent_treebank_pos_tagger",
url: "http://nltk.github.com/nltk_data/packages/taggers/maxent_treebank_pos_tagger.zip" }
- { path: "corpora/stopwords",
url: "http://nltk.github.com/nltk_data/packages/corpora/stopwords.zip" }
- { path: "corpora/wordnet",
url: "http://nltk.github.com/nltk_data/packages/corpora/wordnet.zip" }
---
- name: Install unzip
apt: pkg=unzip state=present
- name: create the nltk data directory and subdirectories
file: path={{ NLTK_DATA_DIR }}/{{ item.path|dirname }} state=directory
with_items: NLTK_DATA
tags:
- deploy
- name: download nltk data
get_url: >
dest={{ NLTK_DATA_DIR }}/{{ item.url|basename }}
url={{ item.url }}
with_items: NLTK_DATA
register: nltk_download
tags:
- deploy
- name: unarchive nltk data
shell: >
unzip {{NLTK_DATA_DIR}}/{{ item.url|basename }} chdir="{{ NLTK_DATA_DIR }}/{{ item.path|dirname }}"
with_items: NLTK_DATA
when: nltk_download|changed
tags:
- deploy
......@@ -89,7 +89,7 @@ notifier_env_vars:
CS_API_KEY: "{{ NOTIFIER_COMMENT_SERVICE_API_KEY }}"
US_URL_BASE: "{{ NOTIFIER_USER_SERVICE_BASE }}"
US_API_KEY: "{{ NOTIFIER_USER_SERVICE_API_KEY }}"
DATADOG_API_KEY: "{{ DATADOG_API_KEY }}"
DATADOG_API_KEY: "{{ COMMON_DATADOG_API_KEY }}"
LOG_LEVEL: "{{ NOTIFIER_LOG_LEVEL }}"
RSYSLOG_ENABLED: "{{ NOTIFIER_RSYSLOG_ENABLED }}"
BROKER_URL: "{{ NOTIFIER_CELERY_BROKER_URL }}"
......
......@@ -11,10 +11,6 @@ ora_venv_dir: "{{ ora_venvs_dir }}/ora"
ora_venv_bin: "{{ ora_venv_dir }}/bin"
ora_user: "ora"
ora_deploy_path: "{{ ora_venv_bin }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
ora_nltk_data_dir: "{{ ora_data_dir}}/nltk_data"
ora_nltk_download_url: http://edx-static.s3.amazonaws.com/nltk/nltk-data-20131113.tar.gz
ora_nltk_tmp_file: "{{ ora_data_dir }}/nltk.tmp.tar.tz"
ora_source_repo: https://github.com/edx/edx-ora.git
ora_version: 'master'
......@@ -143,7 +139,6 @@ ora_auth_config:
ora_environment:
SERVICE_VARIANT: ora
NLTK_DATA: $ora_nltk_data_dir
LANG: $ORA_LANG
PATH: $ora_deploy_path
......
---
dependencies:
- supervisor
- nltk
......@@ -43,17 +43,3 @@
notify:
- restart ora
- restart ora_celery
- name: download and install nltk
shell: |
set -e
curl -o {{ ora_nltk_tmp_file }} {{ ora_nltk_download_url }}
tar zxf {{ ora_nltk_tmp_file }}
rm -f {{ ora_nltk_tmp_file }}
touch {{ ora_nltk_download_url|basename }}-installed
creates={{ ora_data_dir }}/{{ ora_nltk_download_url|basename }}-installed
chdir={{ ora_data_dir }}
sudo_user: "{{ common_web_user }}"
notify:
- restart ora
- restart ora_celery
......@@ -5,7 +5,7 @@ command={{ ora_venv_bin }}/gunicorn --preload -b {{ ora_gunicorn_host }}:{{ ora_
user={{ common_web_user }}
directory={{ ora_code_dir }}
environment=PID=/var/run/gunicorn/edx-ora.pid,WORKERS={{ ora_gunicorn_workers }},PORT={{ ora_gunicorn_port }},ADDRESS={{ ora_gunicorn_host }},LANG={{ ORA_LANG }},DJANGO_SETTINGS_MODULE=edx_ora.aws,SERVICE_VARIANT=ora,NLTK_DATA={{ ora_nltk_data_dir }}
environment=PID=/var/run/gunicorn/edx-ora.pid,WORKERS={{ ora_gunicorn_workers }},PORT={{ ora_gunicorn_port }},ADDRESS={{ ora_gunicorn_host }},LANG={{ ORA_LANG }},DJANGO_SETTINGS_MODULE=edx_ora.aws,SERVICE_VARIANT=ora
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
......
......@@ -5,7 +5,7 @@ command={{ ora_venv_bin }}/python {{ ora_code_dir }}/manage.py celeryd --logleve
user={{ common_web_user }}
directory={{ ora_code_dir }}
environment=DJANGO_SETTINGS_MODULE=edx_ora.aws,SERVICE_VARIANT=ora,NLTK_DATA={{ ora_nltk_data_dir }}
environment=DJANGO_SETTINGS_MODULE=edx_ora.aws,SERVICE_VARIANT=ora
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
......
---
SNORT_OINKCODE: 'oinkcode'
SNORT_RULES_URL: [ 'http://www.snort.org/pub-bin/oinkmaster.cgi/{{ SNORT_OINKCODE }}/snortrules-snapshot-2931.tar.gz',
'http://rules.emergingthreats.net/open/snort-2.9.0/emerging.rules.tar.gz' ]
---
# install and configure snort IDS
#
- name: install snort
apt: pkg={{ item }} state="present"
with_items:
- snort
- oinkmaster
- name: configure snort
template: >
src=etc/snort/snort.conf.j2 dest=/etc/snort/snort.conf
owner=root group=root mode=0644
- name: configure snort (debian)
template: >
src=etc/snort/snort.debian.conf.j2 dest=/etc/snort/snort.debian.conf
owner=root group=root mode=0644
- name: configure oinkmaster
template: >
src=etc/oinkmaster.conf.j2 dest=/etc/oinkmaster.conf
owner=root group=root mode=0644
- name: update snort
shell: oinkmaster -C /etc/oinkmaster.conf -o /etc/snort/rules/
sudo: yes
- name: snort service
service: >
name="snort"
state="started"
- name: open read permissions on snort logs
file: >
name="/var/log/snort"
state="directory"
mode="755"
- name: install oinkmaster cronjob
template: >
src=etc/cron.daily/oinkmaster.j2 dest=/etc/cron.daily/oinkmaster
owner=root group=root mode=0755
#! /bin/bash
oinkmaster -C /etc/oinkmaster.conf -o /etc/snort/rules/ > /dev/null
service snort restart
{% for url in SNORT_RULES_URL %}
url = {{ url }}
{% endfor %}
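# e.g. with the role defaults this renders one "url = ..." line per entry in
# SNORT_RULES_URL: the VRT rules snapshot and the Emerging Threats open rules.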
# Ignore local.rules from the rules archive by default since we might
# have put some local rules in our own local.rules and we don't want it
# to get overwritten by the empty one from the archive after each
# update.
skipfile local.rules
# The file deleted.rules contains rules that have been deleted from
# other files, so there is usually no point in updating it.
skipfile deleted.rules
# Also skip snort.conf by default since we don't want to overwrite our
# own snort.conf if we have it in the same directory as the rules. If
# you have your own production copy of snort.conf in another directory,
# it may be really nice to check for changes in this file though,
# especially since variables are sometimes added or modified and
# new/old files are included/excluded.
skipfile snort.conf
#--------------------------------------------------
# VRT Rule Packages Snort.conf
#
# For more information visit us at:
# http://www.snort.org Snort Website
# http://vrt-sourcefire.blogspot.com/ Sourcefire VRT Blog
#
# Mailing list Contact: snort-sigs@lists.sourceforge.net
# False Positive reports: fp@sourcefire.com
# Snort bugs: bugs@snort.org
#
# Compatible with Snort Versions:
# VERSIONS : 2.9.2.0
#
# Snort build options:
# OPTIONS : --enable-ipv6 --enable-gre --enable-mpls --enable-targetbased --enable-decoder-preprocessor-rules --enable-ppm --enable-perfprofiling --enable-zlib --enable-active-response --enable-normalizer --enable-reload --enable-react --enable-flexresp3
#
# Additional information:
# This configuration file enables active response, to run snort in
# test mode -T you are required to supply an interface -i <interface>
# or test mode will fail to fully validate the configuration and
# exit with a FATAL error
#--------------------------------------------------
###################################################
# This file contains a sample snort configuration.
# You should take the following steps to create your own custom configuration:
#
# 1) Set the network variables.
# 2) Configure the decoder
# 3) Configure the base detection engine
# 4) Configure dynamic loaded libraries
# 5) Configure preprocessors
# 6) Configure output plugins
# 7) Customize your rule set
# 8) Customize preprocessor and decoder rule set
# 9) Customize shared object rule set
###################################################
###################################################
# Step #1: Set the network variables. For more information, see README.variables
###################################################
# Setup the network addresses you are protecting
ipvar HOME_NET any
# Set up the external network addresses. Leave as "any" in most situations
ipvar EXTERNAL_NET any
#ipvar EXTERNAL_NET !$HOME_NET
# List of DNS servers on your network
ipvar DNS_SERVERS $HOME_NET
# List of SMTP servers on your network
ipvar SMTP_SERVERS $HOME_NET
# List of web servers on your network
ipvar HTTP_SERVERS $HOME_NET
# List of sql servers on your network
ipvar SQL_SERVERS $HOME_NET
# List of telnet servers on your network
ipvar TELNET_SERVERS $HOME_NET
# List of ssh servers on your network
ipvar SSH_SERVERS $HOME_NET
# List of ftp servers on your network
ipvar FTP_SERVERS $HOME_NET
# List of sip servers on your network
ipvar SIP_SERVERS $HOME_NET
# List of ports you run web servers on
portvar HTTP_PORTS [80,8000,18000,18010,18020,18030,18040,18050,18060,18070,18080,18090,18100]
# List of ports you want to look for SHELLCODE on.
portvar SHELLCODE_PORTS !80
# List of ports you might see oracle attacks on
portvar ORACLE_PORTS 1024:
# List of ports you want to look for SSH connections on:
portvar SSH_PORTS 22
# List of ports you run ftp servers on
portvar FTP_PORTS [21,2100,3535]
# List of ports you run SIP servers on
portvar SIP_PORTS [5060,5061,5600]
# other variables, these should not be modified
ipvar AIM_SERVERS [64.12.24.0/23,64.12.28.0/23,64.12.161.0/24,64.12.163.0/24,64.12.200.0/24,205.188.3.0/24,205.188.5.0/24,205.188.7.0/24,205.188.9.0/24,205.188.153.0/24,205.188.179.0/24,205.188.248.0/24]
# Path to your rules files (this can be a relative path)
# Note for Windows users: You are advised to make this an absolute path,
# such as: c:\snort\rules
var RULE_PATH /etc/snort/rules
var SO_RULE_PATH /etc/snort/so_rules
var PREPROC_RULE_PATH /etc/snort/preproc_rules
###################################################
# Step #2: Configure the decoder. For more information, see README.decode
###################################################
# Stop generic decode events:
config disable_decode_alerts
# Stop Alerts on experimental TCP options
config disable_tcpopt_experimental_alerts
# Stop Alerts on obsolete TCP options
config disable_tcpopt_obsolete_alerts
# Stop Alerts on T/TCP alerts
config disable_tcpopt_ttcp_alerts
# Stop Alerts on all other TCPOption type events:
config disable_tcpopt_alerts
# Stop Alerts on invalid ip options
config disable_ipopt_alerts
# Alert if value in length field (IP, TCP, UDP) is greater than the length of the packet
# config enable_decode_oversized_alerts
# Same as above, but drop packet if in Inline mode (requires enable_decode_oversized_alerts)
# config enable_decode_oversized_drops
# Configure IP / TCP checksum mode
config checksum_mode: all
# Configure maximum number of flowbit references. For more information, see README.flowbits
# config flowbits_size: 64
# Configure ports to ignore
# config ignore_ports: tcp 21 6667:6671 1356
# config ignore_ports: udp 1:17 53
# Configure active response for non inline operation. For more information, see README.active
# config response: eth0 attempts 2
# Configure DAQ related options for inline operation. For more information, see README.daq
#
# config daq: <type>
# config daq_dir: <dir>
# config daq_mode: <mode>
# config daq_var: <var>
#
# <type> ::= pcap | afpacket | dump | nfq | ipq | ipfw
# <mode> ::= read-file | passive | inline
# <var> ::= arbitrary <name>=<value> passed to DAQ
# <dir> ::= path as to where to look for DAQ module so's
# Configure specific UID and GID to run snort as after dropping privs. For more information see snort -h command line options
#
# config set_gid:
# config set_uid:
# Configure default snaplen. Snort defaults to MTU of in use interface. For more information see README
#
# config snaplen:
#
# Configure default bpf_file to use for filtering what traffic reaches snort. For more information see snort -h command line options (-F)
#
# config bpf_file:
#
# Configure default log directory for snort to log to. For more information see snort -h command line options (-l)
#
# config logdir:
###################################################
# Step #3: Configure the base detection engine. For more information, see README.decode
###################################################
# Configure PCRE match limitations
config pcre_match_limit: 3500
config pcre_match_limit_recursion: 1500
# Configure the detection engine See the Snort Manual, Configuring Snort - Includes - Config
config detection: search-method ac-split search-optimize max-pattern-len 20
# Configure the event queue. For more information, see README.event_queue
config event_queue: max_queue 8 log 3 order_events content_length
###################################################
# Per packet and rule latency enforcement
# For more information see README.ppm
###################################################
# Per Packet latency configuration
#config ppm: max-pkt-time 250, \
# fastpath-expensive-packets, \
# pkt-log
# Per Rule latency configuration
#config ppm: max-rule-time 200, \
# threshold 3, \
# suspend-expensive-rules, \
# suspend-timeout 20, \
# rule-log alert
###################################################
# Configure Perf Profiling for debugging
# For more information see README.PerfProfiling
###################################################
#config profile_rules: print all, sort avg_ticks
#config profile_preprocs: print all, sort avg_ticks
###################################################
# Step #4: Configure dynamic loaded libraries.
# For more information, see Snort Manual, Configuring Snort - Dynamic Modules
###################################################
# path to dynamic preprocessor libraries
dynamicpreprocessor directory /usr/lib/snort_dynamicpreprocessor/
# path to base preprocessor engine
dynamicengine /usr/lib/snort_dynamicengine/libsf_engine.so
# path to dynamic rules libraries
# dynamicdetection directory /usr/lib/snort_dynamicrules
###################################################
# Step #5: Configure preprocessors
# For more information, see the Snort Manual, Configuring Snort - Preprocessors
###################################################
# Inline packet normalization. For more information, see README.normalize
# Does nothing in IDS mode
preprocessor normalize_ip4
preprocessor normalize_tcp: ips ecn stream
preprocessor normalize_icmp4
preprocessor normalize_ip6
preprocessor normalize_icmp6
# Target-based IP defragmentation. For more information, see README.frag3
preprocessor frag3_global: max_frags 65536
preprocessor frag3_engine: policy windows detect_anomalies overlap_limit 10 min_fragment_length 100 timeout 180
# Target-Based stateful inspection/stream reassembly. For more information, see README.stream5
preprocessor stream5_global: track_tcp yes, \
track_udp yes, \
track_icmp no, \
max_tcp 262144, \
max_udp 131072, \
max_active_responses 2, \
min_response_seconds 5
preprocessor stream5_tcp: policy windows, detect_anomalies, require_3whs 180, \
overlap_limit 10, small_segments 3 bytes 150, timeout 180, \
ports client 21 22 23 25 42 53 79 109 110 111 113 119 135 136 137 139 143 \
161 445 513 514 587 593 691 1433 1521 2100 3306 6070 6665 6666 6667 6668 6669 \
7000 8181 32770 32771 32772 32773 32774 32775 32776 32777 32778 32779, \
ports both 80 81 311 443 465 563 591 593 636 901 989 992 993 994 995 1220 1414 1830 2301 2381 2809 3128 3702 5250 7907 7001 7802 7777 7779 \
7801 7900 7901 7902 7903 7904 7905 7906 7908 7909 7910 7911 7912 7913 7914 7915 7916 \
7917 7918 7919 7920 8000 8008 8028 8080 8088 8118 8123 8180 8243 8280 8888 9090 9091 9443 9999 11371 \
18000 18010 18020 18030 18040 18050 18060 18070 18080 18090 18100
preprocessor stream5_udp: timeout 180
# performance statistics. For more information, see the Snort Manual, Configuring Snort - Preprocessors - Performance Monitor
# preprocessor perfmonitor: time 300 file /var/snort/snort.stats pktcnt 10000
# HTTP normalization and anomaly detection. For more information, see README.http_inspect
preprocessor http_inspect: global iis_unicode_map unicode.map 1252 compress_depth 65535 decompress_depth 65535
preprocessor http_inspect_server: server default \
chunk_length 500000 \
server_flow_depth 0 \
client_flow_depth 0 \
post_depth 65495 \
oversize_dir_length 500 \
max_header_length 750 \
max_headers 100 \
ports { 80 8000 18000 18010 18020 18030 18040 18050 18060 18070 18080 18090 18100 } \
non_rfc_char { 0x00 0x01 0x02 0x03 0x04 0x05 0x06 0x07 } \
enable_cookie \
extended_response_inspection \
inspect_gzip \
normalize_utf \
unlimited_decompress \
apache_whitespace no \
ascii no \
bare_byte no \
directory no \
double_decode no \
iis_backslash no \
iis_delimiter no \
iis_unicode no \
multi_slash no \
utf_8 no \
u_encode yes \
webroot no
# ONC-RPC normalization and anomaly detection. For more information, see the Snort Manual, Configuring Snort - Preprocessors - RPC Decode
preprocessor rpc_decode: 111 32770 32771 32772 32773 32774 32775 32776 32777 32778 32779 no_alert_multiple_requests no_alert_large_fragments no_alert_incomplete
# Back Orifice detection.
preprocessor bo
# FTP / Telnet normalization and anomaly detection. For more information, see README.ftptelnet
preprocessor ftp_telnet: global inspection_type stateful encrypted_traffic no
preprocessor ftp_telnet_protocol: telnet \
ayt_attack_thresh 20 \
normalize ports { 23 } \
detect_anomalies
preprocessor ftp_telnet_protocol: ftp server default \
def_max_param_len 100 \
ports { 21 2100 3535 } \
telnet_cmds yes \
ignore_telnet_erase_cmds yes \
ftp_cmds { ABOR ACCT ADAT ALLO APPE AUTH CCC CDUP } \
ftp_cmds { CEL CLNT CMD CONF CWD DELE ENC EPRT } \
ftp_cmds { EPSV ESTA ESTP FEAT HELP LANG LIST LPRT } \
ftp_cmds { LPSV MACB MAIL MDTM MIC MKD MLSD MLST } \
ftp_cmds { MODE NLST NOOP OPTS PASS PASV PBSZ PORT } \
ftp_cmds { PROT PWD QUIT REIN REST RETR RMD RNFR } \
ftp_cmds { RNTO SDUP SITE SIZE SMNT STAT STOR STOU } \
ftp_cmds { STRU SYST TEST TYPE USER XCUP XCRC XCWD } \
ftp_cmds { XMAS XMD5 XMKD XPWD XRCP XRMD XRSQ XSEM } \
ftp_cmds { XSEN XSHA1 XSHA256 } \
alt_max_param_len 0 { ABOR CCC CDUP ESTA FEAT LPSV NOOP PASV PWD QUIT REIN STOU SYST XCUP XPWD } \
alt_max_param_len 200 { ALLO APPE CMD HELP NLST RETR RNFR STOR STOU XMKD } \
alt_max_param_len 256 { CWD RNTO } \
alt_max_param_len 400 { PORT } \
alt_max_param_len 512 { SIZE } \
chk_str_fmt { ACCT ADAT ALLO APPE AUTH CEL CLNT CMD } \
chk_str_fmt { CONF CWD DELE ENC EPRT EPSV ESTP HELP } \
chk_str_fmt { LANG LIST LPRT MACB MAIL MDTM MIC MKD } \
chk_str_fmt { MLSD MLST MODE NLST OPTS PASS PBSZ PORT } \
chk_str_fmt { PROT REST RETR RMD RNFR RNTO SDUP SITE } \
chk_str_fmt { SIZE SMNT STAT STOR STRU TEST TYPE USER } \
chk_str_fmt { XCRC XCWD XMAS XMD5 XMKD XRCP XRMD XRSQ } \
chk_str_fmt { XSEM XSEN XSHA1 XSHA256 } \
cmd_validity ALLO < int [ char R int ] > \
cmd_validity EPSV < [ { char 12 | char A char L char L } ] > \
cmd_validity MACB < string > \
cmd_validity MDTM < [ date nnnnnnnnnnnnnn[.n[n[n]]] ] string > \
cmd_validity MODE < char ASBCZ > \
cmd_validity PORT < host_port > \
cmd_validity PROT < char CSEP > \
cmd_validity STRU < char FRPO [ string ] > \
cmd_validity TYPE < { char AE [ char NTC ] | char I | char L [ number ] } >
preprocessor ftp_telnet_protocol: ftp client default \
max_resp_len 256 \
bounce yes \
ignore_telnet_erase_cmds yes \
telnet_cmds yes
# SMTP normalization and anomaly detection. For more information, see README.SMTP
preprocessor smtp: ports { 25 465 587 691 } \
inspection_type stateful \
b64_decode_depth 0 \
qp_decode_depth 0 \
bitenc_decode_depth 0 \
uu_decode_depth 0 \
log_mailfrom \
log_rcptto \
log_filename \
log_email_hdrs \
normalize cmds \
normalize_cmds { ATRN AUTH BDAT CHUNKING DATA DEBUG EHLO EMAL ESAM ESND ESOM ETRN EVFY } \
normalize_cmds { EXPN HELO HELP IDENT MAIL NOOP ONEX QUEU QUIT RCPT RSET SAML SEND SOML } \
normalize_cmds { STARTTLS TICK TIME TURN TURNME VERB VRFY X-ADAT X-DRCP X-ERCP X-EXCH50 } \
normalize_cmds { X-EXPS X-LINK2STATE XADR XAUTH XCIR XEXCH50 XGEN XLICENSE XQUE XSTA XTRN XUSR } \
max_command_line_len 512 \
max_header_line_len 1000 \
max_response_line_len 512 \
alt_max_command_line_len 260 { MAIL } \
alt_max_command_line_len 300 { RCPT } \
alt_max_command_line_len 500 { HELP HELO ETRN EHLO } \
alt_max_command_line_len 255 { EXPN VRFY ATRN SIZE BDAT DEBUG EMAL ESAM ESND ESOM EVFY IDENT NOOP RSET } \
alt_max_command_line_len 246 { SEND SAML SOML AUTH TURN ETRN DATA RSET QUIT ONEX QUEU STARTTLS TICK TIME TURNME VERB X-EXPS X-LINK2STATE XADR XAUTH XCIR XEXCH50 XGEN XLICENSE XQUE XSTA XTRN XUSR } \
valid_cmds { ATRN AUTH BDAT CHUNKING DATA DEBUG EHLO EMAL ESAM ESND ESOM ETRN EVFY } \
valid_cmds { EXPN HELO HELP IDENT MAIL NOOP ONEX QUEU QUIT RCPT RSET SAML SEND SOML } \
valid_cmds { STARTTLS TICK TIME TURN TURNME VERB VRFY X-ADAT X-DRCP X-ERCP X-EXCH50 } \
valid_cmds { X-EXPS X-LINK2STATE XADR XAUTH XCIR XEXCH50 XGEN XLICENSE XQUE XSTA XTRN XUSR } \
xlink2state { enabled }
# Portscan detection. For more information, see README.sfportscan
# preprocessor sfportscan: proto { all } memcap { 10000000 } sense_level { low }
# ARP spoof detection. For more information, see the Snort Manual - Configuring Snort - Preprocessors - ARP Spoof Preprocessor
# preprocessor arpspoof
# preprocessor arpspoof_detect_host: 192.168.40.1 f0:0f:00:f0:0f:00
# SSH anomaly detection. For more information, see README.ssh
preprocessor ssh: server_ports { 22 } \
autodetect \
max_client_bytes 19600 \
max_encrypted_packets 20 \
max_server_version_len 100 \
enable_respoverflow enable_ssh1crc32 \
enable_srvoverflow enable_protomismatch
# SMB / DCE-RPC normalization and anomaly detection. For more information, see README.dcerpc2
preprocessor dcerpc2: memcap 102400, events [co ]
preprocessor dcerpc2_server: default, policy WinXP, \
detect [smb [139,445], tcp 135, udp 135, rpc-over-http-server 593], \
autodetect [tcp 1025:, udp 1025:, rpc-over-http-server 1025:], \
smb_max_chain 3, smb_invalid_shares ["C$", "D$", "ADMIN$"]
# DNS anomaly detection. For more information, see README.dns
preprocessor dns: ports { 53 } enable_rdata_overflow
# SSL anomaly detection and traffic bypass. For more information, see README.ssl
preprocessor ssl: ports { 443 465 563 636 989 992 993 994 995 7801 7802 7900 7901 7902 7903 7904 7905 7906 7907 7908 7909 7910 7911 7912 7913 7914 7915 7916 7917 7918 7919 7920 }, trustservers, noinspect_encrypted
# SDF sensitive data preprocessor. For more information see README.sensitive_data
preprocessor sensitive_data: alert_threshold 25
# SIP Session Initiation Protocol preprocessor. For more information see README.sip
preprocessor sip: max_sessions 10000, \
ports { 5060 5061 5600 }, \
methods { invite \
cancel \
ack \
bye \
register \
options \
refer \
subscribe \
update \
join \
info \
message \
notify \
benotify \
do \
qauth \
sprack \
publish \
service \
unsubscribe \
prack }, \
max_uri_len 512, \
max_call_id_len 80, \
max_requestName_len 20, \
max_from_len 256, \
max_to_len 256, \
max_via_len 1024, \
max_contact_len 512, \
max_content_len 1024
# IMAP preprocessor. For more information see README.imap
preprocessor imap: \
ports { 143 } \
b64_decode_depth 0 \
qp_decode_depth 0 \
bitenc_decode_depth 0 \
uu_decode_depth 0
# POP preprocessor. For more information see README.pop
preprocessor pop: \
ports { 110 } \
b64_decode_depth 0 \
qp_decode_depth 0 \
bitenc_decode_depth 0 \
uu_decode_depth 0
###################################################
# Step #6: Configure output plugins
# For more information, see Snort Manual, Configuring Snort - Output Modules
###################################################
# unified2
# Recommended for most installs
# output unified2: filename merged.log, limit 128, nostamp, mpls_event_types, vlan_event_types
# Additional configuration for specific types of installs
# output alert_unified2: filename snort.alert, limit 128, nostamp
# output log_unified2: filename snort.log, limit 128, nostamp
# syslog
output alert_syslog: LOG_AUTH LOG_ALERT
# pcap
output log_tcpdump: tcpdump.log
# database
# output database: alert, <db_type>, user=<username> password=<password> test dbname=<name> host=<hostname>
# output database: log, <db_type>, user=<username> password=<password> test dbname=<name> host=<hostname>
#
# On Debian Systems, the database configuration is kept in a separate file:
# /etc/snort/database.conf.
# This file can be empty, if you are not using any database information
# If you are using databases, please edit that file instead of this one, to
# ensure smoother upgrades to future versions of this package.
include database.conf
#
# prelude
# output alert_prelude
# metadata reference data. do not modify these lines
include classification.config
include reference.config
###################################################
# Step #7: Customize your rule set
# For more information, see Snort Manual, Writing Snort Rules
#
# NOTE: All categories are enabled in this conf file
###################################################
# site specific rules
include $RULE_PATH/local.rules
include $RULE_PATH/attack-responses.rules
include $RULE_PATH/backdoor.rules
include $RULE_PATH/bad-traffic.rules
# include $RULE_PATH/blacklist.rules
# include $RULE_PATH/botnet-cnc.rules
include $RULE_PATH/chat.rules
# include $RULE_PATH/content-replace.rules
include $RULE_PATH/ddos.rules
include $RULE_PATH/dns.rules
include $RULE_PATH/dos.rules
include $RULE_PATH/community-dos.rules
include $RULE_PATH/exploit.rules
include $RULE_PATH/community-exploit.rules
include $RULE_PATH/finger.rules
include $RULE_PATH/ftp.rules
include $RULE_PATH/community-ftp.rules
include $RULE_PATH/icmp.rules
include $RULE_PATH/icmp-info.rules
include $RULE_PATH/imap.rules
include $RULE_PATH/community-imap.rules
include $RULE_PATH/info.rules
include $RULE_PATH/misc.rules
include $RULE_PATH/multimedia.rules
include $RULE_PATH/mysql.rules
include $RULE_PATH/netbios.rules
include $RULE_PATH/nntp.rules
include $RULE_PATH/community-nntp.rules
include $RULE_PATH/oracle.rules
include $RULE_PATH/community-oracle.rules
include $RULE_PATH/other-ids.rules
include $RULE_PATH/p2p.rules
# include $RULE_PATH/phishing-spam.rules
include $RULE_PATH/policy.rules
# include $RULE_PATH/community-policy.rules
# include $RULE_PATH/community-inappropriate.rules
# include $RULE_PATH/community-game.rules
# include $RULE_PATH/community-misc.rules
include $RULE_PATH/pop2.rules
include $RULE_PATH/pop3.rules
include $RULE_PATH/rpc.rules
include $RULE_PATH/rservices.rules
# include $RULE_PATH/scada.rules
include $RULE_PATH/scan.rules
# Note: this rule is extremely chatty, enable with care
include $RULE_PATH/shellcode.rules
include $RULE_PATH/smtp.rules
include $RULE_PATH/community-smtp.rules
include $RULE_PATH/snmp.rules
# include $RULE_PATH/specific-threats.rules
# include $RULE_PATH/spyware-put.rules
include $RULE_PATH/sql.rules
include $RULE_PATH/telnet.rules
include $RULE_PATH/tftp.rules
include $RULE_PATH/virus.rules
include $RULE_PATH/community-virus.rules
include $RULE_PATH/community-bot.rules
# include $RULE_PATH/voip.rules
include $RULE_PATH/community-sip.rules
# Specific web server rules:
# include $RULE_PATH/web-activex.rules
include $RULE_PATH/web-attacks.rules
include $RULE_PATH/web-cgi.rules
include $RULE_PATH/web-client.rules
include $RULE_PATH/web-coldfusion.rules
include $RULE_PATH/web-frontpage.rules
include $RULE_PATH/web-iis.rules
include $RULE_PATH/web-misc.rules
include $RULE_PATH/web-php.rules
include $RULE_PATH/community-sql-injection.rules
include $RULE_PATH/community-web-client.rules
include $RULE_PATH/community-web-dos.rules
include $RULE_PATH/community-web-iis.rules
include $RULE_PATH/community-web-misc.rules
include $RULE_PATH/community-web-php.rules
include $RULE_PATH/x11.rules
###################################################
# Step #8: Customize your preprocessor and decoder alerts
# For more information, see README.decoder_preproc_rules
###################################################
# decoder and preprocessor event rules
# include $PREPROC_RULE_PATH/preprocessor.rules
# include $PREPROC_RULE_PATH/decoder.rules
# include $PREPROC_RULE_PATH/sensitive-data.rules
###################################################
# Step #9: Customize your Shared Object Snort Rules
# For more information, see http://vrt-sourcefire.blogspot.com/2009/01/using-vrt-certified-shared-object-rules.html
###################################################
# dynamic library rules
# include $SO_RULE_PATH/bad-traffic.rules
# include $SO_RULE_PATH/chat.rules
# include $SO_RULE_PATH/dos.rules
# include $SO_RULE_PATH/exploit.rules
# include $SO_RULE_PATH/icmp.rules
# include $SO_RULE_PATH/imap.rules
# include $SO_RULE_PATH/misc.rules
# include $SO_RULE_PATH/multimedia.rules
# include $SO_RULE_PATH/netbios.rules
# include $SO_RULE_PATH/nntp.rules
# include $SO_RULE_PATH/pop3.rules
# include $SO_RULE_PATH/p2p.rules
# include $SO_RULE_PATH/smtp.rules
# include $SO_RULE_PATH/snmp.rules
# include $SO_RULE_PATH/specific-threats.rules
# include $SO_RULE_PATH/sql.rules
# include $SO_RULE_PATH/web-activex.rules
# include $SO_RULE_PATH/web-client.rules
# include $SO_RULE_PATH/web-iis.rules
# include $SO_RULE_PATH/web-misc.rules
# Event thresholding or suppression commands. See threshold.conf
include threshold.conf
# snort.debian.config (Debian Snort configuration file)
#
# This file was generated by the post-installation script of the snort
# package using values from the debconf database.
#
# It is used for options that are changed by Debian to leave
# the original configuration files untouched.
#
# This file is automatically updated on upgrades of the snort package
# *only* if it has not been modified since the last upgrade of that package.
#
# If you have edited this file but would like it to be automatically updated
# again, run the following command as root:
# dpkg-reconfigure snort
DEBIAN_SNORT_STARTUP="boot"
DEBIAN_SNORT_HOME_NET=""
DEBIAN_SNORT_OPTIONS=""
DEBIAN_SNORT_INTERFACE="eth0"
DEBIAN_SNORT_SEND_STATS="true"
DEBIAN_SNORT_STATS_RCPT="root"
DEBIAN_SNORT_STATS_THRESHOLD="1"
......@@ -23,10 +23,18 @@ SPLUNKFORWARDER_DEB: !!null
SPLUNKFORWARDER_PASSWORD: !!null
SPLUNKFORWARDER_LOG_ITEMS:
- directory: '{{ COMMON_LOG_DIR }}'
- directory: '{{ COMMON_LOG_DIR }}/lms'
recursive: true
index: '{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}'
sourcetype: 'edx'
- directory: '{{ COMMON_LOG_DIR }}/cms'
recursive: true
index: '{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}'
sourcetype: 'edx'
- directory: '{{ COMMON_LOG_DIR }}'
recursive: true
index: '{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}'
sourcetype: 'syslog'
- directory: '/var/log'
recursive: true
index: '{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}'
......
......@@ -13,6 +13,11 @@
---
SUPERVISOR_HTTP_BIND_IP: '127.0.0.1'
# Used by the pre-supervisor script if you want to
# notify a hipchat room with the output.
SUPERVISOR_HIPCHAT_API_KEY: !!null
SUPERVISOR_HIPCHAT_ROOM: default
# do not override the bind_port since
# all supervisors will then try to listen
# on the same one
......@@ -35,3 +40,7 @@ supervisor_cfg: "{{ supervisor_app_dir }}/supervisord.conf"
# upstart service name and user
supervisor_service: supervisor
supervisor_service_user: "{{ common_web_user }}"
supervisor_pip_pkgs:
- boto
- python-simple-hipchat
# Get the tags for this instance
import argparse
import boto
import boto.utils
from boto.utils import get_instance_metadata
from boto.exception import AWSConnectionError
import hipchat
import os
import subprocess
import traceback
# Services that should be checked for migrations.
MIGRATION_COMMANDS = {
'lms': "{python} {code_dir}/manage.py lms migrate --noinput --settings=aws --db-dry-run --merge",
'cms': "{python} {code_dir}/manage.py cms migrate --noinput --settings=aws --db-dry-run --merge",
'xqueue': "{python} {code_dir}/manage.py xqueue migrate --noinput --settings=aws --db-dry-run --merge",
}
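# For example (hypothetical paths), formatting the 'lms' entry with
# python="/edx/bin/python.edxapp" and code_dir="/edx/app/edxapp/edx-platform"
# yields:
#   /edx/bin/python.edxapp /edx/app/edxapp/edx-platform/manage.py lms migrate \
#       --noinput --settings=aws --db-dry-run --merge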
HIPCHAT_USER = "PreSupervisor"
def services_for_instance(instance_id):
"""
Get the list of all services named by the services tag in this
instance's tags.
"""
ec2 = boto.connect_ec2()
reservations = ec2.get_all_instances(instance_ids=[instance_id])
for reservation in reservations:
for instance in reservation.instances:
if instance.id == instance_id:
try:
services = instance.tags['services'].split(',')
except KeyError as ke:
msg = "Tag named 'services' not found on this instance({})".format(instance_id)
raise Exception(msg)
for service in services:
yield service
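# e.g. an instance tagged services="lms,cms,xqueue" yields "lms", "cms", "xqueue"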
def edp_for_instance(instance_id):
ec2 = boto.connect_ec2()
reservations = ec2.get_all_instances(instance_ids=[instance_id])
for reservation in reservations:
for instance in reservation.instances:
if instance.id == instance_id:
try:
environment = instance.tags['environment']
deployment = instance.tags['deployment']
play = instance.tags['play']
except KeyError as ke:
msg = "{} tag not found on this instance({})".format(ke.message, instance_id)
raise Exception(msg)
return (environment, deployment, play)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
......@@ -14,24 +57,95 @@ if __name__ == '__main__':
parser.add_argument("-e","--enabled",
help="The location of the enabled services.")
migration_args = parser.add_argument_group("edxapp_migrations",
"Args for running edxapp migration checks.")
migration_args.add_argument("--edxapp-code-dir",
help="Location of the edx-platform code.")
migration_args.add_argument("--edxapp-python",
help="Path to python to use for executing migration check.")
xq_migration_args = parser.add_argument_group("xqueue_migrations",
"Args for running xqueue migration checks.")
xq_migration_args.add_argument("--xqueue-code-dir",
help="Location of the edx-platform code.")
xq_migration_args.add_argument("--xqueue-python",
help="Path to python to use for executing migration check.")
hipchat_args = parser.add_argument_group("hipchat",
"Args for hipchat notification.")
hipchat_args.add_argument("-c","--hipchat-api-key",
help="Hipchat token if you want to receive notifications via hipchat.")
hipchat_args.add_argument("-r","--hipchat-room",
help="Room to send messages to.")
args = parser.parse_args()
ec2 = boto.connect_ec2()
instance_id = boto.utils.get_instance_metadata()['instance-id']
reservations = ec2.get_all_instances(instance_ids=[instance_id])
report = []
for reservation in reservations:
for instance in reservation.instances:
if instance.id == instance_id:
services = instance.tags['services'].split(',')
for service in services:
prefix = None
notify = None
try:
if args.hipchat_api_key:
hc = hipchat.HipChat(token=args.hipchat_api_key)
notify = lambda message: hc.message_room(room_id=args.hipchat_room,
message_from=HIPCHAT_USER, message=message)
except Exception as e:
print("Failed to initialize hipchat, {}".format(e))
traceback.print_exc()
instance_id = get_instance_metadata()['instance-id']
prefix = instance_id
try:
environment, deployment, play = edp_for_instance(instance_id)
prefix = "{environment}-{deployment}-{play}-{instance_id}".format(
environment=environment,
deployment=deployment,
play=play,
instance_id=instance_id)
for service in services_for_instance(instance_id):
if service in MIGRATION_COMMANDS:
# Do extra migration related stuff.
if (service == 'lms' or service == 'cms') and args.edxapp_code_dir:
cmd = MIGRATION_COMMANDS[service].format(python=args.edxapp_python,
code_dir=args.edxapp_code_dir)
if os.path.exists(args.edxapp_code_dir):
os.chdir(args.edxapp_code_dir)
# Run migration check command.
output = subprocess.check_output(cmd, shell=True)
if 'Migrating' in output:
raise Exception("Migrations have not been run for {}".format(service))
elif service == 'xqueue' and args.xqueue_code_dir:
cmd = MIGRATION_COMMANDS[service].format(python=args.xqueue_python,
                        code_dir=args.xqueue_code_dir)
if os.path.exists(args.xqueue_code_dir):
os.chdir(args.xqueue_code_dir)
# Run migration check command.
output = subprocess.check_output(cmd, shell=True)
if 'Migrating' in output:
raise Exception("Migrations have not been run for {}".format(service))
# Link to available service.
available_file = "{}/{}.conf".format(args.available, service)
link_location = "{}/{}.conf".format(args.enabled, service)
available_file = os.path.join(args.available, "{}.conf".format(service))
link_location = os.path.join(args.enabled, "{}.conf".format(service))
if os.path.exists(available_file):
subprocess.call("ln -sf {} {}".format(available_file, link_location), shell=True)
report.append("Linking service: {}".format(service))
else:
report.append("No conf available for service: {}".format(link_location))
print("\n".join(report))
raise Exception("No conf available for service: {}".format(link_location))
except AWSConnectionError as ae:
msg = "{}: ERROR : {}".format(prefix, ae)
if notify:
notify(msg)
notify(traceback.format_exc())
raise ae
except Exception as e:
msg = "{}: ERROR : {}".format(prefix, e)
print(msg)
if notify:
notify(msg)
else:
msg = "{}: {}".format(prefix, " | ".join(report))
print(msg)
if notify:
notify(msg)
......@@ -94,10 +94,10 @@
- name: install supervisor in its venv
pip: >
name=boto virtualenv="{{supervisor_venv_dir}}" state=present
name={{ item }} virtualenv="{{supervisor_venv_dir}}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ supervisor_user }}"
when: supervisor_service == "supervisor" and disable_edx_services and not devstack
with_items: supervisor_pip_pkgs
- name: create supervisor upstart job
template: >
......
......@@ -4,4 +4,5 @@ start on runlevel [2345]
task
setuid {{ supervisor_user }}
exec {{ supervisor_venv_dir }}/bin/python {{ supervisor_app_dir }}/pre_supervisor_checks.py --available={{supervisor_available_dir}} --enabled={{supervisor_cfg_dir}}
exec {{ supervisor_venv_dir }}/bin/python {{ supervisor_app_dir }}/pre_supervisor_checks.py --available={{supervisor_available_dir}} --enabled={{supervisor_cfg_dir}} {% if SUPERVISOR_HIPCHAT_API_KEY is defined %}--hipchat-api-key {{ SUPERVISOR_HIPCHAT_API_KEY }} --hipchat-room {{ SUPERVISOR_HIPCHAT_ROOM }} {% endif %} {% if edxapp_code_dir is defined %}--edxapp-python {{ COMMON_BIN_DIR }}/python.edxapp --edxapp-code-dir {{ edxapp_code_dir }}{% endif %} {% if xqueue_code_dir is defined %}--xqueue-code-dir {{ xqueue_code_dir }} --xqueue-python {{ COMMON_BIN_DIR }}/python.xqueue {% endif %}
......@@ -30,6 +30,7 @@ XQUEUE_MYSQL_USER: 'xqueue001'
XQUEUE_MYSQL_PASSWORD: 'password'
XQUEUE_MYSQL_HOST: 'localhost'
XQUEUE_MYSQL_PORT: '3306'
XQUEUE_NEWRELIC_APPNAME: "edX-xqueue"
# Internal vars below this line
#############################################
......
[program:xqueue]
{% if COMMON_ENABLE_NEWRELIC %}
{% set executable = xqueue_venv_bin + '/newrelic-admin run-program ' + xqueue_venv_bin + '/gunicorn' %}
{% else %}
{% set executable = xqueue_venv_bin + '/gunicorn' %}
{% endif %}
{% if ansible_processor|length > 0 %}
command={{ xqueue_venv_bin }}/gunicorn --preload -b {{ xqueue_gunicorn_host }}:{{ xqueue_gunicorn_port }} -w {{ ansible_processor|length * 2 }} --timeout=300 --pythonpath={{ xqueue_code_dir }} xqueue.wsgi
command={{ executable }} --preload -b {{ xqueue_gunicorn_host }}:{{ xqueue_gunicorn_port }} -w {{ ansible_processor|length * 2 }} --timeout=300 --pythonpath={{ xqueue_code_dir }} xqueue.wsgi
{% else %}
command={{ xqueue_venv_bin }}/gunicorn --preload -b {{ xqueue_gunicorn_host }}:{{ xqueue_gunicorn_port }} -w 2 --timeout=300 --pythonpath={{ xqueue_code_dir }} xqueue.wsgi
command={{ executable }} --preload -b {{ xqueue_gunicorn_host }}:{{ xqueue_gunicorn_port }} -w 2 --timeout=300 --pythonpath={{ xqueue_code_dir }} xqueue.wsgi
{% endif %}
user={{ common_web_user }}
directory={{ xqueue_code_dir }}
environment=PID=/var/tmp/xqueue.pid,PORT={{ xqueue_gunicorn_port }},ADDRESS={{ xqueue_gunicorn_host }},LANG={{ XQUEUE_LANG }},DJANGO_SETTINGS_MODULE=xqueue.aws_settings,SERVICE_VARIANT="xqueue"
environment={% if COMMON_ENABLE_NEWRELIC %}NEW_RELIC_APP_NAME={{ XQUEUE_NEWRELIC_APPNAME }},NEW_RELIC_LICENSE_KEY={{ COMMON_NEWRELIC_LICENSE }},{% endif -%}PID=/var/tmp/xqueue.pid,PORT={{ xqueue_gunicorn_port }},ADDRESS={{ xqueue_gunicorn_host }},LANG={{ XQUEUE_LANG }},DJANGO_SETTINGS_MODULE=xqueue.aws_settings,SERVICE_VARIANT="xqueue"
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
......
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role xqwatcher
#
XQWATCHER_COURSES:
- COURSE: "exampleX-101x"
GIT_REPO: "git@github.com:foo/graders-exampleX-101x.git"
GIT_REF: "master"
QUEUE_NAME: "exampleX-101x"
QUEUE_CONFIG:
SERVER: "https://xqueue.example.com"
CONNECTIONS: 5
AUTH: ["user", "password"]
HANDLERS:
- HANDLER: "xqueue_watcher.jailedgrader.JailedGrader"
CODEJAIL:
name: "exampleX-101x"
python_bin: "{{ xqwatcher_venv_base }}/exampleX-101x/bin/python"
user: "exampleX-101x"
KWARGS:
grader_root: "../data/exampleX-101x/graders/"
- COURSE: "exampleX-202x"
GIT_REPO: "git@github.com:foo/graders-exampleX-202x.git"
GIT_REF: "master"
QUEUE_NAME: "exampleX-202x"
QUEUE_CONFIG:
SERVER: "https://xqueue.example.com"
CONNECTIONS: 5
AUTH: ["user", "password"]
HANDLERS:
- HANDLER: "xqueue_watcher.jailedgrader.JailedGrader"
CODEJAIL:
name: "exampleX-202x"
python_bin: "{{ xqwatcher_venv_base }}/exampleX-202x/bin/python"
user: "exampleX-202x"
KWARGS:
grader_root: "../data/exampleX-202x/graders/"
XQWATCHER_GIT_IDENTITY: |
-----BEGIN RSA PRIVATE KEY-----
Your key if you need to access any private repositories
-----END RSA PRIVATE KEY-----
#
#
# vars are namespaced with the module name.
#
xqwatcher_role_name: "xqwatcher"
xqwatcher_service_name: "xqwatcher"
xqwatcher_user: "xqwatcher"
xqwatcher_module: "xqueue_watcher"
xqwatcher_app_dir: "{{ COMMON_APP_DIR }}/{{ xqwatcher_service_name }}"
xqwatcher_home: "{{ COMMON_APP_DIR }}/{{ xqwatcher_service_name }}"
xqwatcher_venv_base: "{{ xqwatcher_home }}/venvs"
xqwatcher_venv_dir: "{{ xqwatcher_venv_base }}/{{ xqwatcher_service_name }}"
xqwatcher_code_dir: "{{ xqwatcher_app_dir }}/src"
xqwatcher_conf_dir: "{{ xqwatcher_home }}/conf.d"
xqwatcher_data_dir: "{{ xqwatcher_home }}/data"
xqwatcher_source_repo: "git@{{ COMMON_GIT_MIRROR }}:edx/xqueue-watcher.git"
xqwatcher_git_ssh_opts: "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i {{ xqwatcher_git_identity }}"
xqwatcher_version: "master"
xqwatcher_git_identity: "{{ xqwatcher_app_dir }}/git-identity"
xqwatcher_requirements_file: "{{ xqwatcher_code_dir }}/requirements.txt"
xqwatcher_log_dir: "{{ COMMON_LOG_DIR }}/{{ xqwatcher_service_name }}"
#
# supervisor related config
#
xqwatcher_supervisor_app_dir: "{{ xqwatcher_app_dir }}/supervisor"
xqwatcher_supervisor_data_dir: "{{ COMMON_DATA_DIR }}/{{ xqwatcher_service_name }}"
xqwatcher_supervisor_log_dir: "{{ xqwatcher_log_dir }}"
xqwatcher_supervisor_venv_dir: "{{ xqwatcher_venv_base }}/supervisor"
xqwatcher_supervisor_user: "{{ xqwatcher_user }}"
xqwatcher_supervisor_venv_bin: "{{ xqwatcher_supervisor_venv_dir }}/bin"
xqwatcher_supervisor_ctl: "{{ xqwatcher_supervisor_venv_bin }}/supervisorctl"
xqwatcher_supervisor_cfg_dir: "{{ xqwatcher_supervisor_app_dir }}/conf.d"
xqwatcher_supervisor_available_dir: "{{ xqwatcher_supervisor_app_dir }}/conf.available.d"
#
# OS packages
#
xqwatcher_debian_pkgs:
- apparmor-utils
xqwatcher_redhat_pkgs: []
\ No newline at end of file
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role xqwatcher
#
# Overview:
#
#
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Role includes for role xqwatcher
#
# the role name and service name differ by _ vs -; the latter isn't safe in
# random corners of ansible/jinja/python variable expansion.
dependencies:
- role: edx_service
edx_role_name: "{{ xqwatcher_role_name }}"
edx_service_name: "{{ xqwatcher_service_name }}"
- role: supervisor
supervisor_app_dir: "{{ xqwatcher_supervisor_app_dir }}"
supervisor_data_dir: "{{ xqwatcher_supervisor_data_dir }}"
supervisor_log_dir: "{{ xqwatcher_supervisor_log_dir }}"
supervisor_venv_dir: "{{ xqwatcher_supervisor_venv_dir }}"
supervisor_service_user: "{{ xqwatcher_supervisor_user }}"
supervisor_available_dir: "{{ xqwatcher_supervisor_available_dir }}"
supervisor_service: "supervisor.xqwatcher"
supervisor_http_bind_port: '9003'
---
#
# Tasks related to deploying the code jail for the XQWatcher
#
- name: Create sandboxed user
user: >
name="{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}"
shell=/bin/false
home="/dev/null"
with_items: XQWATCHER_COURSES
#
# Need to disable AppArmor (aa) to update the virtualenv
- name: write out apparmor config
template: >
src=etc/apparmor.d/code.jail.j2
dest="/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
mode=0644 owner=root group=root
with_items: XQWATCHER_COURSES
- name: write out sudoers config jail user
template: >
src=etc/sudoers.d/95-jailed-user.j2
dest=/etc/sudoers.d/95-{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}
mode=0440 owner=root group=root validate='visudo -c -f %s'
with_items: XQWATCHER_COURSES
- name: write out sudoers for watcher
template: >
src=etc/sudoers.d/95-xqwatcher.j2
dest=/etc/sudoers.d/95-xqwatcher-{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}
mode=0440 owner=root group=root validate='visudo -c -f %s'
with_items: XQWATCHER_COURSES
- name: create jail virtualenv
shell: >
/usr/sbin/aa-complain "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}" && /usr/local/bin/virtualenv --no-site-packages {{ xqwatcher_venv_base }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}
with_items: XQWATCHER_COURSES
- name: give other read permissions to the virtualenv
shell: >
chown -R {{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }} {{ xqwatcher_venv_base }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}
with_items: XQWATCHER_COURSES
- name: start apparmor service
service: name=apparmor state=started
- name: load code sandbox profile
command: apparmor_parser -r "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
with_items: XQWATCHER_COURSES
- name: put code jail into aa-complain
command: /usr/sbin/aa-complain "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
when: CODE_JAIL_COMPLAIN
with_items: XQWATCHER_COURSES
- name: put code sandbox into aa-enforce
command: /usr/sbin/aa-enforce "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
when: not CODE_JAIL_COMPLAIN
\ No newline at end of file
- name: install read-only ssh key
copy: >
content="{{ XQWATCHER_GIT_IDENTITY }}" dest={{ xqwatcher_git_identity }}
owner={{ xqwatcher_user }} group={{ xqwatcher_user }} mode=0600
- include: deploy_watcher.yml
tags:
- deploy-watcher
- include: deploy_courses.yml
tags:
- deploy-courses
- name: remove read-only ssh key for the content repo
file: path={{ xqwatcher_git_identity }} state=absent
# Iterates over the data structure documented in tasks/main.yml,
# checking out the grader code from the repository specified on
# a per-queue basis.
- name: checkout grader code
git: >
dest={{ xqwatcher_data_dir }}/{{ item.COURSE }} repo={{ item.GIT_REPO }}
version={{ item.GIT_REF }}
ssh_opts="{{ xqwatcher_git_ssh_opts }}"
with_items: XQWATCHER_COURSES
# Installs the xqueue watcher code and supervisor scripts.
# The watcher can watch one or many queues and dispatch submissions
# to the appropriate grader, which lives in a separate SCM repository.
- name: checkout watcher code
git: >
dest={{ xqwatcher_code_dir }} repo={{ xqwatcher_source_repo }} version={{ xqwatcher_version }}
accept_hostkey=yes
ssh_opts="{{ xqwatcher_git_ssh_opts }}"
- name: install application requirements
pip: >
requirements="{{ xqwatcher_requirements_file }}"
virtualenv="{{ xqwatcher_venv_dir }}" state=present
sudo: true
sudo_user: "{{ xqwatcher_user }}"
- name: write out course config files
template: >
src=edx/app/xqwatcher/conf.d/course.json.j2
dest={{ xqwatcher_conf_dir }}/{{ item.COURSE }}.json
mode=0644 owner={{ xqwatcher_user }} group={{ xqwatcher_user }}
with_items: XQWATCHER_COURSES
- name: write supervisord config
template: >
src=edx/app/supervisor/conf.d/xqwatcher.conf.j2
dest="{{ xqwatcher_supervisor_available_dir }}/xqwatcher.conf"
group={{ xqwatcher_user }} mode=0650
- name: enable supervisor script
file: >
src={{ xqwatcher_supervisor_available_dir }}/xqwatcher.conf
dest={{ xqwatcher_supervisor_cfg_dir }}/xqwatcher.conf
state=link
force=yes
when: not disable_edx_services
- name: update supervisor configuration
shell: "{{ xqwatcher_supervisor_ctl }} -c /edx/app/xqwatcher/supervisor/supervisord.conf update"
when: not disable_edx_services
\ No newline at end of file
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role xqwatcher
#
# Overview:
#
# This play installs a sandboxed, pull grader that integrates with XQueue. The
# code for the XQWatcher lives here: https://github.com/edx/xqueue-watcher
#
# Multiple courses can be installed on a single server with distinct sandboxes.
#
# Example play:
#
# A play to install the XQWatcher would look like this:
#
# - name: Deploy xqueue-watcher
# hosts: all
# sudo: True
# gather_facts: True
# vars:
# COMMON_APP_DIR: "/edx/app"
# common_web_group: "www-data"
# roles:
# - aws
# - datadog
# - splunkforwarder
# - newrelic
# - xqwatcher
#
# You would use a command like the following to run the play.
#
# ansible-playbook -i ec2.py ./xqwatcher.yml -e@./example-config.yml
#
# The contents of the example-config.yml would include the queue
# meta data and details related to the repository including the
# grader code.
#
# XQWATCHER_COURSES:
# - COURSE: "exampleX-101x"
# GIT_REPO: "git@github.com:foo/graders-exampleX-101x.git"
# GIT_REF: "master"
# QUEUE_NAME: "exampleX-101x"
# QUEUE_CONFIG:
# SERVER: "https://xqueue.example.com"
# CONNECTIONS: 5
# AUTH: ["user", "password"]
# HANDLERS:
# - HANDLER: "xqueue_watcher.jailedgrader.JailedGrader"
# CODEJAIL:
# name: "exampleX-101x"
# python_bin: "{{ xqwatcher_venv_base }}/exampleX-101x/bin/python"
# user: "exampleX-101x"
# KWARGS:
# grader_root: "../data/exampleX-101x/graders/"
# - COURSE: "exampleX-202x"
# GIT_REPO: "git@github.com:foo/graders-exampleX-202x.git"
# GIT_REF: "master"
# QUEUE_NAME: "exampleX-202x"
# QUEUE_CONFIG:
# SERVER: "https://xqueue.example.com"
# CONNECTIONS: 5
# AUTH: ["user", "password"]
# HANDLERS:
# - HANDLER: "xqueue_watcher.jailedgrader.JailedGrader"
# CODEJAIL:
# name: "exampleX-202x"
# python_bin: "{{ xqwatcher_venv_base }}/exampleX-202x/bin/python"
# user: "exampleX-202x"
# KWARGS:
# grader_root: "../data/exampleX-202x/graders/"
# XQWATCHER_GIT_IDENTITY: |
# -----BEGIN RSA PRIVATE KEY-----
# Your key if you need to access any private repositories
# -----END RSA PRIVATE KEY-----
#
- include: code_jail.yml CODE_JAIL_COMPLAIN=false
- name: create conf dir
file: >
path="{{ xqwatcher_conf_dir }}"
state=directory
owner="{{ xqwatcher_user }}"
group="{{ xqwatcher_user }}"
- name: create supervisor dirs
file: >
path="{{ xqwatcher_conf_dir }}"
state=directory
owner="{{ xqwatcher_user }}"
group="{{ xqwatcher_user }}"
with_items:
- "{{ xqwatcher_supervisor_data_dir }}"
- "{{ xqwatcher_supervisor_log_dir }}"
- name: write out course config files
template: >
src=edx/app/xqwatcher/conf.d/course.json.j2
dest={{ xqwatcher_conf_dir }}/{{ item.COURSE }}.json
mode=0644 owner={{ xqwatcher_user }} group={{ xqwatcher_user }}
with_items: XQWATCHER_COURSES
- include: deploy.yml tags=deploy
; {{ ansible_managed }}
;
[program:xqwatcher]
command={{ xqwatcher_venv_dir }}/bin/python -m {{ xqwatcher_module }} -d ../conf.d/
process_name=%(program_name)s
directory={{ xqwatcher_code_dir }}
stdout_logfile={{ xqwatcher_supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ xqwatcher_supervisor_log_dir }}/%(program_name)s-stderr.log
killasgroup=true
stopasgroup=true
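; killasgroup/stopasgroup make supervisor signal the watcher's whole
; process group, so any grader subprocesses it forks are stopped along
; with the parent rather than being orphaned.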
{
"{{ item.QUEUE_NAME }}":
{{ item.QUEUE_CONFIG | to_nice_json }}
}
\ No newline at end of file
#include <tunables/global>
{{ xqwatcher_venv_base }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}/bin/python {
#include <abstractions/base>
{{ xqwatcher_venv_base }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}/** mr,
# TODO: need a way of providing the sandbox packages, e.g.:
# edxapp_code_dir /common/lib/sandbox-packages/** r,
/tmp/codejail-*/ rix,
/tmp/codejail-*/** wrix,
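# (AppArmor permission letters used here: "r" read, "w" write, "m"
# memory-map as executable, "ix" inherit-execute. The two rules above
# let the jailed interpreter create and run helpers only inside its
# per-submission codejail tmp directory.)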
#
# Whitelist particular shared objects from the system
# python installation
#
/usr/lib/python2.7/lib-dynload/_json.so mr,
/usr/lib/python2.7/lib-dynload/_ctypes.so mr,
/usr/lib/python2.7/lib-dynload/_heapq.so mr,
/usr/lib/python2.7/lib-dynload/_io.so mr,
/usr/lib/python2.7/lib-dynload/_csv.so mr,
/usr/lib/python2.7/lib-dynload/datetime.so mr,
/usr/lib/python2.7/lib-dynload/_elementtree.so mr,
/usr/lib/python2.7/lib-dynload/pyexpat.so mr,
#
# Allow access to selections from /proc
#
/proc/*/mounts r,
}
{{ item.QUEUE.HANDLERS[0].CODEJAIL.user }} ALL=({{ item.QUEUE.HANDLERS[0].CODEJAIL.user }}) SETENV:NOPASSWD:{{ xqwatcher_venv_base }}/{{ item.QUEUE.HANDLERS[0].CODEJAIL.name }}/bin/python
{{ item.QUEUE.HANDLERS[0].CODEJAIL.user }} ALL=(ALL) NOPASSWD:/bin/kill
{{ item.QUEUE.HANDLERS[0].CODEJAIL.user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill
{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }} ALL=({{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}) SETENV:NOPASSWD:{{ xqwatcher_venv_base }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}/bin/python
{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }} ALL=(ALL) NOPASSWD:/bin/kill
{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill
{{ xqwatcher_user }} ALL=({{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}) SETENV:NOPASSWD:{{ xqwatcher_venv_base }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}/bin/python
{{ xqwatcher_user }} ALL=(ALL) NOPASSWD:/bin/kill
{{ xqwatcher_user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill
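# The rules above let the watcher user run each course's sandboxed python
# as the matching codejail user (SETENV permits passing environment
# variables through sudo) and clean up stray sandbox processes with
# kill/pkill, all without a password prompt.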
ansible==1.5.4
ansible==1.5.5
PyYAML==3.11
Jinja2==2.7.2
MarkupSafe==0.21
MarkupSafe==0.23
argparse==1.2.1
boto==2.20.1
boto==2.28.0
ecdsa==0.11
paramiko==1.13.0
paramiko==1.14.0
pycrypto==2.6.1
wsgiref==0.1.2
docopt==0.6.1
python-simple-hipchat==0.2
# Import XML Courses from git repos into the CMS.
# Run with sudo and make sure the user can clone
# the course repos.
# Outputs a hash per course:
#{
# repo_url:
# repo_name:
# org:
# course:
# run:
# disposition:
# version:
#}
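# A hypothetical input row (columns: course slug, authoring format,
# disposition, one unused field, repo url, version; these match the
# cols[] indexes parsed below):
#
# MITx/6.002x/2014_Spring,xml,on disk,,git@github.com:foo/content-mit-6002x.git,master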
import argparse
from os.path import basename
import yaml
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Import XML courses from git repos.")
parser.add_argument("-c", "--courses-csv", required=True,
help="A CSV of xml courses to import.")
args = parser.parse_args()
courses = open(args.courses_csv, 'r')
all_course_data = []
all_xml_mappings = {}
for line in courses:
cols = line.strip().split(',')
slug = cols[0]
author_format = cols[1]
disposition = cols[2]
repo_url = cols[4]
version = cols[5]
if author_format.lower() != 'xml' \
or disposition.lower() == "don't import":
continue
# Checkout w/tilde
org, course, run = slug.split("/")
repo_name = "{}~{}".format(basename(repo_url).rstrip('.git'), run)
course_info = {
"repo_url": repo_url,
"repo_name": repo_name,
"org": org,
"course": course,
"run": run,
"disposition": disposition.lower(),
"version": version,
}
all_course_data.append(course_info)
if disposition.lower() == "on disk":
all_xml_mappings[slug] = 'xml'
edxapp_xml_courses = { "EDXAPP_XML_COURSES": all_course_data, "EDXAPP_XML_MAPPINGS": all_xml_mappings }
print yaml.safe_dump(edxapp_xml_courses, default_flow_style=False)
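# A hypothetical rendering of the printed YAML for one course (values are
# illustrative; keys are the ones built above, sorted by safe_dump):
#
# EDXAPP_XML_COURSES:
# - course: 6.002x
#   disposition: on disk
#   org: MITx
#   repo_name: content-mit-6002x~2014_Spring
#   repo_url: git@github.com:foo/content-mit-6002x.git
#   run: 2014_Spring
#   version: master
# EDXAPP_XML_MAPPINGS:
#   MITx/6.002x/2014_Spring: xml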
......@@ -20,7 +20,6 @@ fi
## Install system pre-requisites
##
sudo apt-get install -y build-essential software-properties-common python-software-properties curl git-core libxml2-dev libxslt1-dev python-pip python-apt python-dev
wget https://bitbucket.org/pypa/setuptools/raw/0.8/ez_setup.py -O - | sudo python
sudo pip install --upgrade pip
sudo pip install --upgrade virtualenv
......
......@@ -115,10 +115,12 @@ $extra_vars
EOF
if [[ $basic_auth == "true" ]]; then
# vars specific to provisioning are appended to $extra_vars_file
cat << EOF_AUTH >> $extra_vars_file
NGINX_HTPASSWD_USER: $auth_user
NGINX_HTPASSWD_PASS: $auth_pass
XQUEUE_BASIC_AUTH_USER: $auth_user
XQUEUE_BASIC_AUTH_PASSWORD: $auth_pass
EOF_AUTH
fi
......@@ -163,7 +165,10 @@ COMMON_USER_INFO:
- name: ${github_username}
github: true
type: admin
dns_zone: $dns_zone
rabbitmq_refresh: True
USER_CMD_PROMPT: '[$name_tag] '
elb: $elb
EOF
fi
......@@ -193,7 +198,6 @@ if [[ $reconfigure == "true" || $server_type == "full_edx_installation_from_scra
ansible-playbook edx_continuous_integration.yml -i "${deploy_host}," $extra_var_arg --user ubuntu
fi
if [[ $server_type == "full_edx_installation" ]]; then
# Run deploy tasks for the roles selected
for i in $roles; do
......
......@@ -9,5 +9,3 @@ BUILD_USER_LAST_NAME=$(ascii_convert $BUILD_USER_LAST_NAME)
BUILD_USER_FIRST_NAME=$(ascii_convert $BUILD_USER_FIRST_NAME)
BUILD_USER_ID=$(ascii_convert $BUILD_USER_ID)
BUILD_USER=$(ascii_convert $BUILD_USER)
......@@ -83,6 +83,34 @@ if [[ "$use_blessed" == "true" ]]; then
blessed_params="--blessed"
fi
playbookdir_params=""
if [[ ! -z "$playbook_dir" ]]; then
playbookdir_params="--playbook-dir $playbook_dir"
fi
configurationprivate_params=""
if [[ ! -z "$configurationprivaterepo" ]]; then
configurationprivate_params="--configuration-private-repo $configurationprivaterepo"
if [[ ! -z "$configurationprivateversion" ]]; then
configurationprivate_params="$configurationprivate_params --configuration-private-version $configurationprivateversion"
fi
fi
stackname_params=""
if [[ ! -z "$playbook_dir" ]]; then
stackname_params="--playbook-dir $playbook_dir"
fi
hipchat_params=""
if [[ ! -z "$hipchat_room_id" ]] && [[ ! -z "$hipchat_api_token" ]]; then
hipchat_params="--hipchat-room-id $hipchat_room_id --hipchat-api-token $hipchat_api_token"
fi
cleanup_params=""
if [[ "$cleanup" == "false" ]]; then
cleanup_params="--no-cleanup"
fi
cd configuration
pip install -r requirements.txt
......@@ -94,4 +122,4 @@ cat /var/tmp/$BUILD_ID-refs.yml
echo "$vars" > /var/tmp/$BUILD_ID-extra-vars.yml
cat /var/tmp/$BUILD_ID-extra-vars.yml
python -u abbey.py -p $play -t c1.medium -d $deployment -e $environment -i /edx/var/jenkins/.ssh/id_rsa $base_params $blessed_params --vars /var/tmp/$BUILD_ID-extra-vars.yml --refs /var/tmp/$BUILD_ID-refs.yml -c $BUILD_NUMBER --configuration-version $configuration --configuration-secure-version $configuration_secure -k $jenkins_admin_ec2_key --configuration-secure-repo $jenkins_admin_configuration_secure_repo
python -u abbey.py -p $play -t c3.large -d $deployment -e $environment -i /edx/var/jenkins/.ssh/id_rsa $base_params $blessed_params $playbookdir_params --vars /var/tmp/$BUILD_ID-extra-vars.yml --refs /var/tmp/$BUILD_ID-refs.yml -c $BUILD_NUMBER --configuration-version $configuration --configuration-secure-version $configuration_secure -k $jenkins_admin_ec2_key --configuration-secure-repo $jenkins_admin_configuration_secure_repo $configurationprivate_params $hipchat_params $cleanup_params
#!/usr/bin/env bash
# Ansible provisioning wrapper script that
# assumes the following parameters set
# as environment variables
#
# - github_username
# - server_type
# - instance_type
# - region
# - aws_account
# - keypair
# - ami
# - root_ebs_size
# - security_group
# - dns_zone
# - dns_name
# - environment
# - name_tag
export PYTHONUNBUFFERED=1
export BOTO_CONFIG=/var/lib/jenkins/${aws_account}.boto
if [[ -z $WORKSPACE ]]; then
dir=$(dirname $0)
source "$dir/ascii-convert.sh"
else
source "$WORKSPACE/configuration/util/jenkins/ascii-convert.sh"
fi
if [[ ! -f $BOTO_CONFIG ]]; then
echo "AWS credentials not found for $aws_account"
exit 1
fi
if [[ -z $sandbox_to_update ]]; then
sandbox_to_update="${BUILD_USER_ID}.m.sandbox.edx.org"
fi
cd $WORKSPACE/configuration/playbooks/edx-east
ansible-playbook connect_sandbox.yml -i $sandbox_to_update, -e@${WORKSPACE}/configuration-secure/ansible/vars/clone-db.yml -e EDXAPP_MYSQL_HOST=$EDXAPP_MYSQL_HOST --user ubuntu -v
#!/usr/bin/env bash
# A simple wrapper to run ansible from Jenkins.
# This assumes that you will be running on one or more servers
# that are tagged with Name: <environment>-<deployment>-<play>
if [[
-z $WORKSPACE ||
-z $environment_tag ||
-z $deployment_tag ||
-z $play_tag ||
-z $ansible_play ||
-z $elb_pre_post ||
-z $first_in ||
-z $serial_count
]]; then
echo "Environment incorrect for this wrapper script"
env
exit 1
fi
cd $WORKSPACE/configuration/playbooks/edx-east
ansible_extra_vars+=" -e serial_count=$serial_count -e elb_pre_post=$elb_pre_post"
if [ ! -z "$extra_vars" ]; then
ansible_extra_vars+=" -e $extra_vars"
fi
if [[ $run_migrations == "true" ]]; then
ansible_extra_vars+=" -e migrate_db=yes"
fi
if [[ ! -z "$run_on_single_ip" ]]; then
ansible_limit+="$run_on_single_ip"
else
if [[ $first_in == "true" ]]; then
ansible_limit+="first_in_"
fi
ansible_limit+="tag_Name_${environment_tag}-${deployment_tag}-${play_tag}"
fi
if [[ ! -z "$task_tags" ]]; then
ansible_task_tags+="--tags $task_tags"
fi
export PYTHONUNBUFFERED=1
env
ansible-playbook -v -u ubuntu $ansible_play -i ./ec2.py $ansible_task_tags --limit $ansible_limit -e@"$WORKSPACE/configuration-secure/ansible/vars/${deployment_tag}.yml" -e@"$WORKSPACE/configuration-secure/ansible/vars/${environment_tag}-${deployment_tag}.yml" $ansible_extra_vars
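# For illustration, with environment_tag=stage, deployment_tag=edx,
# play_tag=edxapp and first_in=true, the --limit argument above resolves
# to "first_in_tag_Name_stage-edx-edxapp" (values here are made up).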
#!/bin/bash
# A very simple check to see if the json files in the project at least compile.
# If they do not, a cryptic message that might be helpful is produced.
# Save current directory so we can come back; change to repo root
STARTED_FROM=`pwd`
cd $(git rev-parse --show-toplevel)
# Do very basic syntax check of every json file to make sure it's valid format
for file in `find . -iname '*.json'`; do
cat $file | python -m json.tool 1>/dev/null 2>json_complaint.err;
retval=$?
if [ $retval != 0 ]; then
echo "JSON errors in $file"
cat json_complaint.err
rm -f json_complaint.err
cd $STARTED_FROM
exit $retval;
fi
done
# Everything went ok!
rm -f json_complaint.err
cd $STARTED_FROM
exit 0
......@@ -11,6 +11,7 @@ try:
from boto.vpc import VPCConnection
from boto.exception import NoAuthHandlerFound, EC2ResponseError
from boto.sqs.message import RawMessage
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
except ImportError:
print "boto required for script"
sys.exit(1)
......@@ -47,11 +48,13 @@ def parse_args():
parser.add_argument('--noop', action='store_true',
help="don't actually run the cmds",
default=False)
parser.add_argument('--secure-vars', required=False,
metavar="SECURE_VAR_FILE",
parser.add_argument('--secure-vars-file', required=False,
metavar="SECURE_VAR_FILE", default=None,
help="path to secure-vars from the root of "
"the secure repo (defaults to ansible/"
"vars/ENVIRONMENT-DEPLOYMENT.yml)")
"the secure repo. By default <deployment>.yml and "
"<environment>-<deployment>.yml will be used if they "
"exist in <secure-repo>/ansible/vars/. This secure file "
"will be used in addition to these if they exist.")
parser.add_argument('--stack-name',
help="defaults to ENVIRONMENT-DEPLOYMENT",
metavar="STACK_NAME",
......@@ -59,6 +62,10 @@ def parse_args():
parser.add_argument('-p', '--play',
help='play name without the yml extension',
metavar="PLAY", required=True)
parser.add_argument('--playbook-dir',
help='directory to find playbooks in',
default='configuration/playbooks/edx-east',
metavar="PLAYBOOKDIR", required=False)
parser.add_argument('-d', '--deployment', metavar="DEPLOYMENT",
required=True)
parser.add_argument('-e', '--environment', metavar="ENVIRONMENT",
......@@ -80,6 +87,12 @@ def parse_args():
parser.add_argument('--configuration-secure-repo', required=False,
default="git@github.com:edx-ops/prod-secure",
help="repo to use for the secure files")
parser.add_argument('--configuration-private-version', required=False,
help="configuration-private repo branch(no hashes)",
default="master")
parser.add_argument('--configuration-private-repo', required=False,
default="git@github.com:edx-ops/ansible-private",
help="repo to use for private playbooks")
parser.add_argument('-c', '--cache-id', required=True,
help="unique id to use as part of cache prefix")
parser.add_argument('-i', '--identity', required=False,
......@@ -109,6 +122,10 @@ def parse_args():
parser.add_argument("--hipchat-api-token", required=False,
default=None,
help="The API token for Hipchat integration")
parser.add_argument("--root-vol-size", required=False,
default=50,
help="The size of the root volume to use for the "
"abbey instance.")
group = parser.add_mutually_exclusive_group()
group.add_argument('-b', '--base-ami', required=False,
......@@ -136,6 +153,7 @@ def get_instance_sec_group(vpc_id):
return grp_details[0].id
def get_blessed_ami():
images = ec2.get_all_images(
filters={
......@@ -152,6 +170,7 @@ def get_blessed_ami():
return images[0].id
def create_instance_args():
"""
Looks up security group, subnet
......@@ -193,6 +212,7 @@ secure_identity="$base_dir/secure-identity"
git_ssh="$base_dir/git_ssh.sh"
configuration_version="{configuration_version}"
configuration_secure_version="{configuration_secure_version}"
configuration_private_version="{configuration_private_version}"
environment="{environment}"
deployment="{deployment}"
play="{play}"
......@@ -201,14 +221,18 @@ git_repo_name="configuration"
git_repo="https://github.com/edx/$git_repo_name"
git_repo_secure="{configuration_secure_repo}"
git_repo_secure_name="{configuration_secure_repo_basename}"
secure_vars_file="$base_dir/$git_repo_secure_name/{secure_vars}"
git_repo_private="{configuration_private_repo}"
git_repo_private_name=$(basename $git_repo_private .git)
secure_vars_file={secure_vars_file}
environment_deployment_secure_vars="$base_dir/$git_repo_secure_name/ansible/vars/{environment}-{deployment}.yml"
deployment_secure_vars="$base_dir/$git_repo_secure_name/ansible/vars/{deployment}.yml"
instance_id=\\
$(curl http://169.254.169.254/latest/meta-data/instance-id 2>/dev/null)
instance_ip=\\
$(curl http://169.254.169.254/latest/meta-data/local-ipv4 2>/dev/null)
instance_type=\\
$(curl http://169.254.169.254/latest/meta-data/instance-type 2>/dev/null)
playbook_dir="$base_dir/configuration/playbooks/edx-east"
playbook_dir="$base_dir/{playbook_dir}"
if $config_secure; then
git_cmd="env GIT_SSH=$git_ssh git"
......@@ -270,6 +294,10 @@ EDXAPP_UPDATE_STATIC_FILES_KEY: true
edxapp_dynamic_cache_key: {deployment}-{environment}-{play}-{cache_id}
disable_edx_services: true
# abbey should never take instances in
# and out of elbs
elb_pre_post: false
EOF
chmod 400 $secure_identity
......@@ -286,13 +314,35 @@ if $config_secure; then
cd $base_dir
fi
if [[ ! -z $git_repo_private ]]; then
$git_cmd clone $git_repo_private $git_repo_private_name
cd $git_repo_private_name
$git_cmd checkout $configuration_private_version
cd $base_dir
fi
cd $base_dir/$git_repo_name
sudo pip install -r requirements.txt
cd $playbook_dir
ansible-playbook -vvvv -c local -i "localhost," $play.yml -e@$secure_vars_file -e@$extra_vars
ansible-playbook -vvvv -c local -i "localhost," stop_all_edx_services.yml -e@$secure_vars_file -e@$extra_vars
if [[ -r "$deployment_secure_vars" ]]; then
extra_args_opts+=" -e@$deployment_secure_vars"
fi
if [[ -r "$environment_deployment_secure_vars" ]]; then
extra_args_opts+=" -e@$environment_deployment_secure_vars"
fi
if [[ "$secure_vars_file" != "false" ]]; then
extra_args_opts+=" -e@$secure_vars_file"
fi
extra_args_opts+=" -e@$extra_vars"
ansible-playbook -vvvv -c local -i "localhost," $play.yml $extra_args_opts
ansible-playbook -vvvv -c local -i "localhost," stop_all_edx_services.yml $extra_args_opts
rm -rf $base_dir
......@@ -302,17 +352,24 @@ rm -rf $base_dir
configuration_secure_repo=args.configuration_secure_repo,
configuration_secure_repo_basename=os.path.basename(
args.configuration_secure_repo),
configuration_private_version=args.configuration_private_version,
configuration_private_repo=args.configuration_private_repo,
environment=args.environment,
deployment=args.deployment,
play=args.play,
playbook_dir=args.playbook_dir,
config_secure=config_secure,
identity_contents=identity_contents,
queue_name=run_id,
extra_vars_yml=extra_vars_yml,
git_refs_yml=git_refs_yml,
secure_vars=secure_vars,
secure_vars_file=secure_vars_file,
cache_id=args.cache_id)
mapping = BlockDeviceMapping()
root_vol = BlockDeviceType(size=args.root_vol_size)
mapping['/dev/sda1'] = root_vol
ec2_args = {
'security_group_ids': [security_group_id],
'subnet_id': subnet_id,
......@@ -321,7 +378,7 @@ rm -rf $base_dir
'instance_type': args.instance_type,
'instance_profile_name': args.role_name,
'user_data': user_data,
'block_device_map': mapping,
}
return ec2_args
......@@ -376,7 +433,7 @@ def poll_sqs_ansible():
now = int(time.time())
if buf:
try:
if (now - max([msg['recv_ts'] for msg in buf])) > args.msg_delay:
if (now - min([msg['recv_ts'] for msg in buf])) > args.msg_delay:
# sort by TS instead of recv_ts
# because the sqs timestamp is not as
# accurate
......@@ -491,6 +548,7 @@ def create_ami(instance_id, name, description):
return image_id
def launch_and_configure(ec2_args):
"""
Creates an sqs queue, launches an ec2 instance,
......@@ -580,13 +638,14 @@ def launch_and_configure(ec2_args):
return run_summary, ami
def send_hipchat_message(message):
# If hipchat is configured, send the details to the specified room
if args.hipchat_api_token and args.hipchat_room_id:
import hipchat
try:
hipchat = hipchat.HipChat(token=args.hipchat_api_token)
hipchat.message_room(args.hipchat_room_id,'AbbeyNormal',
hipchat.message_room(args.hipchat_room_id, 'AbbeyNormal',
message)
except Exception as e:
print("Hipchat messaging resulted in an error: %s." % e)
......@@ -615,21 +674,28 @@ if __name__ == '__main__':
git_refs_yml = ""
git_refs = {}
if args.secure_vars:
secure_vars = args.secure_vars
if args.secure_vars_file:
# explicit path to a single
# secure var file
secure_vars_file = args.secure_vars_file
else:
secure_vars = "ansible/vars/{}-{}.yml".format(
args.environment, args.deployment)
secure_vars_file = 'false'
if args.stack_name:
stack_name = args.stack_name
else:
stack_name = "{}-{}".format(args.environment, args.deployment)
try:
sqs = boto.sqs.connect_to_region(args.region)
ec2 = boto.ec2.connect_to_region(args.region)
except NoAuthHandlerFound:
print 'You must be able to connect to sqs and ec2 to use this script'
print 'Unable to connect to ec2 in region: {}'.format(args.region)
sys.exit(1)
try:
sqs = boto.sqs.connect_to_region(args.region)
except NoAuthHandlerFound:
print 'Unable to connect to sqs in region: {}'.format(args.region)
sys.exit(1)
if args.blessed:
......@@ -661,8 +727,7 @@ if __name__ == '__main__':
run[0], run[1] / 60, run[1] % 60)
print "AMI: {}".format(ami)
message = 'Finished baking AMI {image_id} for {environment} ' \
'{deployment} {play}.'.format(
message = 'Finished baking AMI {image_id} for {environment} {deployment} {play}.'.format(
image_id=ami,
environment=args.environment,
deployment=args.deployment,
......
#!/usr/bin/env python
import boto
import boto.route53
import boto.route53.record
import boto.ec2.elb
import boto.rds2
import time
from argparse import ArgumentParser, RawTextHelpFormatter
import datetime
import sys
from vpcutil import rds_subnet_group_name_for_stack_name, all_stack_names
import os
description = """
Creates a new RDS instance via a point-in-time restore
from the latest available backup.
The new db will be the same size as the original.
The name of the db will remain the same; the master db password
will be changed to the value given on the command line.
If stack-name is provided the RDS instance will be launched
in the VPC that corresponds to that name.
The new db name defaults to "from-<source db name>-<human date>-<ts>".
A new DNS entry will be created for the RDS instance when one is
provided on the command line.
"""
RDS_SIZES = [
'db.m1.small',
'db.m1.large',
'db.m1.xlarge',
'db.m2.xlarge',
'db.m2.2xlarge',
'db.m2.4xlarge',
]
# These are the groups for the different
# stack names that will be assigned once
# the corresponding db is cloned
SG_GROUPS = {
'stage-edx': 'sg-d2f623b7',
}
# This group must already be created
# and allows for full access to port
# 3306 from within the vpc.
# This group is assigned temporarily
# for cleaning the db
SG_GROUPS_FULL = {
'stage-edx': 'sg-0abf396f',
}
def parse_args(args=sys.argv[1:]):
stack_names = all_stack_names()
rds = boto.rds2.connect_to_region('us-east-1')
dbs = [db['DBInstanceIdentifier']
for db in rds.describe_db_instances()['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']]
parser = ArgumentParser(description=description, formatter_class=RawTextHelpFormatter)
parser.add_argument('-s', '--stack-name', choices=stack_names,
default=None,
help='Stack name for where you want this RDS instance launched')
parser.add_argument('-t', '--type', choices=RDS_SIZES,
default='db.m1.small', help='RDS size to create instances of')
parser.add_argument('-d', '--db-source', choices=dbs,
default=u'stage-edx', help="source db to clone")
parser.add_argument('-p', '--password',
help="password for the new database", metavar="NEW PASSWORD")
parser.add_argument('-r', '--region', default='us-east-1',
help="region to connect to")
parser.add_argument('--dns',
help="dns entry for the new rds instance")
parser.add_argument('--clean-wwc', action="store_true",
default=False,
help="clean the wwc db after launching it into the vpc, removing sensitive data")
parser.add_argument('--clean-prod-grader', action="store_true",
default=False,
help="clean the prod_grader db after launching it into the vpc, removing sensitive data")
parser.add_argument('--dump', action="store_true",
default=False,
help="create a sql dump after launching it into the vpc")
parser.add_argument('--secret-var-file',
help="using a secret var file run ansible against the host to update db users")
return parser.parse_args(args)
def wait_on_db_status(db_name, region='us-east-1', wait_on='available', aws_id=None, aws_secret=None):
rds = boto.rds2.connect_to_region(region)
while True:
statuses = rds.describe_db_instances(db_name)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']
if len(statuses) > 1:
raise Exception("More than one instance returned for {0}".format(db_name))
if statuses[0]['DBInstanceStatus'] == wait_on:
break
sys.stdout.write(".")
sys.stdout.flush()
time.sleep(2)
return
if __name__ == '__main__':
args = parse_args()
sanitize_wwc_sql_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sanitize-db-wwc.sql")
sanitize_prod_grader_sql_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sanitize-db-prod_grader.sql")
play_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../playbooks/edx-east")
rds = boto.rds2.connect_to_region(args.region)
restore_dbid = 'from-{0}-{1}-{2}'.format(args.db_source, datetime.date.today(), int(time.time()))
restore_args = dict(
source_db_instance_identifier=args.db_source,
target_db_instance_identifier=restore_dbid,
use_latest_restorable_time=True,
db_instance_class=args.type,
)
if args.stack_name:
subnet_name = rds_subnet_group_name_for_stack_name(args.stack_name)
restore_args['db_subnet_group_name'] = subnet_name
rds.restore_db_instance_to_point_in_time(**restore_args)
wait_on_db_status(restore_dbid)
db_host = rds.describe_db_instances(restore_dbid)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances'][0]['Endpoint']['Address']
if args.password or args.stack_name:
modify_args = dict(
apply_immediately=True
)
if args.password:
modify_args['master_user_password'] = args.password
if args.stack_name:
modify_args['vpc_security_group_ids'] = [SG_GROUPS[args.stack_name], SG_GROUPS_FULL[args.stack_name]]
else:
# dev-edx is the default security group for dbs that
# are not in the vpc, it allows connections from the various
# NAT boxes and from sandboxes
modify_args['db_security_groups'] = ['dev-edx']
# Update the db immediately
rds.modify_db_instance(restore_dbid, **modify_args)
if args.clean_wwc:
# Run the mysql clean sql file
sanitize_cmd = """mysql -u root -p{root_pass} -h{db_host} wwc < {sanitize_wwc_sql_file} """.format(
root_pass=args.password,
db_host=db_host,
sanitize_wwc_sql_file=sanitize_wwc_sql_file)
print("Running {}".format(sanitize_cmd))
os.system(sanitize_cmd)
if args.clean_prod_grader:
# Run the mysql clean sql file
sanitize_cmd = """mysql -u root -p{root_pass} -h{db_host} prod_grader < {sanitize_prod_grader_sql_file} """.format(
root_pass=args.password,
db_host=db_host,
sanitize_prod_grader_sql_file=sanitize_prod_grader_sql_file)
print("Running {}".format(sanitize_cmd))
os.system(sanitize_cmd)
if args.secret_var_file:
db_cmd = """cd {play_path} && ansible-playbook -c local -i 127.0.0.1, update_edxapp_db_users.yml """ \
"""-e @{secret_var_file} -e "edxapp_db_root_user=root edxapp_db_root_pass={root_pass} """ \
"""EDXAPP_MYSQL_HOST={db_host}" """.format(
root_pass=args.password,
secret_var_file=args.secret_var_file,
db_host=db_host,
play_path=play_path)
print("Running {}".format(db_cmd))
os.system(db_cmd)
if args.dns:
dns_cmd = """cd {play_path} && ansible-playbook -c local -i 127.0.0.1, create_cname.yml """ \
"""-e "dns_zone=edx.org dns_name={dns} sandbox={db_host}" """.format(
play_path=play_path,
dns=args.dns,
db_host=db_host)
print("Running {}".format(dns_cmd))
os.system(dns_cmd)
if args.stack_name:
rds.modify_db_instance(restore_dbid, vpc_security_group_ids=[SG_GROUPS[args.stack_name]])
boto
docopt
python-simple-hipchat
python-simple-hipchat==0.2
SET FOREIGN_KEY_CHECKS=0;
/*
Grader has its own django core tables.
*/
UPDATE auth_user
set
email = concat('success+',cast(id AS CHAR),'@simulator.amazonses.com'),
username = concat('user-',cast(id AS CHAR)),
first_name = concat('user-',cast(id AS CHAR)),
last_name = concat('user-',cast(id AS CHAR)),
password = null,
last_login = null,
date_joined = null
where email not like ('%@edx.org');
SET FOREIGN_KEY_CHECKS=1;
SET FOREIGN_KEY_CHECKS=0;
/*
Remove all password hashes, even for edx employees
*/
UPDATE auth_user
set
password = null;
UPDATE student_passwordhistory
set
password = null;
/*
Rewrite all emails to use the SES simulator, simulating success.
Anonymize other user information. Skip @edx.org accounts.
*/
UPDATE auth_user
set
email = concat('success+',cast(id AS CHAR),'@simulator.amazonses.com'),
username = concat('user-',cast(id AS CHAR)),
first_name = concat('user-',cast(id AS CHAR)),
last_name = concat('user-',cast(id AS CHAR)),
last_login = null,
date_joined = null
where email not like ('%@edx.org');
/*
There are a handful of email change requests captured in flight.
*/
UPDATE student_pendingemailchange
set new_email = concat('success+',cast(user_id AS CHAR),'@simulator.amazonses.com');
/*
Differs slightly to prevent creating duplicate email records.
The user id isn't stored here and this email is probably not used for
sending email, but it cannot hurt.
*/
UPDATE student_courseenrollmentallowed
set email = concat('success+','courseenrollmentallowed_',cast(id AS CHAR),'@simulator.amazonses.com');
/*
Set the name to the user id and empty the other fields.
This will also empty user profile data for edx employees.
*/
UPDATE auth_userprofile
set
name = concat('user-',cast(id as CHAR)),
language = "",
location = "",
meta = "",
gender = null,
mailing_address = null,
year_of_birth = null,
level_of_education = null,
goals = null,
country = "",
city = null;
SET FOREIGN_KEY_CHECKS=1;
......@@ -27,12 +27,27 @@ import boto
import datetime
from vpcutil import vpc_for_stack_name
import xml.dom.minidom
import re
import sys
r53 = boto.connect_route53()
# These are ELBs that we do not want to create dns entries
# for because the instances attached to them are also in
# other ELBs and we want the env-deploy-play tuple which makes
# up the dns name to be unique
ELB_BAN_LIST = [
'Apros',
]
# If the ELB name has the key in its name these plays
# will be used for the DNS CNAME tuple. This is used for
# commoncluster.
ELB_PLAY_MAPPINGS = {
'RabbitMQ': 'rabbitmq',
'Xqueue': 'xqueue',
'Elastic': 'elasticsearch',
}
extra_play_dns = {"edxapp":["courses","studio"]}
class DNSRecord():
......@@ -44,12 +59,14 @@ class DNSRecord():
self.record_ttl = record_ttl
self.record_values = record_values
def add_or_update_record(dns_records):
"""
Creates or updates a DNS record in a hosted route53
zone
"""
change_set = boto.route53.record.ResourceRecordSets()
record_names = set()
for record in dns_records:
......@@ -60,9 +77,16 @@ def add_or_update_record(dns_records):
record_values: {}
""".format(record.record_name, record.record_type,
record.record_ttl, record.record_values)
if args.noop:
print("Would have updated DNS record:\n{}".format(status_msg))
else:
print("Updating DNS record:\n{}".format(status_msg))
if record.record_name in record_names:
print("Unable to create record for {} with value {} because one already exists!".format(
record.record_name, record.record_values))
sys.exit(1)
record_names.add(record.record_name)
zone_id = record.zone.Id.replace("/hostedzone/", "")
......@@ -71,8 +95,15 @@ def add_or_update_record(dns_records):
old_records = {r.name[:-1]: r for r in records}
# If the record name already points to something.
# Delete the existing connection.
# Delete the existing connection. If the record has
# the same type and name skip it.
if record.record_name in old_records.keys():
if record.record_name + "." == old_records[record.record_name].name and \
record.record_type == old_records[record.record_name].type:
print("Record for {} already exists and is identical, skipping.\n".format(
record.record_name))
continue
if args.force:
print("Deleting record:\n{}".format(status_msg))
change = change_set.add_change(
......@@ -99,11 +130,12 @@ def add_or_update_record(dns_records):
if args.noop:
print("Would have submitted the following change set:\n")
xml_doc = xml.dom.minidom.parseString(change_set.to_xml())
print xml_doc.toprettyxml()
else:
print("Submitting the following change set:\n")
xml_doc = xml.dom.minidom.parseString(change_set.to_xml())
print(xml_doc.toprettyxml(newl='')) # newl='' to remove extra newlines
if not args.noop:
r53.change_rrsets(zone_id, change_set.to_xml())
print("Updated DNS record:\n{}".format(status_msg))
def get_or_create_hosted_zone(zone_name):
......@@ -137,39 +169,42 @@ def get_or_create_hosted_zone(zone_name):
print("Updating parent zone {}".format(parent_zone_name))
dns_records = set()
dns_records.add(DNSRecord(parent_zone,zone_name,'NS',900,zone.NameServers))
dns_records.add(DNSRecord(parent_zone, zone_name, 'NS', 900, zone.NameServers))
add_or_update_record(dns_records)
return zone
def get_security_group_dns(group_name):
# stage-edx-RabbitMQELBSecurityGroup-YB8ZKIZYN1EN
environment,deployment,sec_group,salt = group_name.split('-')
play = sec_group.replace("ELBSecurityGroup","").lower()
environment, deployment, sec_group, salt = group_name.split('-')
play = sec_group.replace("ELBSecurityGroup", "").lower()
return environment, deployment, play
def get_dns_from_instances(elb):
ec2_con = boto.connect_ec2()
def get_dns_from_instances(elb):
for inst in elb.instances:
try:
instance = ec2_con.get_all_instances(
instance_ids=[inst.id])[0].instances[0]
except IndexError:
print("instance {} attached to elb {}".format(inst, elb))
sys.exit(1)
try:
env_tag = instance.tags['environment']
deployment_tag = instance.tags['deployment']
if 'play' in instance.tags:
play_tag = instance.tags['play']
else:
# deprecated, for backwards compatibility
play_tag = instance.tags['role']
break # only need the first instance for tag info
except KeyError:
print("Instance {}, attached to elb {} does not "
"have tags for environment and play".format(elb, inst))
raise
"have a tag for environment, play or deployment".format(inst, elb))
sys.exit(1)
return env_tag, play_tag
return env_tag, deployment_tag, play_tag
def update_elb_rds_dns(zone):
......@@ -182,10 +217,7 @@ def update_elb_rds_dns(zone):
dns_records = set()
elb_con = boto.connect_elb()
rds_con = boto.connect_rds()
vpc_id = vpc_for_stack_name(args.stack_name)
vpc_id = vpc_for_stack_name(args.stack_name, args.aws_id, args.aws_secret)
if not zone and args.noop:
# use a placeholder for zone name
......@@ -196,23 +228,26 @@ def update_elb_rds_dns(zone):
stack_elbs = [elb for elb in elb_con.get_all_load_balancers()
if elb.vpc_id == vpc_id]
for elb in stack_elbs:
env_tag, deployment_tag, play_tag = get_dns_from_instances(elb)
if "RabbitMQ" in elb.source_security_group.name or "ElasticSearch" in elb.source_security_group.name:
env_tag,deployment,play_tag = get_security_group_dns(elb.source_security_group.name)
fqdn = "{}-{}.{}".format(env_tag, play_tag, zone_name)
dns_records.add(DNSRecord(zone,fqdn,'CNAME',600,[elb.dns_name]))
else:
env_tag,play_tag = get_dns_from_instances(elb)
fqdn = "{}-{}.{}".format(env_tag, play_tag, zone_name)
dns_records.add(DNSRecord(zone,fqdn,'CNAME',600,[elb.dns_name]))
# Override the play tag if a substring of the elb name
# is in ELB_PLAY_MAPPINGS
if extra_play_dns.has_key(play_tag):
for name in extra_play_dns.get(play_tag):
fqdn = "{}-{}.{}".format(env_tag, name, zone_name)
dns_records.add(DNSRecord(zone,fqdn,'CNAME',600,[elb.dns_name]))
for key in ELB_PLAY_MAPPINGS.keys():
if key in elb.name:
play_tag = ELB_PLAY_MAPPINGS[key]
break
fqdn = "{}-{}-{}.{}".format(env_tag, deployment_tag, play_tag, zone_name)
# Skip over ELBs if a substring of the ELB name is in
# the ELB_BAN_LIST
if any(name in elb.name for name in ELB_BAN_LIST):
print("Skipping {} because it is on the ELB ban list".format(elb.name))
continue
dns_records.add(DNSRecord(zone, fqdn, 'CNAME', 600, [elb.dns_name]))
stack_rdss = [rds for rds in rds_con.get_all_dbinstances()
if hasattr(rds.subnet_group, 'vpc_id') and
......@@ -220,31 +255,57 @@ def update_elb_rds_dns(zone):
# TODO the current version of the RDS API doesn't support
# looking up RDS instance tags. Hence, we are using the
# env_tag that was set via the loop over instances above.
# env_tag and deployment_tag that was set via the loop over instances above.
rds_endpoints = set()
for rds in stack_rdss:
fqdn = "{}-{}.{}".format(env_tag,'rds', zone_name)
dns_records.add(DNSRecord(zone,fqdn,'CNAME',600,[stack_rdss[0].endpoint[0]]))
endpoint = stack_rdss[0].endpoint[0]
fqdn = "{}-{}-{}.{}".format(env_tag, deployment_tag, 'rds', zone_name)
# filter out rds instances with the same endpoints (multi-AZ)
if endpoint not in rds_endpoints:
dns_records.add(DNSRecord(zone, fqdn, 'CNAME', 600, [endpoint]))
rds_endpoints.add(endpoint)
add_or_update_record(dns_records)
if __name__ == "__main__":
description = "Give a cloudformation stack name, for an edx stack, setup \
DNS names for the ELBs in the stack."
description = """
Given a cloudformation stack name for an edX stack, set up
DNS names for the ELBs in the stack.
DNS entries will be created with the following format:
<environment>-<deployment>-<play>.edx.org
"""
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-s', '--stack-name', required=True,
help="The name of the cloudformation stack.")
parser.add_argument('-n', '--noop',
help="Don't make any changes.", action="store_true",
default=False)
parser.add_argument('-z', '--zone-name', default="vpc.edx.org",
parser.add_argument('-z', '--zone-name', default="edx.org",
help="The name of the zone under which to "
"create the dns entries.")
parser.add_argument('-f', '--force',
help="Force reuse of an existing name in a zone",
action="store_true",default=False)
action="store_true", default=False)
parser.add_argument('--aws-id', default=None,
help="read only aws key for fetching instance information"
"the account you wish add entries for")
parser.add_argument('--aws-secret', default=None,
help="read only aws id for fetching instance information for"
"the account you wish add entries for")
args = parser.parse_args()
# Connect to ec2 using the provided credentials on the commandline
ec2_con = boto.connect_ec2(args.aws_id, args.aws_secret)
elb_con = boto.connect_elb(args.aws_id, args.aws_secret)
rds_con = boto.connect_rds(args.aws_id, args.aws_secret)
# Connect to route53 using the user's .boto file
r53 = boto.connect_route53()
zone = get_or_create_hosted_zone(args.zone_name)
update_elb_rds_dns(zone)
import boto
import boto.rds2
import boto.rds
def vpc_for_stack_name(stack_name):
cfn = boto.connect_cloudformation()
CFN_TAG_KEY = 'aws:cloudformation:stack-name'
def vpc_for_stack_name(stack_name, aws_id=None, aws_secret=None):
cfn = boto.connect_cloudformation(aws_id, aws_secret)
resources = cfn.list_stack_resources(stack_name)
for resource in resources:
if resource.resource_type == 'AWS::EC2::VPC':
return resource.physical_resource_id
def stack_name_for_vpc(vpc_name):
cfn_tag_key = 'aws:cloudformation:stack-name'
vpc = boto.connect_vpc()
def stack_name_for_vpc(vpc_name, aws_id, aws_secret):
vpc = boto.connect_vpc(aws_id, aws_secret)
resource = vpc.get_all_vpcs(vpc_ids=[vpc_name])[0]
if cfn_tag_key in resource.tags:
return resource.tags[cfn_tag_key]
if CFN_TAG_KEY in resource.tags:
return resource.tags[CFN_TAG_KEY]
else:
msg = "VPC({}) is not part of a cloudformation stack.".format(vpc_name)
raise Exception(msg)
def rds_subnet_group_name_for_stack_name(stack_name, region='us-east-1', aws_id=None, aws_secret=None):
# Helper function to look up a subnet group name by stack name
rds = boto.rds2.connect_to_region(region)
vpc = vpc_for_stack_name(stack_name, aws_id, aws_secret)
for group in rds.describe_db_subnet_groups()['DescribeDBSubnetGroupsResponse']['DescribeDBSubnetGroupsResult']['DBSubnetGroups']:
if group['VpcId'] == vpc:
return group['DBSubnetGroupName']
return None
def all_stack_names(region='us-east-1', aws_id=None, aws_secret=None):
vpc_conn = boto.connect_vpc(aws_id, aws_secret)
return [vpc.tags[CFN_TAG_KEY] for vpc in vpc_conn.get_all_vpcs()
if CFN_TAG_KEY in vpc.tags.keys()]
......@@ -27,7 +27,9 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.network :private_network, ip: "192.168.33.10"
config.vm.network :forwarded_port, guest: 8000, host: 8000
config.vm.network :forwarded_port, guest: 8001, host: 8001
config.vm.network :forwarded_port, guest: 4567, host: 4567
config.vm.network :forwarded_port, guest: 18080, host: 18080
config.vm.network :forwarded_port, guest: 8765, host: 8765
config.vm.network :forwarded_port, guest: 9200, host: 9200
config.ssh.insert_key = true
config.vm.synced_folder ".", "/vagrant", disabled: true
......
Vagrant.require_version ">= 1.5.3"
unless Vagrant.has_plugin?("vagrant-vbguest")
raise "Please install the vagrant-vbguest plugin by running `vagrant plugin install vagrant-vbguest`"
end
VAGRANTFILE_API_VERSION = "2"
......@@ -24,12 +27,14 @@ ansible-playbook -i localhost, -c local vagrant-devstack.yml -e configuration_ve
SCRIPT
edx_platform_mount_dir = "edx-platform"
themes_mount_dir = "themes"
forum_mount_dir = "cs_comments_service"
ora_mount_dir = "ora"
if ENV['VAGRANT_MOUNT_BASE']
edx_platform_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + edx_platform_mount_dir
themes_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + themes_mount_dir
forum_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + forum_mount_dir
ora_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + ora_mount_dir
......@@ -45,8 +50,9 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.network :private_network, ip: "192.168.33.10"
config.vm.network :forwarded_port, guest: 8000, host: 8000
config.vm.network :forwarded_port, guest: 8001, host: 8001
config.vm.network :forwarded_port, guest: 4567, host: 4567
config.vm.network :forwarded_port, guest: 18080, host: 18080
config.vm.network :forwarded_port, guest: 8765, host: 8765
config.vm.network :forwarded_port, guest: 9200, host: 9200
config.ssh.insert_key = true
# Enable X11 forwarding so we can interact with GUI applications
......@@ -55,6 +61,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
end
config.vm.synced_folder "#{edx_platform_mount_dir}", "/edx/app/edxapp/edx-platform", :create => true, nfs: true
config.vm.synced_folder "#{themes_mount_dir}", "/edx/app/edxapp/themes", :create => true, nfs: true
config.vm.synced_folder "#{forum_mount_dir}", "/edx/app/forum/cs_comments_service", :create => true, nfs: true
config.vm.synced_folder "#{ora_mount_dir}", "/edx/app/ora/ora", :create => true, nfs: true
......